@phdthesis{Schindler2021, author = {Schindler, Eva Marie}, title = {``What we have done is just to put the people in form of a structure''}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 288}, year = {2021}, abstract = {Participation has become an orthodoxy in the field of development, an essential element of projects and programmes. This book analyses participation in development interventions as an institutionalised expectation - a rationalized myth - and examines how organisations on different levels of government process it. At least two different objectives of participation are appropriate and legitimate for international organisations in the field: the empowerment of local beneficiaries and the achievement of programme goals. Both integrate participatory forums into the organisational logic of development interventions. Local administrations react to the institutionalised expectation with means-ends decoupling, where participatory forums are implemented superficially but de facto remain marginalised in local administrative processes and activities. The book furthermore provides a thick description of the organisationality of participation in development interventions. Participatory forums are shown to be a form of partial organisation. They establish an order in the relationship between administrations and citizens through the introduction of rules and the creation of a defined membership. 
At the same time, this order is found to be fragile and subject to criticism and negotiation.}, language = {en} } @phdthesis{Koc2021, author = {Ko{\c{c}}, Gamze}, title = {A comprehensive analysis of severe flood events in Turkey}, doi = {10.25932/publishup-51785}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517853}, school = {Universit{\"a}t Potsdam}, pages = {209}, year = {2021}, abstract = {Over the past decades, natural hazards, many of which are aggravated by climate change and reveal an increasing trend in frequency and intensity, have caused significant human and economic losses and pose a considerable obstacle to sustainable development. Hence, dedicated action toward disaster risk reduction is needed to understand the underlying drivers and create efficient risk mitigation plans. Such action is requested by the Sendai Framework for Disaster Risk Reduction 2015-2030 (SFDRR), a global agreement launched in 2015 that establishes stating priorities for action, e.g. an improved understanding of disaster risk. Turkey is one of the SFDRR contracting countries and has been severely affected by many natural hazards, in particular earthquakes and floods. However, disproportionately little is known about flood hazards and risks in Turkey. Therefore, this thesis aims to carry out a comprehensive analysis of flood hazards for the first time in Turkey from triggering drivers to impacts. It is intended to contribute to a better understanding of flood risks, improvements of flood risk mitigation and the facilitated monitoring of progress and achievements while implementing the SFDRR. In order to investigate the occurrence and severity of flooding in comparison to other natural hazards in Turkey and provide an overview of the temporal and spatial distribution of flood losses, the Turkey Disaster Database (TABB) was examined for the years 1960-2014. 
The TABB database was reviewed through comparison with the Emergency Events Database (EM-DAT), the Dartmouth Flood Observatory database, the scientific literature and news archives. In addition, data on the most severe flood events between 1960 and 2014 were retrieved. These served as a basis for analyzing triggering mechanisms (i.e. atmospheric circulation and precipitation amounts) and aggravating pathways (i.e. topographic features, catchment size, land use types and soil properties). For this, a new approach was developed and the events were classified using hierarchical cluster analyses to identify the main influencing factor per event and provide additional information about the dominant flood pathways for severe floods. The main idea of the study was to start with the event impacts based on a bottom-up approach and identify the causes that created damaging events, instead of applying a model chain with long-term series as input and searching for potentially impacting events as model outcomes. However, within the frequency analysis of the flood-triggering circulation pattern types, it was discovered that events in terms of heavy precipitation were not included in the list of most severe floods, i.e. their impacts were not recorded in national and international loss databases but were mentioned in news archives and reported by the Turkish State Meteorological Service. This finding challenges bottom-up modelling approaches and underlines the urgent need for consistent event and loss documentation. Therefore, as a next step, the aim was to enhance the flood loss documentation by calibrating, validating and applying the United Nations Office for Disaster Risk Reduction (UNDRR) loss estimation method for the recent severe flood events (2015-2020). This provided a consistent flood loss estimation model for Turkey, allowing governments to estimate losses as quickly as possible after events, e.g. to better coordinate financial aid. 
This thesis reveals that, after earthquakes, floods have the second most destructive effects in Turkey in terms of human and economic impacts, with over 800 fatalities and US\$ 885.7 million in economic losses between 1960 and 2020, and that more attention should be paid on the national scale. The clustering results of the dominant flood-producing mechanisms (e.g. circulation pattern types, extreme rainfall, sudden snowmelt) present crucial information regarding the source and pathway identification, which can be used as base information for hazard identification in the preliminary risk assessment process. The implementation of the UNDRR loss estimation model shows that the model with country-specific parameters, calibrated damage ratios and sufficient event documentation (i.e. physically damaged units) can be recommended in order to provide first estimates of the magnitude of direct economic losses, even shortly after events have occurred, since it performed well when estimates were compared to documented losses. The presented results can contribute to improving the national disaster loss database in Turkey and thus enable a better monitoring of the national progress and achievements with regard to the targets stated by the SFDRR. In addition, the outcomes can be used to better characterize and classify flood events. Information on the main underlying factors and aggravating flood pathways further supports the selection of suitable risk reduction policies. All input variables used in this thesis were obtained from publicly available data. The results are openly accessible and can be used for further research. As an overall conclusion, it can be stated that consistent loss data collection and better event documentation should gain more attention for a reliable monitoring of the implementation of the SFDRR. Better event documentation should be established according to a globally accepted standard for disaster classification and loss estimation in Turkey. 
Ultimately, this enables stakeholders to create better risk mitigation actions based on clear hazard definitions, flood event classification and consistent loss estimations.}, language = {en} } @phdthesis{Andjelkovic2021, author = {Andjelkovic, Marko}, title = {A methodology for characterization, modeling and mitigation of single event transient effects in CMOS standard combinational cells}, doi = {10.25932/publishup-53484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534843}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 216}, year = {2021}, abstract = {With the downscaling of CMOS technologies, the radiation-induced Single Event Transient (SET) effects in combinational logic have become a critical reliability issue for modern integrated circuits (ICs) intended for operation under harsh radiation conditions. The SET pulses generated in combinational logic may propagate through the circuit and eventually result in soft errors. It has thus become an imperative to address the SET effects in the early phases of the radiation-hard IC design. In general, the soft error mitigation solutions should accommodate both static and dynamic measures to ensure the optimal utilization of available resources. An efficient soft-error-aware design should address synergistically three main aspects: (i) characterization and modeling of soft errors, (ii) multi-level soft error mitigation, and (iii) online soft error monitoring. Although significant results have been achieved, the effectiveness of SET characterization methods, accuracy of predictive SET models, and efficiency of SET mitigation measures are still critical issues. Therefore, this work addresses the following topics: (i) Characterization and modeling of SET effects in standard combinational cells, (ii) Static mitigation of SET effects in standard combinational cells, and (iii) Online particle detection, as a support for dynamic soft error mitigation. 
Since the standard digital libraries are widely used in the design of radiation-hard ICs, the characterization of SET effects in standard cells and the availability of accurate SET models for the Soft Error Rate (SER) evaluation are the main prerequisites for efficient radiation-hard design. This work introduces an approach for the SPICE-based standard cell characterization with the reduced number of simulations, improved SET models and optimized SET sensitivity database. It has been shown that the inherent similarities in the SET response of logic cells for different input levels can be utilized to reduce the number of required simulations. Based on characterization results, the fitting models for the SET sensitivity metrics (critical charge, generated SET pulse width and propagated SET pulse width) have been developed. The proposed models are based on the principle of superposition, and they express explicitly the dependence of the SET sensitivity of individual combinational cells on design, operating and irradiation parameters. In contrast to the state-of-the-art characterization methodologies which employ extensive look-up tables (LUTs) for storing the simulation results, this work proposes the use of LUTs for storing the fitting coefficients of the SET sensitivity models derived from the characterization results. In that way the amount of characterization data in the SET sensitivity database is reduced significantly. The initial step in enhancing the robustness of combinational logic is the application of gate-level mitigation techniques. As a result, significant improvement of the overall SER can be achieved with minimum area, delay and power overheads. For the SET mitigation in standard cells, it is essential to employ the techniques that do not require modifying the cell structure. This work introduces the use of decoupling cells for improving the robustness of standard combinational cells. 
By insertion of two decoupling cells at the output of a target cell, the critical charge of the cell's output node is increased and the attenuation of short SETs is enhanced. In comparison to the most common gate-level techniques (gate upsizing and gate duplication), the proposed approach provides better SET filtering. However, as there is no single gate-level mitigation technique with optimal performance, a combination of multiple techniques is required. This work introduces a comprehensive characterization of gate-level mitigation techniques aimed to quantify their impact on the SET robustness improvement, as well as introduced area, delay and power overhead per gate. By characterizing the gate-level mitigation techniques together with the standard cells, the required effort in subsequent SER analysis of a target design can be reduced. The characterization database of the hardened standard cells can be utilized as a guideline for selection of the most appropriate mitigation solution for a given design. As a support for dynamic soft error mitigation techniques, it is important to enable the online detection of energetic particles causing the soft errors. This allows activating the power-greedy fault-tolerant configurations based on N-modular redundancy only at the high radiation levels. To enable such a functionality, it is necessary to monitor both the particle flux and the variation of particle LET, as these two parameters contribute significantly to the system SER. In this work, a particle detection approach based on custom-sized pulse stretching inverters is proposed. Employing the pulse stretching inverters connected in parallel enables to measure the particle flux in terms of the number of detected SETs, while the particle LET variations can be estimated from the distribution of SET pulse widths. This approach requires a purely digital processing logic, in contrast to the standard detectors which require complex mixed-signal processing. 
Besides the possibility of LET monitoring, additional advantages of the proposed particle detector are low detection latency and power consumption, and immunity to error accumulation. The results achieved in this thesis can serve as a basis for establishment of an overall soft-error-aware database for a given digital library, and a comprehensive multi-level radiation-hard design flow that can be implemented with the standard IC design tools. The following step will be to evaluate the achieved results with the irradiation experiments.}, language = {en} } @phdthesis{Zass2021, author = {Zass, Alexander}, title = {A multifaceted study of marked Gibbs point processes}, doi = {10.25932/publishup-51277}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512775}, school = {Universit{\"a}t Potsdam}, pages = {vii, 104}, year = {2021}, abstract = {This thesis focuses on the study of marked Gibbs point processes, in particular presenting some results on their existence and uniqueness, with ideas and techniques drawn from different areas of statistical mechanics: the entropy method from large deviations theory, cluster expansion and the Kirkwood--Salsburg equations, the Dobrushin contraction principle and disagreement percolation. We first present an existence result for infinite-volume marked Gibbs point processes. More precisely, we use the so-called entropy method (and large-deviation tools) to construct marked Gibbs point processes in R^d under quite general assumptions. In particular, the random marks belong to a general normed space S and are not bounded. Moreover, we allow for interaction functionals that may be unbounded and whose range is finite but random. The entropy method relies on showing that a family of finite-volume Gibbs point processes belongs to sequentially compact entropy level sets, and is therefore tight. We then present infinite-dimensional Langevin diffusions, that we put in interaction via a Gibbsian description. 
In this setting, we are able to adapt the general result above to show the existence of the associated infinite-volume measure. We also study its correlation functions via cluster expansion techniques, and obtain the uniqueness of the Gibbs process for all inverse temperatures β and activities z below a certain threshold. This method relies in first showing that the correlation functions of the process satisfy a so-called Ruelle bound, and then using it to solve a fixed point problem in an appropriate Banach space. The uniqueness domain we obtain consists then of the model parameters z and β for which such a problem has exactly one solution. Finally, we explore further the question of uniqueness of infinite-volume Gibbs point processes on R^d, in the unmarked setting. We present, in the context of repulsive interactions with a hard-core component, a novel approach to uniqueness by applying the discrete Dobrushin criterion to the continuum framework. We first fix a discretisation parameter a>0 and then study the behaviour of the uniqueness domain as a goes to 0. With this technique we are able to obtain explicit thresholds for the parameters z and β, which we then compare to existing results coming from the different methods of cluster expansion and disagreement percolation. Throughout this thesis, we illustrate our theoretical results with various examples both from classical statistical mechanics and stochastic geometry.}, language = {en} } @phdthesis{TabaresJimenez2021, author = {Tabares Jimenez, Ximena del Carmen}, title = {A palaeoecological approach to savanna dynamics and shrub encroachment in Namibia}, doi = {10.25932/publishup-49281}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492815}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2021}, abstract = {The spread of shrubs in Namibian savannas raises questions about the resilience of these ecosystems to global change. 
This makes it necessary to understand the past dynamics of the vegetation, since there is no consensus on whether shrub encroachment is a new phenomenon, nor on its main drivers. However, a lack of long-term vegetation datasets for the region and the scarcity of suitable palaeoecological archives, makes reconstructing past vegetation and land cover of the savannas a challenge. To help meet this challenge, this study addresses three main research questions: 1) is pollen analysis a suitable tool to reflect the vegetation change associated with shrub encroachment in savanna environments? 2) Does the current encroached landscape correspond to an alternative stable state of savanna vegetation? 3) To what extent do pollen-based quantitative vegetation reconstructions reflect changes in past land cover? The research focuses on north-central Namibia, where despite being the region most affected by shrub invasion, particularly since the 21st century, little is known about the dynamics of this phenomenon. Field-based vegetation data were compared with modern pollen data to assess their correspondence in terms of composition and diversity along precipitation and grazing intensity gradients. In addition, two sediment cores from Lake Otjikoto were analysed to reveal changes in vegetation composition that have occurred in the region over the past 170 years and their possible drivers. For this, a multiproxy approach (fossil pollen, sedimentary ancient DNA (sedaDNA), biomarkers, compound specific carbon (δ13C) and deuterium (δD) isotopes, bulk carbon isotopes (δ13Corg), grain size, geochemical properties) was applied at high taxonomic and temporal resolution. REVEALS modelling of the fossil pollen record from Lake Otjikoto was run to quantitatively reconstruct past vegetation cover. 
For this, we first made pollen productivity estimates (PPE) of the most relevant savanna taxa in the region using the extended R-value model and two pollen dispersal options (Gaussian plume model and Lagrangian stochastic model). The REVEALS-based vegetation reconstruction was then validated using remote sensing-based regional vegetation data. The results show that modern pollen reflects the composition of the vegetation well, but diversity less well. Interestingly, precipitation and grazing explain a significant amount of the compositional change in the pollen and vegetation spectra. The multiproxy record shows that a state change from open Combretum woodland to encroached Terminalia shrubland can occur over a century, and that the transition between states spans around 80 years and is characterized by a unique vegetation composition. This transition is supported by gradual environmental changes induced by management (i.e. broad-scale logging for the mining industry, selective grazing and reduced fire activity associated with intensified farming) and related land-use change. Derived environmental changes (i.e. reduced soil moisture, reduced grass cover, changes in species composition and competitiveness, reduced fire intensity) may have affected the resilience of Combretum open woodlands, making them more susceptible to change to an encroached state by stochastic events such as consecutive years of precipitation and drought, and by high concentrations of pCO2. We assume that the resulting encroached state was further stabilized by feedback mechanisms that favour the establishment and competitiveness of woody vegetation. The REVEALS-based quantitative estimates of plant taxa indicate the predominance of a semi-open landscape throughout the 20th century and a reduction in grass cover below 50\% since the 21st century associated with the spread of encroacher woody taxa. 
Cover estimates show a close match with regional vegetation data, providing support for the vegetation dynamics inferred from multiproxy analyses. Reasonable PPEs were made for all woody taxa, but not for Poaceae. In conclusion, pollen analysis is a suitable tool to reconstruct past vegetation dynamics in savannas. However, because pollen cannot identify grasses beyond family level, a multiproxy approach, particularly the use of sedaDNA, is required. I was able to separate stable encroached states from mere woodland phases, and could identify drivers and speculate about related feedbacks. In addition, the REVEALS-based quantitative vegetation reconstruction clearly reflects the magnitude of the changes in the vegetation cover that occurred during the last 130 years, despite the limitations of some PPEs. This research provides new insights into pollen-vegetation relationships in savannas and highlights the importance of multiproxy approaches when reconstructing past vegetation dynamics in semi-arid environments. It also provides the first time series with sufficient taxonomic resolution to show changes in vegetation composition during shrub encroachment, as well as the first quantitative reconstruction of past land cover in the region. These results help to identify the different stages in savanna dynamics and can be used to calibrate predictive models of vegetation change, which are highly relevant to land management.}, language = {en} } @phdthesis{Kraus2021, author = {Kraus, Sara Milena}, title = {A Systems Medicine approach for heart valve diseases}, doi = {10.25932/publishup-52226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-522266}, school = {Universit{\"a}t Potsdam}, pages = {xi, 186}, year = {2021}, abstract = {In Systems Medicine, in addition to high-throughput molecular data (*omics), the wealth of clinical characterization plays a major role in the overall understanding of a disease. 
Unique problems and challenges arise from the heterogeneity of data and require new solutions to software and analysis methods. The SMART and EurValve studies establish a Systems Medicine approach to valvular heart disease -- the primary cause of subsequent heart failure. With the aim to ascertain a holistic understanding, different *omics as well as the clinical picture of patients with aortic stenosis (AS) and mitral regurgitation (MR) are collected. Our task within the SMART consortium was to develop an IT platform for Systems Medicine as a basis for data storage, processing, and analysis as a prerequisite for collaborative research. Based on this platform, this thesis deals on the one hand with the transfer of the used Systems Biology methods to their use in the Systems Medicine context and on the other hand with the clinical and biomolecular differences of the two heart valve diseases. To advance differential expression/abundance (DE/DA) analysis software for use in Systems Medicine, we state 21 general software requirements and features of automated DE/DA software, including a novel concept for the simple formulation of experimental designs that can represent complex hypotheses, such as comparison of multiple experimental groups, and demonstrate our handling of the wealth of clinical data in two research applications DEAME and Eatomics. In user interviews, we show that novice users are empowered to formulate and test their multiple DE hypotheses based on clinical phenotype. Furthermore, we describe insights into users' general impression and expectation of the software's performance and show their intention to continue using the software for their work in the future. Both research applications cover most of the features of existing tools or even extend them, especially with respect to complex experimental designs. Eatomics is freely available to the research community as a user-friendly R Shiny application. 
Eatomics continued to help drive the collaborative analysis and interpretation of the proteomic profile of 75 human left myocardial tissue samples from the SMART and EurValve studies. Here, we investigate molecular changes within the two most common types of valvular heart disease: aortic valve stenosis (AS) and mitral valve regurgitation (MR). Through DE/DA analyses, we explore shared and disease-specific protein alterations, particularly signatures that could only be found in the sex-stratified analysis. In addition, we relate changes in the myocardial proteome to parameters from clinical imaging. We find comparable cardiac hypertrophy but differences in ventricular size, the extent of fibrosis, and cardiac function. We find that AS and MR show many shared remodeling effects, the most prominent of which is an increase in the extracellular matrix and a decrease in metabolism. Both effects are stronger in AS. In muscle and cytoskeletal adaptations, we see a greater increase in mechanotransduction in AS and an increase in cortical cytoskeleton in MR. The decrease in proteostasis proteins is mainly attributable to the signature of female patients with AS. We also find relevant therapeutic targets. In addition to the new findings, our work confirms several concepts from animal and heart failure studies by providing the largest collection of human tissue from in vivo collected biopsies to date. Our dataset contributes a resource for isoform-specific protein expression in two of the most common valvular heart diseases. 
Apart from the general proteomic landscape, we demonstrate the added value of the dataset by showing proteomic and transcriptomic evidence for increased expression of the SARS-CoV-2 receptor at pressure load but not at volume load in the left ventricle and also provide the basis of a newly developed metabolic model of the heart.}, language = {en} } @phdthesis{Hasnat2021, author = {Hasnat, Muhammad Abrar}, title = {A-Type Carrier Proteins are involved in [4Fe-4S] Cluster insertion into the Radical S-adenosylmethionine (SAM) Protein MoaA and other molybdoenzymes}, doi = {10.25932/publishup-53079}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-530791}, school = {Universit{\"a}t Potsdam}, pages = {200}, year = {2021}, abstract = {Iron-sulfur clusters are essential enzyme cofactors. The most common and stable clusters are [2Fe-2S] and [4Fe-4S] that are found in nature. They are involved in crucial biological processes like respiration, gene regulation, protein translation, replication and DNA repair in prokaryotes and eukaryotes. In Escherichia coli, Fe-S clusters are essential for molybdenum cofactor (Moco) biosynthesis, which is a ubiquitous and highly conserved pathway. The first step of Moco biosynthesis is catalyzed by the MoaA protein to produce cyclic pyranopterin monophosphate (cPMP) from 5'GTP. MoaA is a [4Fe-4S] cluster containing radical S-adenosyl-L-methionine (SAM) enzyme. The focus of this study was to investigate Fe-S cluster insertion into MoaA under nitrate and TMAO respiratory conditions using E. coli as a model organism. Nitrate and TMAO respiration usually occur under anaerobic conditions, where oxygen is depleted. Under these conditions, E. coli uses nitrate and TMAO as terminal electron acceptors. Previous studies revealed that Fe-S cluster insertion is performed by Fe-S cluster carrier proteins. In E. coli, these proteins are known as A-type carrier proteins (ATC) by phylogenomic and genetic studies. 
So far, three of them have been characterized in detail in E. coli, namely IscA, SufA, and ErpA. This study shows that ErpA and IscA are involved in Fe-S cluster insertion into MoaA under nitrate and TMAO respiratory conditions. ErpA and IscA can partially replace each other in their role to provide [4Fe-4S] clusters for MoaA. SufA is not able to replace the functions of IscA or ErpA under nitrate respiratory conditions. Nitrate reductase is a molybdoenzyme that coordinates Moco and Fe-S clusters. Under nitrate respiratory conditions, the expression of nitrate reductase is significantly increased in E. coli. Nitrate reductase is encoded in narGHJI genes, the expression of which is regulated by the transcriptional regulator, fumarate and nitrate reduction (FNR). The activation of FNR under conditions of nitrate respiration requires one [4Fe-4S] cluster. In this part of the study, we analyzed the insertion of Fe-S cluster into FNR for the expression of narGHJI genes in E. coli. The results indicate that ErpA is essential for the FNR-dependent expression of the narGHJI genes, a role that can be replaced partially by IscA and SufA when they are produced sufficiently under the conditions tested. This observation suggests that ErpA is indirectly regulating nitrate reductase expression via inserting Fe-S clusters into FNR. Most molybdoenzymes are complex multi-subunit and multi-cofactor-containing enzymes that coordinate Fe-S clusters, which are functioning as electron transfer chains for catalysis. In E. coli, periplasmic aldehyde oxidoreductase (PaoAC) is a heterotrimeric molybdoenzyme that consists of flavin, two [2Fe-2S], one [4Fe-4S] cluster and Moco. In the last part of this study, we investigated the insertion of Fe-S clusters into E. coli periplasmic aldehyde oxidoreductase (PaoAC). 
The results show that SufA and ErpA are involved in inserting [4Fe-4S] and [2Fe-2S] clusters into PaoABC, respectively under aerobic respiratory conditions.}, language = {en} } @phdthesis{Antonelli2021, author = {Antonelli, Andrea}, title = {Accurate waveform models for gravitational-wave astrophysics: synergetic approaches from analytical relativity}, doi = {10.25932/publishup-57667}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576671}, school = {Universit{\"a}t Potsdam}, pages = {XII, 259, LXXV}, year = {2021}, abstract = {Gravitational-wave (GW) astrophysics is a field in full blossom. Since the landmark detection of GWs from a binary black hole on September 14th 2015, fifty-two compact-object binaries have been reported by the LIGO-Virgo collaboration. Such events carry astrophysical and cosmological information ranging from an understanding of how black holes and neutron stars are formed, what neutron stars are composed of, how the Universe expands, and allow testing general relativity in the highly-dynamical strong-field regime. It is the goal of GW astrophysics to extract such information as accurately as possible. Yet, this is only possible if the tools and technology used to detect and analyze GWs are advanced enough. A key aspect of GW searches are waveform models, which encapsulate our best predictions for the gravitational radiation under a certain set of parameters, and that need to be cross-correlated with data to extract GW signals. Waveforms must be very accurate to avoid missing important physics in the data, which might be the key to answer the fundamental questions of GW astrophysics. The continuous improvements of the current LIGO-Virgo detectors, the development of next-generation ground-based detectors such as the Einstein Telescope or the Cosmic Explorer, as well as the development of the Laser Interferometer Space Antenna (LISA), demand accurate waveform models. 
While available models are enough to capture the low spins, comparable-mass binaries routinely detected in LIGO-Virgo searches, those for sources from both current and next-generation ground-based and spaceborne detectors must be accurate enough to detect binaries with large spins and asymmetry in the masses. Moreover, the thousands of sources that we expect to detect with future detectors demand accurate waveforms to mitigate biases in the estimation of signals' parameters due to the presence of a foreground of many sources that overlap in the frequency band. This is recognized as one of the biggest challenges for the analysis of future-detectors' data, since biases might hinder the extraction of important astrophysical and cosmological information from future detectors' data. In the first part of this thesis, we discuss how to improve waveform models for binaries with high spins and asymmetry in the masses. In the second, we present the first generic metrics that have been proposed to predict biases in the presence of a foreground of many overlapping signals in GW data. For the first task, we will focus on several classes of analytical techniques. Current models for LIGO and Virgo studies are based on the post-Newtonian (PN, weak-field, small velocities) approximation that is most natural for the bound orbits that are routinely detected in GW searches. However, two other approximations have risen in prominence, the post-Minkowskian (PM, weak- field only) approximation natural for unbound (scattering) orbits and the small-mass-ratio (SMR) approximation typical of binaries in which the mass of one body is much bigger than the other. These are most appropriate to binaries with high asymmetry in the masses that challenge current waveform models. Moreover, they allow one to "cover" regions of the parameter space of coalescing binaries, thereby improving the interpolation (and faithfulness) of waveform models. 
The analytical approximations to the relativistic two-body problem can synergically be included within the effective-one-body (EOB) formalism, in which the two-body information from each approximation can be recast into an effective problem of a mass orbiting a deformed Schwarzschild (or Kerr) black hole. The hope is that the resultant models can cover both the low-spin comparable-mass binaries that are routinely detected, and the ones that challenge current models. The first part of this thesis is dedicated to a study about how to best incorporate information from the PN, PM, SMR and EOB approaches in a synergistic way. We also discuss how accurate the resulting waveforms are, as compared against numerical-relativity (NR) simulations. We begin by comparing PM models, whether alone or recast in the EOB framework, against PN models and NR simulations. We will show that PM information has the potential to improve currently-employed models for LIGO and Virgo, especially if recast within the EOB formalism. This is very important, as the PM approximation comes with a host of new computational techniques from particle physics to exploit. Then, we show how a combination of PM and SMR approximations can be employed to access previously-unknown PN orders, deriving the third subleading PN dynamics for spin-orbit and (aligned) spin1-spin2 couplings. Such new results can then be included in the EOB models currently used in GW searches and parameter estimation studies, thereby improving them when the binaries have high spins. Finally, we build an EOB model for quasi-circular nonspinning binaries based on the SMR approximation (rather than the PN one as usually done). We show how this is done in detail without incurring in the divergences that had affected previous attempts, and compare the resultant model against NR simulations. 
We find that the SMR approximation is an excellent approximation for all (quasi-circular nonspinning) binaries, including both the equal-mass binaries that are routinely detected in GW searches and the ones with highly asymmetric masses. In particular, the SMR-based models compare much better than the PN models, suggesting that SMR-informed EOB models might be the key to model binaries in the future. In the second task of this thesis, we work within the linear-signal approximation and describe generic metrics to predict inference biases on the parameters of a GW source of interest in the presence of confusion noise from unfitted foregrounds and from residuals of other signals that have been incorrectly fitted out. We illustrate the formalism with simple (yet realistic) LISA sources, and demonstrate its validity against Monte-Carlo simulations. The metrics we describe pave the way for more realistic studies to quantify the biases with future ground-based and spaceborne detectors.}, language = {en} } @phdthesis{Riedl2021, author = {Riedl, Simon}, title = {Active tectonics in the Kenya Rift}, doi = {10.25932/publishup-53855}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-538552}, school = {Universit{\"a}t Potsdam}, pages = {xi, 207}, year = {2021}, abstract = {Magmatische und tektonisch aktive Grabenzonen (Rifts) stellen die Vorstufen entstehender Plattengrenzen dar. Diese sich spreizenden tektonischen Provinzen zeichnen sich durch allgegenw{\"a}rtige Abschiebungen aus, und die r{\"a}umliche Verteilung, die Geometrie, und das Alter dieser Abschiebungen l{\"a}sst R{\"u}ckschl{\"u}sse auf die r{\"a}umlichen und zeitlichen Zusammenh{\"a}nge zwischen tektonischer Deformation, Magmatismus und langwelliger Krustendeformation in Rifts zu. Diese Arbeit konzentriert sich auf die St{\"o}rungsaktivit{\"a}t im Kenia-Rift des k{\"a}nozoischen Ostafrikanischen Grabensystems im Zeitraum zwischen dem mittleren Pleistoz{\"a}n und dem Holoz{\"a}n. 
Um die fr{\"u}hen Stadien der Entstehung kontinentaler Plattengrenzen zu untersuchen, wird in dieser Arbeit eine zeitlich gemittelte minimale Extensionsrate f{\"u}r den inneren Graben des N{\"o}rdlichen Kenia-Rifts (NKR) f{\"u}r die letzten 0,5 Mio Jahre abgeleitet. Die Analyse beruht auf Messungen mit Hilfe des digitalen TanDEM-X-H{\"o}henmodells, um die Abschiebungen entlang der vulkanisch-tektonischen Achse des inneren Grabens des NKR zu kartieren und deren Versatzbetr{\"a}ge zu bestimmen. Mithilfe von vorhandenen Geochronologiedaten der deformierten vulkanischen Einheiten sowie in dieser Arbeit erstellten ⁴⁰Ar/³⁹Ar-Datierungen werden zeitlich gemittelte Extensionsraten berechnet. Die Auswertungen zeigen, dass im inneren Graben des NKR die langfristige Extensionsrate f{\"u}r mittelpleistoz{\"a}ne bis rezente St{\"o}rungen Mindestwerte von 1,0 bis 1,6 mm yr⁻¹ aufweist und lokal allerdings auch Werte bis zu 2,0 mm yr⁻¹ existieren. In Anbetracht der nahezu inaktiven Randst{\"o}rungen des NKR zeigt sich somit, dass sich die Extension auf die Region der aktiven vulkanisch-tektonischen Achse im inneren Graben konzentriert und somit ein fortgeschrittenes Stadium kontinentaler Extensionsprozesse im NKR vorliegt. In dieser Arbeit wird diese r{\"a}umlich fokussierte Extension zudem im Rahmen einer St{\"o}rungsanalyse der j{\"u}ngsten vulkanischen Erscheinungen des Kenia-Rifts betrachtet. Die Arbeit analysiert mithilfe von Gel{\"a}ndekartierungen und eines auf Luftbildern basierenden Gel{\"a}ndemodells die St{\"o}rungscharakteristika der etwa 36 tausend Jahre alten Menengai-Kaldera und der umliegenden Gebiete im zentralen Kenia-Rift. Im Allgemeinen sind die holoz{\"a}nen St{\"o}rungen innerhalb des Rifts reine, NNO-streichende Abschiebungen, die somit das gegenw{\"a}rtige tektonische Spannungsfeld widerspiegeln; innerhalb der Menengai-Kaldera sind die jungen Strukturen jedoch von andauernder magmatischer Aktivit{\"a}t und von Aufdomung {\"u}berpr{\"a}gt. 
Die Kaldera befindet sich im Zentrum eines sich aktiv dehnenden Riftsegments und zusammen mit den anderen quart{\"a}ren Vulkanen des Kenia-Rifts lassen sich diese Bereiche als Kernpunkte der extensionalen St{\"o}rungsaktivit{\"a}t verstehen, die letztlich zu einer weiter entwickelten Phase magmengest{\"u}tzter Kontinentalseparation f{\"u}hren werden. Die bereits seit dem Terti{\"a}r andauernde St{\"o}rungsaktivit{\"a}t im Kenia-Rift f{\"u}hrt zur Zergliederung der gr{\"o}ßeren Rift-Senken in kleinere Segmente und beeinflusst die Sedimentologie und die Hydrologie dieser Riftbecken. Gegenw{\"a}rtig sind die meisten, durch St{\"o}rungen begrenzten Becken des Kenia-Rifts hydrologisch isoliert, sie waren aber w{\"a}hrend feuchter Klimaphasen hydrologisch miteinander verbunden; in dieser Arbeit untersuche ich deshalb auch diese hydrologische Verbindung der Rift-Becken f{\"u}r die Zeit der Afrikanischen Feuchteperiode des fr{\"u}hen Holoz{\"a}ns. Mithilfe der Analyse von digitalen Gel{\"a}ndemodellen, unter Ber{\"u}cksichtigung von geomorphologischen Anzeigern f{\"u}r Seespiegelhochst{\"a}nde, Radiokarbondatierungen und einer {\"U}bersicht {\"u}ber Fossiliendaten konnten zwei kaskadierende Flusssysteme aus diesen Daten abgeleitet werden: eine Flusskaskade in Richtung S{\"u}den und eine in Richtung Norden. Beide Kaskaden haben die derzeit isolierten Becken w{\"a}hrend des fr{\"u}hen Holoz{\"a}ns durch {\"u}berlaufende Seen und eingeschnittene Schluchten miteinander verbunden. Diese hydrologische Verbindung f{\"u}hrte zu der Ausbreitung aquatischer Fauna entlang des Rifts, und gleichzeitig stellte die Wasserscheide zwischen den beiden Flusssystemen den einzigen terrestrischen Ausbreitungskorridor dar, der eine {\"U}berquerung des Kenia-Rifts erm{\"o}glichte. 
Diese tektonisch-geomorphologische Rekonstruktion erkl{\"a}rt die heute isolierten Vorkommen nilotischer Fischarten in den Riftseen Kenias sowie die isolierten Vorkommen Guineo-Congolischer S{\"a}ugetiere in W{\"a}ldern {\"o}stlich des Kenia-Rifts, die sich {\"u}ber die Wasserscheide im Kenia-Rift ausbreiten konnten. Auf l{\"a}ngeren Zeitskalen sind solche Phasen hydrologischer Verbindung und Phasen der Isolation wiederholt aufgetreten und zeigen sich in wechselnden pal{\"a}o{\"o}kologischen Indikatoren in Sedimentbohrkernen. Hier stelle ich einen Sedimentbohrkern aus dem Koora-Becken des S{\"u}dlichen Kenia-Rifts vor, der einen Datensatz der Pal{\"a}o-Umweltbedingungen der letzten 1 Million Jahre beinhaltet. Dieser Datensatz zeigt, dass etwa vor 400 tausend Jahren die zuvor relativ stabilen Umweltbedingungen zum Erliegen kamen und tektonische, hydrologische und {\"o}kologische Ver{\"a}nderungen dazu f{\"u}hrten, dass die Wasserverf{\"u}gbarkeit, die Grasland-Vergesellschaftungen und die Bedeckung durch Baumvegetation zunehmend st{\"a}rkeren und h{\"a}ufigeren Schwankungen unterlagen. Diese großen Ver{\"a}nderungen fallen zeitlich mit Phasen zusammen, in denen das s{\"u}dliche Becken des Kenia-Rifts von vulkanischer und tektonischer Aktivit{\"a}t besonders betroffen war. 
Die vorliegende Arbeit zeigt deshalb deutlich, inwiefern die tektonischen und geomorphologischen Gegebenheiten im Zuge einer zeitlich langanhaltenden Extension die Hydrologie, die Pal{\"a}o-Umweltbedingungen sowie die Biodiversit{\"a}t einer Riftzone beeinflussen k{\"o}nnen.}, language = {en} } @phdthesis{Risch2021, author = {Risch, Lucie}, title = {Acute effect of exercise on sonographic detectable {Achilles} tendon blood flow}, school = {Universit{\"a}t Potsdam}, year = {2021}, language = {en} } @phdthesis{Hecher2021, author = {Hecher, Markus}, title = {Advanced tools and methods for treewidth-based problem solving}, doi = {10.25932/publishup-51251}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512519}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2021}, abstract = {In the last decades, there was notable progress in solving the well-known Boolean satisfiability (Sat) problem, which can be witnessed by powerful Sat solvers. One of the reasons why these solvers are so fast is structural properties of instances that are utilized by the solver's interna. This thesis deals with the well-studied structural property treewidth, which measures the closeness of an instance to being a tree. In fact, there are many problems that are solvable in polynomial time in the instance size when parameterized by treewidth. In this work, we study advanced treewidth-based methods and tools for problems in knowledge representation and reasoning (KR). Thereby, we provide means to establish precise runtime results (upper bounds) for canonical problems relevant to KR. Then, we present a new type of problem reduction, which we call decomposition-guided (DG) that allows us to precisely monitor the treewidth when reducing from one problem to another problem. 
This new reduction type will be the basis for a long-open lower bound result for quantified Boolean formulas and allows us to design a new methodology for establishing runtime lower bounds for problems parameterized by treewidth. Finally, despite these lower bounds, we provide an efficient implementation of algorithms that adhere to treewidth. Our approach finds suitable abstractions of instances, which are subsequently refined in a recursive fashion, and it uses Sat solvers for solving subproblems. It turns out that our resulting solver is quite competitive for two canonical counting problems related to Sat.}, language = {en} } @phdthesis{Ayzel2021, author = {Ayzel, Georgy}, title = {Advancing radar-based precipitation nowcasting}, doi = {10.25932/publishup-50426}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504267}, school = {Universit{\"a}t Potsdam}, pages = {xx, 68}, year = {2021}, abstract = {Precipitation forecasting has an important place in everyday life - during the day we may have tens of small talks discussing the likelihood that it will rain this evening or weekend. Should you take an umbrella for a walk? Or should you invite your friends for a barbecue? It will certainly depend on what your weather application shows. While for years people were guided by the precipitation forecasts issued for a particular region or city several times a day, the widespread availability of weather radars allowed us to obtain forecasts at much higher spatiotemporal resolution of minutes in time and hundreds of meters in space. Hence, radar-based precipitation nowcasting, that is, very-short-range forecasting (typically up to 1-3 h), has become an essential technique, also in various professional application contexts, e.g., early warning, sewage control, or agriculture. There are two major components comprising a system for precipitation nowcasting: radar-based precipitation estimates, and models to extrapolate that precipitation to the imminent future. 
While acknowledging the fundamental importance of radar-based precipitation retrieval for precipitation nowcasts, this thesis focuses only on the model development: the establishment of open and competitive benchmark models, the investigation of the potential of deep learning, and the development of procedures for nowcast errors diagnosis and isolation that can guide model development. The present landscape of computational models for precipitation nowcasting still struggles with the availability of open software implementations that could serve as benchmarks for measuring progress. Focusing on this gap, we have developed and extensively benchmarked a stack of models based on different optical flow algorithms for the tracking step and a set of parsimonious extrapolation procedures based on image warping and advection. We demonstrate that these models provide skillful predictions comparable with or even superior to state-of-the-art operational software. We distribute the corresponding set of models as a software library, rainymotion, which is written in the Python programming language and openly available at GitHub (https://github.com/hydrogo/rainymotion). That way, the library acts as a tool for providing fast, open, and transparent solutions that could serve as a benchmark for further model development and hypothesis testing. One of the promising directions for model development is to challenge the potential of deep learning - a subfield of machine learning that refers to artificial neural networks with deep architectures, which may consist of many computational layers. Deep learning showed promising results in many fields of computer science, such as image and speech recognition, or natural language processing, where it started to dramatically outperform reference methods. The high benefit of using "big data" for training is among the main reasons for that. 
Hence, the emerging interest in deep learning in atmospheric sciences is also caused and concerted with the increasing availability of data - both observational and model-based. The large archives of weather radar data provide a solid basis for investigation of deep learning potential in precipitation nowcasting: one year of national 5-min composites for Germany comprises around 85 billion data points. To this aim, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. RainNet was trained to predict continuous precipitation intensities at a lead time of 5 min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900 km x 900 km and has a resolution of 1 km in space and 5 min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In these experiments, RainNet was applied recursively in order to achieve lead times of up to 1 h. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the previously developed rainymotion library. RainNet significantly outperformed the benchmark models at all lead times up to 60 min for the routine verification metrics mean absolute error (MAE) and critical success index (CSI) at intensity thresholds of 0.125, 1, and 5 mm/h. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15 mm/h). The limited ability of RainNet to predict high rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5 min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16 km and below. 
Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5 min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5 min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times. Hence, we propose several options to address this issue in prospective research on model development for precipitation nowcasting, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance. The model development together with the verification experiments for both conventional and deep learning model predictions also revealed the need to better understand the source of forecast errors. Understanding the dominant sources of error in specific situations should help in guiding further model improvement. The total error of a precipitation nowcast consists of an error in the predicted location of a precipitation feature and an error in the change of precipitation intensity over lead time. So far, verification measures did not allow to isolate the location error, making it difficult to specifically improve nowcast models with regard to location prediction. To fill this gap, we introduced a framework to directly quantify the location error. To that end, we detect and track scale-invariant precipitation features (corners) in radar images. We then consider these observed tracks as the true reference in order to evaluate the performance (or, inversely, the error) of any model that aims to predict the future location of a precipitation feature. 
Hence, the location error of a forecast at any lead time ahead of the forecast time corresponds to the Euclidean distance between the observed and the predicted feature location at the corresponding lead time. Based on this framework, we carried out a benchmarking case study using one year's worth of weather radar composites of the DWD. We evaluated the performance of four extrapolation models, two of which are based on the linear extrapolation of corner motion; and the remaining two are based on the Dense Inverse Search (DIS) method: motion vectors obtained from DIS are used to predict feature locations by linear and Semi-Lagrangian extrapolation. For all competing models, the mean location error exceeds a distance of 5 km after 60 min, and 10 km after 110 min. At least 25\% of all forecasts exceed an error of 5 km after 50 min, and of 10 km after 90 min. Even for the best models in our experiment, at least 5\% of the forecasts will have a location error of more than 10 km after 45 min. When we relate such errors to application scenarios that are typically suggested for precipitation nowcasting, e.g., early warning, it becomes obvious that location errors matter: the order of magnitude of these errors is about the same as the typical extent of a convective cell. Hence, the uncertainty of precipitation nowcasts at such length scales - just as a result of locational errors - can be substantial already at lead times of less than 1 h. Being able to quantify the location error should hence guide any model development that is targeted towards its minimization. To that aim, we also consider the high potential of using deep learning architectures specific to the assimilation of sequential (track) data. Last but not least, the thesis demonstrates the benefits of a general movement towards open science for model development in the field of precipitation nowcasting. 
All the presented models and frameworks are distributed as open repositories, thus enhancing transparency and reproducibility of the methodological approach. Furthermore, they are readily available to be used for further research studies, as well as for practical applications.}, language = {en} } @phdthesis{Foerster2021, author = {F{\"o}rster, Desiree}, title = {Aesthetic experience of metabolic processes}, publisher = {meson press}, address = {L{\"u}neburg}, isbn = {978-3-95796-180-8}, school = {Universit{\"a}t Potsdam}, pages = {184}, year = {2021}, abstract = {Simultaneously speculative and inspired by everyday experiences, this volume develops an aesthetics of metabolism that offers a new perspective on the human-environment relation, one that is processual, relational, and not dependent on conscious thought. In art installations, design prototypes, and research-creation projects that utilize air, light, or temperature to impact subjective experience the author finds aesthetic milieus that shift our awareness to the role of different sense modalities in aesthetic experience. Metabolic and atmospheric processes allow for an aesthetics besides and beyond the usually dominant visual sense.}, language = {en} } @phdthesis{Sikkens2021, author = {Sikkens, Reinier}, title = {An analysis of cultural entrepreneurship}, doi = {10.25932/publishup-50187}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-501879}, school = {Universit{\"a}t Potsdam}, pages = {325}, year = {2021}, abstract = {Media artists have been struggling for financial survival ever since media art came into being. The non-material value of the artwork, a provocative attitude towards the traditional arts world and originally anti-capitalist mindset of the movement makes it particularly difficult to provide a constructive solution. 
However, a cultural entrepreneurial approach can be used to build a framework in order to find a balance between culture and business while ensuring that the cultural mission remains the top priority.}, language = {en} } @phdthesis{Davis2021, author = {Davis, Timothy}, title = {An analytical and numerical analysis of fluid-filled crack propagation in three dimensions}, doi = {10.25932/publishup-50960}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-509609}, school = {Universit{\"a}t Potsdam}, pages = {xi, 187}, year = {2021}, abstract = {Fluids in the Earth's crust can move by creating and flowing through fractures, in a process called `hydraulic fracturing'. The tip-line of such fluid-filled fractures grows at locations where stress is larger than the strength of the rock. Where the tip stress vanishes, the fracture closes and the fluid-front retreats. If stress gradients exist on the fracture's walls, induced by fluid/rock density contrasts or topographic stresses, this results in an asymmetric shape and growth of the fracture, allowing for the contained batch of fluid to propagate through the crust. The state-of-the-art analytical and numerical methods to simulate fluid-filled fracture propagation are two-dimensional (2D). In this work I extend these to three dimensions (3D). In my analytical method, I approximate the propagating 3D fracture as a penny-shaped crack that is influenced by both an internal pressure and stress gradients. In addition, I develop a numerical method to model propagation where curved fractures can be simulated as a mesh of triangular dislocations, with the displacement of faces computed using the displacement discontinuity method. I devise a rapid technique to approximate stress intensity and use this to calculate the advance of the tip-line. My 3D models can be applied to arbitrary stresses, topographic and crack shapes, whilst retaining short computation times. 
I cross-validate my analytical and numerical methods and apply them to various natural and man-made settings, to gain additional insights into the movements of hydraulic fractures such as magmatic dikes and fluid injections in rock. In particular, I calculate the `volumetric tipping point', which once exceeded allows a fluid-filled fracture to propagate in a `self-sustaining' manner. I discuss implications this has for hydro-fracturing in industrial operations. I also present two studies combining physical models that define fluid-filled fracture trajectories and Bayesian statistical techniques. In these studies I show that the stress history of the volcanic edifice defines the location of eruptive vents at volcanoes. Retrieval of the ratio between topographic to remote stresses allows for forecasting of probable future vent locations. Finally, I address the mechanics of 3D propagating dykes and sills in volcanic regions. I focus on Sierra Negra volcano in the Gal{\'a}pagos islands, where in 2018, a large sill propagated with an extremely curved trajectory. Using a 3D analysis, I find that shallow horizontal intrusions are highly sensitive to topographic and buoyancy stress gradients, as well as the effects of the free surface.}, language = {en} } @phdthesis{Wolf2021, author = {Wolf, Johannes}, title = {Analysis and visualization of transport infrastructure based on large-scale geospatial mobile mapping data}, doi = {10.25932/publishup-53612}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-536129}, school = {Universit{\"a}t Potsdam}, pages = {vi, 121}, year = {2021}, abstract = {3D point clouds are a universal and discrete digital representation of three-dimensional objects and environments. For geospatial applications, 3D point clouds have become a fundamental type of raw data acquired and generated using various methods and techniques. In particular, 3D point clouds serve as raw data for creating digital twins of the built environment. 
This thesis concentrates on the research and development of concepts, methods, and techniques for preprocessing, semantically enriching, analyzing, and visualizing 3D point clouds for applications around transport infrastructure. It introduces a collection of preprocessing techniques that aim to harmonize raw 3D point cloud data, such as point density reduction and scan profile detection. Metrics such as, e.g., local density, verticality, and planarity are calculated for later use. One of the key contributions tackles the problem of analyzing and deriving semantic information in 3D point clouds. Three different approaches are investigated: a geometric analysis, a machine learning approach operating on synthetically generated 2D images, and a machine learning approach operating on 3D point clouds without intermediate representation. In the first application case, 2D image classification is applied and evaluated for mobile mapping data focusing on road networks to derive road marking vector data. The second application case investigates how 3D point clouds can be merged with ground-penetrating radar data for a combined visualization and to automatically identify atypical areas in the data. For example, the approach detects pavement regions with developing potholes. The third application case explores the combination of a 3D environment based on 3D point clouds with panoramic imagery to improve visual representation and the detection of 3D objects such as traffic signs. The presented methods were implemented and tested based on software frameworks for 3D point clouds and 3D visualization. In particular, modules for metric computation, classification procedures, and visualization techniques were integrated into a modular pipeline-based C++ research framework for geospatial data processing, extended by Python machine learning scripts. All visualization and analysis techniques scale to large real-world datasets such as road networks of entire cities or railroad networks. 
The thesis shows that some use cases allow taking advantage of established image vision methods to analyze images rendered from mobile mapping data efficiently. The two presented semantic classification methods working directly on 3D point clouds are use case independent and show similar overall accuracy when compared to each other. While the geometry-based method requires less computation time, the machine learning-based method supports arbitrary semantic classes but requires training the network with ground truth data. Both methods can be used in combination to gradually build this ground truth with manual corrections via a respective annotation tool. This thesis contributes results for IT system engineering of applications, systems, and services that require spatial digital twins of transport infrastructure such as road networks and railroad networks based on 3D point clouds as raw data. It demonstrates the feasibility of fully automated data flows that map captured 3D point clouds to semantically classified models. This provides a key component for seamlessly integrated spatial digital twins in IT solutions that require up-to-date, object-based, and semantically enriched information about the built environment.}, language = {en} } @phdthesis{Mancini2021, author = {Mancini, Carola}, title = {Analysis of the effects of age-related changes of metabolic flux on brown adipocyte formation and function}, doi = {10.25932/publishup-51266}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 134}, year = {2021}, abstract = {Brown adipose tissue (BAT) is responsible for non-shivering thermogenesis, thereby allowing mammals to maintain a constant body temperature in a cold environment. Thermogenic capacity of this tissue is due to a high mitochondrial density and expression of uncoupling protein 1 (UCP1), a unique brown adipocyte marker which dissipates the mitochondrial proton gradient to produce heat instead of ATP. 
BAT is actively involved in whole-body metabolic homeostasis and during aging there is a loss of classical brown adipose tissue with concomitantly reduced browning capacity of white adipose tissue. Therefore, an age-dependent decrease of BAT-related energy expenditure capacity may exacerbate the development of metabolic diseases, including obesity and type 2 diabetes mellitus. Given that direct effects of age-related changes of BAT-metabolic flux have yet to be unraveled, the aim of the current thesis is to investigate potential metabolic mechanisms involved in BAT-dysfunction during aging and to identify suitable metabolic candidates as functional biomarkers of BAT-aging. To this aim, integration of transcriptomic, metabolomic and proteomic data analyses of BAT from young and aged mice was performed, and a group of candidates with age-related changes was revealed. Metabolomic analysis showed age-dependent alterations of metabolic intermediates involved in energy, nucleotide and vitamin metabolism, with major alterations regarding the purine nucleotide pool. These data suggest a potential role of nucleotide intermediates in age-related BAT defects. In addition, the screening of transcriptomic and proteomic data sets from BAT of young and aged mice allowed identification of a 60-kDa lysophospholipase, also known as L-asparaginase (Aspg), whose expression declines during BAT-aging. Involvement of Aspg in brown adipocyte thermogenic function was subsequently analyzed at the molecular level using in vitro approaches and animal models. The findings revealed sensitivity of Aspg expression to β3-adrenergic activation via different metabolic cues, including cold exposure and treatment with β3-adrenergic agonist CL. 
To further examine ASPG function in BAT, an over-expression model of Aspg in a brown adipocyte cell line was established and showed that these cells were metabolically more active compared to controls, revealing increased expression of the main brown-adipocyte specific marker UCP1, as well as higher lipolysis rates. An in vitro loss-of-function model of Aspg was also functionally analyzed, revealing reduced brown adipogenic characteristics and an impaired lipolysis, thus confirming physiological relevance of Aspg in brown adipocyte function. Characterization of a transgenic mouse model with whole-body inactivation of the Aspg gene (Aspg-KO) allowed investigation of the role of ASPG under in vivo conditions, indicating a mild obesogenic phenotype, hypertrophic white adipocytes, impairment of the early thermogenic response upon cold-stimulation and dysfunctional insulin sensitivity. Taken together, these data show that ASPG may represent a new functional biomarker of BAT-aging that regulates thermogenesis and therefore a potential target for the treatment of age-related metabolic disease.}, language = {en} } @phdthesis{Grosso2021, author = {Grosso, Stefano}, title = {Anerkennung und Macht}, doi = {10.25932/publishup-50846}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-508461}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2021}, abstract = {In der vorliegenden Untersuchung habe ich das Ziel verfolgt, einen sachlich-eigenst{\"a}ndigen Beitrag f{\"u}r eine Debatte gegen Honneths kritische Gesellschaftstheorie zu leisten. In dieser Debatte wird Honneth dahingehend kritisiert, dass es ihm mit seiner kritischen Gesellschaftstheorie entgegen seiner eigenen systematischen Zielsetzung nicht gelingt, in modernen liberaldemokratischen Gesellschaften s{\"a}mtliche Ph{\"a}nomene von sozialer Herrschaft kritisch zu hinterfragen. 
Denn soziale Anerkennung, die Honneth als Schl{\"u}sselbegriff f{\"u}r diese kritische Hinterfragung behandelt, bei der soziale Herrschaft in Verbindung mit sozialer Missachtung (als mangelnde soziale Anerkennung) steht, kann laut der Kritik faktisch selbst ein Medium f{\"u}r die Stiftung von sozialer Unterwerfung sein. Dies geschieht in Prozessen von Identit{\"a}tsentwicklung, in denen soziale Anerkennung f{\"u}r Individuen als Anerkannte bestimmte Identit{\"a}tsm{\"o}glichkeiten einr{\"a}umt und auf diese Weise gleichzeitig andere Identit{\"a}tsm{\"o}glichkeiten ausschließt, womit sie auf diese Identit{\"a}t einschr{\"a}nkend und insofern herrschend wirkt. Es handelt sich um eine Form von sozialer Herrschaft, die durch soziale Anerkennung gestiftet wird. Honneth zieht dem Vorwurf zufolge nicht in Erw{\"a}gung, dass soziale Anerkennung bei Individuen als Anerkannte einen solchen negativen Effekt erzielen kann. Hieraus ergeben sich die Fragen, ob soziale Anerkennung in Prozessen von Identit{\"a}tsentwicklung jeweils mit sozialer Herrschaft einhergeht und wie dieser Typus von sozialer Herrschaft kritisiert werden kann. Diese Fragen hat Honneth zuletzt in einem pers{\"o}nlichen Gespr{\"a}ch mit Allen und Cooke (als zwei Teilnehmerinnen der Debatte gegen Honneth) beantwortet. An dieser Stelle vertritt er mit beiden Gespr{\"a}chsteilnehmerinnen die Auffassung, dass die Operation der Einschr{\"a}nkung von Identit{\"a}tsm{\"o}glichkeiten an sich keine Operation darstellt, welche, wie sonst in der Debatte gegen seine kritische Gesellschaftstheorie behauptet wird, auf soziale Herrschaft zur{\"u}ckf{\"u}hrt. Diese Auffassung beruht auf der Idee, wonach soziale Anerkennung sich in jenem praktischen Kontext nur unter der Bedingung als herrschaftsstiftend erweist, dass sie immanente Prinzipien verletzt, die substanziell kritische Maßst{\"a}be definieren. 
Mein Beitrag zu dieser Debatte gegen Honneth besteht auf der einen Seite in der Erkl{\"a}rung, dass sowohl jene Auffassung als auch jene Idee argumentativ mangelhaft sind, und auf der anderen Seite in der Ausf{\"u}hrung des Vorhabens, diesen argumentativen Mangel selbst zu beheben. Gegen jene Auffassung behaupte ich, dass die drei Autoren in ihrem Gespr{\"a}ch nicht erl{\"a}utern, inwiefern soziale Anerkennung nicht herrschend wirkt, wenn sie die Identit{\"a}tsm{\"o}glichkeiten von Individuen als Anerkannte einschr{\"a}nkt, denn mit dieser Einschr{\"a}nkung wird vielmehr faktisch {\"u}ber diese Individuen geherrscht - die Debatte gegen Honneth, so zur Unterst{\"u}tzung dieser Ansicht, baut haupts{\"a}chlich auf ebendiesem Faktum auf. Gegen jene Idee habe ich f{\"u}nf problematische Fragen gestellt und beantwortet, die Bezug eigentlich nicht allein auf diese Idee selbst, sondern {\"u}berdies auf weitere, naheliegende Ideen nehmen, welche die drei Autoren angesprochen haben.}, language = {de} } @phdthesis{Fominyam2021, author = {Fominyam, Henry Zamchang}, title = {Aspects of Awing grammar and information structure}, doi = {10.25932/publishup-51806}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-518068}, school = {Universit{\"a}t Potsdam}, pages = {xi, 391}, year = {2021}, abstract = {This project describes the nominal, verbal and 'truncation' systems of Awing and explains the syntactic and semantic functions of the multifunctional LE morpheme in copular and wh-focused constructions. Awing is a Bantu Grassfields language spoken in the North West region of Cameroon. The work begins with morphological processes viz. deverbals, compounding, reduplication, borrowing and a thorough presentation of the pronominal system and takes on verbal categories viz. tense, aspect, mood, verbal extensions, negation, adverbs and triggers of a homorganic N(asal)-prefix that attaches to the verb and other verbal categories. 
Awing grammar also has a very unusual phenomenon whereby nouns and verbs take long and short forms. A chapter entitled truncation is dedicated to the phenomenon. It is observed that the truncation process does not apply to bare singular NPs, proper names and nouns derived via morphological processes. On the other hand, with the exception of the 1st person non-emphatic possessive determiner and the class 7 noun prefix, nouns generally take the truncated form with modifiers (i.e., articles, demonstratives and other possessives). It is concluded that nominal truncation depicts movement within the DP system (Abney 1987). Truncation of the verb occurs in three contexts: a mass/plurality conspiracy (or lattice structuring in terms of Link 1983) between the verb and its internal argument (i.e., direct object); a means to align (exhaustive) focus (in terms of F{\'e}ry 2013), and a means to form polar questions. The second part of the work focuses on the role of the LE morpheme in copular and wh-focused clauses. Firstly, the syntax of the Awing copular clause is presented and it is shown that copular clauses in Awing have 'subject-focus' vs 'topic-focus' partitions and that the LE morpheme indirectly relates such functions. Semantically, it is shown that LE does not express contrast or exhaustivity in copular clauses. Turning to wh-constructions, the work adheres to Hamblin's (1973) idea that the meaning of a question is the set of its possible answers and based on Rooth's (1985) underspecified semantic notion of alternative focus, concludes that the LE morpheme is not a Focus Marker (FM) in Awing: LE does not generate or indicate the presence of alternatives (Krifka 2007); the LE morpheme can associate with wh-elements as a focus-sensitive operator with semantic import that operates on the focus alternatives by presupposing an exhaustive answer, among other notions. 
With focalized categories, the project further substantiates the claim in Fominyam \& {\v{S}}im{\'i}k (2017), namely that exhaustivity is part of the semantics of the LE morpheme and not derived via contextual implicature, via a number of diagnostics. Hence, unlike in copular clauses, the LE morpheme with wh-focused categories is analysed as a morphological exponent of a functional head Exh corresponding to Horvath's (2010) EI (Exhaustive Identification). The work ends with the syntax of verb focus and negation and modifies the idea in Fominyam \& {\v{S}}im{\'i}k (2017), namely that the focalized verb that associates with the exhaustive (LE) particle is a lower copy of the finite verb that has been moved to Agr. It is argued that the LE-focused verb 'cluster' is an instantiation of adjunction. The conclusion is that verb doubling with verb focus in Awing is neither a realization of two copies of one and the same verb (Fominyam and {\v{S}}im{\'i}k 2017), nor a result of a copy triggered by a focus marker (Aboh and Dyakonova 2009). Rather, the focalized copy is said to be merged directly as the complement of LE forming a type of adjoining cluster.}, language = {en} } @phdthesis{MogrovejoArias2021, author = {Mogrovejo Arias, Diana Carolina}, title = {Assessment of the frequency and relevance of potentially pathogenic phenotypes in microbial isolates from Arctic environments}, school = {Universit{\"a}t Potsdam}, pages = {125}, year = {2021}, abstract = {The Arctic environments constitute rich and dynamic ecosystems, dominated by microorganisms extremely well adapted to survive and function under severe conditions. A range of physiological adaptations allow the microbiota in these habitats to withstand low temperatures, low water and nutrient availability, high levels of UV radiation, etc. 
In addition, other adaptations of clear competitive nature are directed at not only surviving but thriving in these environments, by disrupting the metabolism of neighboring cells and affecting intermicrobial communication. Since Arctic microbes are bioindicators which amplify climate alterations in the environment, the Arctic region presents the opportunity to study local microbiota and carry out research about interesting, potentially virulent phenotypes that could be dispersed into other habitats around the globe as a consequence of accelerating climate change. In this context, exploration of Arctic habitats as well as descriptions of the microbes inhabiting them are abundant but microbial competitive strategies commonly associated with virulence and pathogens are rarely reported. In this project, environmental samples from the Arctic region were collected and microorganisms (bacteria and fungi) were isolated. The clinical relevance of these microorganisms was assessed by observing the following virulence markers: ability to grow at a range of temperatures, expression of antimicrobial resistance and production of hemolysins. The aim of this project is to determine the frequency and relevance of these characteristics in an effort to understand microbial adaptations in habitats threatened by climate change. The isolates obtained and described here were able to grow at a range of temperatures, in some cases more than 30~$^{\circ}$C higher than their original isolation temperature. A considerable number of them consistently expressed compounds capable of lysing sheep and bovine erythrocytes on blood agar at different incubation temperatures. Ethanolic extracts of these bacteria were able to cause rapid and complete lysis of erythrocyte suspensions and might even be hemolytic when assayed on human blood. In silico analyses showed a variety of resistance elements, some of them novel, against natural and synthetic antimicrobial compounds. 
In vitro experiments against a number of antimicrobial compounds showed resistance phenotypes belonging to wild-type populations and some non-wild-type phenotypes, which clearly denote human influence in the acquisition of antimicrobial resistance. The results of this project demonstrate the presence of virulence-associated factors expressed by microorganisms of natural, non-clinical environments. This study contains some of the first reports, to the best of our knowledge, of hemolytic microbes isolated from the Arctic region. In addition, it provides additional information about the presence and expression of intrinsic and acquired antimicrobial resistance in environmental isolates, contributing to the understanding of the evolution of relevant pathogenic species and opportunistic pathogens. Finally, this study highlights some of the potential risks associated with changes in the polar regions (habitat melting and destruction, ecosystem transition and re-colonization) as important indirect consequences of global warming and altered climatic conditions around the planet.}, language = {en} }