@phdthesis{Sotiropoulou2019, author = {Sotiropoulou, Stavroula}, title = {Pleiotropy of phonetic indices in the expression of syllabic organization}, doi = {10.25932/publishup-54639}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-546399}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2019}, abstract = {This dissertation is concerned with the relation between qualitative phonological organization in the form of syllabic structure and continuous phonetics, that is, the spatial and temporal dimensions of vocal tract action that express syllabic structure. The main claim of the dissertation is twofold. First, we argue that syllabic organization exerts multiple effects on the spatio-temporal properties of the segments that partake in that organization. That is, there is no unique or privileged exponent of syllabic organization. Rather, syllabic organization is expressed in a pleiotropy of phonetic indices. Second, we claim that a better understanding of the relation between qualitative phonological organization and continuous phonetics is reached when one considers how the string of segments (over which the nature of the phonological organization is assessed) responds to perturbations (scaling of phonetic variables) of localized properties (such as durations) within that string. Specifically, variation in phonetic variables, and more specifically prosodic variation, is a crucial key to understanding the nature of the link between (phonological) syllabic organization and the phonetic spatio-temporal manifestation of that organization. The effects of prosodic variation on segmental properties and on the overlap between the segments, we argue, offer the right pathway to discover patterns related to syllabic organization. In our approach, to uncover evidence for global organization, the sequence of segments partaking in that organization as well as properties of these segments or their relations with one another must be somehow locally varied. The consequences of such variation on the rest of the sequence can then be used to unveil the span of organization. When local perturbations to segments or relations between adjacent segments have effects that ripple through the rest of the sequence, this is evidence that organization is global. If instead local perturbations stay local with no consequences for the rest of the sequence, this indicates that organization is local.}, language = {en} } @phdthesis{Ruschel2019, author = {Ruschel, Matthias}, title = {Das Preußische Erbrecht in der Judikatur des Berliner Obertribunals in den Jahren 1836 bis 1865}, doi = {10.25932/publishup-52779}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-527798}, school = {Universit{\"a}t Potsdam}, pages = {283}, year = {2019}, abstract = {Die Dissertation befasst sich mit dem Allgemeinen Preußischen Landrecht von 1794 und der hierzu ergangenen Rechtsprechung des Berliner Obertribunals. Im Fokus der Untersuchung stehen die erbrechtlichen Regelungen des Landrechts und deren Anwendung sowie Auslegung in der Judikatur des h{\"o}chsten preußischen Gerichts. Der Forschungsgegenstand ergibt sich aus dem im Landrecht kodifizierten speziellen Gesetzesverst{\"a}ndnis. Nach diesem sollte die Gesetzesauslegung durch die Rechtsprechung auf ein Minimum, n{\"a}mlich die Auslegung allein anhand des Wortlauts der Regelung, reduziert werden, um dem absolutistischen Regierungsanspruch der preußischen Monarchen, namentlich Friedrichs des Großen, hinreichend Rechnung zu tragen.
In diesem Kontext wird der Frage nachgegangen, inwieweit das preußische Obertribunal das im Landrecht statuierte „Auslegungsverbot" beachtet hat und in welchen F{\"a}llen sich das Gericht von der Vorgabe emanzipierte und weitere Auslegungsmethoden anwendete und sich so eine unabh{\"a}ngige Rechtsprechung entwickeln konnte. Die Arbeit gliedert sich in drei Hauptabschnitte. Im Anschluss an die Einleitung, in der zun{\"a}chst die rechtshistorische Bedeutung des Landrechts und des Erbrechts sowie der Untersuchungsgegenstand umrissen werden, folgt die Darstellung der Entstehungsgeschichte des Landrechts und des Berliner Obertribunals. Hieran schließt sich in einem dritten Abschnitt eine Analyse der erbrechtlichen Vorschriften des Landrechts an. In dieser wird auf die Entstehungsgeschichte der verschiedenen erbrechtlichen Institute wie beispielsweise der gesetzlichen und gewillk{\"u}rten Erbfolge, des Pflichtteilsrechts etc., unter Ber{\"u}cksichtigung des zeitgen{\"o}ssischen wissenschaftlichen Diskurses eingegangen. Im vierten Abschnitt geht es um die Judikate des Berliner Obertribunals aus den Jahren 1836-1865, in denen die zuvor dargestellten erbrechtlichen Regelungen entscheidungserheblich waren. Dabei wird der Forschungsfrage, inwieweit das Obertribunal das im Landrecht statuierte Auslegungsverbot beachtet hat und in welchen F{\"a}llen es von diesem abwich bzw. weitere Auslegungsmethoden anwendete, konkret nachgegangen. Insgesamt werden 26 Entscheidungen des Obertribunals unter dem Aspekt der Auslegungspraxis, der Kontinuit{\"a}t und der Beschleunigung der Rechtsprechung analysiert und ausgewertet.}, language = {de} } @phdthesis{Doering2019, author = {D{\"o}ring, Matthias}, title = {The public encounter}, doi = {10.25932/publishup-50227}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502276}, school = {Universit{\"a}t Potsdam}, pages = {xi, 115}, year = {2019}, abstract = {This thesis puts the citizen-state interaction at its center. Building on a comprehensive model incorporating various perspectives on this interaction, I derive selected research gaps. The three articles comprising this thesis tackle these gaps. A focal role is played by the citizens' administrative literacy, the relevant competences and knowledge necessary to successfully interact with public organizations. The first article elaborates on the different dimensions of administrative literacy and develops a survey instrument to assess these. The second study shows that public employees change their behavior according to the competences that citizens display during public encounters. They treat citizens preferentially who are well prepared and able to persuade them of their application's potential. These citizens thereby signal a higher success potential with respect to bureaucratic success criteria, which leads to the employees' cream-skimming behavior. The third article examines the dynamics of employees' communication strategies when recovering from a service failure. The study finds that different explanation strategies yield different effects on the client's frustration. While accepting the responsibility and explaining the reasons for a failure alleviates the frustration and anger, refusing the responsibility leads to no or even reinforcing effects on the client's frustration.
The results emphasize the different dynamics that characterize the nature of citizen-state interactions and how they establish their short- and long-term outcomes.}, language = {en} } @phdthesis{Gong2019, author = {Gong, Chen Chris}, title = {Synchronization of coupled phase oscillators}, doi = {10.25932/publishup-48752}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487522}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 115}, year = {2019}, abstract = {Oscillatory systems under weak coupling can be described by the Kuramoto model of phase oscillators. Kuramoto phase oscillators have diverse applications ranging from phenomena such as communication between neurons and collective influences of political opinions, to engineered systems such as Josephson Junctions and synchronized electric power grids. This thesis includes the author's contribution to the theoretical framework of coupled Kuramoto oscillators and to the understanding of non-trivial N-body dynamical systems via their reduced mean-field dynamics. The main content of this thesis is composed of four parts. First, a partially integrable theory of globally coupled identical Kuramoto oscillators is extended to include pure higher-mode coupling. The extended theory is then applied to a non-trivial higher-mode coupled model, which has been found to exhibit asymmetric clustering. Using the developed theory, we could predict a number of features of the asymmetric clustering with only information of the initial state provided. The second part consists of an iterated discrete-map approach to simulate phase dynamics. The proposed map --- a Moebius map --- not only provides fast computation of phase synchronization, it also precisely reflects the underlying group structure of the dynamics. We then compare the iterated-map dynamics and various analogous continuous-time dynamics. We are able to replicate known phenomena such as the synchronization transition of the Kuramoto-Sakaguchi model of oscillators with distributed natural frequencies, and chimera states for identical oscillators under non-local coupling. The third part entails a particular model of repulsively coupled identical Kuramoto-Sakaguchi oscillators under common random forcing, which can be shown to be partially integrable. Via both numerical simulations and theoretical analysis, we determine that such a model cannot exhibit stationary multi-cluster states, contrary to the numerical findings in previous literature. Through further investigation, we find that the multi-clustering states reported previously occur due to the accumulation of discretization errors inherent in the integration algorithms, which introduce higher-mode couplings into the model. As a result, the partial integrability condition is violated. Lastly, we derive the microscopic cross-correlation of globally coupled non-identical Kuramoto oscillators under common fluctuating forcing. The effect of correlation arises naturally in finite populations, due to the non-trivial fluctuations of the meanfield. In an idealized model, we approximate the finite-sized fluctuation by a Gaussian white noise. 
The analytical approximation qualitatively matches the measurements in numerical experiments; however, due to other periodic components inherent in the fluctuations of the mean-field, there still exist significant inconsistencies.}, language = {en} } @phdthesis{Kerutt2019, author = {Kerutt, Josephine Victoria}, title = {The high-redshift voyage of Lyman alpha and Lyman continuum emission as told by MUSE}, doi = {10.25932/publishup-47881}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-478816}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2019}, abstract = {Most of the matter in the universe consists of hydrogen. The hydrogen in the intergalactic medium (IGM), the matter between the galaxies, underwent a change of its ionisation state at the epoch of reionisation, at a redshift of roughly 6 < z < 10, or ~10^8 years after the Big Bang. At this time, the mostly neutral hydrogen in the IGM was ionised, but the source of the responsible hydrogen ionising emission remains unclear. In this thesis I discuss the most likely candidates for the emission of this ionising radiation, which are a type of galaxy called Lyman alpha emitters (LAEs). As implied by their name, they emit Lyman alpha radiation, produced after a hydrogen atom has been ionised and recombines with a free electron. The ionising radiation itself (also called Lyman continuum emission) which is needed for this process inside the LAEs could also be responsible for ionising the IGM around those galaxies at the epoch of reionisation, given that enough Lyman continuum escapes. Through this mechanism, Lyman alpha and Lyman continuum radiation are closely linked and are both studied to better understand the properties of high redshift galaxies and the reionisation state of the universe. Before I can analyse their Lyman alpha emission lines and the escape of Lyman continuum emission from them, the first step is the detection and correct classification of LAEs in integral field spectroscopic data, specifically taken with the Multi-Unit Spectroscopic Explorer (MUSE). After detecting emission line objects in the MUSE data, the task of classifying them and determining their redshift is performed with the graphical user interface QtClassify, which I developed during the work on this thesis. It uses the strength of the combination of spectroscopic and photometric information that integral field spectroscopy offers to enable the user to quickly identify the nature of the detected emission lines. The reliable classification of LAEs and determination of their redshifts is a crucial first step towards an analysis of their properties. Through radiative transfer processes, the properties of the neutral hydrogen clouds in and around LAEs are imprinted on the shape of the Lyman alpha line. Thus, after identifying the LAEs in the MUSE data, I analyse the properties of the Lyman alpha emission line, such as the equivalent width (EW) distribution, the asymmetry and width of the line as well as the double peak fraction. I challenge the common method of displaying EW distributions as histograms without taking the limits of the survey into account and construct a more independent EW distribution function that better reflects the properties of the underlying population of galaxies. I illustrate this by comparing the fraction of high EW objects between the two surveys MUSE-Wide and MUSE-Deep, both consisting of MUSE pointings (each with the size of one square arcminute) of different depths.
In the 60 MUSE-Wide fields of one hour exposure time I find a fraction of objects with extreme EWs above EW_0>240A of ~20\%, while in the MUSE-Deep fields (9 fields with an exposure time of 10 hours and one with an exposure time of 31 hours) I find a fraction of only ~1\%, which is due to the differences in the limiting line flux of the surveys. The highest EW I measure is EW_0 = 600.63 +- 110A, which hints at an unusual underlying stellar population, possibly with a very low metallicity. With the knowledge of the redshifts and positions of the LAEs detected in the MUSE-Wide survey, I also look for Lyman continuum emission coming from these galaxies and analyse the connection between Lyman continuum emission and Lyman alpha emission. I use ancillary Hubble Space Telescope (HST) broadband photometry in the bands that contain the Lyman continuum and find six Lyman continuum leaker candidates. To test whether the Lyman continuum emission of LAEs is coming only from those individual objects or the whole population, I select LAEs that are most promising for the detection of Lyman continuum emission, based on their rest-frame UV continuum and Lyman alpha line shape properties. After this selection, I stack the broadband data of the resulting sample and detect a signal in Lyman continuum with a significance of S/N = 5.5, pointing towards a Lyman continuum escape fraction of ~80\%. If the signal is reliable, it strongly favours LAEs as the providers of the hydrogen ionising emission at the epoch of reionisation and beyond.}, language = {en} } @phdthesis{Kuberski2019, author = {Kuberski, Stephan R.}, title = {Fundamental motor laws and dynamics of speech}, doi = {10.25932/publishup-43771}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437714}, school = {Universit{\"a}t Potsdam}, pages = {97}, year = {2019}, abstract = {The present work is a compilation of three original research articles submitted (or already published) in international peer-reviewed venues of the field of speech science. These three articles address the topics of fundamental motor laws in speech and dynamics of corresponding speech movements: 1. Kuberski, Stephan R. and Adamantios I. Gafos (2019). "The speed-curvature power law in tongue movements of repetitive speech". PLOS ONE 14(3). Public Library of Science. doi: 10.1371/journal.pone.0213851. 2. Kuberski, Stephan R. and Adamantios I. Gafos (In press). "Fitts' law in tongue movements of repetitive speech". Phonetica: International Journal of Phonetic Science. Karger Publishers. doi: 10.1159/000501644 3. Kuberski, Stephan R. and Adamantios I. Gafos (submitted). "Distinct phase space topologies of identical phonemic sequences". Language. Linguistic Society of America. The present work introduces a metronome-driven speech elicitation paradigm in which participants were asked to utter repetitive sequences of elementary consonant-vowel syllables. This paradigm, explicitly designed to cover speech rates from a substantially wider range than has been explored so far in previous work, is demonstrated to satisfy the important prerequisites for assessing so far difficult to access aspects of speech. Specifically, the paradigm's extensive speech rate manipulation enabled elicitation of a great range of movement speeds as well as movement durations and excursions of the relevant effectors. 
The presence of such variation is a prerequisite to assessing whether invariant relations between these and other parameters exist and thus provides the foundation for a rigorous evaluation of the two laws examined in the first two contributions of this work. In the data resulting from this paradigm, it is shown that speech movements obey the same fundamental laws as movements from other domains of motor control do. In particular, it is demonstrated that speech strongly adheres to the power law relation between speed and curvature of movement, with a clear speech rate dependency of the power law's exponent. The often-sought or reported exponent of one third in the statement of the law is unique to a subclass of movements which corresponds to the range of faster rates under which a particular utterance is produced. For slower rates, significantly larger values than one third are observed. Furthermore, for the first time in speech, this work uncovers evidence for the presence of Fitts' law. It is shown that, beyond a speaker-specific speech rate, speech movements of the tongue clearly obey Fitts' law by the emergence of its characteristic linear relation between movement time and index of difficulty. For slower speech rates (when temporal pressure is small), no such relation is observed. The methods and datasets obtained in the two assessments above provide a rigorous foundation both for addressing implications for theories and models of speech and for better understanding the status of speech movements in the context of human movements in general. All modern theories of language rely on a fundamental segmental hypothesis according to which the phonological message of an utterance is represented by a sequence of segments or phonemes. It is commonly assumed that each of these phonemes can be mapped to some unit of speech motor action, a so-called speech gesture. For the first time here, it is demonstrated that the relation between the phonological description of simple utterances and the corresponding speech motor action is non-unique. Specifically, through the extensive speech rate manipulation in the experimental paradigm used herein, it is demonstrated that speech exhibits clearly distinct dynamical organizations underlying the production of simple utterances. At slower speech rates, the dynamical organization underlying the repetitive production of elementary /CV/ syllables can be described by successive concatenations of closing and opening gestures, each with its own equilibrium point. As speech rate increases, the equilibria of opening and closing gestures are not equally stable, yielding qualitatively different modes of organization with either a single equilibrium point of a combined opening-closing gesture or a periodic attractor unleashed by the disappearance of both equilibria. This observation, the non-uniqueness of the dynamical organization underlying what on the surface appear to be identical phonemic sequences, is an entirely new result in the domain of speech.
Beyond that, the demonstration of periodic attractors in speech reveals that dynamical equilibrium point models do not account for all possible modes of speech motor behavior.}, language = {en} } @phdthesis{He2019, author = {He, Hai}, title = {Exploring and engineering formaldehyde assimilation}, doi = {10.25932/publishup-47386}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473867}, school = {Universit{\"a}t Potsdam}, pages = {vi, 105}, year = {2019}, abstract = {Increasing concerns regarding the environmental impact of our chemical production have shifted attention towards possibilities for sustainable biotechnology. One-carbon (C1) compounds, including methane, methanol, formate and CO, are promising feedstocks for future bioindustry. CO2 is another interesting feedstock, as it can also be transformed into other C1 feedstocks using renewable energy. While formaldehyde is not suitable as a feedstock due to its high toxicity, it is a central intermediate in the process of C1 assimilation. This thesis explores formaldehyde metabolism and aims to engineer formaldehyde assimilation in the model organism Escherichia coli for the future C1-based bioindustry. The first chapter of the thesis aims to establish growth of E. coli on formaldehyde via the most efficient naturally occurring route, the ribulose monophosphate pathway. Linear variants of the pathway were constructed in multiple-gene knockout strains, coupling E. coli growth to the activities of the key enzymes of the pathway. Formaldehyde-dependent growth was achieved in rationally designed strains. In the final strain, the synthetic pathway provides the cell with almost all biomass and energy requirements. In the second chapter, taking advantage of formaldehyde's unique reactivity, its assimilation via condensation with glycine and pyruvate by two promiscuous aldolases was explored. Facilitated by these two reactions, the newly designed homoserine cycle is expected to support higher yields of a wide array of products than its counterparts. By dividing the pathway into segments and coupling them to the growth of dedicated strains, all pathway reactions were demonstrated to be sufficiently active. The work paves the way for the future implementation of a highly efficient route from C1 feedstocks to commodity chemicals. In the third chapter, the in vivo rate of the spontaneous formaldehyde-tetrahydrofolate condensation to methylene-tetrahydrofolate was assessed in order to evaluate its applicability as a biotechnological process. Tested within an E. coli strain deleted in essential genes for native methylene-tetrahydrofolate biosynthesis, the reaction was shown to support the production of this essential intermediate. However, only low growth rates were observed and only at high formaldehyde concentrations. Computational analysis based on in vivo evidence from this strain deduced the slow rate of this spontaneous reaction, thus ruling out its substantial contribution to growth on C1 feedstocks. The reactivity of formaldehyde makes it highly toxic. In the last chapter, the formation of thioproline, the condensation product of cysteine and formaldehyde, was confirmed to contribute to this toxicity. Xaa-Pro aminopeptidase (PepP), which is genetically linked with folate metabolism, was shown to hydrolyze thioproline-containing peptides. Deleting pepP increased strain sensitivity to formaldehyde, pointing towards the toxicity of thioproline-containing peptides and the importance of their removal.
The characterization in this study could be useful in handling this toxic intermediate. Overall, this thesis identified challenges related to formaldehyde metabolism and provided novel solutions towards a future bioindustry based on sustainable C1 feedstocks in which formaldehyde serves as a key intermediate.}, language = {en} } @phdthesis{Melani2019, author = {Melani, Giacomo}, title = {From structural fluctuations to vibrational spectroscopy of adsorbates on surfaces}, doi = {10.25932/publishup-44182}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441826}, school = {Universit{\"a}t Potsdam}, pages = {119}, year = {2019}, abstract = {Aluminum oxide is an Earth-abundant geological material, and its interaction with water is of crucial importance for geochemical and environmental processes. Some aluminum oxide surfaces are also known to be useful in heterogeneous catalysis, while the surface chemistry of aqueous oxide interfaces determines the corrosion, growth and dissolution of such materials. In this doctoral work, we looked mainly at the (0001) surface of α-Al2O3 and its reactivity towards water. In particular, a great focus of this work is dedicated to simulating and addressing the vibrational spectra of water adsorbed on the α-alumina(0001) surface in various conditions and at different coverages. In fact, the main source of comparison and inspiration for this work comes from the collaboration with the "Interfacial Molecular Spectroscopy" group led by Dr. R. Kramer Campen at the Fritz-Haber Institute of the MPG in Berlin. The expertise of our project partners in surface-sensitive Vibrational Sum Frequency (VSF) generation spectroscopy was crucial to develop and adapt specific simulation schemes used in this work. Methodologically, the main approach employed in this thesis is Ab Initio Molecular Dynamics (AIMD) based on periodic Density Functional Theory (DFT) using the PBE functional with D2 dispersion correction. The analysis of vibrational frequencies from both a static and a dynamic, finite-temperature perspective offers the ability to investigate the water/aluminum oxide interface in close connection to experiment. The first project presented in this work considers the characterization of dissociatively adsorbed deuterated water on the Al-terminated (0001) surface. This particular structure is known from both experiment and theory to be the thermodynamically most stable surface termination of α-alumina in Ultra-High Vacuum (UHV) conditions. Based on experiments performed by our colleagues at FHI, different adsorption sites and products have been proposed and identified for D2O. While previous theoretical investigations only looked at vibrational frequencies of dissociated OD groups by static Normal Modes Analysis (NMA), we rather employed a more sophisticated approach to directly assess vibrational spectra (like IR and VSF) at finite temperature from AIMD. In this work, we have employed a recent implementation which makes use of velocity-velocity autocorrelation functions to simulate such spectral responses of O-H(D) bonds. This approach allows for an efficient and qualitatively accurate estimation of Vibrational Densities of States (VDOS) as well as IR and VSF spectra, which are then tested against experimental spectra from our collaborators.
In order to extend previous work on unimolecularly dissociated water on α-Al2O3, we then considered a different system, namely, a fully hydroxylated (0001) surface, which results from the reconstruction of the UHV-stable Al-terminated surface at high water contents. This model is then further extended by considering a hydroxylated surface with additional water molecules, forming a two-dimensional layer which serves as a potential template to simulate an aqueous interface in environmental conditions. Again, employing finite-temperature AIMD trajectories at the PBE+D2 level, we investigated the behaviour of both the hydroxylated surface (HS) and the water-covered structure derived from it (known as HS+2ML). A full range of spectra, from VDOS to IR and VSF, is then calculated using the same methodology as described above. This is the main focus of the second project, reported in Chapter 5. In this case, the comparison between theoretical spectra and experimental data is very good. In particular, we underline that the high-frequency resonances observed above 3700 cm^-1 in VSF experiments are associated with surface OH-groups, known as "aluminols", which are a key fingerprint of the fully hydroxylated surface. In the third and last project, which is presented in Chapter 6, the extension of VSF spectroscopy experiments to the time-resolved regime offered us the opportunity to investigate vibrational energy relaxation at the α-alumina/water interface. Specifically, again using DFT-based AIMD simulations, we simulated vibrational lifetimes for surface aluminols as experimentally detected via pump-probe VSF. We considered the water-covered HS model as a potential candidate to address this problem. The vibrational (IR) excitation and subsequent relaxation are simulated by means of a non-equilibrium molecular dynamics scheme. In such a scheme, we specifically looked at the O-H stretching mode of surface aluminols. Afterwards, the analysis of non-equilibrium trajectories allows for an estimation of relaxation times on the order of 2-4 ps, which are in overall agreement with measured ones. The aim of this work has been to provide, within a consistent theoretical framework, a better understanding of vibrational spectroscopy and dynamics for water on the α-alumina(0001) surface, ranging from very low water coverage (similar to the UHV case) up to medium-high coverages, resembling the hydroxylated oxide under moist environmental conditions.}, language = {en} } @phdthesis{Wozny2019, author = {Wozny, Florian}, title = {Three empirical essays in health economics}, doi = {10.25932/publishup-46991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469910}, school = {Universit{\"a}t Potsdam}, pages = {200}, year = {2019}, abstract = {Modern health care systems are characterized by pronounced prevention and cost-optimized treatments. This dissertation offers novel empirical evidence on how useful such measures can be. The first chapter analyzes how radiation, a main pollutant in health care, can negatively affect cognitive health. The second chapter focuses on the effect of Low Emission Zones on public health, as air quality is the major external source of health problems. Both chapters point out potential for preventive measures. Finally, chapter three studies how changes in treatment prices affect the reallocation of hospital resources. In the following, I briefly summarize each chapter and discuss implications for health care systems as well as other policy areas.
Based on the National Educational Panel Study that is linked to data on radiation, chapter one shows that radiation can have negative long-term effects on cognitive skills, even at subclinical doses. Exploiting arguably exogenous variation in soil contamination in Germany due to the Chernobyl disaster in 1986, the findings show that people exposed to higher radiation perform significantly worse in cognitive tests 25 years later. Identification is ensured by abnormal rainfall within a critical period of ten days. The results show that the effect is stronger among older cohorts than among younger cohorts, which is consistent with radiation accelerating cognitive decline as people get older. On average, a one-standard-deviation increase in the initial level of Cs-137 (around 30 chest x-rays) is associated with a decrease in cognitive skills by 4.1 percent of a standard deviation (around 0.05 school years). Chapter one shows that sub-clinical levels of radiation can have negative consequences even after early childhood. This is of particular importance because most of the literature focuses on exposure very early in life, often during pregnancy. However, the population exposed after birth is over 100 times larger. These results point to substantial external human capital costs of radiation which can be reduced by the choice of medical procedures. There is a large potential for reductions because about one-third of all CT scans are assumed not to be medically justified (Brenner and Hall, 2007). If people receive unnecessary CT scans because of economic incentives, this chapter points to additional external costs of health care policies. Furthermore, the results can inform the cost-benefit trade-off for medically indicated procedures. Chapter two provides evidence about the effectiveness of Low Emission Zones. Low Emission Zones are typically justified by improvements in population health. However, there is little evidence about the potential health benefits from policy interventions aiming at improving air quality in inner cities. The chapter asks how the coverage of Low Emission Zones affects air pollution and hospitalization by exploiting variation in the roll-out of Low Emission Zones in Germany. It combines information on the geographic coverage of Low Emission Zones with rich panel data on the universe of German hospitals over the period from 2006 to 2016 with precise information on hospital locations and the annual frequency of detailed diagnoses. In order to establish that our estimates of Low Emission Zones' health impacts can indeed be attributed to improvements in local air quality, we use data from Germany's official air pollution monitoring system, assign monitor locations to Low Emission Zones, and test whether measures of air pollution are affected by the coverage of a Low Emission Zone. Results in chapter two confirm earlier findings showing that the introduction of Low Emission Zones improved air quality significantly by reducing NO2 and PM10 concentrations. Furthermore, the chapter shows that hospitals whose catchment areas are covered by a Low Emission Zone diagnose significantly fewer air-pollution-related diseases, in particular through a reduced incidence of chronic diseases of the circulatory and the respiratory system. The effect is stronger before 2012, which is consistent with a general improvement in the vehicle fleet's emission standards.
Depending on the disease, a one-standard-deviation increase in the share of a hospital's catchment area covered by a Low Emission Zone reduces the yearly number of diagnoses by up to 5 percent. These findings have strong implications for policy makers. In 2015, overall costs for health care in Germany were around 340 billion euros, of which 46 billion euros were for diseases of the circulatory system, making it, with 2.9 million cases, the most expensive type of disease (Statistisches Bundesamt, 2017b). Hence, reductions in the incidence of diseases of the circulatory system may directly reduce society's health care costs. Whereas chapters one and two study the demand side in health care markets and thus preventive potential, chapter three analyzes the supply side. By exploiting the same hospital panel data set as in chapter two, chapter three studies the effect of treatment price shocks on the reallocation of hospital resources in Germany. Starting in 2005, the implementation of the German-DRG-System led to general idiosyncratic treatment price shocks for individual hospitals. Thus far there is little evidence of the impact of general price shocks on the reallocation of hospital resources. Additionally, I add to the existing literature by showing that price shocks can have persistent effects on hospital resources even when these shocks vanish. However, simple OLS regressions would underestimate the true effect, due to endogenous treatment price shocks. I implement a novel instrumental variable strategy that exploits the exogenous variation in the number of days of snow in hospital catchment areas. A peculiarity of the reform allowed variation in days of snow to have a persistent impact on treatment prices. I find that treatment price increases lead to increases in input factors such as nursing staff, physicians and the range of treatments offered but to decreases in the treatment volume. This indicates supplier-induced demand. Furthermore, the probability of hospital mergers and privatization decreases. Structural differences in pre-treatment characteristics between hospitals enhance these effects. For instance, private and larger hospitals are more affected. IV estimates reveal that OLS results are biased towards zero in almost all dimensions because structural hospital differences are correlated with the reallocation of hospital resources. These results are important for several reasons. The G-DRG-Reform led to a persistent polarization of hospital resources, as some hospitals were exposed to treatment price increases, while others experienced reductions. If hospitals increase the treatment volume as a response to price reductions by offering unnecessary therapies, it has a negative impact on population wellbeing and public spending. However, results show a decrease in the range of treatments if prices decrease. Hospitals might specialize more, thus attracting more patients. From a policy perspective, it is important to evaluate if such changes in the range of treatments jeopardize an adequate nationwide provision of treatments. Furthermore, the results show a decrease in the number of nurses and physicians if prices decrease. This could partly explain the nursing crisis in German hospitals. However, since hospitals specialize more they might be able to realize efficiency gains which justify reductions in input factors without losses in quality. Further research is necessary to provide evidence for the impact of the G-DRG-Reform on health care quality.
Another important aspect concerns changes in the organizational structure. Many public hospitals have been privatized or merged. The findings show that this is at least partly driven by the G-DRG-Reform. This can again lead to a lack of services offered in some regions if merged hospitals specialize more or if hospitals are taken over by ecclesiastical organizations which do not provide all treatments due to moral conviction. Overall, this dissertation reveals large potential for preventive health care measures and helps to explain reallocation processes in the hospital sector if treatment prices change. Furthermore, its findings have potentially relevant implications for other areas of public policy. Chapter one identifies an effect of low-dose radiation on cognitive health. As mankind is searching for new energy sources, nuclear power is becoming popular again. However, results of chapter one point to substantial costs of nuclear energy which have not been accounted for yet. Chapter two finds strong evidence that air quality improvements by Low Emission Zones translate into health improvements, even at relatively low levels of air pollution. These findings may, for instance, be of relevance for designing further policies targeted at air pollution, such as diesel bans. As pointed out in chapter three, the implementation of DRG-Systems may have unintended side-effects on the reallocation of hospital resources. This may also apply to other providers in the health care sector such as resident doctors.}, language = {en} } @phdthesis{Bartel2019, author = {Bartel, Melanie}, title = {Kernresonanz-Strukturuntersuchungen an alternativen Precursoren und deren Zwischenprodukten f{\"u}r die Herstellung von Carbonfasern f{\"u}r den Massenmarkt}, doi = {10.25932/publishup-46930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469301}, school = {Universit{\"a}t Potsdam}, pages = {XII, 101, XXV}, year = {2019}, abstract = {Carbonfasern haben sich in der Luft- und Raumfahrt etabliert und gewinnen in Alltagsanwendungen wie dem Automobilbereich, Windkraft- und Sportbereich durch ihre hohen Zugfestigkeiten, insbesondere ihre hohen E-Moduli, und ihre geringe Dichte immer mehr an Bedeutung. Auf Grund ihrer hohen Kosten, welche sich zur H{\"a}lfte aus der Precursorherstellung, inklusive seiner Synthese und seinem Verspinnprozess, dem L{\"o}sungsspinnverfahren, ergeben, erhalten zunehmend alternative und schmelzspinnbare Precursoren Interesse. F{\"u}r die Carbonfaserherstellung wird fast ausschließlich Polyacrylnitril (PAN) verwendet, das vor dem Schmelzen irreversible exotherme Zyklisierungsreaktionen aufweist, welchen sich seine Zersetzung anschließt. Eine M{\"o}glichkeit der Reduzierung der Schmelztemperatur von Polymeren ist die Einbringung von Comonomeren zur Erh{\"o}hung des freien Volumens und die Reduzierung der intermolekularen Wechselwirkungen als interne Weichmacher. Wie am Fraunhofer IAP gezeigt wurde, kann mittels 2-Methoxyethylacrylat (MEA) die Schmelztemperatur zu neuartigen PAN-basierten Precursoren verringert werden. Um den PAN-co-MEA-Precursor f{\"u}r die nachfolgenden Prozessschritte der Carbonfaserherstellung zu verwenden, m{\"u}ssen die thermoplastischen Fasern in thermisch stabile Fasern ohne thermoplastisches Verhalten {\"u}berf{\"u}hrt werden. Es wurde ein neuer Prozessschritt (Pr{\"a}stabilisierung) eingef{\"u}hrt, welcher unter alkalischen Bedingungen zur Abspaltung der Comonomerseitenkette f{\"u}hrt.
Neben der Esterhydrolyse finden Reaktionen statt, welche an diesem Material noch nicht hinreichend untersucht wurden. Weiterhin stellt sich die Frage nach der Kinetik der Pr{\"a}stabilisierung und der Ermittlung einer geeigneten Prozessf{\"u}hrung. Hierzu wurde die Pr{\"a}stabilisierung in den Labormaßstab {\"u}berf{\"u}hrt und die m{\"o}glichen Zusammensetzungen des aus DMSO und einer KOH-L{\"o}sung bestehenden Reaktionsmediums evaluiert. Weiterhin wurde die Behandlung bei verschiedenen Pr{\"a}stabilisierungszeiten von maximal 30 min und Temperaturen von 40, 50 und 60 °C durchgef{\"u}hrt, um prim{\"a}r mittels NMR-Spektroskopie die chemischen Struktur{\"a}nderungen aufzukl{\"a}ren. Die Esterhydrolyse des Comonomers, welche zur Abspaltung des 2-Methoxyethanols f{\"u}hrt, wurde mittels 1H-NMR-spektroskopischer Untersuchungen detektiert. Es wurde ein Modell aufgestellt, das die chemisch-physikalischen Struktur{\"a}nderungen w{\"a}hrend der Pr{\"a}stabilisierung aufzeigt. Die zuerst ablaufende Reaktion ist die Esterhydrolyse am Comonomer, welche vom Faserrand nach innen verl{\"a}uft und durch die Pr{\"a}senz des DMSO in Kombination mit der KOH-L{\"o}sung (Superbase) initiiert wird. Der zeitliche Reaktionsverlauf der Esterhydrolyse kann in drei Bereiche eingeteilt werden. Der erste Bereich ab dem Pr{\"a}stabilisierungsbeginn wird durch die Diffusion der basischen Anionen in die Faser, der zweite Bereich durch die Reaktion an der Estergruppe des Comonomers und der dritte Bereich durch letzte Reaktionen im Faserinneren und diffusive Prozesse der Produkte und Edukte charakterisiert. Der zweite Bereich kann mit einer Reaktion pseudo 1. Ordnung abgebildet werden, da in diesem Bereich bereits eine ausreichende Diffusion der Edukte in die Faser stattgefunden hat. Bei 50 °C spielt die Diffusion im ersten Bereich im Vergleich zur Reaktion eine untergeordnete Rolle. Mit Erh{\"o}hung der Temperatur auf 60 °C kann eine im Verh{\"a}ltnis geringere Diffusions- als Reaktionsgeschwindigkeit beobachtet werden. Die Nebenreaktionen wurden mittels 13C-CP/MAS-NMR-spektroskopischen, elementaranalytischen Untersuchungen sowie Doppelbrechungsmessungen charakterisiert. W{\"a}hrend der alkalischen Esterhydrolyse beginnt die Reduzierung der Nitrilgruppen unter der Bildung von prim{\"a}ren Carbons{\"a}ureamiden und Carbons{\"a}uren. Zur Beschreibung dieser Umsetzung wurde eine Methode entwickelt, welche die Addition von 13C-CP/MAS-NMR-Spektren der Modellsubstanzen PAN, PAM und PAA beinhaltet. Weitere stattfindende Reaktionen sind die Bildung von konjugierten Doppelbindungen, welche insbesondere auf eine Zyklisierung der Nitrile hinweisen. Die nasschemisch initiierte Zyklisierung der Nitrilgruppen kann zu k{\"u}rzeren Stabilisierungszeiten und einem besser kontrollierbaren Stabilisierungsprozess durch geringere W{\"a}rmefreisetzung und schlussendlich zu einer Kostenersparnis des gesamten Verfahrens f{\"u}hren. Die Umsetzung der Nitrilgruppen konnte mit einer Reaktion pseudo 1. Ordnung gut abgebildet werden. DMSO initiiert die Esterhydrolyse, wobei die KOH-Konzentration einen h{\"o}heren Einfluss auf die Reaktionsgeschwindigkeit der Ester- und Nitrilhydrolyse als die DMSO-Konzentration besitzt. Beide Reaktionen zeigen eine vergleichbare Abh{\"a}ngigkeit von der Temperatur. Die Erh{\"o}hung der Pr{\"a}stabilisierungszeit und der KOH- bzw.
DMSO-Konzentration f{\"u}hrt zur Migration niedermolekularer Bestandteile des Fasermaterials an die Oberfl{\"a}che und der Bildung punktueller Ablagerungen bis hin zu miteinander verbundenen Einzelfasern. Eine weitere Erh{\"o}hung der Pr{\"a}stabilisierungszeit bzw. der Konzentration f{\"u}hrt zu einem steigenden Carbons{\"a}ureanteil und zur Quellung des Fasermaterials, wodurch die Ablagerungen in das Reaktionsmedium diffundieren. Die Ablagerungen enthalten Chlor, welches durch den Waschvorgang mit HCl in das Materialsystem gelangt ist und durch Parameteranpassungen reduziert wurde. Die schmelzbaren Fasern konnten durch die Pr{\"a}stabilisierung erfolgreich {\"u}ber eine Kern-Mantel-Struktur in nicht-thermoplastische Fasern {\"u}berf{\"u}hrt werden. Zur Ermittlung eines geeigneten Prozessfensters f{\"u}r nachfolgende thermische Beanspruchungen der pr{\"a}stabilisierten Fasern wurden drei Kriterien identifiziert, anhand welcher die Evaluation erfolgte. Das erste Kriterium beinhaltet die Notwendigkeit der vollst{\"a}ndigen Aufhebung der thermoplastischen Eigenschaft der Fasern. Als zweites Kriterium diente die Fasermorphologie. Anhand von REM-Aufnahmen wurden Faserb{\"u}ndel mit separierten Einzelfasern ohne Ablagerungen f{\"u}r die nachfolgende Stabilisierung ausgew{\"a}hlt. Das dritte Kriterium bezieht sich auf eine m{\"o}glichst geringe Umsetzung der Nitrilgruppen, um Pr{\"a}stabilisierungsbedingungen mit Nebenreaktionen zu vermeiden. Aus den Untersuchungen konnte eine Pr{\"a}stabilisierungstemperatur von 60 °C als geeignet identifiziert werden. Weiterhin f{\"u}hren hoch alkalische Zusammensetzungen des Reaktionsmediums mit KOH-Konzentrationen von 1, 1,5 und 2 M, vorzugsweise 1,5 M und 50 vol\% DMSO mit Reaktionszeiten von unter 10 min zu geeigneten Fasern. Ein MEA-Anteil unterhalb von 2 mol\% bewirkt eine {\"U}berf{\"u}hrung in die Unschmelzbarkeit. Thermisch stabile und f{\"u}r die nachfolgende Stabilisierung geeignete Fasern besitzen weiterhin 68 - 80 mol\% Nitrilgruppen, 20 - 25 mol\% Carbons{\"a}uren, bis zu 15 mol\% prim{\"a}re Carbons{\"a}ureamide und zyklisierte Strukturen.}, language = {de} } @phdthesis{Thiede2019, author = {Thiede, Tobias}, title = {A multiscale analysis of additively manufactured lattice structures}, doi = {10.25932/publishup-47041}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470418}, school = {Universit{\"a}t Potsdam}, pages = {xi, 97, LIII}, year = {2019}, abstract = {Additive Manufacturing (AM) in terms of laser powder-bed fusion (L-PBF) offers new prospects regarding the design of parts and enables therefore the production of lattice structures. These lattice structures shall be implemented in various industrial applications (e.g. gas turbines) for reasons of material savings or cooling channels. However, internal defects, residual stress, and structural deviations from the nominal geometry are unavoidable. In this work, the structural integrity of lattice structures manufactured by means of L-PBF was non-destructively investigated on a multiscale approach. A workflow for quantitative 3D powder analysis in terms of particle size, particle shape, particle porosity, inter-particle distance and packing density was established. Synchrotron computed tomography (CT) was used to correlate the packing density with the particle size and particle shape. It was also observed that at least about 50\% of the powder porosity was released during production of the struts. 
Struts are the components of lattice structures and were investigated by means of laboratory CT. The focus was on the influence of the build angle on part porosity and surface quality. The surface topography analysis was advanced by the quantitative characterisation of re-entrant surface features. This characterisation was compared with conventional surface parameters, showing their complementary information but also the need for AM-specific surface parameters. The mechanical behaviour of the lattice structure was investigated with in-situ CT under compression and subsequent digital volume correlation (DVC). The deformation was found to be knot-dominated, and therefore the lattice folds unit-cell-layer-wise. The residual stress was determined experimentally for the first time in such lattice structures. Neutron diffraction was used for the non-destructive 3D stress investigation. The principal stress directions and values were determined as a function of the number of measured directions. While a significant uni-axial stress state was found in the strut, a more hydrostatic stress state was found in the knot. In both cases, strut and knot, at least seven directions were needed to find reliable principal stress directions.}, language = {en} } @phdthesis{Farhy2019, author = {Farhy, Yael}, title = {Universals and particulars in morphology}, doi = {10.25932/publishup-47003}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470033}, school = {Universit{\"a}t Potsdam}, pages = {VI, 167}, year = {2019}, abstract = {For many years, psycholinguistic evidence has been predominantly based on findings from native speakers of Indo-European languages, primarily English, thus providing a rather limited perspective on the human language system. In recent years, a growing body of experimental research has been devoted to broadening this picture, testing a wide range of speakers and languages, aiming to understand the factors that lead to variability in linguistic performance. The present dissertation investigates sources of variability within the morphological domain, examining how and to what extent morphological processes and representations are shaped by specific properties of languages and speakers. Firstly, the present work focuses on a less explored language, Hebrew, to investigate how the unique non-concatenative morphological structure of Hebrew, namely a non-linear combination of consonantal roots and vowel patterns to form lexical entries (L-M-D + CiCeC = limed 'teach'), affects morphological processes and representations in the Hebrew lexicon. Secondly, a less investigated population was tested: late learners of a second language. We directly compare native (L1) and non-native (L2) speakers, specifically highly proficient and immersed late learners of Hebrew. Throughout all publications, we have focused on the morphological phenomenon of inflectional classes (called binyanim; singular: binyan), comparing productive (class Piel, e.g., limed 'teach') and unproductive (class Paal, e.g., lamad 'learn') verbal inflectional classes. By using this test case, two psycholinguistic aspects of morphology were examined: (i) how morphological structure affects online recognition of complex words, using masked priming (Publications I and II) and cross-modal priming (Publication III) techniques, and (ii) what type of cues are used when extending morpho-phonological patterns to novel complex forms, a process referred to as morphological generalization, using an elicited production task (Publication IV).
The findings obtained in the four manuscripts, either published or under review, provide significant insights into the role of productivity in Hebrew morphological processing and generalization in L1 and L2 speakers. Firstly, the present L1 data revealed a close relationship between productivity of Hebrew verbal classes and recognition process, as revealed in both priming techniques. The consonantal root was accessed only in the productive class (Piel) but not the unproductive class (Paal). Another dissociation between the two classes was revealed in the cross-modal priming, yielding a semantic relatedness effect only for Paal but not Piel primes. These findings are taken to reflect that the Hebrew mental representations display a balance between stored undecomposable unstructured stems (Paal) and decomposed structured stems (Piel), in a similar manner to a typical dual-route architecture, showing that the Hebrew mental lexicon is less unique than previously claimed in psycholinguistic research. The results of the generalization study, however, indicate that there are still substantial differences between inflectional classes of Hebrew and other Indo-European classes, particularly in the type of information they rely on in generalization to novel forms. Hebrew binyan generalization relies more on cues of argument structure and less on phonological cues. Secondly, clear L1/L2 differences were observed in the sensitivity to abstract morphological and morpho-syntactic information during complex word recognition and generalization. While L1 Hebrew speakers were sensitive to the binyan information during recognition, expressed by the contrast in root priming, L2 speakers showed similar root priming effects for both classes, but only when the primes were presented in an infinitive form. A root priming effect was not obtained for primes in a finite form. These patterns are interpreted as evidence for a reduced sensitivity of L2 speakers to morphological information, such as information about inflectional classes, and evidence for processing costs in recognition of forms carrying complex morpho-syntactic information. Reduced reliance on structural information cues was found in production of novel verbal forms, when the L2 group displayed a weaker effect of argument structure for Piel responses, in comparison to the L1 group. Given the L2 results, we suggest that morphological and morphosyntactic information remains challenging for late bilinguals, even at high proficiency levels.}, language = {en} } @phdthesis{StutterGarcia2019, author = {Stutter Garcia, Ana}, title = {The use of grammatical knowledge in an additional language}, doi = {10.25932/publishup-46932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469326}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 340}, year = {2019}, abstract = {This thesis investigates whether multilingual speakers' use of grammatical constraints in an additional language (La) is affected by the native (L1) and non-native grammars (L2) of their linguistic repertoire. Previous studies have used untimed measures of grammatical performance to show that L1 and L2 grammars affect the initial stages of La acquisition. This thesis extends this work by examining whether speakers at intermediate levels of La proficiency, who demonstrate mature untimed/offline knowledge of the target La constraints, are differentially affected by their L1 and L2 knowledge when they comprehend sentences under processing pressure. 
To this end, several groups of La German speakers were tested on word order and agreement phenomena using online/timed measures of grammatical knowledge. Participants had mirror-image distributions of their prior languages: they were either L1English/L2Spanish speakers or L1Spanish/L2English speakers. Crucially, in half of the phenomena the target La constraint aligned with English but not with Spanish, while in the other half it aligned with Spanish but not with English. Results show that the L1 grammar plays a major role in the use of La constraints under processing pressure, as participants displayed increased sensitivity to La constraints when they aligned with their L1, and reduced sensitivity when they did not. Further, in specific phenomena in which the L2 and La constraints aligned, increased L2 proficiency resulted in an enhanced sensitivity to the La constraint. These findings suggest that both native and non-native grammars affect how speakers use La grammatical constraints under processing pressure. However, L1 and L2 grammars differentially influence participants' performance: while L1 constraints seem to be reliably recruited to cope with the processing demands of real-time La use, proficiency in an L2 can enhance sensitivity to La constraints only in specific circumstances, namely when L2 and La constraints align.}, language = {en} } @phdthesis{Guislain2019, author = {Guislain, Alexis}, title = {Eco-physiological consequences of fluctuating light on phytoplankton}, doi = {10.25932/publishup-46927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469272}, school = {Universit{\"a}t Potsdam}, pages = {161}, year = {2019}, abstract = {Phytoplankton growth depends not only on the mean intensity but also on the dynamics of the light supply. The nonlinear light-dependency of growth is characterized by a small number of basic parameters: the compensation light intensity PARcompμ, where production and losses are balanced, the growth efficiency at sub-saturating light αµ, and the maximum growth rate at saturating light µmax. In surface mixed layers, phytoplankton may rapidly move between high light intensities and almost darkness. Because of the different frequency distribution of light and/or acclimation processes, the light-dependency of growth may differ between constant and fluctuating light. Very few studies measured growth under fluctuating light at a sufficient number of mean light intensities to estimate the parameters of the growth-irradiance relationship. Hence, the influence of light dynamics on µmax, αµ and PARcompμ is still largely unknown. By extension, accurate modelling predictions of phytoplankton development under fluctuating light exposure remain difficult to make. This PhD thesis does not intend to directly extrapolate a few experimental results to aquatic systems, but rather to improve the mechanistic understanding of the variation of the light-dependency of growth under light fluctuations and its effects on phytoplankton development. In Lake TaiHu and at the Three Gorges Reservoir (China), we incubated phytoplankton communities in bottles placed either at fixed depths or moved vertically through the water column to mimic vertical mixing. Phytoplankton at fixed depths received only the diurnal changes in light (defined as the constant light regime), while the vertically moved phytoplankton received rapidly fluctuating light through the superposition of the vertical light gradient on the natural sinusoidal diurnal sunlight.
The vertically moved samples followed a circular movement with 20 min per revolution, replicating to some extent the full overturn of typical Langmuir cells. Growth, photosynthesis, oxygen production and respiration of the communities (at Lake TaiHu) were measured. To complete these investigations, a physiological experiment was performed in the laboratory on a toxic strain of Microcystis aeruginosa (FACBH 1322) incubated under fluctuating light with a 20 min period. Here, we measured electron transport rates and net oxygen production at a much higher time resolution (single-minute timescale). The present PhD thesis provides evidence for substantial effects of fluctuating light on the eco-physiology of phytoplankton. Both experiments performed under semi-natural conditions in Lake TaiHu and at the Three Gorges Reservoir gave similar results. The significant decline in community growth efficiencies αµ under fluctuating light was caused to a large extent by the different frequency distribution of light intensities, which shortened the effective day length for production. The remaining gap in community αµ was attributed to species-specific photoacclimation mechanisms and to light-dependent respiratory losses. In contrast, community maximal growth rates µmax were similar between incubations at constant and fluctuating light. At a growth-saturating daily light supply, differences in losses for biosynthesis between the two light regimes were observed. Phytoplankton experiencing constant light suffered photo-inhibition, leading to foregone photosynthesis and additional respiratory costs for photosystem repair. Conversely, intermittent exposure to low and high light intensities prevented photo-inhibition of the mixed algae but forced them to develop an alternative light strategy: they harvested and exploited surface irradiance better by enhancing their photosynthesis. In the laboratory, we showed that Microcystis aeruginosa increased its oxygen consumption by dark respiration in the light only a few minutes after exposure to increasing light intensities. Moreover, we showed that within a simulated Langmuir cell, the net production at saturating light and the compensation light intensity for production at limiting light are positively related. These results are best explained by an accumulation of photosynthetic products at increasing irradiance and the mobilization of these fresh resources through a rapid enhancement of dark respiration for maintenance and biosynthesis at decreasing irradiance. At the daily timescale, we showed that species' enhancement of photosynthesis at high irradiance for biosynthesis increased their maintenance respiratory costs at limiting light. The species-specific growth rate at saturating light µmax and the compensation light intensity for growth PARcompμ of species incubated in Lake TaiHu were positively related. Because of this species-specific physiological tradeoff, species displayed different affinities to limiting and saturating light, thereby exhibiting a gleaner-opportunist tradeoff. In Lake TaiHu, we showed that inter-specific differences in light acquisition traits (µmax and PARcompμ) allowed coexistence of species on a gradient of constant light while avoiding competitive exclusion. More interestingly, we demonstrated for the first time that vertical mixing (inducing a fluctuating light supply for phytoplankton) may alter or even reverse the light utilization strategies of species within a couple of days. 
The intra-specific variation in traits under fluctuating light increased the niche space for acclimated species, precluding competitive exclusion. Overall, this PhD thesis contributes to a better understanding of phytoplankton eco-physiology under fluctuating light supply. This work could enhance the quality of predictions of phytoplankton development under certain weather conditions or climate change scenarios.}, language = {en} } @phdthesis{Messi2019, author = {Messi, Hugues Urbain Patrick}, title = {Les sources du savoir - l'expression de l'inf{\´e}rence en Fran{\c{c}}ais}, doi = {10.25932/publishup-46961}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469612}, school = {Universit{\"a}t Potsdam}, pages = {iii, 291}, year = {2019}, abstract = {1. Unter Mediativit{\"a}t verstehen wir in dieser Dissertation die sprachliche Markierung der Informationsquelle. Ein Sprecher, der einen Sachverhalt vermittelt, hat die M{\"o}glichkeit, durch sprachliche Mittel ausdr{\"u}cklich zu markieren, wie er die {\"u}bermittelte Information bekommen hat. Um diese Informationsquelle sprachlich zu deuten, werden im Franz{\"o}sischen unter anderem einige Verben als mediative Marker (MM) verwendet. 2. Die untersuchten Elemente croire, imaginer, paraitre, penser, savoir, sembler, supposer, trouver sind „mediative Verben". Jedes der untersuchten Verben weist besondere semantische und pragmatische Eigenschaften auf, die immer mit dem Ausdruck der Wissensquelle verbunden sind. Es handelt sich also um kognitive Verben (KV), die eine sprachliche Markierung der Informationsquelle vornehmen. Nach ihrem Verhalten in solchen Kontexten erf{\"u}llen sie die Funktion der „mediativen Markierung". 3. Die epistemische Modalit{\"a}t ist der Mediativit{\"a}t untergeordnet. Die Erscheinungsform der Modalit{\"a}t (Modalit{\"a}tstyp) bestimmt die St{\"a}rke der epistemischen Modalit{\"a}t. Keines der analysierten Verben dr{\"u}ckt lediglich eine epistemische Leseart aus. Die Dichotomie zwischen der mediativen und epistemischen Modalit{\"a}t besteht darin, dass die erste die Wissensquelle ausdr{\"u}ckt und die zweite ausschließlich die Einstellung des Sprechers gegen{\"u}ber dem Wahrheitsgrad der {\"A}ußerung widerspiegelt. 4. F{\"u}r alle Konstruktionen der Form [V/{\o}P] oder [V, P] ist P die Matrix des Satzes. Unsere Ergebnisse zeigen, dass - obwohl diese Konstituenten verschiedene Stellen besetzen k{\"o}nnen - sie dennoch ihre Funktionen als Matrix behalten, indem sie die Propositionen, auf die sie sich beziehen, unter ihrer Rektion behalten. 5. Die Konstruktionen [V/{\o}P] und [V, P] stehen in freier Variation. Da sich der Wechsel in einem vergleichbaren Kontext vollzieht, und da es in gleicher Umgebung eine freie Substitution gibt, handelt es sich bei den beiden Vorkommen [V/{\o}P] und [V, P] um syntaktische Varianten. 6. Der Konditional-Gebrauch dient haupts{\"a}chlich dazu, die Inferenztypen zu unterscheiden und gleichzeitig die zugrundeliegende Polyphonie zu verdeutlichen. Der Gebrauch des Konditionals dr{\"u}ckt aus, dass es sich nicht um eine zuverl{\"a}ssig zutreffende {\"A}ußerung handelt. Der Ausdruck von Zweifeln kann im Franz{\"o}sischen unter Verwendung spezifischer grammatischer Mittel erfolgen. Zu diesen geh{\"o}rt der Konditional zum Ausdruck der Mitigation (des Zweifels, der Reserviertheit usw.) 
und der Polyphonie.}, language = {fr} } @phdthesis{Crisologo2019, author = {Crisologo, Irene}, title = {Using spaceborne radar platforms to enhance the homogeneity of weather radar calibration}, doi = {10.25932/publishup-44570}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445704}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 61}, year = {2019}, abstract = {Accurate weather observations are the keystone of many quantitative applications, such as precipitation monitoring and nowcasting, hydrological modelling and forecasting, and climate studies, as well as the understanding of precipitation-driven natural hazards (e.g. floods, landslides, debris flows). Weather radars have been an increasingly popular tool since the 1940s for providing precipitation data at high spatial and temporal resolution at the mesoscale, bridging the gap between synoptic and point-scale observations. Yet, many institutions still struggle to tap the potential of their large archives of reflectivity, as there is still much to understand about the factors that contribute to measurement errors, one of which is calibration. Calibration represents a substantial source of uncertainty in quantitative precipitation estimation (QPE). A miscalibration of a few dBZ can easily degrade the accuracy of precipitation estimates by an order of magnitude. Instances where rain cells carrying torrential rains are misidentified by the radar as moderate rain could mean the difference between a timely warning and a devastating flood. Since 2012, the Philippine Atmospheric, Geophysical, and Astronomical Services Administration (PAGASA) has been expanding the country's ground radar network. We had a first look into the dataset from one of the longest-running radars (the Subic radar) after devastating week-long torrential rains and thunderstorms in August 2012, caused by the annual southwest monsoon and enhanced by the north-passing Typhoon Haikui. The analysis of the rainfall spatial distribution revealed the added value of radar-based QPE in comparison to interpolated rain gauge observations. However, when compared with local gauge measurements, severe miscalibration of the Subic radar was found. As a consequence, the radar-based QPE would have underestimated the rainfall amount by up to 60\% if it had not been adjusted by rain gauge observations—a technique that is not only affected by other uncertainties, but which is also not feasible in other regions of the country with very sparse rain gauge coverage. Relative calibration techniques, or the assessment of bias from the reflectivity of two radars, have been steadily gaining popularity. Previous studies have demonstrated that reflectivity observations from the Tropical Rainfall Measuring Mission (TRMM) and its successor, the Global Precipitation Measurement (GPM), are accurate enough to serve as a calibration reference for ground radars over low to mid latitudes (± 35 deg for TRMM; ± 65 deg for GPM). Comparing spaceborne radars (SR) and ground radars (GR) requires cautious consideration of differences in measurement geometry and instrument specifications, as well as of temporal coincidence. For this purpose, we implement a 3-D volume matching method developed by Schwaller and Morris (2011) and extended by Warren et al. (2018), and apply it to 5 years' worth of observations from the Subic radar. In this method, only the volumetric intersections of the SR and GR beams are considered. Calibration bias affects reflectivity observations homogeneously across the entire radar domain. 
Yet, other sources of systematic measurement errors are highly heterogeneous in space and can either enhance or balance the bias introduced by miscalibration. In order to account for such heterogeneous errors, and thus isolate the calibration bias, we assign a quality index to each matching SR-GR volume and compute the GR calibration bias as a quality-weighted average of the reflectivity differences in any sample of matching SR-GR volumes. We exemplify the idea of quality-weighted averaging by using the beam blockage fraction (BBF) as a quality variable. Quality-weighted averaging is able to increase the consistency of SR and GR observations by decreasing the standard deviation of the SR-GR differences, and thus increasing the precision of the bias estimates. To extend this framework further, the SR-GR quality-weighted bias estimation is applied to the neighboring Tagaytay radar, but this time focusing on path-integrated attenuation (PIA) as the source of uncertainty. Tagaytay is a C-band radar operating at a shorter wavelength and is therefore more affected by attenuation. Applying the same method used for the Subic radar, a time series of calibration bias is also established for the Tagaytay radar. The Tagaytay radar sits at a higher altitude than the Subic radar and is surrounded by gentler terrain, so beam blockage is negligible, especially in the overlapping region. Conversely, the Subic radar is largely affected by beam blockage in the overlapping region, but since it is an S-band radar, attenuation is considered negligible. These coincidentally independent uncertainty contributions of each radar in the region of overlap provide an ideal environment to experiment with different scenarios of quality filtering when comparing reflectivities from the two ground radars. The standard deviation of the GR-GR differences already decreases if we consider either BBF or PIA to compute the quality index and thus the weights. However, combining them multiplicatively resulted in the largest decrease in standard deviation, suggesting that taking both factors into account increases the consistency between the matched samples. The overlap between the two radars and the instances of the SR passing over both radars at the same time allow for verification of the SR-GR quality-weighted bias estimation method. In this regard, the consistency between the two ground radars is analyzed before and after bias correction is applied. For cases in which all three radars are coincident during a significant rainfall event, the correction of GR reflectivities with calibration bias estimates from SR overpasses dramatically improves the consistency between the two ground radars, which had shown incoherent observations before correction. We also show that for cases where adequate SR coverage is unavailable, interpolating the calibration biases using a moving average can be used to correct the GR observations for any point in time to some extent. By using the interpolated biases to correct GR observations, we demonstrate that bias correction reduces the absolute value of the mean difference in most cases and therefore improves the consistency between the two ground radars. This thesis demonstrates that, in general, taking into account systematic sources of uncertainty that are heterogeneous in space (e.g. BBF) and time (e.g. PIA) allows for a more consistent estimation of the calibration bias, a homogeneous quantity. 
The bias still exhibits an unexpected variability in time, which hints that there are still other sources of error that remain unexplored. Nevertheless, the increase in consistency between SR and GR, as well as between the two ground radars, suggests that considering BBF and PIA in a weighted-averaging approach is a step in the right direction. Despite the ample room for improvement, the approach that combines volume matching between radars (either SR-GR or GR-GR) and quality-weighted comparison is readily available for application or further scrutiny. As a step towards reproducibility and transparency in atmospheric science, the 3D matching procedure and the analysis workflows, as well as sample data, are made available in public repositories. Open-source software such as Python and wradlib is used for all radar data processing in this thesis. This approach towards open science provides both research institutions and weather services with a valuable tool that can be applied to radar calibration, from monitoring to a posteriori correction of archived data.}, language = {en} } @phdthesis{Vranic2019, author = {Vranic, Marija}, title = {3D Structure of the biomarker hepcidin-25 in its native state}, doi = {10.25932/publishup-45929}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459295}, school = {Universit{\"a}t Potsdam}, pages = {xii, 135}, year = {2019}, abstract = {Hepcidin-25 (Hep-25) plays a crucial role in the control of iron homeostasis. Since the dysfunction of the hepcidin pathway leads to multiple diseases as a result of iron imbalance, hepcidin represents a potential target for the diagnosis and treatment of disorders of iron metabolism. Despite intense research in the last decade targeted at developing a selective immunoassay for iron disorder diagnosis and treatment and at better understanding the ferroportin-hepcidin interaction, questions remain. The key to resolving these underlying questions is acquiring exact knowledge of the 3D structure of native Hep-25. Since it was determined that the N-terminus, which is responsible for the bioactivity of Hep-25, contains a small Cu(II)-binding site known as the ATCUN motif, it was assumed that the Hep-25-Cu(II) complex is the native, bioactive form of hepcidin. This structure has thus far not been elucidated in detail. Owing to the lack of structural information on metal-bound Hep-25, little is known about its possible biological role in iron metabolism. Therefore, this work focuses on structurally characterizing metal-bound Hep-25 by NMR spectroscopy and molecular dynamics simulations. For the present work, a protocol was developed to prepare and purify properly folded Hep-25 in high quantities. In order to overcome the low solubility of Hep-25 at neutral pH, we introduced the C-terminal DEDEDE solubility tag. The metal binding was investigated through a series of NMR spectroscopic experiments to identify the most affected amino acids that mediate metal coordination. Based on the obtained NMR data, a structural calculation was performed in order to generate a model structure of the Hep-25-Ni(II) complex. The DEDEDE tag was excluded from the structural calculation due to a lack of NMR restraints. The dynamic nature and fast exchange of some of the amide protons with solvent reduced the overall number of NMR restraints needed for a high-quality structure. 
The NMR data revealed that the 20 C-terminal Hep-25 amino acids experienced no significant conformational changes, compared to published results, as a result of a pH change from pH 3 to pH 7 and metal binding. A 3D model of the Hep-25-Ni(II) complex was constructed from NMR data recorded for the hexapeptide-Ni(II) complex and the Hep-25-DEDEDE-Ni(II) complex, in combination with the fixed conformation of the 19 C-terminal amino acids. The NMR data of the Hep-25-DEDEDE-Ni(II) complex indicate that the ATCUN motif moves independently from the rest of the structure. The 3D model structure of metal-bound Hep-25 allows future work to elucidate hepcidin's interaction with its receptor ferroportin and should serve as a starting point for the development of antibodies with improved selectivity.}, language = {en} } @phdthesis{Nasery2019, author = {Nasery, Mustafa}, title = {The success and failure of civil service reforms in Afghanistan}, doi = {10.25932/publishup-44473}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444738}, school = {Universit{\"a}t Potsdam}, pages = {viii, 258}, year = {2019}, abstract = {The Government will create a motivated, merit-based, performance-driven, and professional civil service that is resistant to temptations of corruption and which provides efficient, effective and transparent public services that do not force customers to pay bribes. — (GoIRA, 2006, p. 106) We were in a black hole! We had an empty glass and had nothing from our side to fill it with! Thus, we accepted anything anybody offered; that is how our glass was filled; that is how we reformed our civil service. — (Former Advisor to IARCSC, personal communication, August 2015) How and under what conditions were the post-Taleban Civil Service Reforms of Afghanistan initiated? What were the main components of the reforms? What were their objectives and to what extent were they achieved? Who were the leading domestic and foreign actors involved in the process? Finally, what specific factors influenced the success and failure of Afghanistan's Civil Service Reforms since 2002? Guided by such fundamental questions, this research studies the wicked process of reforming the Afghan civil service in an environment where a variety of contextual, programmatic, and external factors affected the design and implementation of reforms that were entirely funded and technically assisted by the international community. Focusing on the core components of the reforms—recruitment, remuneration, and appraisal of civil servants—the qualitative study provides a detailed picture of the pre-reform civil service and its major human resources developments in the past. Following discussions on the content and purposes of the main reform programs, it then analyzes the extent of changes in policies and practices by examining the outputs and effects of these reforms. Moreover, the study identifies the specific factors that led the reforms toward a situation where most of the intended objectives remain unachieved. 
In doing so, it explores and explains how an overwhelming influence of international actors with conflicting interests, large-scale corruption, political interference, networks of patronage, institutionalized nepotism, culturally accepted cronyism and widespread ethnic favoritism created a very complex environment and prevented the reforms from transforming Afghanistan's patrimonial civil service into a professional civil service driven by performance and merit.}, language = {en} } @phdthesis{AbdelwahabHusseinAbdelwahabElsayed2019, author = {Abdelwahab Hussein Abdelwahab Elsayed, Ahmed}, title = {Probabilistic, deep, and metric learning for biometric identification from eye movements}, doi = {10.25932/publishup-46798}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-467980}, school = {Universit{\"a}t Potsdam}, pages = {vi, 65}, year = {2019}, abstract = {A central insight from psychological studies on human eye movements is that eye movement patterns are highly individually characteristic. They can, therefore, be used as a biometric feature, that is, subjects can be identified based on their eye movements. This thesis introduces new machine learning methods to identify subjects based on their eye movements while viewing arbitrary content. The thesis focuses on probabilistic modeling of the problem, which has yielded the best results in the most recent literature. The thesis studies the problem in three phases by proposing a purely probabilistic, a probabilistic deep learning, and a probabilistic deep metric learning approach. In the first phase, the thesis studies models that rely on psychological concepts about eye movements. Recent literature illustrates that individual-specific distributions of gaze patterns can be used to accurately identify individuals. In these studies, models were based on a simple parametric family of distributions. Such simple parametric models can be robustly estimated from sparse data, but have limited flexibility to capture the differences between individuals. Therefore, this thesis proposes a semiparametric model of gaze patterns that is flexible yet robust for individual identification. These patterns can be understood as domain knowledge derived from the psychological literature. Fixations and saccades are examples of simple gaze patterns. The proposed semiparametric densities are drawn under a Gaussian process prior centered at a simple parametric distribution. Thus, the model will stay close to the parametric class of densities if little data is available, but it can also deviate from this class if enough data is available, increasing the flexibility of the model. The proposed method is evaluated on a large-scale dataset, showing significant improvements over the state of the art. Later, the thesis replaces the model based on gaze patterns derived from psychological concepts with a deep neural network that can learn more informative and complex patterns from raw eye movement data. As previous work has shown that the distribution of these patterns across a sequence is informative, a novel statistical aggregation layer called the quantile layer is introduced. It explicitly fits the distribution of deep patterns learned directly from the raw eye movement data. The proposed deep learning approach is end-to-end learnable, such that the deep model learns to extract informative, short local patterns while the quantile layer learns to approximate the distributions of these patterns. 
Quantile layers are a generic approach that can converge to standard pooling layers or provide a more detailed description of the features being pooled, depending on the problem. The proposed model is evaluated in a large-scale study using the eye movements of subjects viewing arbitrary visual input. The model improves upon the standard pooling layers and other statistical aggregation layers proposed in the literature. It also improves upon state-of-the-art eye movement biometrics by a wide margin. Finally, for the model to identify any subject — not just the set of subjects it is trained on — a metric learning approach is developed. Metric learning learns a distance function over instances. The metric learning model maps the instances into a metric space, where sequences of the same individual are close and sequences of different individuals are further apart. This thesis introduces a deep metric learning approach with distributional embeddings. The approach represents sequences as a set of continuous distributions in a metric space; to achieve this, a new loss function based on Wasserstein distances is introduced. The proposed method is evaluated on multiple domains besides eye movement biometrics. This approach outperforms the state of the art in deep metric learning in several domains while also outperforming the state of the art in eye movement biometrics.}, language = {en} } @phdthesis{Schneider2019, author = {Schneider, Jan Niklas}, title = {Computational approaches for emotion research}, doi = {10.25932/publishup-45927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459275}, school = {Universit{\"a}t Potsdam}, pages = {xv, 145}, year = {2019}, abstract = {Emotionen sind ein zentrales Element menschlichen Erlebens und spielen eine wichtige Rolle bei der Entscheidungsfindung. Diese Dissertation identifiziert drei methodische Probleme der aktuellen Emotionsforschung und zeigt auf, wie diese mittels computergest{\"u}tzter Methoden gel{\"o}st werden k{\"o}nnen. Dieser Ansatz wird in drei Forschungsprojekten demonstriert, die die Entwicklung solcher Methoden sowie deren Anwendung auf konkrete Forschungsfragen beschreiben. Das erste Projekt beschreibt ein Paradigma, welches es erm{\"o}glicht, die subjektive und objektive Schwierigkeit der Emotionswahrnehmung zu messen. Dar{\"u}ber hinaus erm{\"o}glicht es die Verwendung einer beliebigen Anzahl von Emotionskategorien im Vergleich zu den {\"u}blichen sechs Kategorien der Basisemotionen. Die Ergebnisse deuten auf eine Zunahme der Schwierigkeiten bei der Wahrnehmung von Emotionen mit zunehmendem Alter der Darsteller hin und liefern Hinweise darauf, dass junge Erwachsene, {\"a}ltere Menschen und M{\"a}nner ihre Schwierigkeit bei der Wahrnehmung von Emotionen untersch{\"a}tzen. Weitere Analysen zeigten eine geringe Relevanz personenbezogener Variablen und deuteten darauf hin, dass die Schwierigkeit der Emotionswahrnehmung vornehmlich durch die Auspr{\"a}gung der Wertigkeit des Ausdrucks bestimmt wird. Das zweite Projekt zeigt am Beispiel von Arousal, einem etablierten, aber vagen Konstrukt der Emotionsforschung, wie Face-Tracking-Daten dazu genutzt werden k{\"o}nnen, solche Konstrukte zu sch{\"a}rfen. Es beschreibt, wie aus Face-Tracking-Daten Maße f{\"u}r die Entfernung, Geschwindigkeit und Beschleunigung von Gesichtsausdr{\"u}cken berechnet werden k{\"o}nnen. Das Projekt untersuchte, wie diese Maße mit der Arousal-Wahrnehmung bei Menschen mit und ohne Autismus zusammenh{\"a}ngen. 
Der Abstand zum Neutralgesicht war pr{\"a}diktiv f{\"u}r die Arousal-Bewertungen in beiden Gruppen. Die Ergebnisse deuten auf eine qualitativ {\"a}hnliche Wahrnehmung von Arousal f{\"u}r Menschen mit und ohne Autismus hin. Im dritten Projekt stellen wir die Partial-Least-Squares-Analyse als allgemeine Methode vor, um eine optimale Repr{\"a}sentation zur Verkn{\"u}pfung zweier hochdimensionaler Datens{\"a}tze zu finden. Das Projekt demonstriert die Anwendbarkeit dieser Methode in der Emotionsforschung anhand der Frage nach Unterschieden in der Emotionswahrnehmung zwischen M{\"a}nnern und Frauen. Wir konnten zeigen, dass die emotionale Wahrnehmung von Frauen systematisch mehr Varianz der Gesichtsausdr{\"u}cke erfasst und dass signifikante Unterschiede in der Art und Weise bestehen, wie Frauen und M{\"a}nner einige Gesichtsausdr{\"u}cke wahrnehmen. Diese konnten wir als dynamische Gesichtsausdr{\"u}cke visualisieren. Um die Anwendung der entwickelten Methode f{\"u}r die Forschungsgemeinschaft zu erleichtern, wurde ein Software-Paket f{\"u}r die Statistikumgebung R geschrieben. Zudem wurde eine Website entwickelt (thisemotiondoesnotexist.com), die es Besuchern erlaubt, ein Partial-Least-Squares-Modell von Emotionsbewertungen und Face-Tracking-Daten interaktiv zu erkunden, um die entwickelte Methode zu verbreiten und ihren Nutzen f{\"u}r die Emotionsforschung zu illustrieren.}, language = {en} }