@phdthesis{Sotiropoulou2019, author = {Sotiropoulou, Stavroula}, title = {Pleiotropy of phonetic indices in the expression of syllabic organization}, doi = {10.25932/publishup-54639}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-546399}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2019}, abstract = {This dissertation is concerned with the relation between qualitative phonological organization in the form of syllabic structure and continuous phonetics, that is, the spatial and temporal dimensions of vocal tract action that express syllabic structure. The main claim of the dissertation is twofold. First, we argue that syllabic organization exerts multiple effects on the spatio-temporal properties of the segments that partake in that organization. That is, there is no unique or privileged exponent of syllabic organization. Rather, syllabic organization is expressed in a pleiotropy of phonetic indices. Second, we claim that a better understanding of the relation between qualitative phonological organization and continuous phonetics is reached when one considers how the string of segments (over which the nature of the phonological organization is assessed) responds to perturbations (scaling of phonetic variables) of localized properties (such as durations) within that string. Specifically, variation in phonetic variables, and prosodic variation in particular, is a crucial key to understanding the nature of the link between (phonological) syllabic organization and the phonetic spatio-temporal manifestation of that organization. The effects of prosodic variation on segmental properties and on the overlap between the segments, we argue, offer the right pathway to discover patterns related to syllabic organization. In our approach, to uncover evidence for global organization, the sequence of segments partaking in that organization as well as properties of these segments or their relations with one another must be somehow locally varied. The consequences of such variation on the rest of the sequence can then be used to unveil the span of organization. When local perturbations to segments or relations between adjacent segments have effects that ripple through the rest of the sequence, this is evidence that organization is global. If instead local perturbations stay local with no consequences for the rest of the sequence, this indicates that organization is local.}, language = {en} } @phdthesis{Ruschel2019, author = {Ruschel, Matthias}, title = {Das Preußische Erbrecht in der Judikatur des Berliner Obertribunals in den Jahren 1836 bis 1865}, doi = {10.25932/publishup-52779}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-527798}, school = {Universit{\"a}t Potsdam}, pages = {283}, year = {2019}, abstract = {Die Dissertation befasst sich mit dem Allgemeinen Preußischen Landrecht von 1794 und der hierzu ergangenen Rechtsprechung des Berliner Obertribunals. Im Fokus der Untersuchung stehen die erbrechtlichen Regelungen des Landrechts und deren Anwendung sowie Auslegung in der Judikatur des h{\"o}chsten preußischen Gerichts. Der Forschungsgegenstand ergibt sich aus dem im Landrecht kodifizierten speziellen Gesetzesverst{\"a}ndnis. Nach diesem sollte die Gesetzesauslegung durch die Rechtsprechung auf ein Minimum, n{\"a}mlich die Auslegung allein anhand des Wortlauts der Regelung, reduziert werden, um dem absolutistischen Regierungsanspruch der preußischen Monarchen, namentlich Friedrichs des Großen, hinreichend Rechnung zu tragen.
In diesem Kontext wird der Frage nachgegangen, inwieweit das preußische Obertribunal das im Landrecht statuierte „Auslegungsverbot" beachtet hat und in welchen F{\"a}llen sich das Gericht von der Vorgabe emanzipierte und weitere Auslegungsmethoden anwendete und sich so eine unabh{\"a}ngige Rechtsprechung entwickeln konnte. Die Arbeit gliedert sich in drei Hauptabschnitte. Im Anschluss an die Einleitung, in der zun{\"a}chst die rechtshistorische Bedeutung des Landrechts und des Erbrechts sowie der Untersuchungsgegenstand umrissen werden, folgt die Darstellung der Entstehungsgeschichte des Landrechts und des Berliner Obertribunals. Hieran schließt sich in einem dritten Abschnitt eine Analyse der erbrechtlichen Vorschriften des Landrechts an. In dieser wird auf die Entstehungsgeschichte der verschiedenen erbrechtlichen Institute wie beispielsweise der gesetzlichen und gewillk{\"u}rten Erbfolge, des Pflichtteilsrechts etc., unter Ber{\"u}cksichtigung des zeitgen{\"o}ssischen wissenschaftlichen Diskurses eingegangen. Im vierten Abschnitt geht es um die Judikate des Berliner Obertribunals aus den Jahren 1836-1865, in denen die zuvor dargestellten erbrechtlichen Regelungen entscheidungserheblich waren. Dabei wird der Forschungsfrage, inwieweit das Obertribunal das im Landrecht statuierte Auslegungsverbot beachtet hat und in welchen F{\"a}llen es von diesem abwich bzw. weitere Auslegungsmethoden anwendete, konkret nachgegangen. Insgesamt werden 26 Entscheidungen des Obertribunals unter dem Aspekt der Auslegungspraxis, der Kontinuit{\"a}t und der Beschleunigung der Rechtsprechung analysiert und ausgewertet.}, language = {de} } @phdthesis{Doering2019, author = {D{\"o}ring, Matthias}, title = {The public encounter}, doi = {10.25932/publishup-50227}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502276}, school = {Universit{\"a}t Potsdam}, pages = {xi, 115}, year = {2019}, abstract = {This thesis puts the citizen-state interaction at its center. Building on a comprehensive model incorporating various perspectives on this interaction, I derive selected research gaps. The three articles comprising this thesis tackle these gaps. A focal role is played by the citizens' administrative literacy, the relevant competences and knowledge necessary to successfully interact with public organizations. The first article elaborates on the different dimensions of administrative literacy and develops a survey instrument to assess these. The second study shows that public employees change their behavior according to the competences that citizens display during public encounters. They give preferential treatment to citizens who are well prepared and able to persuade them of their application's potential. Such citizens signal a higher success potential with respect to bureaucratic success criteria, which leads to the employees' cream-skimming behavior. The third article examines the dynamics of employees' communication strategies when recovering from a service failure. The study finds that different explanation strategies yield different effects on the client's frustration. While accepting the responsibility and explaining the reasons for a failure alleviates the frustration and anger, refusing the responsibility has no effect or even reinforces the client's frustration.
The results emphasize the different dynamics that characterize the nature of citizen-state interactions and how they shape their short- and long-term outcomes.}, language = {en} } @phdthesis{Gong2019, author = {Gong, Chen Chris}, title = {Synchronization of coupled phase oscillators}, doi = {10.25932/publishup-48752}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487522}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 115}, year = {2019}, abstract = {Oscillatory systems under weak coupling can be described by the Kuramoto model of phase oscillators. Kuramoto phase oscillators have diverse applications ranging from phenomena such as communication between neurons and the collective influence of political opinions to engineered systems such as Josephson junctions and synchronized electric power grids. This thesis includes the author's contribution to the theoretical framework of coupled Kuramoto oscillators and to the understanding of non-trivial N-body dynamical systems via their reduced mean-field dynamics. The main content of this thesis is composed of four parts. First, a partially integrable theory of globally coupled identical Kuramoto oscillators is extended to include pure higher-mode coupling. The extended theory is then applied to a non-trivial higher-mode coupled model, which has been found to exhibit asymmetric clustering. Using the developed theory, we could predict a number of features of the asymmetric clustering with only information about the initial state provided. The second part consists of an iterated discrete-map approach to simulate phase dynamics. The proposed map --- a Moebius map --- not only provides fast computation of phase synchronization but also precisely reflects the underlying group structure of the dynamics. We then compare the iterated-map dynamics and various analogous continuous-time dynamics. We are able to replicate known phenomena such as the synchronization transition of the Kuramoto-Sakaguchi model of oscillators with distributed natural frequencies, and chimera states for identical oscillators under non-local coupling. The third part entails a particular model of repulsively coupled identical Kuramoto-Sakaguchi oscillators under common random forcing, which can be shown to be partially integrable. Via both numerical simulations and theoretical analysis, we determine that such a model cannot exhibit stationary multi-cluster states, contrary to the numerical findings in previous literature. Through further investigation, we find that the multi-clustering states reported previously occur due to the accumulation of discretization errors inherent in the integration algorithms, which introduce higher-mode couplings into the model. As a result, the partial integrability condition is violated. Lastly, we derive the microscopic cross-correlation of globally coupled non-identical Kuramoto oscillators under common fluctuating forcing. The effect of correlation arises naturally in finite populations, due to the non-trivial fluctuations of the mean-field. In an idealized model, we approximate the finite-size fluctuations by Gaussian white noise.
The analytical approximation qualitatively matches the measurements in numerical experiments; however, due to other periodic components inherent in the fluctuations of the mean-field, there still exist significant inconsistencies.}, language = {en} } @phdthesis{Kerutt2019, author = {Kerutt, Josephine Victoria}, title = {The high-redshift voyage of Lyman alpha and Lyman continuum emission as told by MUSE}, doi = {10.25932/publishup-47881}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-478816}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2019}, abstract = {Most of the matter in the universe consists of hydrogen. The hydrogen in the intergalactic medium (IGM), the matter between the galaxies, underwent a change of its ionisation state at the epoch of reionisation, at a redshift roughly between 6<z<10, or ~10^8 years after the Big Bang. At this time, the mostly neutral hydrogen in the IGM was ionised, but the source of the responsible hydrogen ionising emission remains unclear. In this thesis I discuss the most likely candidates for the emission of this ionising radiation, which are a type of galaxy called Lyman alpha emitters (LAEs). As implied by their name, they emit Lyman alpha radiation, produced after a hydrogen atom has been ionised and recombines with a free electron. The ionising radiation itself (also called Lyman continuum emission), which is needed for this process inside the LAEs, could also be responsible for ionising the IGM around those galaxies at the epoch of reionisation, given that enough Lyman continuum escapes. Through this mechanism, Lyman alpha and Lyman continuum radiation are closely linked and are both studied to better understand the properties of high redshift galaxies and the reionisation state of the universe. Before I can analyse their Lyman alpha emission lines and the escape of Lyman continuum emission from them, the first step is the detection and correct classification of LAEs in integral field spectroscopic data, specifically taken with the Multi-Unit Spectroscopic Explorer (MUSE). After detecting emission line objects in the MUSE data, the task of classifying them and determining their redshift is performed with the graphical user interface QtClassify, which I developed during the work on this thesis. It uses the strength of the combination of spectroscopic and photometric information that integral field spectroscopy offers to enable the user to quickly identify the nature of the detected emission lines. The reliable classification of LAEs and determination of their redshifts is a crucial first step towards an analysis of their properties. Through radiative transfer processes, the properties of the neutral hydrogen clouds in and around LAEs are imprinted on the shape of the Lyman alpha line. Thus, after identifying the LAEs in the MUSE data, I analyse the properties of the Lyman alpha emission line, such as the equivalent width (EW) distribution, the asymmetry and width of the line as well as the double peak fraction. I challenge the common method of displaying EW distributions as histograms without taking the limits of the survey into account and construct a more independent EW distribution function that better reflects the properties of the underlying population of galaxies. I illustrate this by comparing the fraction of high EW objects between the two surveys MUSE-Wide and MUSE-Deep, both consisting of MUSE pointings (each with the size of one square arcminute) of different depths.
In the 60 MUSE-Wide fields of one hour exposure time I find a fraction of ~20\% of objects with extreme EWs above EW_0>240A, while in the MUSE-Deep fields (9 fields with an exposure time of 10 hours and one with an exposure time of 31 hours) I find a fraction of only ~1\%, which is due to the differences in the limiting line flux of the surveys. The highest EW I measure is EW_0 = 600.63 +- 110A, which hints at an unusual underlying stellar population, possibly with a very low metallicity. With the knowledge of the redshifts and positions of the LAEs detected in the MUSE-Wide survey, I also look for Lyman continuum emission coming from these galaxies and analyse the connection between Lyman continuum emission and Lyman alpha emission. I use ancillary Hubble Space Telescope (HST) broadband photometry in the bands that contain the Lyman continuum and find six Lyman continuum leaker candidates. To test whether the Lyman continuum emission of LAEs is coming only from those individual objects or from the whole population, I select LAEs that are most promising for the detection of Lyman continuum emission, based on their rest-frame UV continuum and Lyman alpha line shape properties. After this selection, I stack the broadband data of the resulting sample and detect a signal in Lyman continuum with a significance of S/N = 5.5, pointing towards a Lyman continuum escape fraction of ~80\%. If the signal is reliable, it strongly favours LAEs as the providers of the hydrogen ionising emission at the epoch of reionisation and beyond.}, language = {en} } @phdthesis{Kuberski2019, author = {Kuberski, Stephan R.}, title = {Fundamental motor laws and dynamics of speech}, doi = {10.25932/publishup-43771}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437714}, school = {Universit{\"a}t Potsdam}, pages = {97}, year = {2019}, abstract = {The present work is a compilation of three original research articles submitted (or already published) in international peer-reviewed venues of the field of speech science. These three articles address the topics of fundamental motor laws in speech and dynamics of corresponding speech movements: 1. Kuberski, Stephan R. and Adamantios I. Gafos (2019). "The speed-curvature power law in tongue movements of repetitive speech". PLOS ONE 14(3). Public Library of Science. doi: 10.1371/journal.pone.0213851. 2. Kuberski, Stephan R. and Adamantios I. Gafos (In press). "Fitts' law in tongue movements of repetitive speech". Phonetica: International Journal of Phonetic Science. Karger Publishers. doi: 10.1159/000501644 3. Kuberski, Stephan R. and Adamantios I. Gafos (submitted). "Distinct phase space topologies of identical phonemic sequences". Language. Linguistic Society of America. The present work introduces a metronome-driven speech elicitation paradigm in which participants were asked to utter repetitive sequences of elementary consonant-vowel syllables. This paradigm, explicitly designed to cover a substantially wider range of speech rates than has been explored in previous work, is demonstrated to satisfy the important prerequisites for assessing aspects of speech that have so far been difficult to access. Specifically, the paradigm's extensive speech rate manipulation enabled elicitation of a great range of movement speeds as well as movement durations and excursions of the relevant effectors.
The presence of such variation is a prerequisite to assessing whether invariant relations between these and other parameters exist and thus provides the foundation for a rigorous evaluation of the two laws examined in the first two contributions of this work. In the data resulting from this paradigm, it is shown that speech movements obey the same fundamental laws as movements from other domains of motor control do. In particular, it is demonstrated that speech strongly adheres to the power law relation between speed and curvature of movement, with a clear speech rate dependency of the power law's exponent. The often-sought or reported exponent of one third in the statement of the law is unique to a subclass of movements which corresponds to the range of faster rates under which a particular utterance is produced. For slower rates, significantly larger values than one third are observed. Furthermore, for the first time in speech, this work uncovers evidence for the presence of Fitts' law. It is shown that, beyond a speaker-specific speech rate, speech movements of the tongue clearly obey Fitts' law by emergence of its characteristic linear relation between movement time and index of difficulty. For slower speech rates (when temporal pressure is small), no such relation is observed. The methods and datasets obtained in the two assessments above provide a rigorous foundation both for addressing implications for theories and models of speech as well as for better understanding the status of speech movements in the context of human movements in general. All modern theories of language rely on a fundamental segmental hypothesis according to which the phonological message of an utterance is represented by a sequence of segments or phonemes. It is commonly assumed that each of these phonemes can be mapped to some unit of speech motor action, a so-called speech gesture. For the first time here, it is demonstrated that the relation between the phonological description of simple utterances and the corresponding speech motor action is non-unique. Specifically, by the extensive speech rate manipulation in the experimental paradigm used herein, it is demonstrated that speech exhibits clearly distinct dynamical organizations underlying the production of simple utterances. At slower speech rates, the dynamical organization underlying the repetitive production of elementary /CV/ syllables can be described by successive concatenations of closing and opening gestures, each with its own equilibrium point. As speech rate increases, the equilibria of opening and closing gestures are not equally stable, yielding qualitatively different modes of organization with either a single equilibrium point of a combined opening-closing gesture or a periodic attractor unleashed by the disappearance of both equilibria. This observation, the non-uniqueness of the dynamical organization underlying what on the surface appear to be identical phonemic sequences, is an entirely new result in the domain of speech.
Beyond that, the demonstration of periodic attractors in speech reveals that dynamical equilibrium point models do not account for all possible modes of speech motor behavior.}, language = {en} } @phdthesis{He2019, author = {He, Hai}, title = {Exploring and engineering formaldehyde assimilation}, doi = {10.25932/publishup-47386}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473867}, school = {Universit{\"a}t Potsdam}, pages = {vi, 105}, year = {2019}, abstract = {Increasing concerns regarding the environmental impact of our chemical production have shifted attention towards possibilities for sustainable biotechnology. One-carbon (C1) compounds, including methane, methanol, formate and CO, are promising feedstocks for future bioindustry. CO2 is another interesting feedstock, as it can also be transformed into other C1 feedstocks using renewable energy. While formaldehyde is not suitable as a feedstock due to its high toxicity, it is a central intermediate in the process of C1 assimilation. This thesis explores formaldehyde metabolism and aims to engineer formaldehyde assimilation in the model organism Escherichia coli for the future C1-based bioindustry. The first chapter of the thesis aims to establish growth of E. coli on formaldehyde via the most efficient naturally occurring route, the ribulose monophosphate pathway. Linear variants of the pathway were constructed in multiple-gene knockout strains, coupling E. coli growth to the activities of the key enzymes of the pathway. Formaldehyde-dependent growth was achieved in rationally designed strains. In the final strain, the synthetic pathway provides the cell with almost all biomass and energy requirements. In the second chapter, taking advantage of the unique feature of its reactivity, formaldehyde assimilation via condensation with glycine and pyruvate by two promiscuous aldolases was explored. Facilitated by these two reactions, the newly designed homoserine cycle is expected to support higher yields of a wide array of products than its counterparts. By dividing the pathway into segments and coupling them to the growth of dedicated strains, all pathway reactions were demonstrated to be sufficiently active. The work paves the way for future implementation of a highly efficient route from C1 feedstocks into commodity chemicals. In the third chapter, the in vivo rate of the spontaneous formaldehyde-tetrahydrofolate condensation to methylene-tetrahydrofolate was assessed in order to evaluate its applicability as a biotechnological process. Tested within an E. coli strain deleted in essential genes for native methylene-tetrahydrofolate biosynthesis, the reaction was shown to support the production of this essential intermediate. However, only low growth rates were observed and only at high formaldehyde concentrations. Computational analysis based on in vivo evidence from this strain deduced the slow rate of this spontaneous reaction, thus ruling out its substantial contribution to growth on C1 feedstocks. The reactivity of formaldehyde makes it highly toxic. In the last chapter, the formation of thioproline, the condensation product of cysteine and formaldehyde, was confirmed to contribute to this toxicity. Xaa-Pro aminopeptidase (PepP), which is genetically linked with folate metabolism, was shown to hydrolyze thioproline-containing peptides. Deleting pepP increased strain sensitivity to formaldehyde, pointing towards the toxicity of thioproline-containing peptides and the importance of their removal.
The characterization in this study could be useful in handling this toxic intermediate. Overall, this thesis identified challenges related to formaldehyde metabolism and provided novel solutions towards a future bioindustry based on sustainable C1 feedstocks in which formaldehyde serves as a key intermediate.}, language = {en} } @phdthesis{Melani2019, author = {Melani, Giacomo}, title = {From structural fluctuations to vibrational spectroscopy of adsorbates on surfaces}, doi = {10.25932/publishup-44182}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441826}, school = {Universit{\"a}t Potsdam}, pages = {119}, year = {2019}, abstract = {Aluminum oxide is an Earth-abundant geological material, and its interaction with water is of crucial importance for geochemical and environmental processes. Some aluminum oxide surfaces are also known to be useful in heterogeneous catalysis, while the surface chemistry of aqueous oxide interfaces determines the corrosion, growth and dissolution of such materials. In this doctoral work, we looked mainly at the (0001) surface of α-Al2O3 and its reactivity towards water. In particular, a great focus of this work is dedicated to simulating and addressing the vibrational spectra of water adsorbed on the α-alumina(0001) surface in various conditions and at different coverages. In fact, the main source of comparison and inspiration for this work comes from the collaboration with the "Interfacial Molecular Spectroscopy" group led by Dr. R. Kramer Campen at the Fritz-Haber Institute of the MPG in Berlin. The expertise of our project partners in surface-sensitive Vibrational Sum Frequency (VSF) generation spectroscopy was crucial to develop and adapt specific simulation schemes used in this work. Methodologically, the main approach employed in this thesis is Ab Initio Molecular Dynamics (AIMD) based on periodic Density Functional Theory (DFT) using the PBE functional with D2 dispersion correction. The analysis of vibrational frequencies from both a static and a dynamic, finite-temperature perspective offers the ability to investigate the water/aluminum oxide interface in close connection to experiment. The first project presented in this work considers the characterization of dissociatively adsorbed deuterated water on the Al-terminated (0001) surface. This particular structure is known from both experiment and theory to be the thermodynamically most stable surface termination of α-alumina in Ultra-High Vacuum (UHV) conditions. Based on experiments performed by our colleagues at FHI, different adsorption sites and products have been proposed and identified for D2O. While previous theoretical investigations only looked at vibrational frequencies of dissociated OD groups by static Normal Modes Analysis (NMA), we rather employed a more sophisticated approach to directly assess vibrational spectra (like IR and VSF) at finite temperature from AIMD. In this work, we have employed a recent implementation which makes use of velocity-velocity autocorrelation functions to simulate such spectral responses of O-H(D) bonds. This approach allows for an efficient and qualitatively accurate estimation of Vibrational Densities of States (VDOS) as well as IR and VSF spectra, which are then tested against experimental spectra from our collaborators.
In order to extend previous work on unimolecularly dissociated water on α-Al2O3, we then considered a different system, namely, a fully hydroxylated (0001) surface, which results from the reconstruction of the UHV-stable Al-terminated surface at high water contents. This model is then further extended by considering a hydroxylated surface with additional water molecules, forming a two-dimensional layer which serves as a potential template to simulate an aqueous interface in environmental conditions. Again, employing finite-temperature AIMD trajectories at the PBE+D2 level, we investigated the behaviour of both the hydroxylated surface (HS) and the water-covered structure derived from it (known as HS+2ML). A full range of spectra, from VDOS to IR and VSF, is then calculated using the same methodology, as described above. This is the main focus of the second project, reported in Chapter 5. In this case, the comparison between theoretical spectra and experimental data is very good. In particular, we underline the nature of high-frequency resonances observed above 3700 cm^-1 in VSF experiments to be associated with surface OH-groups, known as "aluminols", which are a key fingerprint of the fully hydroxylated surface. In the third and last project, which is presented in Chapter 6, the extension of VSF spectroscopy experiments to the time-resolved regime offered us the opportunity to investigate vibrational energy relaxation at the α-alumina/water interface. Specifically, using again DFT-based AIMD simulations, we simulated vibrational lifetimes for surface aluminols as experimentally detected via pump-probe VSF. We considered the water-covered HS model as a potential candidate to address this problem. The vibrational (IR) excitation and subsequent relaxation are performed by means of a non-equilibrium molecular dynamics scheme. In such a scheme, we specifically looked at the O-H stretching mode of surface aluminols. Afterwards, the analysis of non-equilibrium trajectories allows for an estimation of relaxation times on the order of 2-4 ps, which are in overall agreement with measured ones. The aim of this work has been to provide, within a consistent theoretical framework, a better understanding of vibrational spectroscopy and dynamics for water on the α-alumina(0001) surface, ranging from very low water coverage (similar to the UHV case) up to medium-high coverages, resembling the hydroxylated oxide in environmental moist conditions.}, language = {en} } @phdthesis{Wozny2019, author = {Wozny, Florian}, title = {Three empirical essays in health economics}, doi = {10.25932/publishup-46991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469910}, school = {Universit{\"a}t Potsdam}, pages = {200}, year = {2019}, abstract = {Modern health care systems are characterized by pronounced prevention and cost-optimized treatments. This dissertation offers novel empirical evidence on how useful such measures can be. The first chapter analyzes how radiation, a main pollutant in health care, can negatively affect cognitive health. The second chapter focuses on the effect of Low Emission Zones on public health, as air quality is the major external source of health problems. Both chapters point out potential for preventive measures. Finally, chapter three studies how changes in treatment prices affect the reallocation of hospital resources. In the following, I briefly summarize each chapter and discuss implications for health care systems as well as other policy areas.
Based on the National Educational Panel Study that is linked to data on radiation, chapter one shows that radiation can have negative long-term effects on cognitive skills, even at subclinical doses. Exploiting arguably exogenous variation in soil contamination in Germany due to the Chernobyl disaster in 1986, the findings show that people exposed to higher radiation perform significantly worse in cognitive tests 25 years later. Identification is ensured by abnormal rainfall within a critical period of ten days. The results show that the effect is stronger among older cohorts than younger cohorts, which is consistent with radiation accelerating cognitive decline as people get older. On average, a one-standard-deviation increase in the initial level of Cs137 (around 30 chest x-rays) is associated with a decrease in cognitive skills by 4.1 percent of a standard deviation (around 0.05 school years). Chapter one shows that sub-clinical levels of radiation can have negative consequences even after early childhood. This is of particular importance because most of the literature focuses on exposure very early in life, often during pregnancy. However, the population exposed after birth is over 100 times larger. These results point to substantial external human capital costs of radiation which can be reduced by choices of medical procedures. There is a large potential for reductions because about one-third of all CT scans are assumed not to be medically justified (Brenner and Hall, 2007). If people receive unnecessary CT scans because of economic incentives, this chapter points to additional external costs of health care policies. Furthermore, the results can inform the cost-benefit trade-off for medically indicated procedures. Chapter two provides evidence about the effectiveness of Low Emission Zones. Low Emission Zones are typically justified by improvements in population health. However, there is little evidence about the potential health benefits from policy interventions aiming at improving air quality in inner cities. The chapter asks how the coverage of Low Emission Zones affects air pollution and hospitalization, by exploiting variation in the roll-out of Low Emission Zones in Germany. It combines information on the geographic coverage of Low Emission Zones with rich panel data on the universe of German hospitals over the period from 2006 to 2016 with precise information on hospital locations and the annual frequency of detailed diagnoses. In order to establish that our estimates of Low Emission Zones' health impacts can indeed be attributed to improvements in local air quality, we use data from Germany's official air pollution monitoring system and assign monitor locations to Low Emission Zones and test whether measures of air pollution are affected by the coverage of a Low Emission Zone. Results in chapter two confirm former results showing that the introduction of Low Emission Zones improved air quality significantly by reducing NO2 and PM10 concentrations. Furthermore, the chapter shows that hospitals whose catchment areas are covered by a Low Emission Zone diagnose significantly fewer air pollution related diseases, in particular through a reduced incidence of chronic diseases of the circulatory and the respiratory system. The effect is stronger before 2012, which is consistent with a general improvement in the vehicle fleet's emission standards.
Depending on the disease, a one-standard-deviation increase in the coverage of a hospital's catchment area by a Low Emission Zone reduces the yearly number of diagnoses by up to 5 percent. These findings have strong implications for policy makers. In 2015, overall costs for health care in Germany were around 340 billion euros, of which 46 billion euros were spent on diseases of the circulatory system, making them, with 2.9 million cases, the most expensive type of disease (Statistisches Bundesamt, 2017b). Hence, reductions in the incidence of diseases of the circulatory system may directly reduce society's health care costs. Whereas chapters one and two study the demand side in health care markets and thus preventive potential, chapter three analyzes the supply side. By exploiting the same hospital panel data set as in chapter two, chapter three studies the effect of treatment price shocks on the reallocation of hospital resources in Germany. Starting in 2005, the implementation of the German-DRG-System led to general idiosyncratic treatment price shocks for individual hospitals. Thus far there is little evidence of the impact of general price shocks on the reallocation of hospital resources. Additionally, I add to the existing literature by showing that price shocks can have persistent effects on hospital resources even when these shocks vanish. However, simple OLS regressions would underestimate the true effect, due to endogenous treatment price shocks. I implement a novel instrumental variable strategy that exploits the exogenous variation in the number of days of snow in hospital catchment areas. A peculiarity of the reform allowed variation in days of snow to have a persistent impact on treatment prices. I find that treatment price increases lead to increases in input factors such as nursing staff, physicians and the range of treatments offered, but to decreases in the treatment volume. This indicates supplier-induced demand. Furthermore, the probability of hospital mergers and privatization decreases. Structural differences in pre-treatment characteristics between hospitals enhance these effects. For instance, private and larger hospitals are more affected. IV estimates reveal that OLS results are biased towards zero in almost all dimensions because structural hospital differences are correlated with the reallocation of hospital resources. These results are important for several reasons. The G-DRG-Reform led to a persistent polarization of hospital resources, as some hospitals were exposed to treatment price increases, while others experienced reductions. If hospitals increase the treatment volume as a response to price reductions by offering unnecessary therapies, this has a negative impact on population wellbeing and public spending. However, results show a decrease in the range of treatments if prices decrease. Hospitals might specialize more, thus attracting more patients. From a policy perspective, it is important to evaluate if such changes in the range of treatments jeopardize an adequate nationwide provision of treatments. Furthermore, the results show a decrease in the number of nurses and physicians if prices decrease. This could partly explain the nursing crisis in German hospitals. However, since hospitals specialize more, they might be able to realize efficiency gains which justify reductions in input factors without losses in quality. Further research is necessary to provide evidence for the impact of the G-DRG-Reform on health care quality.
Another important aspect concerns changes in the organizational structure. Many public hospitals have been privatized or merged. The findings show that this is at least partly driven by the G-DRG-Reform. This can again lead to a lack of services offered in some regions if merged hospitals specialize more or if hospitals are taken over by ecclesiastical organizations which do not provide all treatments due to moral conviction. Overall, this dissertation reveals large potential for preventive health care measures and helps to explain reallocation processes in the hospital sector if treatment prices change. Furthermore, its findings have potentially relevant implications for other areas of public policy. Chapter one identifies an effect of low dose radiation on cognitive health. As mankind is searching for new energy sources, nuclear power is becoming popular again. However, results of chapter one point to substantial costs of nuclear energy which have not been accounted for yet. Chapter two finds strong evidence that air quality improvements by Low Emission Zones translate into health improvements, even at relatively low levels of air pollution. These findings may, for instance, be of relevance for designing further policies targeted at air pollution such as diesel bans. As pointed out in chapter three, the implementation of DRG-Systems may have unintended side-effects on the reallocation of hospital resources. This may also apply to other providers in the health care sector such as resident doctors.}, language = {en} } @phdthesis{Bartel2019, author = {Bartel, Melanie}, title = {Kernresonanz-Strukturuntersuchungen an alternativen Precursoren und deren Zwischenprodukten f{\"u}r die Herstellung von Carbonfasern f{\"u}r den Massenmarkt}, doi = {10.25932/publishup-46930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469301}, school = {Universit{\"a}t Potsdam}, pages = {XII, 101, XXV}, year = {2019}, abstract = {Carbonfasern haben sich in der Luft- und Raumfahrt etabliert und gewinnen in Alltagsanwendungen wie dem Automobilbereich, Windkraft- und Sportbereich durch ihre hohen Zugfestigkeiten, insbesondere ihre hohen E-Moduli, und ihre geringe Dichte immer mehr an Bedeutung. Auf Grund ihrer hohen Kosten, welche sich zur H{\"a}lfte aus der Precursorherstellung, inklusive seiner Synthese und seines Verspinnprozesses, des L{\"o}sungsspinnverfahrens, ergeben, erhalten zunehmend alternative und schmelzspinnbare Precursoren Interesse. F{\"u}r die Carbonfaserherstellung wird fast ausschließlich Polyacrylnitril (PAN) verwendet, das vor dem Schmelzen irreversible exotherme Zyklisierungsreaktionen aufweist, welchen sich seine Zersetzung anschließt. Eine M{\"o}glichkeit der Reduzierung der Schmelztemperatur von Polymeren ist die Einbringung von Comonomeren als interne Weichmacher zur Erh{\"o}hung des freien Volumens und zur Reduzierung der intermolekularen Wechselwirkungen. Wie am Fraunhofer IAP gezeigt wurde, kann die Schmelztemperatur mittels 2-Methoxyethylacrylat (MEA) verringert und so ein neuartiger PAN-basierter Precursor erhalten werden. Um den PAN-co-MEA-Precursor f{\"u}r die nachfolgenden Prozessschritte der Carbonfaserherstellung zu verwenden, m{\"u}ssen die thermoplastischen Fasern in thermisch stabile Fasern ohne thermoplastisches Verhalten {\"u}berf{\"u}hrt werden. Es wurde ein neuer Prozessschritt (Pr{\"a}stabilisierung) eingef{\"u}hrt, welcher unter alkalischen Bedingungen zur Abspaltung der Comonomerseitenkette f{\"u}hrt.
Neben der Esterhydrolyse finden Reaktionen statt, welche an diesem Material noch nicht hinreichend untersucht wurden. Weiterhin stellt sich die Frage nach der Kinetik der Pr{\"a}stabilisierung und der Ermittlung einer geeigneten Prozessf{\"u}hrung. Hierzu wurde die Pr{\"a}stabilisierung in den Labormaßstab {\"u}berf{\"u}hrt und die m{\"o}glichen Zusammensetzungen des aus DMSO und einer KOH-L{\"o}sung bestehenden Reaktionsmediums evaluiert. Weiterhin wurde die Behandlung bei verschiedenen Pr{\"a}stabilisierungszeiten von maximal 30 min und Temperaturen von 40, 50 und 60 °C durchgef{\"u}hrt, um prim{\"a}r mittels NMR-Spektroskopie die chemischen Struktur{\"a}nderungen aufzukl{\"a}ren. Die Esterhydrolyse des Comonomers, welche zur Abspaltung des 2-Methoxyethanols f{\"u}hrt, wurde mittels 1H-NMR-spektroskopischer Untersuchungen detektiert. Es wurde ein Modell aufgestellt, das die chemisch-physikalischen Struktur{\"a}nderungen w{\"a}hrend der Pr{\"a}stabilisierung aufzeigt. Die zuerst ablaufende Reaktion ist die Esterhydrolyse am Comonomer, welche vom Faserrand nach innen verl{\"a}uft und durch die Pr{\"a}senz des DMSO in Kombination mit der KOH-L{\"o}sung (Superbase) initiiert wird. Der zeitliche Reaktionsverlauf der Esterhydrolyse kann in drei Bereiche eingeteilt werden. Der erste Bereich ab dem Pr{\"a}stabilisierungsbeginn wird durch die Diffusion der basischen Anionen in die Faser, der zweite Bereich durch die Reaktion an der Estergruppe des Comonomers und der dritte Bereich durch letzte Reaktionen im Faserinneren und diffusive Prozesse der Produkte und Edukte charakterisiert. Der zweite Bereich kann mit einer Reaktion pseudo 1. Ordnung abgebildet werden, da in diesem Bereich bereits eine ausreichende Diffusion der Edukte in die Faser stattgefunden hat. Bei 50 °C spielt die Diffusion im ersten Bereich im Vergleich zur Reaktion eine untergeordnete Rolle. Mit Erh{\"o}hung der Temperatur auf 60 °C kann eine im Verh{\"a}ltnis geringere Diffusions- als Reaktionsgeschwindigkeit beobachtet werden. Die Nebenreaktionen wurden mittels 13C-CP/MAS-NMR-spektroskopischer und elementaranalytischer Untersuchungen sowie Doppelbrechungsmessungen charakterisiert. W{\"a}hrend der alkalischen Esterhydrolyse beginnt die Reduzierung der Nitrilgruppen unter der Bildung von prim{\"a}ren Carbons{\"a}ureamiden und Carbons{\"a}uren. Zur Beschreibung dieser Umsetzung wurde eine Methode entwickelt, welche die Addition von 13C-CP/MAS-NMR-Spektren der Modellsubstanzen PAN, PAM und PAA beinhaltet. Weitere stattfindende Reaktionen sind die Bildung von konjugierten Doppelbindungen, welche insbesondere auf eine Zyklisierung der Nitrile hinweisen. Die nasschemisch initiierte Zyklisierung der Nitrilgruppen kann zu k{\"u}rzeren Stabilisierungszeiten und einem besser kontrollierbaren Stabilisierungsprozess durch geringere W{\"a}rmefreisetzung und schlussendlich zu einer Kostenersparnis des gesamten Verfahrens f{\"u}hren. Die Umsetzung der Nitrilgruppen konnte mit einer Reaktion pseudo 1. Ordnung gut abgebildet werden. DMSO initiiert die Esterhydrolyse, wobei die KOH-Konzentration einen h{\"o}heren Einfluss auf die Reaktionsgeschwindigkeit der Ester- und Nitrilhydrolyse als die DMSO-Konzentration besitzt. Beide Reaktionen zeigen eine vergleichbare Abh{\"a}ngigkeit von der Temperatur. Die Erh{\"o}hung der Pr{\"a}stabilisierungszeit und der KOH- bzw.
DMSO-Konzentration f{\"u}hrt zur Migration niedermolekularer Bestandteile des Fasermaterials an die Oberfl{\"a}che und der Bildung punktueller Ablagerungen bis hin zu miteinander verbundenen Einzelfasern. Eine weitere Erh{\"o}hung der Pr{\"a}stabilisierungszeit bzw. der Konzentration f{\"u}hrt zu einem steigenden Carbons{\"a}ureanteil und zur Quellung des Fasermaterials, wodurch die Ablagerungen in das Reaktionsmedium diffundieren. Die Ablagerungen enthalten Chlor, welches durch den Waschvorgang mit HCl in das Materialsystem gelangt ist und durch Parameteranpassungen reduziert wurde. Die schmelzbaren Fasern konnten durch die Pr{\"a}stabilisierung erfolgreich {\"u}ber eine Kern-Mantel-Struktur in nicht-thermoplastische Fasern {\"u}berf{\"u}hrt werden. Zur Ermittlung eines geeigneten Prozessfensters f{\"u}r nachfolgende thermische Beanspruchungen der pr{\"a}stabilisierten Fasern wurden drei Kriterien identifiziert, anhand welcher die Evaluation erfolgte. Das erste Kriterium beinhaltet die Notwendigkeit der vollst{\"a}ndigen Aufhebung der thermoplastischen Eigenschaft der Fasern. Als zweites Kriterium diente die Fasermorphologie. Anhand von REM-Aufnahmen wurden Faserb{\"u}ndel mit separierten Einzelfasern ohne Ablagerungen f{\"u}r die nachfolgende Stabilisierung ausgew{\"a}hlt. Das dritte Kriterium bezieht sich auf eine m{\"o}glichst geringe Umsetzung der Nitrilgruppen, um Pr{\"a}stabilisierungsbedingungen mit Nebenreaktionen zu vermeiden. Aus den Untersuchungen konnte eine Pr{\"a}stabilisierungstemperatur von 60 °C als geeignet identifiziert werden. Weiterhin f{\"u}hren hoch alkalische Zusammensetzungen des Reaktionsmediums mit KOH-Konzentrationen von 1, 1,5 und 2 M, vorzugsweise 1,5 M und 50 vol\% DMSO mit Reaktionszeiten von unter 10 min zu geeigneten Fasern. Ein MEA-Anteil unterhalb von 2 mol\% bewirkt eine {\"U}berf{\"u}hrung in die Unschmelzbarkeit. Thermisch stabile und f{\"u}r die nachfolgende Stabilisierung geeignete Fasern besitzen weiterhin 68 - 80 mol\% Nitrilgruppen, 20 - 25 mol\% Carbons{\"a}uren, bis zu 15 mol\% prim{\"a}re Carbons{\"a}ureamide und zyklisierte Strukturen.}, language = {de} } @phdthesis{Thiede2019, author = {Thiede, Tobias}, title = {A multiscale analysis of additively manufactured lattice structures}, doi = {10.25932/publishup-47041}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470418}, school = {Universit{\"a}t Potsdam}, pages = {xi, 97, LIII}, year = {2019}, abstract = {Additive Manufacturing (AM) in terms of laser powder-bed fusion (L-PBF) offers new prospects regarding the design of parts and enables therefore the production of lattice structures. These lattice structures shall be implemented in various industrial applications (e.g. gas turbines) for reasons of material savings or cooling channels. However, internal defects, residual stress, and structural deviations from the nominal geometry are unavoidable. In this work, the structural integrity of lattice structures manufactured by means of L-PBF was non-destructively investigated on a multiscale approach. A workflow for quantitative 3D powder analysis in terms of particle size, particle shape, particle porosity, inter-particle distance and packing density was established. Synchrotron computed tomography (CT) was used to correlate the packing density with the particle size and particle shape. It was also observed that at least about 50\% of the powder porosity was released during production of the struts. 
Struts are the components of lattice structures and were investigated by means of laboratory CT. The focus was on the influence of the build angle on part porosity and surface quality. The surface topography analysis was advanced by the quantitative characterisation of re-entrant surface features. This characterisation was compared with conventional surface parameters, showing their complementary information, but also the need for AM-specific surface parameters. The mechanical behaviour of the lattice structure was investigated with in-situ CT under compression and successive digital volume correlation (DVC). The deformation was found to be knot-dominated, and therefore the lattice folds layer by layer of unit cells. The residual stress was determined experimentally for the first time in such lattice structures. Neutron diffraction was used for the non-destructive 3D stress investigation. The principal stress directions and values were determined as a function of the number of measured directions. While a significant uni-axial stress state was found in the strut, a more hydrostatic stress state was found in the knot. In both cases, strut and knot, at least seven directions were needed to find reliable principal stress directions.}, language = {en} } @phdthesis{Farhy2019, author = {Farhy, Yael}, title = {Universals and particulars in morphology}, doi = {10.25932/publishup-47003}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470033}, school = {Universit{\"a}t Potsdam}, pages = {VI, 167}, year = {2019}, abstract = {For many years, psycholinguistic evidence has been predominantly based on findings from native speakers of Indo-European languages, primarily English, thus providing a rather limited perspective on the human language system. In recent years, a growing body of experimental research has been devoted to broadening this picture, testing a wide range of speakers and languages, aiming to understand the factors that lead to variability in linguistic performance. The present dissertation investigates sources of variability within the morphological domain, examining how and to what extent morphological processes and representations are shaped by specific properties of languages and speakers. Firstly, the present work focuses on a less explored language, Hebrew, to investigate how the unique non-concatenative morphological structure of Hebrew, namely a non-linear combination of consonantal roots and vowel patterns to form lexical entries (L-M-D + CiCeC = limed 'teach'), affects morphological processes and representations in the Hebrew lexicon. Secondly, a less investigated population was tested: late learners of a second language. We directly compare native (L1) and non-native (L2) speakers, specifically highly proficient and immersed late learners of Hebrew. Throughout all publications, we have focused on the morphological phenomenon of inflectional classes (called binyanim; singular: binyan), comparing productive (class Piel, e.g., limed 'teach') and unproductive (class Paal, e.g., lamad 'learn') verbal inflectional classes. By using this test case, two psycholinguistic aspects of morphology were examined: (i) how morphological structure affects online recognition of complex words, using masked priming (Publications I and II) and cross-modal priming (Publication III) techniques, and (ii) what type of cues are used when extending morpho-phonological patterns to novel complex forms, a process referred to as morphological generalization, using an elicited production task (Publication IV).
The findings obtained in the four manuscripts, either published or under review, provide significant insights into the role of productivity in Hebrew morphological processing and generalization in L1 and L2 speakers. Firstly, the present L1 data revealed a close relationship between the productivity of Hebrew verbal classes and the recognition process, as reflected in both priming techniques. The consonantal root was accessed in the productive class (Piel) but not in the unproductive class (Paal). Another dissociation between the two classes was revealed in the cross-modal priming, yielding a semantic relatedness effect only for Paal but not Piel primes. These findings are taken to reflect that the Hebrew mental representations display a balance between stored undecomposable unstructured stems (Paal) and decomposed structured stems (Piel), in a similar manner to a typical dual-route architecture, showing that the Hebrew mental lexicon is less unique than previously claimed in psycholinguistic research. The results of the generalization study, however, indicate that there are still substantial differences between inflectional classes of Hebrew and other Indo-European classes, particularly in the type of information they rely on in generalization to novel forms. Hebrew binyan generalization relies more on cues of argument structure and less on phonological cues. Secondly, clear L1/L2 differences were observed in the sensitivity to abstract morphological and morpho-syntactic information during complex word recognition and generalization. While L1 Hebrew speakers were sensitive to the binyan information during recognition, expressed by the contrast in root priming, L2 speakers showed similar root priming effects for both classes, but only when the primes were presented in an infinitive form. A root priming effect was not obtained for primes in a finite form. These patterns are interpreted as evidence for a reduced sensitivity of L2 speakers to morphological information, such as information about inflectional classes, and evidence for processing costs in recognition of forms carrying complex morpho-syntactic information. Reduced reliance on structural information cues was found in the production of novel verbal forms, where the L2 group displayed a weaker effect of argument structure for Piel responses, in comparison to the L1 group. Given the L2 results, we suggest that morphological and morphosyntactic information remains challenging for late bilinguals, even at high proficiency levels.}, language = {en} } @phdthesis{StutterGarcia2019, author = {Stutter Garcia, Ana}, title = {The use of grammatical knowledge in an additional language}, doi = {10.25932/publishup-46932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469326}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 340}, year = {2019}, abstract = {This thesis investigates whether multilingual speakers' use of grammatical constraints in an additional language (La) is affected by the native (L1) and non-native grammars (L2) of their linguistic repertoire. Previous studies have used untimed measures of grammatical performance to show that L1 and L2 grammars affect the initial stages of La acquisition. This thesis extends this work by examining whether speakers at intermediate levels of La proficiency, who demonstrate mature untimed/offline knowledge of the target La constraints, are differentially affected by their L1 and L2 knowledge when they comprehend sentences under processing pressure.
For this purpose, several groups of La German speakers were tested on word order and agreement phenomena using online/timed measures of grammatical knowledge. Participants had mirrored distributions of their prior languages: they were either L1English/L2Spanish speakers or L1Spanish/L2English speakers. Crucially, in half of the phenomena the target La constraint aligned with English but not with Spanish, while in the other half it aligned with Spanish but not with English. Results show that the L1 grammar plays a major role in the use of La constraints under processing pressure, as participants displayed increased sensitivity to La constraints when they aligned with their L1, and reduced sensitivity when they did not. Further, in specific phenomena in which the L2 and La constraints aligned, increased L2 proficiency resulted in an enhanced sensitivity to the La constraint. These findings suggest that both native and non-native grammars affect how speakers use La grammatical constraints under processing pressure. However, L1 and L2 grammars differentially influence participants' performance: While L1 constraints seem to be reliably recruited to cope with the processing demands of real-time La use, proficiency in an L2 can enhance sensitivity to La constraints only in specific circumstances, namely when L2 and La constraints align.}, language = {en} } @phdthesis{Guislain2019, author = {Guislain, Alexis}, title = {Eco-physiological consequences of fluctuating light on phytoplankton}, doi = {10.25932/publishup-46927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469272}, school = {Universit{\"a}t Potsdam}, pages = {161}, year = {2019}, abstract = {Phytoplankton growth depends not only on the mean intensity but also on the dynamics of the light supply. The nonlinear light-dependency of growth is characterized by a small number of basic parameters: the compensation light intensity PARcompμ, where production and losses are balanced, the growth efficiency at sub-saturating light αµ, and the maximum growth rate at saturating light µmax. In surface mixed layers, phytoplankton may rapidly move between high light intensities and almost darkness. Because of the different frequency distribution of light and/or acclimation processes, the light-dependency of growth may differ between constant and fluctuating light. Very few studies measured growth under fluctuating light at a sufficient number of mean light intensities to estimate the parameters of the growth-irradiance relationship. Hence, the influence of light dynamics on µmax, αµ and PARcompμ is still largely unknown. By extension, accurate modelling predictions of phytoplankton development under fluctuating light exposure remain difficult to make. This PhD thesis does not intend to directly extrapolate a few experimental results to aquatic systems, but rather to improve the mechanistic understanding of the variation of the light-dependency of growth under light fluctuations and its effects on phytoplankton development. In Lake TaiHu and at the Three Gorges Reservoir (China), we incubated phytoplankton communities in bottles placed either at fixed depths or moved vertically through the water column to mimic vertical mixing. Phytoplankton at fixed depths received only the diurnal changes in light (defined as constant light regime), while the vertically moved phytoplankton received rapidly fluctuating light, as the vertical light gradient was superimposed on the natural sinusoidal diurnal sunlight.
The vertically moved samples followed a circular movement with 20 min per revolution, replicating to some extent the full overturn of typical Langmuir cells. Growth, photosynthesis, oxygen production and respiration of communities (at Lake TaiHu) were measured. To complete these investigations, a physiological experiment was performed in the laboratory on a toxic strain of Microcystis aeruginosa (FACBH 1322) incubated under fluctuating light with a 20 min period. Here, we measured electron transport rates and net oxygen production at a much higher time resolution (single-minute timescale). The present PhD thesis provides evidence for substantial effects of fluctuating light on the eco-physiology of phytoplankton. Both experiments performed under semi-natural conditions in Lake TaiHu and at the Three Gorges Reservoir gave similar results. The significant decline in community growth efficiencies αµ under fluctuating light was caused in large part by the different frequency distribution of light intensities, which shortened the effective daylength for production. The remaining gap in community αµ was attributed to species-specific photoacclimation mechanisms and to light-dependent respiratory losses. In contrast, community maximal growth rates µmax were similar between incubations at constant and fluctuating light. At a growth-saturating daily light supply, differences in losses for biosynthesis between the two light regimes were observed. Phytoplankton experiencing constant light suffered photo-inhibition, leading to foregone photosynthesis and additional respiratory costs for photosystem repair. Conversely, intermittent exposure to low and high light intensities prevented photo-inhibition of the mixed algae but forced them to develop an alternative light-use strategy. They harvested and exploited surface irradiance better by enhancing their photosynthesis. In the laboratory, we showed that Microcystis aeruginosa increased its oxygen consumption by dark respiration in the light within only a few minutes of exposure to increasing light intensities. Moreover, we showed that, within a simulated Langmuir cell, the net production at saturating light and the compensation light intensity for production at limiting light are positively related. These results are best explained by an accumulation of photosynthetic products at increasing irradiance and a mobilization of these fresh resources through a rapid enhancement of dark respiration for maintenance and biosynthesis at decreasing irradiance. At the daily timescale, we showed that species' enhancement of photosynthesis at high irradiance for biosynthesis increased their maintenance respiratory costs at limiting light. The species-specific growth rate at saturating light µmax and the compensation light intensity for growth PARcompμ of species incubated in Lake TaiHu were positively related. Because of this species-specific physiological tradeoff, species displayed different affinities to limiting and saturating light, thereby exhibiting a gleaner-opportunist tradeoff. In Lake TaiHu, we showed that inter-specific differences in light acquisition traits (µmax and PARcompμ) allowed coexistence of species on a gradient of constant light while avoiding competitive exclusion. More interestingly, we demonstrated for the first time that vertical mixing (inducing a fluctuating light supply for phytoplankton) may alter or even reverse the light utilization strategies of species within a couple of days. 
The intra-specific variation in traits under fluctuating light increased the niche space for acclimated species, precluding competitive exclusion. Overall, this PhD thesis contributes to a better understanding of phytoplankton eco-physiology under fluctuating light supply. This work could enhance the quality of predictions of phytoplankton development under certain weather conditions or climate change scenarios.}, language = {en} } @phdthesis{Messi2019, author = {Messi, Hugues Urbain Patrick}, title = {Les sources du savoir - l'expression de l'inf{\´e}rence en Fran{\c{c}}ais}, doi = {10.25932/publishup-46961}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469612}, school = {Universit{\"a}t Potsdam}, pages = {iii, 291}, year = {2019}, abstract = {1. Unter Mediativit{\"a}t verstehen wir in dieser Dissertation die sprachliche Markierung der Informationsquelle. Ein Sprecher, der einen Sachverhalt vermittelt, hat die M{\"o}glichkeit, durch sprachliche Mittel ausdr{\"u}cklich zu markieren, wie er die {\"u}bermittelte Information bekommen hat. Um diese Informationsquelle sprachlich zu deuten, werden im Franz{\"o}sischen unter anderem einige Verben als mediative Marker (MM) verwendet. 2. Die untersuchten Elemente croire, imaginer, para{\^i}tre, penser, savoir, sembler, supposer, trouver sind „mediative Verben". Jedes der untersuchten Verben weist besondere semantische und pragmatische Eigenschaften auf, die immer mit dem Ausdruck der Wissensquelle verbunden sind. Es handelt sich also um kognitive Verben (KV), die eine sprachliche Markierung der Informationsquelle vornehmen. Nach ihrem Verhalten in solchen Kontexten erf{\"u}llen sie die Funktion der „mediativen Markierung". 3. Die epistemische Modalit{\"a}t ist der Mediativit{\"a}t untergeordnet. Die Erscheinungsform der Modalit{\"a}t (Modalit{\"a}tstyp) bestimmt die St{\"a}rke der epistemischen Modalit{\"a}t. Keines der analysierten Verben dr{\"u}ckt lediglich eine epistemische Leseart aus. Die Dichotomie zwischen der mediativen und epistemischen Modalit{\"a}t besteht darin, dass die erste die Wissensquelle ausdr{\"u}ckt und die zweite ausschließlich die Einstellung des Sprechers gegen{\"u}ber dem Wahrheitsgrad der {\"A}ußerung widerspiegelt. 4. F{\"u}r alle Konstruktionen der Form [V/{\o}P] oder [V, P] ist P die Matrix des Satzes. Unsere Ergebnisse zeigen, dass - obwohl diese Konstituenten verschiedene Stellen besetzen k{\"o}nnen - sie dennoch ihre Funktionen als Matrix behalten, indem sie die Propositionen, auf die sie sich beziehen, unter ihrer Rektion behalten. 5. Die Konstruktionen [V/{\o}P] und [V, P] stehen in freier Variation. Da sich der Wechsel in einem vergleichbaren Kontext vollzieht, und da es in gleicher Umgebung eine freie Substitution gibt, handelt es sich bei den beiden Vorkommen [V/{\o}P] und [V, P] um syntaktische Varianten. 6. Der Konditional-Gebrauch dient haupts{\"a}chlich dazu, die Inferenztypen zu unterscheiden und gleichzeitig die zugrundeliegende Polyphonie zu verdeutlichen. Der Gebrauch des Konditionals dr{\"u}ckt aus, dass es sich nicht um eine zuverl{\"a}ssig zutreffende {\"A}ußerung handelt. Der Ausdruck von Zweifeln kann im Franz{\"o}sischen unter Verwendung spezifischer grammatischer Mittel erfolgen. Zu diesen geh{\"o}rt der Konditional zum Ausdruck der Mitigation (des Zweifels, der Reserviertheit usw.) 
und der Polyphonie.}, language = {fr} } @phdthesis{Crisologo2019, author = {Crisologo, Irene}, title = {Using spaceborne radar platforms to enhance the homogeneity of weather radar calibration}, doi = {10.25932/publishup-44570}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445704}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 61}, year = {2019}, abstract = {Accurate weather observations are the keystone of many quantitative applications, such as precipitation monitoring and nowcasting, hydrological modelling and forecasting, climate studies, as well as understanding precipitation-driven natural hazards (i.e. floods, landslides, debris flows). Weather radars have been an increasingly popular tool since the 1940s for providing precipitation data at high spatial and temporal resolution at the mesoscale, bridging the gap between synoptic and point scale observations. Yet, many institutions still struggle to tap the potential of the large archives of reflectivity, as there is still much to understand about factors that contribute to measurement errors, one of which is calibration. Calibration represents a substantial source of uncertainty in quantitative precipitation estimation (QPE). A miscalibration of a few dBZ can easily deteriorate the accuracy of precipitation estimates by an order of magnitude. Instances where rain cells carrying torrential rains are misidentified by the radar as moderate rain could mean the difference between a timely warning and a devastating flood. Since 2012, the Philippine Atmospheric, Geophysical, and Astronomical Services Administration (PAGASA) has been expanding the country's ground radar network. We had a first look into the dataset from one of the longest-running radars (the Subic radar) after devastating week-long torrential rains and thunderstorms in August 2012, caused by the annual southwest monsoon and enhanced by the north-passing Typhoon Haikui. The analysis of the rainfall spatial distribution revealed the added value of radar-based QPE in comparison to interpolated rain gauge observations. However, when compared with local gauge measurements, severe miscalibration of the Subic radar was found. As a consequence, the radar-based QPE would have underestimated the rainfall amount by up to 60\% if it had not been adjusted by rain gauge observations—a technique that is not only affected by other uncertainties, but which is also not feasible in other regions of the country with very sparse rain gauge coverage. Relative calibration techniques, or the assessment of bias from the reflectivity of two radars, have been steadily gaining popularity. Previous studies have demonstrated that reflectivity observations from the Tropical Rainfall Measuring Mission (TRMM) and its successor, the Global Precipitation Measurement (GPM), are accurate enough to serve as a calibration reference for ground radars over low-to-mid-latitudes (± 35 deg for TRMM; ± 65 deg for GPM). Comparing spaceborne radars (SR) and ground radars (GR) requires cautious consideration of differences in measurement geometry and instrument specifications, as well as temporal coincidence. For this purpose, we apply a 3-D volume matching method, developed by Schwaller and Morris (2011) and extended by Warren et al. (2018), to 5 years' worth of observations from the Subic radar. In this method, only the volumetric intersections of the SR and GR beams are considered. Calibration bias affects reflectivity observations homogeneously across the entire radar domain. 
Yet, other sources of systematic measurement errors are highly heterogeneous in space, and can either enhance or balance the bias introduced by miscalibration. In order to account for such heterogeneous errors, and thus isolate the calibration bias, we assign a quality index to each matching SR-GR volume, and compute the GR calibration bias as a quality-weighted average of reflectivity differences in any sample of matching SR-GR volumes. We exemplify the idea of quality-weighted averaging by using beam blockage fraction (BBF) as a quality variable. Quality-weighted averaging is able to increase the consistency of SR and GR observations by decreasing the standard deviation of the SR-GR differences, and thus increasing the precision of the bias estimates. To extend this framework further, the SR-GR quality-weighted bias estimation is applied to the neighboring Tagaytay radar, but this time focusing on path-integrated attenuation (PIA) as the source of uncertainty. Tagaytay is a C-band radar operating at a shorter wavelength and is therefore more affected by attenuation. Applying the same method used for the Subic radar, a time series of calibration bias is also established for the Tagaytay radar. The Tagaytay radar sits at a higher altitude than the Subic radar and is surrounded by gentler terrain, so beam blockage is negligible, especially in the overlapping region. Conversely, the Subic radar is largely affected by beam blockage in the overlapping region, but since it is an S-band radar, attenuation is considered negligible. These coincidentally independent uncertainty contributions of the two radars in the region of overlap provide an ideal environment for experimenting with different scenarios of quality filtering when comparing reflectivities from the two ground radars. The standard deviation of the GR-GR differences already decreases if we consider either BBF or PIA to compute the quality index and thus the weights. However, combining them multiplicatively resulted in the largest decrease in standard deviation, suggesting that taking both factors into account increases the consistency between the matched samples. The overlap between the two radars and the instances of the SR passing over the two radars at the same time allow for verification of the SR-GR quality-weighted bias estimation method. In this regard, the consistency between the two ground radars is analyzed before and after bias correction is applied. For cases when all three radars are coincident during a significant rainfall event, the correction of GR reflectivities with calibration bias estimates from SR overpasses dramatically improves the consistency between the two ground radars, which had shown inconsistent observations before correction. We also show that for cases where adequate SR coverage is unavailable, interpolating the calibration biases using a moving average can be used to correct the GR observations for any point in time to some extent. By using the interpolated biases to correct GR observations, we demonstrate that bias correction reduces the absolute value of the mean difference in most cases, and therefore improves the consistency between the two ground radars. This thesis demonstrates that, in general, taking into account systematic sources of uncertainty that are heterogeneous in space (e.g. BBF) and time (e.g. PIA) allows for a more consistent estimation of calibration bias, a homogeneous quantity. 
The bias still exhibits an unexpected variability in time, which hints that there are still other sources of errors that remain unexplored. Nevertheless, the increase in consistency between SR and GR as well as between the two ground radars, suggests that considering BBF and PIA in a weighted-averaging approach is a step in the right direction. Despite the ample room for improvement, the approach that combines volume matching between radars (either SR-GR or GR-GR) and quality-weighted comparison is readily available for application or further scrutiny. As a step towards reproducibility and transparency in atmospheric science, the 3D matching procedure and the analysis workflows as well as sample data are made available in public repositories. Open-source software such as Python and wradlib are used for all radar data processing in this thesis. This approach towards open science provides both research institutions and weather services with a valuable tool that can be applied to radar calibration, from monitoring to a posteriori correction of archived data.}, language = {en} } @phdthesis{Vranic2019, author = {Vranic, Marija}, title = {3D Structure of the biomarker hepcidin-25 in its native state}, doi = {10.25932/publishup-45929}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459295}, school = {Universit{\"a}t Potsdam}, pages = {xii, 135}, year = {2019}, abstract = {Hepcidin-25 (Hep-25) plays a crucial role in the control of iron homeostasis. Since the dysfunction of the hepcidin pathway leads to multiple diseases as a result of iron imbalance, hepcidin represents a potential target for the diagnosis and treatment of disorders of iron metabolism. Despite intense research in the last decade targeted at developing a selective immunoassay for iron disorder diagnosis and treatment and better understanding the ferroportin-hepcidin interaction, questions remain. The key to resolving these underlying questions is acquiring exact knowledge of the 3D structure of native Hep-25. Since it was determined that the N-terminus, which is responsible for the bioactivity of Hep-25, contains a small Cu(II)-binding site known as the ATCUN motif, it was assumed that the Hep-25-Cu(II) complex is the native, bioactive form of the hepcidin. This structure has thus far not been elucidated in detail. Owing to the lack of structural information on metal-bound Hep-25, little is known about its possible biological role in iron metabolism. Therefore, this work is focused on structurally characterizing the metal-bound Hep-25 by NMR spectroscopy and molecular dynamics simulations. For the present work, a protocol was developed to prepare and purify properly folded Hep-25 in high quantities. In order to overcome the low solubility of Hep-25 at neutral pH, we introduced the C-terminal DEDEDE solubility tag. The metal binding was investigated through a series of NMR spectroscopic experiments to identify the most affected amino acids that mediate metal coordination. Based on the obtained NMR data, a structural calculation was performed in order to generate a model structure of the Hep-25-Ni(II) complex. The DEDEDE tag was excluded from the structural calculation due to a lack of NMR restraints. The dynamic nature and fast exchange of some of the amide protons with solvent reduced the overall number of NMR restraints needed for a high-quality structure. 
The NMR data revealed that the 20 C-terminal Hep-25 amino acids experienced no significant conformational changes, compared to published results, as a result of a pH change from pH 3 to pH 7 and metal binding. A 3D model of the Hep-25-Ni(II) complex was constructed from NMR data recorded for the hexapeptide-Ni(II) complex and the Hep-25-DEDEDE-Ni(II) complex in combination with the fixed conformation of the 19 C-terminal amino acids. The NMR data of the Hep-25-DEDEDE-Ni(II) complex indicate that the ATCUN motif moves independently from the rest of the structure. The 3D model structure of the metal-bound Hep-25 allows future work to elucidate hepcidin's interaction with its receptor ferroportin and should serve as a starting point for the development of antibodies with improved selectivity.}, language = {en} } @phdthesis{Nasery2019, author = {Nasery, Mustafa}, title = {The success and failure of civil service reforms in Afghanistan}, doi = {10.25932/publishup-44473}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444738}, school = {Universit{\"a}t Potsdam}, pages = {viii, 258}, year = {2019}, abstract = {The Government will create a motivated, merit-based, performance-driven, and professional civil service that is resistant to temptations of corruption and which provides efficient, effective and transparent public services that do not force customers to pay bribes. — (GoIRA, 2006, p. 106) We were in a black hole! We had an empty glass and had nothing from our side to fill it with! Thus, we accepted anything anybody offered; that is how our glass was filled; that is how we reformed our civil service. — (Former Advisor to IARCSC, personal communication, August 2015) How and under what conditions were the post-Taleban Civil Service Reforms of Afghanistan initiated? What were the main components of the reforms? What were their objectives and to what extent were they achieved? Who were the leading domestic and foreign actors involved in the process? Finally, what specific factors influenced the success and failure of Afghanistan's Civil Service Reforms since 2002? Guided by such fundamental questions, this research studies the wicked process of reforming the Afghan civil service in an environment where a variety of contextual, programmatic, and external factors affected the design and implementation of reforms that were entirely funded and technically assisted by the international community. Focusing on the core components of reforms—recruitment, remuneration, and appraisal of civil servants—the qualitative study provides a detailed picture of the pre-reform civil service and its major human resources developments in the past. Following discussions on the content and purposes of the main reform programs, it then analyzes the extent of changes in policies and practices by examining the outputs and effects of these reforms. Moreover, the study identifies the specific factors that led the reforms toward a situation where most of the intended objectives remain unachieved. 
Doing so, it explores and explains how an overwhelming influence of international actors with conflicting interests, large-scale corruption, political interference, networks of patronage, institutionalized nepotism, culturally accepted cronyism and widespread ethnic favoritism created a very complex environment and prevented the reforms from transforming Afghanistan's patrimonial civil service into a professional civil service, which is driven by performance and merit.}, language = {en} } @phdthesis{AbdelwahabHusseinAbdelwahabElsayed2019, author = {Abdelwahab Hussein Abdelwahab Elsayed, Ahmed}, title = {Probabilistic, deep, and metric learning for biometric identification from eye movements}, doi = {10.25932/publishup-46798}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-467980}, school = {Universit{\"a}t Potsdam}, pages = {vi, 65}, year = {2019}, abstract = {A central insight from psychological studies on human eye movements is that eye movement patterns are highly individually characteristic. They can, therefore, be used as a biometric feature, that is, subjects can be identified based on their eye movements. This thesis introduces new machine learning methods to identify subjects based on their eye movements while viewing arbitrary content. The thesis focuses on probabilistic modeling of the problem, which has yielded the best results in the most recent literature. The thesis studies the problem in three phases by proposing a purely probabilistic, probabilistic deep learning, and probabilistic deep metric learning approach. In the first phase, the thesis studies models that rely on psychological concepts about eye movements. Recent literature illustrates that individual-specific distributions of gaze patterns can be used to accurately identify individuals. In these studies, models were based on a simple parametric family of distributions. Such simple parametric models can be robustly estimated from sparse data, but have limited flexibility to capture the differences between individuals. Therefore, this thesis proposes a semiparametric model of gaze patterns that is flexible yet robust for individual identification. These patterns can be understood as domain knowledge derived from psychological literature. Fixations and saccades are examples of simple gaze patterns. The proposed semiparametric densities are drawn under a Gaussian process prior centered at a simple parametric distribution. Thus, the model will stay close to the parametric class of densities if little data is available, but it can also deviate from this class if enough data is available, increasing the flexibility of the model. The proposed method is evaluated on a large-scale dataset, showing significant improvements over the state-of-the-art. Later, the thesis replaces the model based on gaze patterns derived from psychological concepts with a deep neural network that can learn more informative and complex patterns from raw eye movement data. As previous work has shown that the distribution of these patterns across a sequence is informative, a novel statistical aggregation layer called the quantile layer is introduced. It explicitly fits the distribution of deep patterns learned directly from the raw eye movement data. The proposed deep learning approach is end-to-end learnable, such that the deep model learns to extract informative, short local patterns while the quantile layer learns to approximate the distributions of these patterns. 
Quantile layers are a generic approach that can converge to standard pooling layers or have a more detailed description of the features being pooled, depending on the problem. The proposed model is evaluated in a large-scale study using the eye movements of subjects viewing arbitrary visual input. The model improves upon the standard pooling layers and other statistical aggregation layers proposed in the literature. It also improves upon the state-of-the-art eye movement biometrics by a wide margin. Finally, for the model to identify any subject — not just the set of subjects it is trained on — a metric learning approach is developed. Metric learning learns a distance function over instances. The metric learning model maps the instances into a metric space, where sequences of the same individual are close, and sequences of different individuals are further apart. This thesis introduces a deep metric learning approach with distributional embeddings. The approach represents sequences as a set of continuous distributions in a metric space; to achieve this, a new loss function based on Wasserstein distances is introduced. The proposed method is evaluated on multiple domains besides eye movement biometrics. This approach outperforms the state of the art in deep metric learning in several domains while also outperforming the state of the art in eye movement biometrics.}, language = {en} } @phdthesis{Schneider2019, author = {Schneider, Jan Niklas}, title = {Computational approaches for emotion research}, doi = {10.25932/publishup-45927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459275}, school = {Universit{\"a}t Potsdam}, pages = {xv, 145}, year = {2019}, abstract = {Emotionen sind ein zentrales Element menschlichen Erlebens und spielen eine wichtige Rolle bei der Entscheidungsfindung. Diese Dissertation identifiziert drei methodische Probleme der aktuellen Emotionsforschung und zeigt auf, wie diese mittels computergest{\"u}tzter Methoden gel{\"o}st werden k{\"o}nnen. Dieser Ansatz wird in drei Forschungsprojekten demonstriert, die die Entwicklung solcher Methoden sowie deren Anwendung auf konkrete Forschungsfragen beschreiben. Das erste Projekt beschreibt ein Paradigma welches es erm{\"o}glicht, die subjektive und objektive Schwierigkeit der Emotionswahrnehmung zu messen. Dar{\"u}ber hinaus erm{\"o}glicht es die Verwendung einer beliebigen Anzahl von Emotionskategorien im Vergleich zu den {\"u}blichen sechs Kategorien der Basisemotionen. Die Ergebnisse deuten auf eine Zunahme der Schwierigkeiten bei der Wahrnehmung von Emotionen mit zunehmendem Alter der Darsteller hin und liefern Hinweise darauf, dass junge Erwachsene, {\"a}ltere Menschen und M{\"a}nner ihre Schwierigkeit bei der Wahrnehmung von Emotionen untersch{\"a}tzen. Weitere Analysen zeigten eine geringe Relevanz personenbezogener Variablen und deuteten darauf hin, dass die Schwierigkeit der Emotionswahrnehmung vornehmlich durch die Auspr{\"a}gung der Wertigkeit des Ausdrucks bestimmt wird. Das zweite Projekt zeigt am Beispiel von Arousal, einem etablierten, aber vagen Konstrukt der Emotionsforschung, wie Face-Tracking-Daten dazu genutzt werden k{\"o}nnen solche Konstrukte zu sch{\"a}rfen. Es beschreibt, wie aus Face-Tracking-Daten Maße f{\"u}r die Entfernung, Geschwindigkeit und Beschleunigung von Gesichtsausdr{\"u}cken berechnet werden k{\"o}nnen. Das Projekt untersuchte wie diesen Maße mit der Arousal-Wahrnehmung in Menschen mit und ohne Autismus zusammenh{\"a}ngen. 
Der Abstand zum Neutralgesicht war pr{\"a}diktiv f{\"u}r die Arousal-Bewertungen in beiden Gruppen. Die Ergebnisse deuten auf eine qualitativ {\"a}hnliche Wahrnehmung von Arousal f{\"u}r Menschen mit und ohne Autismus hin. Im dritten Projekt stellen wir die Partial-Least-Squares-Analyse als allgemeine Methode vor, um eine optimale Repr{\"a}sentation zur Verkn{\"u}pfung zweier hochdimensionale Datens{\"a}tze zu finden. Das Projekt demonstriert die Anwendbarkeit dieser Methode in der Emotionsforschung anhand der Frage nach Unterschieden in der Emotionswahrnehmung zwischen M{\"a}nnern und Frauen. Wir konnten zeigen, dass die emotionale Wahrnehmung von Frauen systematisch mehr Varianz der Gesichtsausdr{\"u}cke erfasst und dass signifikante Unterschiede in der Art und Weise bestehen, wie Frauen und M{\"a}nner einige Gesichtsausdr{\"u}cke wahrnehmen. Diese konnten wir als dynamische Gesichtsausdr{\"u}cke visualisieren. Um die Anwendung der entwickelten Methode f{\"u}r die Forschungsgemeinschaft zu erleichtern, wurde ein Software-Paket f{\"u}r die Statistikumgebung R geschrieben. Zudem wurde eine Website entwickelt (thisemotiondoesnotexist.com), die es Besuchern erlaubt, ein Partial-Least-Squares-Modell von Emotionsbewertungen und Face-Tracking-Daten interaktiv zu erkunden, um die entwickelte Methode zu verbreiten und ihren Nutzen f{\"u}r die Emotionsforschung zu illustrieren.}, language = {en} } @phdthesis{BarqueroPipin2019, author = {Barquero Pip{\´i}n, Antonio Carlos}, title = {Lengua, cultura, interculturalidad}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-480-7}, doi = {10.25932/publishup-43902}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439023}, school = {Universit{\"a}t Potsdam}, pages = {429}, year = {2019}, abstract = {Ohne Pragmatik w{\"a}re Kommunikation nicht m{\"o}glich, da wir sprachliche Aussagen nicht interpretieren k{\"o}nnten. F{\"u}r jeden Lernenden einer Sprache, die er nicht beherrscht, reicht es nicht aus, sprachlich kompetent zu sein, da der Zweck der Kommunikation darin besteht, mit anderen Menschen und in bestimmten Kontexten zu kommunizieren. Nur eine Lehre, die es erm{\"o}glicht, Aussagen zur Durchf{\"u}hrung von Sprachhandlungen zu erstellen und zu verstehen und die f{\"u}r einen bestimmten Kontext am besten geeigneten auszuw{\"a}hlen, kann sich als effizient erweisen. Die hier vorgestellte Arbeit zielt darauf ab, der wissenschaftlichen Gemeinschaft und insbesondere denjenigen, die direkt und indirekt am Unterrichtsprozess beteiligt sind, das Konzept der verbalen Pragmatik bekannt zu machen und es anderen wie Grammatik, Kultur oder Interkulturalit{\"a}t gegen{\"u}ber zu stellen. Ferner wendet sie sich der Frage zu, wie man auf die Bedeutung und die dringende Notwendigkeit aufmerksam macht, Pragmatik als relevante Disziplin im Kommunikationsprozess zu etablieren; dabei wird insbesondere auf ihre systematische Einbeziehung in Lehrb{\"u}chern f{\"u}r Spanisch als Fremdsprache, die f{\"u}r den schulischen Kontext konzipiert wurden, abgestellt. 
Dazu werden das Vorhandensein pragmatischer Elemente und die F{\"o}rderung pragmatischer Kompetenz in Lehrb{\"u}chern f{\"u}r Anf{\"a}nger sowie ihre Relevanz bei der Festlegung von Inhalten, Fortschrittsart und Methodik untersucht.}, language = {es} } @phdthesis{Maerz2019, author = {Maerz, Sven}, title = {Analyzing pore systems through comprehensive digital image analysis (DIA)}, doi = {10.25932/publishup-44588}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445880}, school = {Universit{\"a}t Potsdam}, pages = {xii, 107, xxi}, year = {2019}, abstract = {Carbonates tend to have complex pore systems which are often composed of distinct assemblages of genetically and geometrically different pore types at various scales (e.g., Melim et al., 2001; Lee et al., 2009; He et al., 2014; Dernaika \& Sinclair, 2017; Zhang et al., 2017). Such carbonate-typical multimodal pore systems are the result of both primary depositional processes and multiple stages of postdepositional modifications, causing small-scale heterogeneities in pore system properties and leading to the co-occurrence of both effective and ineffective pore types. These intrinsic variations in pore type effectiveness are the main reason for the often low correlation between porosity and permeability in carbonate pore systems (e.g., Mazzullo 2004; Ehrenberg \& Nadeau, 2005; Hollis et al., 2010; He et al., 2014; Rashid et al., 2015; Dernaika \& Sinclair, 2017), as it is also true for the marginal lacustrine carbonates studied in this thesis. However, by extracting interconnected and thus effective pore types, and simultaneously excluding isolated and ineffective pores, the understanding and prediction of permeability for given porosity can be highly enhanced (e.g., Melim et al., 2001; Zhang et al., 2017). In this thesis, a step-by-step workflow based on digital image analysis (DIA) is presented and performed on 32 facies-representative samples of marginal lacustrine carbonates from the Middle Miocene N{\"o}rdlinger Ries crater lake (Southern Germany), resulting in 77 mean values of pore type effectiveness which are based on 23,508 individual pore geometry data. By using pore shape factor γ (sensu Anselmetti et al., 1998) as a parameter to quantitatively describe pore shape complexity and therefore pore interconnectivity, the potential contribution (Kcontr.) of each pore type to total permeability (Ktotal) is calculated, and the most effective pore types are then identified. As a result, primary interpeloidal pores and secondary vugs are the most effective pore types in the studied marginal lacustrine succession, mainly due to their generally big size and complex shape, leading to an excellent interconnection between both pore types and consequently to the establishment of a highly effective pore network. Both pore types together compose the pore system of the peloidal grainstone facies. Therefore, this lithofacies type has been identified as the sedimentary facies with highest porosity-permeability properties in this marginal lacustrine succession. By applying the DIA-based method to 23 additional samples from the studied outcrop which all show extensive partial to complete cementation of preexisting pores, the impact of cementation on pore geometry and therefore on porosity and permeability is quantified. This results in a cementation reduction value for each relevant parameter which can then be used to enhance precision of predicting porosity and permeability within the studied succession. 
Furthermore, the concept of using pore shape complexity as a proxy parameter for pore system effectiveness is tested by applying an independent method (i.e., fluid flow simulation) to the dataset. DIA is then used once again to evaluate the outcome of the fluid flow simulation. The results confirm the previous findings that interpeloidal pores and vugs together build up the most effective pore system in the Ries lake carbonates. Finally, the extraction of the interconnected (i.e., effective) pore network leads to an improved correlation between porosity and permeability within the studied carbonates. The step-by-step workflow described in this thesis provides a quantitative petrographic method to identify and extract effective porosity from the pore system, which is crucial for understanding how carbonate pore systems generate permeability. This thesis also demonstrates that pore shape complexity is the most important geometrical parameter controlling pore interconnection and consequently the formation of effective porosity. It further emphasizes that pore shape factor γ (sensu Anselmetti et al. 1998) is a very robust and scale-independent proxy parameter to quantify pore type effectiveness. Additionally, DIA proves to be an ideal tool to directly link porosity and permeability to their mutual origin: the rock fabric and associated pore structure.}, language = {de} } @phdthesis{Nguyen2019, author = {Nguyen, Quyet Doan}, title = {Electro-acoustical probing of space-charge and dipole-polarization profiles in polymer dielectrics for electret and electrical-insulation applications}, doi = {10.25932/publishup-44562}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445629}, school = {Universit{\"a}t Potsdam}, pages = {105}, year = {2019}, abstract = {Electrets are dielectrics with quasi-permanent electric charge and/or dipoles and can sometimes be regarded as an electric analogue of a magnet. Since the discovery of the excellent charge retention capacity of poly(tetrafluoro ethylene) and the invention of the electret microphone, electrets have grown from a scientific curiosity into an important application in both science and technology. The history of electret research goes hand in hand with the quest for new materials with better charge and/or dipole retention capacity. To be useful, electrets normally have to be charged/poled to render them electro-active. This process involves electric-charge deposition and/or electric dipole orientation within the dielectrics' surfaces and bulk. Knowledge of the spatial distribution of electric charge and/or dipole polarization after their deposition and subsequent decay is crucial to the task of improving their stability in the dielectrics. Likewise, for dielectrics used in electrical insulation applications, there is also a need for spatial profiling of accumulated space charge and polarization. Traditionally, space-charge accumulation and large dipole polarization within insulating dielectrics are considered undesirable and harmful to the insulating dielectrics, as they might cause dielectric loss and could lead to internal electric field distortion and local field enhancement. High local electric fields could trigger several aging processes and reduce the insulating dielectrics' lifetime. However, with the advent of high-voltage DC transmission and high-voltage capacitors for energy storage, this is no longer necessarily the case. There is some overlap between the two fields of electrets and electrical insulation. 
While quasi-permanently trapped electric charge and/or large remanent dipole polarization are the requisites for electret operation, stably trapped electric charge in electrical insulation helps reduce electric charge transport and thus the overall electric conductivity. Controlled charge trapping can help prevent further charge injection and accumulation and can serve field-grading purposes in insulating dielectrics, whereas large dipole polarization can be utilized in energy storage applications. In this thesis, Piezoelectrically-generated Pressure Steps (PPSs) were employed as a nondestructive method to probe the electric-charge and dipole-polarization distribution in a range of thin-film (several hundred micrometers) polymer-based materials, namely polypropylene (PP), low-density polyethylene/magnesium oxide (LDPE/MgO) nanocomposites and poly(vinylidene fluoride-co-trifluoroethylene) (P(VDF-TrFE)) copolymer. PP film surface-treated with phosphoric acid to introduce isolated surface nanostructures serves as an example of 2-dimensional nanocomposites, whereas LDPE/MgO serves as the case of 3-dimensional nanocomposites with MgO nanoparticles dispersed in the LDPE polymer matrix. It is shown that the nanoparticles on the surface of acid-treated PP and in the bulk of LDPE/MgO nanocomposites improve the charge trapping capacity of the respective material and prevent further charge injection and transport, and that the enhanced charge trapping capacity makes PP and LDPE/MgO nanocomposites potential materials for both electret and electrical insulation applications. As for PVDF and VDF-based copolymers, the remanent spatial polarization distribution depends critically on the poling method as well as on the specific parameters used in the respective poling method. In this work, homogeneous polarization poling of P(VDF-TrFE) copolymers with different VDF contents has been attempted with cyclical hysteresis poling. The behaviour of the remanent polarization growth and the spatial polarization distribution are reported and discussed. The PPS method has proven to be a powerful tool for characterizing charge storage and transport in a wide range of polymer materials, from nonpolar and polar polymers to polymer nanocomposites.}, language = {en} } @phdthesis{vonKaphengst2019, author = {von Kaphengst, Dragana}, title = {Project's management quality in development cooperation}, doi = {10.25932/publishup-43099}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430992}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 237}, year = {2019}, abstract = {In light of the debate on the consequences of competitive contracting out of traditionally public services, this research compares two mechanisms used to allocate funds in development cooperation—direct awarding and competitive contracting out—aiming to identify their potential advantages and disadvantages. Agency theory is applied within the framework of rational-choice institutionalism to study the institutional arrangements that surround two different money allocation mechanisms, identify the incentives they create for the behavior of individual actors in the field, and examine how these then transfer into measurable differences in the managerial quality of development aid projects. In this work, project management quality is seen as an important determinant of overall project success. 
For data-gathering purposes, the German development agency, the Gesellschaft f{\"u}r Internationale Zusammenarbeit (GIZ), is used due to its unique way of working. Whereas the majority of projects receive funds via the direct-award mechanism, there is a commercial department, GIZ International Services (GIZ IS), that has to compete for project funds. The data concerning project management practices on the GIZ and GIZ IS projects was gathered via a web-based, self-administered survey of project team leaders. Principal component analysis was applied to reduce the dimensionality of the independent variable to a total of five components of project management. Furthermore, multiple regression analysis identified the differences between the separate components on these two project types. Enriched by qualitative data gathered via interviews, this thesis offers insights into everyday managerial practices in development cooperation and identifies the advantages and disadvantages of the two allocation mechanisms. The thesis first reiterates the responsibility of donors and implementers for overall aid effectiveness. It shows that the mechanism of competitive contracting out leads to better oversight and control of implementers, fosters deeper cooperation between the implementers and beneficiaries, and has the potential to strengthen the ownership of recipient countries. On the other hand, it shows that the evaluation quality does not tremendously benefit from the competitive allocation mechanism and that the quality of the component knowledge management and learning is better when direct-award mechanisms are used. This raises questions about the limited opportunities for actors in the field to learn from past mistakes and incorporate the findings into future interventions, which is one of the fundamental issues of aid effectiveness. Finally, the findings show immense deficiencies in regard to oversight and control of individual projects in German development cooperation.}, language = {en} } @phdthesis{Meyer2019, author = {Meyer, Ljuba}, title = {Bildungsort Familie}, doi = {10.25932/publishup-44431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444319}, school = {Universit{\"a}t Potsdam}, pages = {V, 438}, year = {2019}, abstract = {In der Bildungs- und Familienforschung wird die intergenerationale Weitergabe von Bildung innerhalb der Familie haupts{\"a}chlich unter dem Blickwinkel des schulischen Erfolges der nachwachsenden Generation thematisiert. „Wie" bildungsbezogene Transferprozesse innerhalb der Familie konkret ablaufen, bleibt jedoch in der deutschen Forschungslandschaft weitestgehend unbearbeitet. An dieser Stelle setzt diese qualitativ angelegte Arbeit an. Ziel dieser Arbeit ist es, bildungsbezogene Transferprozesse zu untersuchen, die innerhalb von russischen Dreigenerationenfamilien, die seit 1989 aus der ehemaligen Sowjetunion nach Berlin ausgewandert sind, zwischen der Großeltern-, der Eltern- und der Enkelgeneration ablaufen. Hinter diesen Transferprozessen verbergen sich im Sinne Bourdieus bewusste und unbewusste Bildungsstrategien der interviewten Familienmitglieder. Im Rahmen dieser Arbeit wurden zwei Sp{\"a}taussiedlerfamilien - zu diesen z{\"a}hlen Familie Hoffmann und Familie Popow - sowie zwei russisch-j{\"u}dische Familien - zu diesen z{\"a}hlen Familie Rosenthal und Familie Buchbinder - interviewt. 
Es wurden mit den einzelnen Mitgliedern der vier untersuchten Dreigenerationenfamilien Gruppendiskussionen sowie mit je einem Vertreter einer Generation leitfadengest{\"u}tzte Einzelinterviews gef{\"u}hrt. Die Erhebungsphase fand in Berlin im Zeitraum von 2010 bis 2012 statt. Das auf diese Weise gewonnene empirische Material wurde mithilfe der dokumentarischen Methode nach Bohnsack ausgewertet. Hierdurch wurde es m{\"o}glich die implizite Selbstverst{\"a}ndlichkeit, mit der sich Bildung in Familien nach Bourdieu habituell vollzieht, einzufangen und rekonstruierbar zu machen. In der Arbeit wurden eine habitustheoretische Interpretation der russischen Dreigenerationenfamilien und die entsprechende Feldanalyse nach Bourdieu vorgenommen. In diesem Zusammenhang wurde der soziale Raum der untersuchten Familien in der Ankunftsgesellschaft bez{\"u}glich ihres Vergleichshorizontes der Herkunftsgesellschaft rekonstruiert. Weiter wurde der Bildungstransfer vor dem jeweiligen Erlebnishintergrund der einzelnen Familien untersucht und diesbez{\"u}glich eine Typisierung vorgenommen. Im Rahmen dieser Untersuchung konnten neue Erkenntnisse zum bisher unerforschten Feld des Bildungstransfers russischer Dreigenerationenfamilien in Berlin gewonnen werden. Ein wesentliches Ergebnis dieser Arbeit ist, dass die Anwendung von Bourdieus Klassentheorie auch auf Gruppen, die in einer sozialistischen Gesellschaft sozialisiert wurden und in eine kapitalistisch orientierte Gesellschaft ausgewandert sind, produktiv sein kann. Ein weiteres zentrales Ergebnis der Studie ist, dass bei zwei der vier untersuchten Familien die Migration den intergenerationalen Bildungstransfer beeinflusste. In diesem Zusammenhang weist Familie Rosenthal durch die Migration einen „gespaltenen" Habitus auf. Dieser ist darauf zur{\"u}ckzuf{\"u}hren, dass diese Familie bei der Planung des Berufes f{\"u}r die Enkelin in Berlin sich am Praktischen und Notwendigen orientierte. W{\"a}hrend die bewusste Bildungsstrategie der Großeltern- und Elterngeneration f{\"u}r die Enkelgeneration im Ankunftsland dem Habitus der Notwendigkeit, den Bourdieu der Arbeiterklasse zuschreibt, zugeordnet werden kann, l{\"a}sst sich hingegen das Freizeitverhalten der Familie Rosenthal dem Habitus der Distinktion zuordnen, der typisch f{\"u}r die herrschende Klasse ist. Ein weiterer Befund dieser Untersuchung ist, dass im Vergleich zur Enkelin Rosenthal bei der Enkelin Popow eine sogenannte Sph{\"a}rendiskrepanz rekonstruiert wurde. So ist die Enkelin Popow in der {\"a}ußeren Sph{\"a}re der Schule auf sich gestellt, da die Großeltern- und Elterngeneration zum deutschen Schulsystem nur {\"u}ber einen geringen Informationsstand verf{\"u}gen. Die Enkelin grenzt sich einerseits von ihrer Familie (innere Sph{\"a}re) und deutschen Schulabbrechern ({\"a}ußere Sph{\"a}re) ab, orientiert sich aber andererseits beim Versuch sozial aufzusteigen an russischsprachigen Peers, die die gymnasiale Oberstufe besuchen (dritte Sph{\"a}re). Bei Enkelin Popow fungiert demzufolge die Peergruppe und nicht die Familie als zentraler Bildungsort. An dieser Stelle sei angemerkt, dass sowohl bei einer russisch-j{\"u}dischen Familie als auch bei einer Sp{\"a}taussiedlerfamilie der intergenerationale Bildungstransfer durch die Migration beeinflusst wurde. W{\"a}hrend Familie Rosenthal in der Herkunftsgesellschaft der Intelligenzija zuzuordnen ist, geh{\"o}rt Familie Popow der Arbeiterschaft an. 
Daraus folgt, dass der intergenerationale Bildungstransfer der untersuchten Familien sowohl unabh{\"a}ngig vom Sp{\"a}taussiedler- und Kontingentfl{\"u}chtlingsstatus als auch vom herkunftsortspezifischen sozialen Status abl{\"a}uft. Demnach kann geschlussfolgert werden, dass im Rahmen dieser Studie die Migration ein zentraler Faktor f{\"u}r den intergenerationalen Bildungstransfer ist.}, language = {de} } @phdthesis{Michalczyk2019, author = {Michalczyk, Anna}, title = {Modelling of nitrogen cycles in intensive winter wheat-summer maize double cropping systems in the North China Plain}, doi = {10.25932/publishup-44421}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444213}, school = {Universit{\"a}t Potsdam}, pages = {X, 154}, year = {2019}, abstract = {The North China Plain (NCP) is one of the most productive and intensive agricultural regions in China. High doses of mineral nitrogen (N) fertiliser, often combined with flood irrigation, are applied, resulting in N surplus, groundwater depletion and environmental pollution. The objectives of this thesis were to use the HERMES model to simulate the N cycle in winter wheat (Triticum aestivum L.)-summer maize (Zea mays L.) double crop rotations and to show the performance of the HERMES model, of the new ammonia volatilisation sub-module and of the new nitrification inhibition tool in the NCP. Further objectives were to assess the model's potential to save N and water at plot and county scale, in both the short and the long term. Additionally, improved management strategies were to be identified with the help of a model-based nitrogen fertiliser recommendation (NFR) and adapted irrigation. Results showed that the HERMES model performed well under the growing conditions of the NCP and was able to describe the relevant processes related to soil-plant interactions concerning N and water during a 2.5-year field experiment. No differences in grain yield between the real-time model-based NFR and the other treatments of the experiments at plot scale in Quzhou County could be found. Simulations with increasing amounts of irrigation resulted in significantly higher N leaching, higher N requirements of the NFR and reduced yields. Thus, conventional flood irrigation as currently practised by the farmers bears great uncertainties, and exact irrigation amounts should be known for future simulation studies. In the best-practice scenario simulation at plot scale, N input and N leaching, but also irrigation water, could be strongly reduced within 2 years. Thus, the model-based NFR in combination with adapted irrigation had the highest potential to reduce nitrate leaching, compared to farmers' practice and mineral N (Nmin)-reduced treatments. The calibrated and validated ammonia volatilisation sub-module of the HERMES model also worked well under the climatic and soil conditions of northern China. Simple ammonia volatilisation approaches also gave satisfactory results compared to process-oriented approaches. During the simulation with ammonium sulphate nitrate with nitrification inhibitor (ASNDMPP), ammonia volatilisation was higher than in the simulation without nitrification inhibitor, while the result for nitrate leaching was the opposite. Although nitrification worked well in the model, nitrification-born nitrous oxide emissions should be considered in future. 
Simulated long-term (31 years) annual N losses for the whole of Quzhou County in Hebei Province were 296.8 kg N ha-1 under the common farmers' practice treatment and 101.7 kg N ha-1 under the optimised treatment including NFR and automated irrigation (OPTai). Spatial differences in simulated N losses throughout Quzhou County were due only to different N inputs. Simulations of an optimised treatment could save, on average, more than 260 kg N ha-1 a-1 of fertiliser input and 190 kg N ha-1 a-1 of N losses, as well as around 115.7 mm a-1 of water, compared to farmers' practice. These long-term simulation results showed a lower N and water saving potential than the short-term simulations and underline the necessity of long-term simulations to overcome the effect of high initial N stocks in the soil. Additionally, the OPTai worked best on clay loam soil except for a high simulated denitrification loss, while the simulations using farmers' practice irrigation could not match the actual water needs, resulting in yield decline, especially for winter wheat. Thus, a precise adaptation of management to actual weather conditions and plant growth needs is necessary for future simulations. However, the optimised treatments did not seem to be able to maintain the soil organic matter pools, even with full crop residue input. Extra organic inputs seem to be required to maintain soil quality in the optimised treatments. With regard to data input requirements, HERMES is a relatively simple model for simulating the N cycle. It can offer extension and research staff an interpretation of management options at plot, county and regional scales. In combination with other N and water saving methods, too, the model promises to be a useful tool.}, language = {en} } @phdthesis{Kriegerowski2019, author = {Kriegerowski, Marius}, title = {Development of waveform-based, automatic analysis tools for the spatio-temporal characterization of massive earthquake clusters and swarms}, doi = {10.25932/publishup-44404}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444040}, school = {Universit{\"a}t Potsdam}, pages = {xv, 83}, year = {2019}, abstract = {Earthquake swarms are characterized by large numbers of events occurring in a short period of time within a confined source volume and without a significant mainshock-aftershock pattern, as opposed to tectonic sequences. Intraplate swarms in the absence of active volcanism usually occur in continental rifts, as for example in the Eger Rift zone in North West Bohemia, Czech Republic. A common hypothesis links event triggering to pressurized fluids. However, the exact causal chain is often poorly understood since the underlying geotectonic processes are slow compared to tectonic sequences. The high event rate during active periods challenges standard seismological routines, as these are often designed for single events and are therefore costly in terms of human resources when working with phase picks, or computationally costly when exploiting full waveforms. This methodological thesis develops new approaches to analyze earthquake swarm seismicity as well as the underlying seismogenic volume. It focuses on the region of North West (NW) Bohemia, a well-studied, well-monitored earthquake swarm region. In this work I develop and test an innovative approach to detect and locate earthquakes using deep convolutional neural networks. 
This technology offers great potential as it allows large amounts of data to be processed efficiently, which becomes increasingly important given that seismological data storage grows at an increasing pace. The proposed deep neural network, trained on NW Bohemian earthquake swarm records, is able to locate 1000 events in less than 1 second using full waveforms while approaching the precision of double-difference relocated catalogs. A further technological novelty is that the trained filters of the deep neural network's first layer can be repurposed to function as a pattern matching event detector without additional training on noise datasets. For further methodological development and benchmarking, I present a new toolbox to generate realistic earthquake cluster catalogs as well as synthetic full waveforms of those clusters in an automated fashion. The input is parameterized using constraints on source volume geometry, nucleation and frequency-magnitude relations. It harnesses recorded noise to produce highly realistic synthetic data for benchmarking and development. This tool is used to study and assess detection performance, in terms of magnitude of completeness Mc, of a full waveform detector applied to synthetic data of a hydrofracturing experiment at the Wysin site, Poland. Finally, I present and demonstrate a novel approach to overcome the masking effects of wave propagation between earthquakes and stations and to determine attenuation directly in the source volume where clustered earthquakes occur. The new event couple spectral ratio approach exploits the high-frequency spectral slopes of two events sharing the greater part of their ray paths. Synthetic tests based on the toolbox mentioned before show that this method is able to infer seismic wave attenuation within the source volume at high spatial resolution. Furthermore, it is independent of the distance to a station as well as of the complexity of the attenuation and velocity structure outside the source volume of the swarms. The application to recordings of the NW Bohemian earthquake swarm shows increased P phase attenuation within the source volume (Qp < 100) based on results at a station located close to the village of Luby (LBC). The recordings of a station located in epicentral proximity, close to Nov{\´y} Kostel (NKC), show a relatively high complexity, indicating that waves arriving at that station experience more scattering than signals recorded at other stations. The high level of complexity destabilizes the inversion. Therefore, the Q estimate at NKC is not reliable, and an independent confirmation of the high-attenuation finding, given the geometrical and frequency constraints, is still to be done. However, a high attenuation in the source volume of NW Bohemian swarms has been postulated before in relation to an expected, highly damaged zone bearing CO2 at high pressure. The methods developed in the course of this thesis offer the potential to improve our understanding of the role of fluids and gases in intraplate event clustering.}, language = {en} } @phdthesis{LeBot2019, author = {Le Bot, Nils}, title = {Quel avenir pour les gares m{\´e}tropolitaines fran{\c{c}}aises et allemandes ?}, doi = {10.25932/publishup-44220}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442201}, school = {Universit{\"a}t Potsdam}, pages = {589}, year = {2019}, abstract = {Cette th{\`e}se d'urbanisme s'est donn{\´e}e pour objectif de r{\´e}fl{\´e}chir {\`a} l'avenir des gares m{\´e}tropolitaines fran{\c{c}}aises et allemandes {\`a} horizon 2050. 
Elle porte une interrogation sur les fondements de la gare comme objet urbain conceptuel (abord{\´e} comme un syst{\`e}me) et pose comme hypoth{\`e}se qu'il serait en quelque sorte dot{\´e} de propri{\´e}t{\´e}s autonomes. Parmi ces propri{\´e}t{\´e}s, c'est le processus d'expansion et de dialogue sans cesse renouvel{\´e} et conflictuel, entre la gare et son tissu urbain environnant, qui guide cette recherche ; notamment dans le rapport qu'il entretient avec l'hypermobilit{\´e} des m{\´e}tropoles. Pour ce faire, cette th{\`e}se convoque quatre terrains d'{\´e}tudes : les gares principales de Cologne et de Stuttgart en Allemagne et les gares de Paris-Montparnasse et Lyon-Part-Dieu en France ; et commence par un historique d{\´e}taill{\´e} de leurs {\´e}volutions morphologiques, pour d{\´e}gager une s{\´e}rie de variables architectoniques et urbaines. Il proc{\`e}de dans un deuxi{\`e}me temps {\`a} une s{\´e}rie d'analyse prospective, permettant de juger de l'influence possible des politiques publiques en mati{\`e}re transports et de mobilit{\´e}, sur l'avenir conceptuel des gares. Cette th{\`e}se propose alors le concept de syst{\`e}me-gare, pour d{\´e}crire l'expansion et l'int{\´e}gration des gares m{\´e}tropolitaines avec leur environnement urbain ; un processus de n{\´e}gociation dialectique qui ne trouve pas sa r{\´e}solution dans le concept de gare comme lieu de vie/ville. Elle invite alors {\`a} penser la gare comme une h{\´e}t{\´e}rotopie, et propose une lecture d{\´e}polaris{\´e}e et d{\´e}hi{\´e}rarchis{\´e}e de ces espaces, en introduisant les concepts d'orchestre de gares et de m{\´e}tagare. Cette recherche propose enfin une lecture critique de la « ville num{\´e}rique » et du concept de « mobilit{\´e} comme service. » Pour {\´e}viter une mise en flux tendus potentiellement dommageables, l'application de ces concepts en gare ne pourra se soustraire {\`a} une augmentation simultan{\´e}e des espaces physiques.}, language = {fr} } @phdthesis{Zemella2019, author = {Zemella, Anne}, title = {Fluoreszenzmarkierung und Modifizierung von komplexen Proteinen in eukaryotischen zellfreien Systemen durch die Etablierung von orthogonalen tRNA/Aminoacyl-tRNA-Synthetase-Paaren}, doi = {10.25932/publishup-44236}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442361}, school = {Universit{\"a}t Potsdam}, pages = {XI, 141}, year = {2019}, abstract = {Die funktionelle Charakterisierung von therapeutisch relevanten Proteinen kann bereits durch die Bereitstellung des Zielproteins in ad{\"a}quaten Mengen limitierend sein. Dies trifft besonders auf Membranproteine zu, die aufgrund von zytotoxischen Effekten auf die Produktionszelllinie und der Tendenz Aggregate zu bilden, in niedrigen Ausbeuten an aktivem Protein resultieren k{\"o}nnen. Der lebende Organismus kann durch die Verwendung von translationsaktiven Zelllysaten umgangen werden- die Grundlage der zellfreien Proteinsynthese. Zu Beginn der Arbeit wurde die ATP-abh{\"a}ngige Translation eines Lysates auf der Basis von kultivierten Insektenzellen (Sf21) analysiert. F{\"u}r diesen Zweck wurde ein ATP-bindendes Aptamer eingesetzt, durch welches die Translation der Nanoluziferase reguliert werden konnte. Durch die dargestellte Applizierung von Aptameren, k{\"o}nnten diese zuk{\"u}nftig in zellfreien Systemen f{\"u}r die Visualisierung der Transkription und Translation eingesetzt werden, wodurch zum Beispiel komplexe Prozesse validiert werden k{\"o}nnen. 
Neben der reinen Proteinherstellung k{\"o}nnen Faktoren wie posttranslationale Modifikationen sowie eine Integration in eine lipidische Membran essentiell f{\"u}r die Funktionalit{\"a}t des Membranproteins sein. Im zweiten Abschnitt konnte, im zellfreien Sf21-System, f{\"u}r den G-Protein-gekoppelten Rezeptor Endothelin B sowohl eine Integration in die endogen vorhandenen Endoplasmatisch Retikulum-basierten Membranstrukturen als auch Glykosylierungen, identifiziert werden. Auf der Grundlage der erfolgreichen Synthese des ET-B-Rezeptors wurden verschiedene Methoden zur Fluoreszenzmarkierung des Adenosin-Rezeptors A2a (Adora2a) angewandt und optimiert. Im dritten Abschnitt wurde der Adora2a mit Hilfe einer vorbeladenen tRNA, welche an eine fluoreszierende Aminos{\"a}ure gekoppelt war, im zellfreien Chinesischen Zwerghamster Ovarien (CHO)-System markiert. Zus{\"a}tzlich konnte durch den Einsatz eines modifizierten tRNA/Aminoacyl-tRNA-Synthetase-Paares eine nicht-kanonische Aminos{\"a}ure an Position eines integrierten Amber-Stopcodon in die Polypeptidkette eingebaut und die funktionelle Gruppe im Anschluss an einen Fluoreszenzfarbstoff gekoppelt werden. Aufgrund des offenen Charakters eignen sich zellfreie Proteinsynthesesysteme besonders f{\"u}r eine Integration von exogenen Komponenten in den Translationsprozess. Mit Hilfe der Fluoreszenzmarkierung wurde eine ligandvermittelte Konformations{\"a}nderung im Adora2a {\"u}ber einen Biolumineszenz-Resonanzenergietransfer detektiert. Durch die Etablierung der Amber-Suppression wurde dar{\"u}ber hinaus das Hormon Erythropoetin pegyliert, wodurch Eigenschaften wie Stabilit{\"a}t und Halbwertszeit des Proteins ver{\"a}ndert wurden. Zu guter Letzt wurde ein neues tRNA/Aminoacyl-tRNA-Synthetase-Paar auf Basis der Methanosarcina mazei Pyrrolysin-Synthetase etabliert, um das Repertoire an nicht-kanonischen Aminos{\"a}uren und den damit verbundenen Kopplungsreaktionen zu erweitern. Zusammenfassend wurden die Potenziale zellfreier Systeme in Bezug auf der Herstellung von komplexen Membranproteinen und der Charakterisierung dieser durch die Einbringung einer positionsspezifischen Fluoreszenzmarkierung verdeutlicht, wodurch neue M{\"o}glichkeiten f{\"u}r die Analyse und Funktionalisierung von komplexen Proteinen geschaffen wurden.}, language = {de} } @phdthesis{Schmidt2019, author = {Schmidt, Martin}, title = {Fragmentation of landscapes: modelling ecosystem services of transition zones}, doi = {10.25932/publishup-44294}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442942}, school = {Universit{\"a}t Potsdam}, pages = {XV, 103}, year = {2019}, abstract = {For millennia, humans have affected landscapes all over the world. Due to horizontal expansion, agriculture plays a major role in the process of fragmentation. This process is caused by a substitution of natural habitats by agricultural land leading to agricultural landscapes. These landscapes are characterized by an alternation of agriculture and other land use like forests. In addition, there are landscape elements of natural origin like small water bodies. Areas of different land use are beside each other like patches, or fragments. They are physically distinguishable which makes them look like a patchwork from an aerial perspective. These fragments are each an own ecosystem with conditions and properties that differ from their adjacent fragments. As open systems, they are in exchange of information, matter and energy across their boundaries. These boundary areas are called transition zones. 
Here, the habitat properties and environmental conditions are altered compared to the interior of the fragments. This changes the abundance and the composition of species in the transition zones, which in turn has a feedback effect on the environmental conditions. The literature mainly offers information and insights on species abundance and composition in forested transition zones. Abiotic effects, the gradual changes in energy and matter, received less attention. In addition, little is known about non-forested transition zones. For example, the effects on agricultural yield in transition zones of an altered microclimate, matter dynamics or different light regimes are hardly researched or understood. The processes in transition zones are closely connected with altered provisioning and regulating ecosystem services. To disentangle the mechanisms and to upscale the effects, models can be used. My thesis provides insights into these topics: literature was reviewed and a conceptual framework for the quantitative description of gradients of matter and energy in transition zones was introduced. The results of measurements of environmental gradients like microclimate, aboveground biomass and soil carbon and nitrogen content are presented that span from within the forest into arable land. Both the measurements and the literature review could not validate a transition zone of 100 m for abiotic effects. Although this value is often reported and used in the literature, it is likely to be smaller. Further, the measurements suggest that on the one hand trees in transition zones are smaller compared to those in the interior of the fragments, while on the other hand less biomass was measured in the arable lands' transition zone. These results support the hypothesis that less carbon is stored in the aboveground biomass in transition zones. The soil at the edge (zero line) between adjacent forest and arable land contains more nitrogen and carbon content compared to the interior of the fragments. One-year measurements in the transition zone also provided evidence that microclimate is different compared to the fragments' interior. To predict the possible yield decreases that transition zones might cause, a modelling approach was developed. Using a small virtual landscape, I modelled the effect of a forest fragment shading the adjacent arable land and the effects of this on yield using the MONICA crop growth model. In the transition zone yield was less compared to the interior due to shading. The results of the simulations were upscaled to the landscape level and exemplarily calculated for the arable land of a whole region in Brandenburg, Germany. 
The major findings of my thesis are: (1) Transition zones are likely to be much smaller than assumed in the scientific literature; (2) transition zones aren't solely a phenomenon of forested ecosystems, but significantly extend into arable land as well; (3) empirical and modelling results show that transition zones encompass biotic and abiotic changes that are likely to be important to a variety of agricultural landscape ecosystem services.}, language = {en} } @phdthesis{Burmester2019, author = {Burmester, Juliane}, title = {Linguistic and visual salience in sentence comprehension}, doi = {10.25932/publishup-44315}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-443155}, school = {Universit{\"a}t Potsdam}, pages = {XI, 165}, year = {2019}, abstract = {Interlocutors typically link their utterances to the discourse environment and enrich communication by linguistic (e.g., information packaging) and extra-linguistic (e.g., eye gaze, gestures) means to optimize information transfer. Psycholinguistic studies underline that ‒for meaning computation‒ listeners profit from linguistic and visual cues that draw their focus of attention to salient information. This dissertation is the first work that examines how linguistic compared to visual salience cues influence sentence comprehension using the very same experimental paradigms and materials, that is, German subject-before-object (SO) and object-before-subject (OS) sentences, across the two cue modalities. Linguistic salience was induced by indicating a referent as the aboutness topic. Visual salience was induced by implicit (i.e., unconscious) or explicit (i.e., shared) manipulations of listeners' attention to a depicted referent. In Study 1, a selective, facilitative impact of linguistic salience on the context-sensitive OS word order was found using offline comprehensibility judgments. More precisely, during online sentence processing, this impact was characterized by a reduced sentence-initial Late positivity which reflects reduced processing costs for updating the current mental representation of discourse. This facilitative impact of linguistic salience was not replicated by means of an implicit visual cue (Study 2) shown to modulate word order preferences during sentence production. However, a gaze shift to a depicted referent as an indicator of shared attention eased sentence-initial processing similar to linguistic salience as revealed by reduced reading times (Study 3). Yet, this cue did not modulate the strong subject-antecedent preference during later pronoun resolution like linguistic salience. Taken together, these findings suggest a significant impact of linguistic and visual salience cues on sentence comprehension, which substantiates that both the information delivered via language and via the visual environment is integrated into the mental representation of the discourse; but, the way how salience is induced is crucial to its impact.}, language = {en} } @phdthesis{Hoang2019, author = {Hoang, Yen}, title = {De novo binning strategy to analyze and visualize multi-dimensional cytometric data}, doi = {10.25932/publishup-44307}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-443078}, school = {Universit{\"a}t Potsdam}, pages = {vii, 81, xxxii}, year = {2019}, abstract = {Since half a century, cytometry has been a major scientific discipline in the field of cytomics - the study of system's biology at single cell level. 
It enables the investigation of physiological processes, functional characteristics and rare events with proteins by analysing multiple parameters on an individual cell basis. In the last decade, mass cytometry has been established, which increased the number of proteins measured in parallel to up to 50. This has shifted the analysis strategy from conventional consecutive manual gates towards multi-dimensional data processing. Novel algorithms have been developed to tackle these high-dimensional protein combinations in the data. They are mainly based on clustering or non-linear dimension reduction techniques, or both, often combined with an upstream downsampling procedure. However, these tools have obstacles in terms of comprehensible interpretability, reproducibility, computational complexity or comparability between samples and groups. To address this bottleneck, a reproducible, semi-automated cytometric data mining workflow PRI (pattern recognition of immune cells) is proposed which combines three main steps: i) data preparation and storage; ii) bin-based combinatorial variable engineering of three protein markers, the so-called triploTs, and subsequent sectioning of these triploTs into four parts; and iii) deployment of a data-driven supervised learning algorithm, the cross-validated elastic-net regularized logistic regression, with these triploT sections as input variables. As a result, the variables selected by the models, which potentially have discriminative value, are ranked by their prevalence. The purpose is to significantly facilitate the identification of meaningful subpopulations, i.e. those that best distinguish between two groups. The proposed workflow PRI is exemplified by a recently published public mass cytometry data set. The authors found a T cell subpopulation which discriminates between effective and ineffective treatment of breast carcinomas in mice. With PRI, that subpopulation was not only validated, but was further narrowed down to a particular Th1 cell population. Moreover, additional insights into combinatorial protein expression are revealed in a traceable manner. An essential element of the workflow is the reproducible variable engineering. These variables serve as the basis for a clearly interpretable visualization, for a structured variable exploration and as input layers in neural network constructs. PRI facilitates the determination of marker levels in a semi-continuous manner. Jointly with the combinatorial display, it allows a straightforward observation of correlating patterns, and thus of the dominantly expressed markers and cell hierarchies. Furthermore, it enables the identification and complex characterization of discriminating subpopulations due to its reproducible and pseudo-multi-parametric pattern presentation. This endorses its applicability as a tool for unbiased investigations of cell subsets within multi-dimensional cytometric data sets.}, language = {en} } @phdthesis{Qin2019, author = {Qin, Qing}, title = {Chemical functionalization of porous carbon-based materials to enable novel modes for efficient electrochemical N2 fixation}, doi = {10.25932/publishup-44339}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-443397}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2019}, abstract = {The central motivation of the thesis was to provide possible solutions and concepts to improve the performance (e.g. activity and selectivity) of the electrochemical N2 reduction reaction (NRR). 
Given that porous carbon-based materials usually exhibit a broad range of structural properties, they could be promising NRR catalysts. Therefore, the advanced design of novel porous carbon-based materials and the investigation of their application in electrocatalytic NRR including the particular reaction mechanisms are the most crucial points to be addressed. In this regard, three main topics were investigated. All of them are related to the functionalization of porous carbon for electrochemical NRR or other electrocatalytic reactions. In chapter 3, a novel C-TixOy/C nanocomposite has been described that has been obtained via simple pyrolysis of MIL-125(Ti). A novel mode for N2 activation is achieved by doping carbon atoms from nearby porous carbon into the anion lattice of TixOy. By comparing the NRR performance of M-Ts and by carrying out DFT calculations, it is found that the existence of (O-)Ti-C bonds in C-doped TixOy can largely improve the ability to activate and reduce N2 as compared to unoccupied OVs in TiO2. The strategy of rationally doping heteroatoms into the anion lattice of transition metal oxides to create active centers may open many new opportunities beyond the use of noble metal-based catalysts also for other reactions that require the activation of small molecules as well. In chapter 4, a novel catalyst construction composed of Au single atoms decorated on the surface of NDPCs was reported. The introduction of Au single atoms leads to active reaction sites, which are stabilized by the N species present in NDPCs. Thus, the interaction within as-prepared AuSAs-NDPCs catalysts enabled promising performance for electrochemical NRR. For the reaction mechanism, Au single sites and N or C species can act as Frustrated Lewis pairs (FLPs) to enhance the electron donation and back-donation process to activate N2 molecules. This work provides new opportunities for catalyst design in order to achieve efficient N2 fixation at ambient conditions by utilizing recycled electric energy. The last topic described in chapter 5 mainly focused on the synthesis of dual heteroatom-doped porous carbon from simple precursors. The introduction of N and B heteroatoms leads to the construction of N-B motives and Frustrated Lewis pairs in a microporous architecture which is also rich in point defects. This can improve the strength of adsorption of different reactants (N2 and HMF) and thus their activation. As a result, BNC-2 exhibits a desirable electrochemical NRR and HMF oxidation performance. Gas adsorption experiments have been used as a simple tool to elucidate the relationship between the structure and catalytic activity. This work provides novel and deep insights into the rational design and the origin of activity in metal-free electrocatalysts and enables a physically viable discussion of the active motives, as well as the search for their further applications. Throughout this thesis, the ubiquitous problems of low selectivity and activity of electrochemical NRR are tackled by designing porous carbon-based catalysts with high efficiency and exploring their catalytic mechanisms. The structure-performance relationships and mechanisms of activation of the relatively inert N2 molecules are revealed by either experimental results or DFT calculations. 
These fundamental understandings pave the way for a future optimal design and targeted promotion of NRR catalysts with porous carbon-based structures, as well as the study of new N2 activation modes.}, language = {en} } @phdthesis{Fuhr2019, author = {Fuhr, Antonie}, title = {Eine Hypothese {\"u}ber die Grundlagen von Moral und einige Implikationen}, doi = {10.25932/publishup-44309}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-443091}, school = {Universit{\"a}t Potsdam}, pages = {172}, year = {2019}, abstract = {In der Dissertationsarbeit mit dem Titel „Eine Hypothese {\"u}ber die Grundlagen von Moral und einige Implikationen" unternimmt die Autorin den Versuch, die anthropologischen Pr{\"a}missen moralischen Handelns herauszuarbeiten. Es wird eine Hypothese aufgestellt und erl{\"a}utert, die behauptet, dass moralisches Handeln nur dann verst{\"a}ndlich wird, wenn der Handelnde erstens die F{\"a}higkeit der Phantasie aufweist, zweitens auf Erfahrungen (mittels seines Ged{\"a}chtnisses) zugreifen kann und durch Konversation mit anderen Personen interagierte und interagiert, denn nur auf der Basis dieser drei Grundlagen von Moral k{\"o}nnen sich diejenigen F{\"a}higkeiten entwickeln, die als Voraussetzungen moralischen Handelns gesehen werden m{\"u}ssen: Selbstbewusstsein, Freiheit, die Entwicklung eines Wir-Gef{\"u}hls, die Genese eines moralischen Ideals und die F{\"a}higkeit, sich im Entscheiden und Handeln nach diesem Ideal richten zu k{\"o}nnen. Außerdem werden in dieser Dissertation einige Implikationen dieser Hypothese auf individueller und zwischenmenschlicher Ebene diskutiert.}, language = {de} } @phdthesis{Rezaei2019, author = {Rezaei, Mina}, title = {Deep representation learning from imbalanced medical imaging}, doi = {10.25932/publishup-44275}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442759}, school = {Universit{\"a}t Potsdam}, pages = {xxviii, 160}, year = {2019}, abstract = {Medical imaging plays an important role in disease diagnosis, treatment planning, and clinical monitoring. One of the major challenges in medical image analysis is imbalanced training data, in which the class of interest is much rarer than the other classes. Canonical machine learning algorithms suppose that the number of samples from different classes in the training dataset is roughly similar or balanced. Training a machine learning model on an imbalanced dataset can introduce unique challenges to the learning problem. A model learned from imbalanced training data is biased towards the high-frequency samples. The predicted results of such networks have low sensitivity and high precision. In medical applications, the cost of misclassification of the minority class could be more than the cost of misclassification of the majority class. For example, the risk of not detecting a tumor could be much higher than that of referring a healthy subject to a doctor. The current Ph.D. thesis introduces several deep learning-based approaches for handling class imbalance problems in multi-task learning such as disease classification and semantic segmentation. At the data level, the objective is to balance the data distribution through re-sampling the data space: we propose novel approaches to correct the internal bias towards less frequent samples. These approaches include patient-wise batch sampling, complementary labels, and supervised and unsupervised minority oversampling using generative adversarial networks. 
On the other hand, at algorithm-level, we modify the learning algorithm to alleviate the bias towards majority classes. In this regard, we propose different generative adversarial networks for cost-sensitive learning, ensemble learning, and mutual learning to deal with highly imbalanced imaging data. We show evidence that the proposed approaches are applicable to different types of medical images of varied sizes on different applications of routine clinical tasks, such as disease classification and semantic segmentation. Our various implemented algorithms have shown outstanding results on different medical imaging challenges.}, language = {en} } @phdthesis{Mandal2019, author = {Mandal, Sankalita}, title = {Event handling in business processes}, doi = {10.25932/publishup-44170}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441700}, school = {Universit{\"a}t Potsdam}, pages = {xix, 151}, year = {2019}, abstract = {Business process management (BPM) deals with modeling, executing, monitoring, analyzing, and improving business processes. During execution, the process communicates with its environment to get relevant contextual information represented as events. Recent development of big data and the Internet of Things (IoT) enables sources like smart devices and sensors to generate tons of events which can be filtered, grouped, and composed to trigger and drive business processes. The industry standard Business Process Model and Notation (BPMN) provides several event constructs to capture the interaction possibilities between a process and its environment, e.g., to instantiate a process, to abort an ongoing activity in an exceptional situation, to take decisions based on the information carried by the events, as well as to choose among the alternative paths for further process execution. The specifications of such interactions are termed as event handling. However, in a distributed setup, the event sources are most often unaware of the status of process execution and therefore, an event is produced irrespective of the process being ready to consume it. BPMN semantics does not support such scenarios and thus increases the chance of processes getting delayed or getting in a deadlock by missing out on event occurrences which might still be relevant. The work in this thesis reviews the challenges and shortcomings of integrating real-world events into business processes, especially the subscription management. The basic integration is achieved with an architecture consisting of a process modeler, a process engine, and an event processing platform. Further, points of subscription and unsubscription along the process execution timeline are defined for different BPMN event constructs. Semantic and temporal dependencies among event subscription, event occurrence, event consumption and event unsubscription are considered. To this end, an event buffer with policies for updating the buffer, retrieving the most suitable event for the current process instance, and reusing the event has been discussed that supports issuing of early subscription. The Petri net mapping of the event handling model provides our approach with a translation of semantics from a business process perspective. Two applications based on this formal foundation are presented to support the significance of different event handling configurations on correct process execution and reachability of a process path. 
Prototype implementations of the approaches show that realizing flexible event handling is feasible with minor extensions of off-the-shelf process engines and event platforms.}, language = {en} } @phdthesis{AppiahDwomoh2019, author = {Appiah-Dwomoh, Edem Korkor}, title = {Postural control and back pain in adolescent athletes}, doi = {10.25932/publishup-44269}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442692}, school = {Universit{\"a}t Potsdam}, pages = {VI, 77, X}, year = {2019}, abstract = {Back pain is a problem in adolescent athletes, affecting postural control, which is an important requirement for physical and daily activities under both static and dynamic conditions. The one leg stance test and the star excursion balance test (SEBT) are effective in measuring static and dynamic postural control, respectively. These tests have been used in individuals with back pain, athletes and non-athletes without first establishing their reliabilities. In addition, there is no published literature investigating dynamic posture in adolescent athletes with back pain using the star excursion balance test. Therefore, the aim of the thesis was to assess deficits in postural control in adolescent athletes with and without back pain using a static (one leg stance) and a dynamic (SEBT) postural control test. Adolescent athletes with and without back pain participated in the study. Static and dynamic postural control tests were performed using the one leg stance and the SEBT, respectively. The reproducibility of both tests was established. Afterwards, it was determined whether there was an association between static and dynamic posture using the displacement of the centre of pressure and the reach distance, respectively. Finally, it was investigated whether there was a difference in postural control between adolescent athletes with and without back pain using the one leg stance test and the SEBT. Fair to excellent reliabilities were recorded for the static (one leg stance) and dynamic (star excursion balance) postural control tests in the subjects of interest. No association was found between variables of the static and dynamic tests for the adolescent athletes with and without back pain. Also, no statistically significant difference was obtained between adolescent athletes with and without back pain using the static and dynamic postural control tests. The one leg stance test and the SEBT can be used as measures of postural control in adolescent athletes with and without back pain. Although static and dynamic postural control might be related, adolescent athletes with and without back pain might be using different mechanisms in controlling their static and dynamic posture. Consequently, static and dynamic postural control in adolescent athletes with back pain was not different from that in those without back pain. These outcome measures might not be challenging enough to detect deficits in postural control in our study group of interest.}, language = {en} } @phdthesis{Willig2019, author = {Willig, Lisa}, title = {Ultrafast magneto-optical studies of remagnetisation dynamics in transition metals}, doi = {10.25932/publishup-44194}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441942}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 113, XVII}, year = {2019}, abstract = {Ultrafast magnetisation dynamics have been investigated intensely for two decades. The recovery process after demagnetisation, however, has rarely been studied experimentally and discussed in detail. 
The focus of this work lies on the investigation of the magnetisation on long timescales after laser excitation. It combines two ultrafast time resolved methods to study the relaxation of the magnetic and lattice system after excitation with a high fluence ultrashort laser pulse. The magnetic system is investigated by time resolved measurements of the magneto-optical Kerr effect. The experimental setup has been implemented in the scope of this work. The lattice dynamics were obtained with ultrafast X-ray diffraction. The combination of both techniques leads to a better understanding of the mechanisms involved in magnetisation recovery from a non-equilibrium condition. Three different groups of samples are investigated in this work: Thin Nickel layers capped with nonmagnetic materials, a continuous sample of the ordered L10 phase of Iron Platinum and a sample consisting of Iron Platinum nanoparticles embedded in a carbon matrix. The study of the remagnetisation reveals a general trend for all of the samples: The remagnetisation process can be described by two time dependences. A first exponential recovery that slows down with an increasing amount of energy absorbed in the system until an approximately linear time dependence is observed. This is followed by a second exponential recovery. In case of low fluence excitation, the first recovery is faster than the second. With increasing fluence the first recovery is slowed down and can be described as a linear function. If the pump-induced temperature increase in the sample is sufficiently high, a phase transition to a paramagnetic state is observed. In the remagnetisation process, the transition into the ferromagnetic state is characterised by a distinct transition between the linear and exponential recovery. From the combination of the transient lattice temperature Tp(t) obtained from ultrafast X-ray measurements and magnetisation M(t) gained from magneto-optical measurements we construct the transient magnetisation versus temperature relations M(Tp). If the lattice temperature remains below the Curie temperature the remagnetisation curve M(Tp) is linear and stays below the M(T) curve in equilibrium in the continuous transition metal layers. When the sample is heated above phase transition, the remagnetisation converges towards the static temperature dependence. For the granular Iron Platinum sample the M(Tp) curves for different fluences coincide, i.e. the remagnetisation follows a similar path irrespective of the initial laser-induced temperature jump.}, language = {en} } @phdthesis{Amirkhanyan2019, author = {Amirkhanyan, Aragats}, title = {Methods and frameworks for GeoSpatioTemporal data analytics}, doi = {10.25932/publishup-44168}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441685}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 133}, year = {2019}, abstract = {In the era of social networks, internet of things and location-based services, many online services produce a huge amount of data that have valuable objective information, such as geographic coordinates and date time. These characteristics (parameters) in the combination with a textual parameter bring the challenge for the discovery of geospatiotemporal knowledge. This challenge requires efficient methods for clustering and pattern mining in spatial, temporal and textual spaces. In this thesis, we address the challenge of providing methods and frameworks for geospatiotemporal data analytics. 
As an initial step, we address the challenges of geospatial data processing: data gathering, normalization, geolocation, and storage. That initial step is the foundation for tackling the next challenge -- the geospatial clustering challenge. The first step of this challenge is to design a method for online clustering of georeferenced data. This algorithm can be used as a server-side clustering algorithm for online maps that visualize massive georeferenced data. As the second step, we develop an extension of this method that additionally considers the temporal aspect of the data. For that, we propose a density- and intensity-based geospatiotemporal clustering algorithm with a fixed distance and time radius. Each version of the clustering algorithm has its own use case, which we show in the thesis. In the next chapter of the thesis, we look at spatiotemporal analytics from the perspective of the sequential rule mining challenge. We design and implement a framework that transforms data into textual geospatiotemporal data - data that contain geographic coordinates, time and textual parameters. In this way, we address the challenge of applying pattern/rule mining algorithms in geospatiotemporal space. As an applicable use case study, we propose spatiotemporal crime analytics -- the discovery of spatiotemporal patterns of crime in publicly available crime data. The second part of the thesis is dedicated to applications and use case studies. We design and implement an application that uses the proposed clustering algorithms to discover knowledge in data. Jointly with the application, we propose use case studies for the analysis of georeferenced data in terms of situational and public safety awareness.}, language = {en} } @phdthesis{Nagel2019, author = {Nagel, Oliver}, title = {Amoeboid cells as a transport system for micro-objects}, doi = {10.25932/publishup-44219}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442192}, school = {Universit{\"a}t Potsdam}, pages = {x, 84}, year = {2019}, abstract = {Due to advances in science and technology towards smaller and more powerful processing units, the fabrication of micrometer-sized machines for different tasks becomes more and more feasible. Such micro-robots could revolutionize the medical treatment of diseases and could support work on other small machines. Nevertheless, scaling down robots and other devices is a challenging task and will probably remain limited in the near future. Over the past decade, the concept of bio-hybrid systems has proved to be a promising approach to advance the further development of micro-robots. Bio-hybrid systems combine biological cells with artificial components, thereby benefiting from the functionality of living biological cells. Cell-driven micro-transport is one of the most prominent applications in the emerging field of these systems. So far, micrometer-sized cargo has been successfully transported by means of swimming bacterial cells. The potential of motile adherent cells as transport systems has largely remained unexplored. This thesis concentrates on the social amoeba Dictyostelium discoideum as a potential candidate for an amoeboid bio-hybrid transport system. The use of this model organism comes with several advantages. Due to the unspecific adhesion properties of Dictyostelium, a wide range of different cargo materials can be used for transport. 
As amoeboid cells exceed bacterial cells in size by one order of magnitude, the size of an object carried by a single cell can also be much larger for an amoeba. Finally, it is possible to guide the cell-driven transport based on the chemotactic behavior of the amoeba. Since cells undergo a developmentally induced chemotactic aggregation, cargo could be assembled in a self-organized manner into a cluster. It is also possible to impose an external chemical gradient to guide the amoeboid transport system to a desired location. To establish Dictyostelium discoideum as a possible candidate for bio-hybrid transport systems, this thesis will first investigate the movement of single cells. Secondly, the interaction of cargo and cells will be studied. Eventually, a proof of concept will be provided that the chemotactic behavior can be exploited to transport cargo either in a self-organized manner or by means of an external chemical source.}, language = {en} } @phdthesis{Kochlik2019, author = {Kochlik, Bastian Max}, title = {Relevance of biomarkers for the diagnosis of the frailty syndrome}, doi = {10.25932/publishup-44118}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441186}, school = {Universit{\"a}t Potsdam}, pages = {IV, 99}, year = {2019}, abstract = {Frailty and sarcopenia share some underlying characteristics like loss of muscle mass, low muscle strength, and low physical performance. Imaging parameters and functional examinations mainly assess frailty and sarcopenia criteria; however, these measures can have limitations in clinical settings. Therefore, finding suitable biomarkers that reflect a catabolic muscle state, e.g. an elevated muscle protein turnover as suggested in frailty, is becoming more relevant for frailty diagnosis and risk assessment. 3-Methylhistidine (3-MH) and its ratios 3-MH-to-creatinine (3-MH/Crea) and 3-MH-to-estimated glomerular filtration rate (3-MH/eGFR) are under discussion as possible biomarkers for muscle protein turnover and might support the diagnosis of frailty. However, there is some skepticism about the reliability of 3-MH measures since confounders such as meat and fish intake might influence 3-MH plasma concentrations. Therefore, the influence of dietary habits and an intervention with white meat on plasma 3-MH was determined in young and healthy individuals. In another study, the cross-sectional associations of plasma 3-MH, 3-MH/Crea and 3-MH/eGFR with the frailty status (robust, pre-frail and frail) were investigated. Oxidative stress (OS) is a possible contributor to frailty development, and high OS levels as well as low micronutrient levels are associated with the frailty syndrome. However, data on simultaneous measures of OS biomarkers together with micronutrients are lacking in studies including frail, pre-frail and robust individuals. Therefore, cross-sectional associations of protein carbonyls (PrCarb), 3-nitrotyrosine (3-NT) and several micronutrients with the frailty status were determined. A validated UPLC-MS/MS (ultra-performance liquid chromatography tandem mass spectrometry) method for the simultaneous quantification of 3-MH and 1-MH (1-methylhistidine, as a marker for meat and fish consumption) was presented and used for further analyses. Omnivores showed higher plasma 3-MH and 1-MH concentrations than vegetarians, and a white meat intervention resulted in an increase in plasma 3-MH, 3-MH/Crea, 1-MH and 1-MH/Crea in omnivores. 
Elevated 3-MH and 3-MH/Crea levels declined significantly within 24 hours after this white meat intervention. Thus, 3-MH and 3-MH/Crea might be used as biomarkers for muscle protein turnover when subjects did not consume meat in the 24 hours prior to blood sampling. Plasma 3-MH, 3-MH/Crea and 3-MH/eGFR were higher in frail individuals than in robust individuals. Additionally, these biomarkers were positively associated with frailty in linear regression models, and higher odds of being frail were found for every increase in 3-MH and 3-MH/eGFR quintile in multivariable logistic regression models adjusted for several confounders. This was the first study using 3-MH/eGFR, and it is concluded that plasma 3-MH, 3-MH/Crea and 3-MH/eGFR might be used to identify frail individuals or individuals at higher risk of being frail, and that there might be threshold concentrations or ratios to support these diagnoses. Higher vitamin D3, lutein/zeaxanthin, γ-tocopherol, α-carotene, β-carotene, lycopene and β-cryptoxanthin concentrations and additionally lower PrCarb concentrations were found in robust compared to frail individuals in multivariate linear models. Frail subjects had higher odds of being in the lowest rather than in the highest tertile for vitamin D3, α-tocopherol, α-carotene, β-carotene, lycopene, lutein/zeaxanthin, and β-cryptoxanthin, and had higher odds of being in the highest rather than in the lowest tertile for PrCarb than robust individuals in multivariate logistic regression models. Thus, a low micronutrient status together with a high PrCarb status is associated with pre-frailty and frailty.}, language = {en} } @phdthesis{Kehm2019, author = {Kehm, Richard}, title = {The impact of metabolic stress and aging on functionality and integrity of pancreatic islets and beta-cells}, doi = {10.25932/publishup-44109}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441099}, school = {Universit{\"a}t Potsdam}, pages = {VI, 138}, year = {2019}, abstract = {The increasing age of the worldwide population is a major contributor to the rising prevalence of major pathologies and diseases, such as type 2 diabetes, which is mediated by massive insulin resistance and a decline in functional beta-cell mass and is highly associated with an elevated incidence of obesity. Thus, the impact of aging under physiological conditions and in combination with diet-induced metabolic stress on characteristics of pancreatic islets and beta-cells, with the focus on functionality and structural integrity, was investigated in the present dissertation. Primarily induced by malnutrition due to chronic and excess intake of high-caloric diets, containing large amounts of carbohydrates and fats, obesity followed by systemic inflammation and peripheral insulin resistance occurs over time, initiating metabolic stress conditions. Elevated insulin demands initiate an adaptive response by beta-cell mass expansion due to increased proliferation, but prolonged stress conditions drive beta-cell failure and loss. Aging has also been shown to affect beta-cell functionality and morphology, in particular by proliferative limitations. However, most studies in rodents were performed under beta-cell challenging conditions, such as high-fat diet interventions. Thus, in the first part of the thesis (publication I), a characterization of age-related alterations in pancreatic islets and beta-cells was performed using plasma samples and pancreatic tissue sections of standard diet-fed C57BL/6J wild-type mice in several age groups (2.5, 5, 10, 15 and 21 months). 
Aging was accompanied by decreased but sustained islet proliferative potential as well as an induction of cellular senescence. This was associated with a progressive islet expansion to maintain normoglycemia throughout lifespan. Moreover, beta-cell function and mass were not impaired although the formation and accumulation of AGEs occurred, located predominantly in the islet vasculature, accompanied by an induction of oxidative and nitrosative (redox) stress. The nutritional behavior throughout human lifespan; however, is not restricted to a balanced diet. This emphasizes the significance to investigate malnutrition by the intake of high-energy diets, inducing metabolic stress conditions that synergistically with aging might amplify the detrimental effects on endocrine pancreas. Using diabetes-prone NZO mice aged 7 weeks, fed a dietary regimen of carbohydrate restriction for different periods (young mice - 11 weeks, middle-aged mice - 32 weeks) followed by a carbohydrate intervention for 3 weeks, offered the opportunity to distinguish the effects of diet-induced metabolic stress in different ages on the functionality and integrity of pancreatic islets and their beta-cells (publication II, manuscript). Interestingly, while young NZO mice exhibited massive hyperglycemia in response to diet-induced metabolic stress accompanied by beta-cell dysfunction and apoptosis, middle-aged animals revealed only moderate hyperglycemia by the maintenance of functional beta-cells. The loss of functional beta-cell mass in islets of young mice was associated with reduced expression of PDX1 transcription factor, increased endocrine AGE formation and related redox stress as well as TXNIP-dependent induction of the mitochondrial death pathway. Although the amounts of secreted insulin and the proliferative potential were comparable in both age groups, islets of middle-aged mice exhibited sustained PDX1 expression, almost regular insulin secretory function, increased capacity for cell cycle progression as well as maintained redox potential. The results of the present thesis indicate a loss of functional beta-cell mass in young diabetes-prone NZO mice, occurring by redox imbalance and induction of apoptotic signaling pathways. In contrast, aging under physiological conditions in C57BL/6J mice and in combination with diet-induced metabolic stress in NZO mice does not appear to have adverse effects on the functionality and structural integrity of pancreatic islets and beta-cells, associated with adaptive responses on changing metabolic demands. However, considering the detrimental effects of aging, it has to be assumed that the compensatory potential of mice might be exhausted at a later point of time, finally leading to a loss of functional beta-cell mass and the onset and progression of type 2 diabetes. The polygenic, diabetes-prone NZO mouse is a suitable model for the investigation of human obesity-associated type 2 diabetes. However, mice at advanced age attenuated the diabetic phenotype or do not respond to the dietary stimuli. 
This might be explained by the middle age of mice, corresponding to the human age of about 38-40 years, in which the compensatory mechanisms of pancreatic islets and beta cells towards metabolic stress conditions are presumably more active.}, language = {en} } @phdthesis{Grafe2019, author = {Grafe, Marianne Erika}, title = {Analysis of supramolecular assemblies of NE81, the first lamin protein in a non-metazoan organism}, doi = {10.25932/publishup-44180}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441802}, school = {Universit{\"a}t Potsdam}, pages = {V, 94}, year = {2019}, abstract = {Lamine sind Proteine an der inneren Kernh{\"u}lle und bilden zusammen mit verbundenen Proteinen die nukle{\"a}re Lamina. Dieses Netzwerk sorgt f{\"u}r die Stabilit{\"a}t des Zellkerns und unterst{\"u}tzt die Organisation des Zell-Zytoskeletts. Zus{\"a}tzlich sind Lamine und ihre verbundenen Proteine in viele Prozesse wie Genregulation und Zelldifferenzierung involviert. Bis 2012 war der Stand der Forschung, dass nur bei mehrzelligen Organismen eine nukle{\"a}re Lamina zu finden ist. NE81 ist das erste lamin-{\"a}hnliche Protein, das in einem nicht-mehrzelligen Organismus (Dictyostelium discoideum) entdeckt wurde. Es hat viele Eigenschaften und Strukturmerkmale mit Laminen gemeinsam. Dazu z{\"a}hlt der dreiteilige Aufbau des Proteins, eine Phosphorylierungsstelle f{\"u}r ein Zellzyklus-abh{\"a}ngiges Enzym, ein Kernlokalisationssignal, wodurch das Protein in den Kern transportiert wird, sowie eine C-terminale Sequenz zur Verankerung des Proteins in der Kernh{\"u}lle. In dieser Arbeit wurden verschiedene Methoden zur vereinfachten Untersuchung von Laminstrukturen getestet, um zu zeigen, dass sich NE81 wie bereits bekannte Lamin-Proteine verh{\"a}lt und supramolekulare Netzwerke aus Laminfilamenten bildet. Zur Analyse der Struktur supramolekularer Anordnungen wurde das Protein durch Entfernen des Kernlokalisationssignals auf der {\"a}ußeren Kernh{\"u}lle von Dictyostelium gebildet. Die anschließende Untersuchung der Oberfl{\"a}che der Kerne mit einem Rasterelektronenmikroskop zeigte, dass NE81 Strukturen in der Gr{\"o}ße von Laminen bildet, allerdings nicht in regelm{\"a}ßigen filament{\"o}sen Anordnungen. Um die Entstehung der Laminfilamente zu untersuchen, wurde l{\"o}sliches NE81 aus Dictyostelium aufgereinigt und mit verschiedenen mikroskopischen Methoden untersucht. Dabei wurde festgestellt, dass NE81 unter Niedrigsalz-Bedingungen d{\"u}nne, fadenf{\"o}rmige Strukturen und Netzwerke ausbildet, die denen von S{\"a}ugetier-Laminen sehr {\"a}hnlich sind. Die Mutation der Phosphorylierungsstelle von NE81 zu einer imitierenden dauerhaften Phosphorylierung von NE81 in der Zelle, zeigte zun{\"a}chst ein gel{\"o}stes Protein, das {\"u}berraschenderweise unter Blaulichtbestrahlung der Zelle wieder lamin-{\"a}hnliche Anordnungen formte. 
Die Ergebnisse dieser Arbeit zeigen, dass NE81 echte Laminstrukturen ausbilden kann, und heben Dictyostelium als Nicht-S{\"a}ugetier-Modellorganismus mit einer gut charakterisierten Kernh{\"u}lle, mit allen relevanten, aus tierischen Zellen bekannten Proteinen, hervor.}, language = {en} } @phdthesis{Rector2019, author = {Rector, Michael V.}, title = {The acute effect of exercise on flow-mediated dilation in young people with cystic fibrosis}, doi = {10.25932/publishup-43893}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-438938}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2019}, abstract = {Introduction: Cystic fibrosis (CF) is a genetic disease which disrupts the function of an epithelial surface anion channel, CFTR (cystic fibrosis transmembrane conductance regulator). Impairment of this channel leads to inflammation and infection in the lung, causing the majority of morbidity and mortality. However, CF is a multiorgan disease affecting many tissues, including vascular smooth muscle. Studies have revealed that young people with cystic fibrosis lacking inflammation and infection still demonstrate vascular endothelial dysfunction, measured by flow-mediated dilation (FMD). In other disease cohorts, i.e. diabetic and obese, endurance exercise interventions have been shown to improve or taper this impairment. However, long-term exercise interventions are risky, as well as costly in terms of time and resources. Nevertheless, emerging research has correlated the acute effects of exercise with its long-term benefits and advocates the study of acute exercise effects on FMD prior to longitudinal studies. The acute effects of exercise on FMD have previously not been examined in young people with CF, but could yield insights into the potential benefits of long-term exercise interventions. The aims of these studies were to 1) develop and test the reliability of the FMD method and its applicability to study acute exercise effects; 2) compare baseline FMD and the acute exercise effect on FMD between young people with and without CF; and 3) explore associations between the acute effects of exercise on FMD and demographic characteristics, physical activity levels, lung function, maximal exercise capacity or inflammatory hsCRP levels. Methods: Thirty young volunteers (10 people with CF, 10 non-CF and 10 non-CF active matched controls) between the ages of 10 and 30 years old completed blood draws, pulmonary function tests, maximal exercise capacity tests and baseline FMD measurements, before returning approximately 1 week later and performing a 30-min constant load training at 75\% HRmax. FMD measurements were taken prior to, immediately after, 30 minutes after and 1 hour after the constant load training. ANOVAs and repeated measures ANOVAs were employed to explore differences between groups and timepoints, respectively. Linear regression was implemented and evaluated to assess correlations between FMD and demographic characteristics, physical activity levels, lung function, maximal exercise capacity or inflammatory hsCRP levels. For all comparisons, statistical significance was set at p < 0.05. Results: Young people with CF presented with decreased lung function and maximal exercise capacity compared to matched controls. Baseline FMD was also significantly decreased in the CF group (CF: 5.23\% vs. non-CF: 8.27\% vs. non-CF active: 9.12\%). Immediately post-training, FMD was significantly attenuated (approximately 40\%) in all groups, with the CF group still demonstrating the lowest FMD. 
Follow-up measurements of FMD revealed a slow recovery towards baseline values 30 min post-training and improvements in the CF and non-CF active groups 60 min post-training. Linear regression exposed significant correlations between maximal exercise capacity (VO2 peak), BMI and FMD immediately post-training. Conclusion: These new findings confirm that CF vascular endothelial dysfunction can be acutely modified by exercise and will aid in underlining the importance of exercise in CF populations. The potential benefits of long-term exercise interventions on vascular endothelial dysfunction in young people with CF warrants further investigation.}, language = {en} } @phdthesis{Bieri2019, author = {Bieri, Pascal}, title = {Topmanager im Kreuzfeuer ihrer Dilemmata}, doi = {10.25932/publishup-44106}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441062}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 301}, year = {2019}, abstract = {Die vorliegende Forschungsarbeit untersucht den Umgang mit Dilemmata von Topmanagern. Dilemmata sind ein allt{\"a}gliches Gesch{\"a}ft im Topmanagement. Die entsprechenden Akteure sind daher immer wieder mit diesen konfrontiert und mit ihnen umzugehen, geh{\"o}rt gewissermaßen zu ihrer Berufsbeschreibung. Hinzu kommen Dilemmata im nicht direkt gesch{\"a}ftlichen Bereich, wie zum Beispiel jene zwischen Familien- und Arbeitszeit. Doch stellt dieses Feld ein kaum untersuchtes Forschungsgebiet dar. W{\"a}hrend Dilemmata in anderen Bereichen eine zunehmende Aufmerksamkeit erfuhren, wurden deren Besonderheiten im Topmanagement genauso wenig differenziert betrachtet wie zugeh{\"o}rige Umgangsweisen. Theorie und Praxis stellen bez{\"u}glich Dilemmata von Topmanagern vor allem einen Gegensatz dar, beziehungsweise fehlt es an einer theoretischen Fundierung der Empirie. Diesem Umstand wird mittels dieser Studie begegnet. Auf der Grundlage einer differenzierten und breiten Erfassung von Theorien zu Dilemmata, so diese auch noch nicht auf Topmanager bezogen wurden, und einer empirischen Erhebung, die im Mittelpunkt dieser Arbeit stehen, soll das Feld Dilemmata von Topmanagern der Forschung ge{\"o}ffnet werden. Empirische Grundlage sind vor allem narrative Interviews mit Topmanagern {\"u}ber ihre Dilemmata-Wahrnehmung, ausgemachte Ursachen, Umgangsweisen und Resultate. Dies erlaubt es, Topmanagertypen sowie Dilemmata-Arten, mit denen sie konfrontiert sind oder waren, analytisch herauszuarbeiten. Angesichts der Praxisrelevanz von Dilemmata von Topmanagern wird jedoch nicht nur ein theoretisches Modell zu dieser Thematik erarbeitet, es werden auch Reflexionen auf die Praxis in Form von Handlungsempfehlungen vorgenommen. Schließlich gilt es, die allgemeine Theorie zu Dilemmata, ohne konkreten Bezug zu Topmanagern, mit den theoretischen Erkenntnissen dieser Studie auf empirischer Basis zu kontrastieren. 
Dabei wird im Rahmen der empirischen Erfassung und Auswertung dem Ansatz der Grounded-Theory-Methodologie gefolgt.}, language = {de} } @phdthesis{Riemer2019, author = {Riemer, Janine}, title = {Synthese und Charakterisierung selektiver Fluoroionophore f{\"u}r intra- und extrazellul{\"a}re Bestimmungen von Kalium- und Natrium-Ionen}, doi = {10.25932/publishup-44193}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441932}, school = {Universit{\"a}t Potsdam}, pages = {IV, 165}, year = {2019}, abstract = {Im Rahmen dieser Dissertation konnten neue Kalium- und Natrium-Ionen Fluoreszenzfarbstoffe von der Klasse der Fluoroionophore synthetisiert und charakterisiert werden. Sie bestehen aus einem N Phenylazakronenether als Ionophor und unterschiedlichen Fluorophoren und sind {\"u}ber einen π-konjugierten 1,2,3-Triazol-1,4-diyl Spacer verbunden. Dabei lag der Fokus w{\"a}hrend ihrer Entwicklung darauf, diese in ihrer Sensitivit{\"a}t, Selektivit{\"a}t und in ihren photophysikalischen Eigenschaften so zu funktionalisieren, dass sie f{\"u}r intra- bzw. extrazellul{\"a}re Konzentrationsbestimmungen geeignet sind. Durch Variation der in ortho Position der N-Phenylazakronenether befindlichen Alkoxy-Gruppen und der fluorophoren Gruppe der Fluoroionophore konnte festgestellt werden, dass die Sensitivit{\"a}t und Selektivit{\"a}t f{\"u}r Kalium- bzw. Natrium-Ionen jeweils durch eine bestimmte Isomerie der 1,2,3-Triazol-1,4-diyl-Einheit erh{\"o}ht wird. Des Weiteren wurde gezeigt, dass durch eine erh{\"o}hte Einschr{\"a}nkung der N,N-Diethylamino-Gruppe des Fluorophors eine Steigerung der Fluoreszenzquantenausbeute und eine Verschiebung des Emissionsmaximums auf {\"u}ber 500 nm erreicht werden konnte. Die Einf{\"u}hrung einer Isopropoxy-Gruppe an einem N-Phenylaza-[18]krone-6-ethers resultierte dabei in einem hoch selektiven Kalium-Ionen Fluoroionophor und erm{\"o}glichte eine in vitro {\"U}berwachung von 10 - 80 mM Kalium-Ionen. Die Substitution einer Methoxy-Gruppe an einem N-Phenylaza-[15]krone-5-ether kombiniert mit unterschiedlich N,N-Diethylamino-Coumarinen lieferte hingegen zwei Natrium-Ionen Fluoroionophore, die f{\"u}r die {\"U}berwachung von intra- bzw. extrazellul{\"a}ren Natrium-Ionen Konzentrationen geeignet sind. In einem weiteren Schritt wurden N-Phenylaza-[18]krone-6-ether mit einem Fluorophor, basierend auf einem [1,3]-Dioxolo[4,5-f][1,3]benzodioxol-(DBD)-Grundger{\"u}st, funktionalisiert. Die im Anschluss durchgef{\"u}hrten spektroskopischen Untersuchungen ergaben, dass die Isopropoxy-Gruppe in ortho Position des N-Phenylaza-[18]krone-6-ether in einen f{\"u}r extrazellul{\"a}re Kalium-Ionen Konzentrationen selektiven Fluoroionophor resultierte, der die Konzentrationsbestimmungen {\"u}ber die Fluoreszenzintensit{\"a}t und -lebensdauer erm{\"o}glicht. In einem abschließenden Schritt konnte unter Verwendung eines Pyrens als fluorophore Gruppe ein weiterer f{\"u}r extrazellul{\"a}re Kalium-Ionen Konzentrationen geeigneter Fluoroionophor entwickelt werden. Die Bestimmung der Kalium-Ionen Konzentration erfolgte hierbei anhand der Fluoreszenzintensit{\"a}tsverh{\"a}ltnisse bei zwei Emissionswellenl{\"a}ngen. Insgesamt konnten 17 verschiedene neue Fluoroionophore f{\"u}r die Bestimmung von Kalium- bzw. Natrium-Ionen synthetisiert und charakterisiert werden. 
Sechs dieser neuen Molek{\"u}le erm{\"o}glichen in vitro Messungen der intra- oder extrazellul{\"a}ren Kalium- und Natrium-Ionen Konzentrationen und k{\"o}nnten zuk{\"u}nftig f{\"u}r in vivo Konzentrationsmessungen verwendet werden.}, language = {de} } @phdthesis{Debsharma2019, author = {Debsharma, Tapas}, title = {Cellulose derived polymers}, doi = {10.25932/publishup-44131}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441312}, school = {Universit{\"a}t Potsdam}, pages = {x, 103}, year = {2019}, abstract = {Plastics, such as polyethylene, polypropylene, and polyethylene terephthalate, are part of our everyday lives in the form of packaging, household goods, electrical insulation, etc. These polymers are non-degradable and create many environmental problems and public health concerns. Additionally, these polymers are produced from finite fossil resources. Given the continuing depletion of these limited resources, it is important to turn towards renewable sources and, ideally, towards polymers that biodegrade. Although many bio-based polymers are known, such as polylactic acid, polybutylene succinate adipate or polybutylene succinate, none have yet shown the promise of replacing conventional polymers like polyethylene, polypropylene and polyethylene terephthalate. Cellulose is one of the most abundant renewable resources produced in nature. It can be transformed into various small molecules, such as sugars, furans, and levoglucosenone. The aim of this research is to use cellulose-derived molecules for the synthesis of polymers. Acid-treated cellulose was subjected to thermal pyrolysis to obtain levoglucosenone, which was reduced to levoglucosenol. Levoglucosenol was polymerized, for the first time, by ring-opening metathesis polymerization (ROMP) yielding high molar mass polymers of up to ~150 kg/mol. The poly(levoglucosenol) is thermally stable up to ~220 ℃, amorphous, and exhibits a relatively high glass transition temperature of ~100 ℃. The poly(levoglucosenol) can be converted to a transparent film, resembling common plastic, and was found to degrade in a moist acidic environment. This means that poly(levoglucosenol) may find use as an alternative to conventional plastics such as polystyrene. Levoglucosenol was also converted into levoglucosenyl methyl ether, which was polymerized by cationic ring-opening metathesis polymerization (CROP). Polymers were obtained with molar masses up to ~36 kg/mol. These polymers are thermally stable up to ~220 ℃ and are semi-crystalline thermoplastics, having a glass transition temperature of ~35 ℃ and a melting transition of 70-100 ℃. Additionally, the polymers underwent cross-linking, hydrogenation and thiol-ene click chemistry.}, language = {en} } @phdthesis{Meinig2019, author = {Meinig, Michael}, title = {Bedrohungsanalyse f{\"u}r milit{\"a}rische Informationstechnik}, doi = {10.25932/publishup-44160}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441608}, school = {Universit{\"a}t Potsdam}, pages = {X, 137}, year = {2019}, abstract = {Risiken f{\"u}r Cyberressourcen k{\"o}nnen durch unbeabsichtigte oder absichtliche Bedrohungen entstehen. Dazu geh{\"o}ren Insider-Bedrohungen von unzufriedenen oder nachl{\"a}ssigen Mitarbeitern und Partnern, eskalierende und aufkommende Bedrohungen aus aller Welt, die stetige Weiterentwicklung der Angriffstechnologien und die Entstehung neuer und zerst{\"o}rerischer Angriffe. 
Informationstechnik spielt mittlerweile in allen Bereichen des Lebens eine entscheidende Rolle, u. a. auch im Bereich des Milit{\"a}rs. Ein ineffektiver Schutz von Cyberressourcen kann hier Sicherheitsvorf{\"a}lle und Cyberattacken erleichtern, welche die kritischen Vorg{\"a}nge st{\"o}ren, zu unangemessenem Zugriff, Offenlegung, {\"A}nderung oder Zerst{\"o}rung sensibler Informationen f{\"u}hren und somit die nationale Sicherheit, das wirtschaftliche Wohlergehen sowie die {\"o}ffentliche Gesundheit und Sicherheit gef{\"a}hrden. Oftmals ist allerdings nicht klar, welche Bedrohungen konkret vorhanden sind und welche der kritischen Systemressourcen besonders gef{\"a}hrdet ist. In dieser Dissertation werden verschiedene Analyseverfahren f{\"u}r Bedrohungen in milit{\"a}rischer Informationstechnik vorgeschlagen und in realen Umgebungen getestet. Dies bezieht sich auf Infrastrukturen, IT-Systeme, Netze und Anwendungen, welche Verschlusssachen (VS)/Staatsgeheimnisse verarbeiten, wie zum Beispiel bei milit{\"a}rischen oder Regierungsorganisationen. Die Besonderheit an diesen Organisationen ist das Konzept der Informationsr{\"a}ume, in denen verschiedene Datenelemente, wie z. B. Papierdokumente und Computerdateien, entsprechend ihrer Sicherheitsempfindlichkeit eingestuft werden, z. B. „STRENG GEHEIM", „GEHEIM", „VS-VERTRAULICH", „VS-NUR-F{\"U}R-DEN-DIENSTGEBRAUCH" oder „OFFEN". Die Besonderheit dieser Arbeit ist der Zugang zu eingestuften Informationen aus verschiedenen Informationsr{\"a}umen und der Prozess der Freigabe dieser. Jede in der Arbeit entstandene Ver{\"o}ffentlichung wurde mit Angeh{\"o}rigen in der Organisation besprochen, gegengelesen und freigegeben, so dass keine eingestuften Informationen an die {\"O}ffentlichkeit gelangen. Die Dissertation beschreibt zun{\"a}chst Bedrohungsklassifikationsschemen und Angreiferstrategien, um daraus ein ganzheitliches, strategiebasiertes Bedrohungsmodell f{\"u}r Organisationen abzuleiten. Im weiteren Verlauf wird die Erstellung und Analyse eines Sicherheitsdatenflussdiagramms definiert, welches genutzt wird, um in eingestuften Informationsr{\"a}umen operationelle Netzknoten zu identifizieren, die aufgrund der Bedrohungen besonders gef{\"a}hrdet sind. Die spezielle, neuartige Darstellung erm{\"o}glicht es, erlaubte und verbotene Informationsfl{\"u}sse innerhalb und zwischen diesen Informationsr{\"a}umen zu verstehen. Aufbauend auf der Bedrohungsanalyse werden im weiteren Verlauf die Nachrichtenfl{\"u}sse der operationellen Netzknoten auf Verst{\"o}ße gegen Sicherheitsrichtlinien analysiert und die Ergebnisse mit Hilfe des Sicherheitsdatenflussdiagramms anonymisiert dargestellt. Durch Anonymisierung der Sicherheitsdatenflussdiagramme ist ein Austausch mit externen Experten zur Diskussion von Sicherheitsproblematiken m{\"o}glich. Der dritte Teil der Arbeit zeigt, wie umfangreiche Protokolldaten der Nachrichtenfl{\"u}sse dahingehend untersucht werden k{\"o}nnen, ob eine Reduzierung der Menge an Daten m{\"o}glich ist. Dazu wird die Theorie der groben Mengen aus der Unsicherheitstheorie genutzt. 
Dieser Ansatz wird in einer Fallstudie, auch unter Ber{\"u}cksichtigung von m{\"o}glicherweise auftretenden Anomalien, getestet, und es wird ermittelt, welche Attribute in Protokolldaten am ehesten redundant sind.}, language = {de} } @phdthesis{Knoechel2019, author = {Kn{\"o}chel, Jane}, title = {Model reduction of mechanism-based pharmacodynamic models and its link to classical drug effect models}, doi = {10.25932/publishup-44059}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440598}, school = {Universit{\"a}t Potsdam}, pages = {vii, 147}, year = {2019}, abstract = {Continuous insight into biological processes has led to the development of large-scale, mechanistic systems biology models of pharmacologically relevant networks. While these models are typically designed to study the impact of diverse stimuli or perturbations on multiple system variables, the focus in pharmacological research is often on a specific input, e.g., the dose of a drug, and a specific output related to the drug effect or response in terms of some surrogate marker. To study a chosen input-output pair, the complexity of the interactions as well as the size of the models hinders easy access and understanding of the details of the input-output relationship. The objective of this thesis is the development of a mathematical approach, specifically a model reduction technique, that allows (i) to quantify the importance of the different state variables for a given input-output relationship, and (ii) to reduce the dynamics to its essential features -- allowing for a physiological interpretation of state variables as well as parameter estimation in the statistical analysis of clinical data. We develop a model reduction technique using a control theoretic setting by first defining a novel type of time-limited controllability and observability gramians for nonlinear systems. We then show the superiority of the time-limited generalised gramians for nonlinear systems in the context of balanced truncation for a benchmark system from control theory. The concept of time-limited controllability and observability gramians is subsequently used to introduce a state and time-dependent quantity called the input-response (ir) index that quantifies the importance of state variables for a given input-response relationship at a particular time. We subsequently link our approach to sensitivity analysis, thus enabling for the first time the use of sensitivity coefficients for state space reduction. The sensitivity-based ir-indices are given as a product of two sensitivity coefficients. This allows not only for a computationally more efficient calculation but also for a clear distinction between the extent to which the input impacts a state variable and the extent to which a state variable impacts the output. The ir-indices give insight into the coordinated action of specific state variables for a chosen input-response relationship. Our developed model reduction technique results in reduced models that still allow for a mechanistic interpretation in terms of the quantities/state variables of the original system, which is a key requirement in the field of systems pharmacology and systems biology and distinguishes the reduced models from so-called empirical drug effect models. The ir-indices are explicitly defined with respect to a reference trajectory and thereby dependent on the initial state (this is an important feature of the measure). 
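For orientation, the linear time-invariant analogue of such time-limited gramians can be written down explicitly; the matrices A, B, C and the horizon T below are generic textbook notation, not quantities taken from the thesis, which generalises the concept to nonlinear systems:

\[ P(T) = \int_0^T e^{At} B B^\top e^{A^\top t}\,\mathrm{d}t, \qquad Q(T) = \int_0^T e^{A^\top t} C^\top C\, e^{At}\,\mathrm{d}t. \]

In the same schematic spirit, the sensitivity-based ir-index described above combines an input-to-state and a state-to-output sensitivity, i.e. a quantity of the form \( (\partial x_k/\partial u)\,(\partial y/\partial x_k) \) evaluated along a reference trajectory.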
This is demonstrated for an example from the field of systems pharmacology, showing that the reduced models are very informative in their ability to detect (genetic) deficiencies in certain physiological entities. Comparing our novel model reduction technique to existing techniques shows its superiority. The novel input-response index as a measure of the importance of state variables provides a powerful tool for understanding the complex dynamics of large-scale systems in the context of a specific drug-response relationship. Furthermore, the indices provide a means for a very efficient model order reduction and, thus, an important step towards translating insight from biological processes incorporated in detailed systems pharmacology models into the population analysis of clinical data.}, language = {en} } @phdthesis{Hanschmann2019, author = {Hanschmann, Raffael Tino}, title = {Stalling the engine? EU climate politics after the 'Great Recession'}, doi = {10.25932/publishup-44044}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440441}, school = {Universit{\"a}t Potsdam}, pages = {XXVIII, 303}, year = {2019}, abstract = {This dissertation investigates the impact of the economic and fiscal crisis starting in 2008 on EU climate policy-making. While the overall number of adopted greenhouse gas emission reduction policies declined in the crisis aftermath, EU lawmakers decided to introduce new or tighten existing regulations in some important policy domains. Existing knowledge about the crisis impact on EU legislative decision-making cannot explain these inconsistencies. In response, this study develops an actor-centred conceptual framework based on rational choice institutionalism that provides a micro-level link to explain how economic crises translate into altered policy-making patterns. The core theoretical argument draws on redistributive conflicts, arguing that tensions between 'beneficiaries' and 'losers' of a regulatory initiative intensify during economic crises and spill over to the policy domain. To test this hypothesis, this study uses social network analysis to examine policy processes in three case studies: the introduction of carbon dioxide emission limits for passenger cars, the expansion of the EU Emissions Trading System to aviation, and the introduction of a regulatory framework for biofuels. The key finding is that an economic shock causes EU policy domains to polarise politically, resulting in intensified conflict and more difficult decision-making. The results also show that this process of political polarisation is rooted in the industry that is the subject of the regulation, and that intergovernmental bargaining among member states becomes more important, but also more difficult in times of crisis.}, language = {en} } @phdthesis{Ghani2019, author = {Ghani, Humaad}, title = {Structural evolution of the Kohat and Potwar fold and thrust belts of Pakistan}, doi = {10.25932/publishup-44077}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440775}, school = {Universit{\"a}t Potsdam}, pages = {viii, 121}, year = {2019}, abstract = {Fold and thrust belts are characteristic features of collisional orogens that grow laterally through time by deforming the upper crust in response to stresses caused by convergence. The propagation of deformation through the upper crust is accommodated by shortening along major folds and thrusts. 
The formation of these structures is influenced by the mechanical strength of d{\´e}collements, basement architecture, the presence of preexisting structures and the taper of the wedge. These factors control not only the sequence of deformation but also cause differences in the structural style. The Himalayan fold and thrust belt exhibits significant differences in the structural style from east to west. The external zone of the Himalayan fold and thrust belt, also called the Subhimalaya, has been extensively studied to understand the temporal development and differences in the structural style in Bhutan, Nepal and India; however, the Subhimalaya in Pakistan remains poorly studied. The Kohat and Potwar fold and thrust belts (herein called Kohat and Potwar) represent the Subhimalaya in Pakistan. The Main Boundary Thrust (MBT) marks the northern boundary of both Kohat and Potwar, showing that these belts are genetically linked to foreland-vergent deformation within the Himalayan orogen, despite the pronounced contrast in structural style. This contrast becomes more pronounced toward the south, where the active strike-slip Kalabagh Fault Zone links with the Kohat and Potwar range fronts, known as the Surghar Range and the Salt Range, respectively. The Surghar and Salt Ranges developed above the Surghar Thrust (SGT) and Main Frontal Thrust (MFT). In order to understand the structural style and spatiotemporal development of the major structures in Kohat and Potwar, I have used structural modeling and low-temperature thermochronology methods in this study. The structural modeling is based on the construction of balanced cross-sections by integrating surface geology, seismic reflection profiles and well data. In order to constrain the timing and magnitude of exhumation, I used apatite (U-Th-Sm)/He (AHe) and apatite fission track (AFT) dating. The results obtained from both methods are combined to document the Paleozoic to Recent history of Kohat and Potwar. The results of this research suggest two major events in the deformation history. The first major deformation event is related to Late Paleozoic rifting associated with the development of the Neo-Tethys Ocean. The second major deformation event is related to the Late Miocene to Pliocene development of the Himalayan fold and thrust belt in the Kohat and Potwar. The Late Paleozoic rifting is deciphered by inverse thermal modelling of detrital AFT and AHe ages from the Salt Range. The process of rifting in this area created normal faulting that resulted in the exhumation/erosion of Early to Middle Paleozoic strata, forming a major unconformity between Cambrian and Permian strata that is exposed today in the Salt Range. The normal faults formed in Late Paleozoic time played an important role in localizing the Miocene-Pliocene deformation in this area. The combination of structural reconstructions and thermochronologic data suggests that deformation initiated at 15±2 Ma on the SGT ramp in the southern part of Kohat. The early movement on the SGT accreted the foreland into the Kohat deforming wedge, forming the range front. The development of the MBT at 12±2 Ma formed the northern boundary of Kohat and Potwar. Deformation propagated south of the MBT in the Kohat on double d{\´e}collements and in the Potwar on a single basal d{\´e}collement. The double d{\´e}collement in the Kohat adopted an active roof-thrust deformation style that resulted in the disharmonic structural style in the upper and lower parts of the stratigraphic section. 
Incremental shortening resulted in the development of duplexes in the subsurface between two d{\´e}collements and imbrication above the roof thrust. Tectonic thickening caused by duplexes resulted in cooling and exhumation above the roof thrust by removal of a thick sequence of molasse strata. The structural modelling shows that the ramps on which duplexes formed in Kohat continue as tip lines of fault propagation folds in the Potwar. The absence of a double d{\´e}collement in the Potwar resulted in the preservation of a thick sequence of molasse strata there. The temporal data suggest that deformation propagated in-sequence from ~ 8 to 3 Ma in the northern part of Kohat and Potwar; however, internal deformation in the Kohat was more intense, probably required for maintaining a critical taper after a significant load was removed above the upper d{\´e}collement. In the southern part of Potwar, a steeper basement slope (β≥3°) and the presence of salt at the base of the stratigraphic section allowed for the complete preservation of the stratigraphic wedge, showcased by very little internal deformation. Activation of the MFT at ~4 Ma allowed the Salt Range to become the range front of the Potwar. The removal of a large amount of molasse strata above the MFT ramp enhanced the role of salt in shaping the structural style of the Salt Range and Kalabagh Fault Zone. Salt accumulation and migration resulted in the formation of normal faults in both areas. Salt migration in the Kalabagh fault zone has triggered out-of-sequence movement on ramps in the Kohat. The amount of shortening calculated between the MBT and the SGT in Kohat is 75±5 km and between the MBT and the MFT in Potwar is 65±5 km. A comparable amount of shortening is accommodated in the Kohat and Potwar despite their different widths: 70 km Kohat and 150 km Potwar. In summary, this research suggests that deformation switched between different structures during the last ~15 Ma through different modes of fault propagation, resulting in different structural styles and the out-of-sequence development of Kohat and Potwar.}, language = {en} } @phdthesis{Meessen2019, author = {Meeßen, Christian}, title = {The thermal and rheological state of the Northern Argentinian foreland basins}, doi = {10.25932/publishup-43994}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439945}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 151}, year = {2019}, abstract = {The foreland of the Andes in South America is characterised by distinct along strike changes in surface deformational styles. These styles are classified into two end-members, the thin-skinned and the thick-skinned style. The superficial expression of thin-skinned deformation is a succession of narrowly spaced hills and valleys, that form laterally continuous ranges on the foreland facing side of the orogen. Each of the hills is defined by a reverse fault that roots in a basal d{\´e}collement surface within the sedimentary cover, and acted as thrusting ramp to stack the sedimentary pile. Thick-skinned deformation is morphologically characterised by spatially disparate, basement-cored mountain ranges. These mountain ranges are uplifted along reactivated high-angle crustal-scale discontinuities, such as suture zones between different tectonic terranes. Amongst proposed causes for the observed variation are variations in the dip angle of the Nazca plate, variation in sediment thickness, lithospheric thickening, volcanism or compositional differences. 
The proposed mechanisms are predominantly based on geological observations or numerical thermomechanical modelling, but there has been no attempt to understand the mechanisms from the perspective of data-integrative 3D modelling. The aim of this dissertation is therefore to understand how lithospheric structure controls the deformational behaviour. The integration of independent data into a consistent model of the lithosphere yields additional evidence that helps to understand the causes of the different deformational styles. Northern Argentina encompasses the transition from the thin-skinned fold-and-thrust belt in Bolivia to the thick-skinned Sierras Pampeanas province, which makes this area a well-suited location for such a study. The general workflow followed in this study first involves data-constrained structural and density modelling in order to obtain a model of the study area. This model was then used to predict the steady-state thermal field, which in turn served to assess the present-day rheological state in northern Argentina. The structural configuration of the lithosphere in northern Argentina was determined by means of data-integrative, 3D density modelling verified by Bouguer gravity. The model delineates the first-order density contrasts in the lithosphere in the uppermost 200 km, and discriminates bodies for the sediments, the crystalline crust, the lithospheric mantle and the subducting Nazca plate. To obtain the intra-crustal density structure, an automated inversion approach was developed and applied to a starting structural model that assumed a homogeneously dense crust. The resulting final structural model indicates that the crustal structure can be represented by an upper crust with a density of 2800 kg/m³, and a lower crust of 3100 kg/m³. The Transbrazilian Lineament, which separates the Pampia terrane from the R{\´i}o de la Plata craton, is expressed as a zone of low average crustal densities. In a side study, we demonstrate that the gravity inversion method developed to obtain intra-crustal density structures is also applicable to obtaining density variations in the uppermost lithospheric mantle. Densities at such sub-crustal depths are difficult to constrain from seismic tomographic models due to smearing of crustal velocities. With the application to the uppermost lithospheric mantle in the north Atlantic, we demonstrate in Tan et al. (2018) that lateral density trends of at least 125\,km width are robustly recovered by the inversion method, thereby providing an important tool for the delineation of subcrustal density trends. Due to the genetic link between subduction, orogenesis and retroarc foreland basins, the question arises whether the steady-state assumption is valid in such a dynamic setting. To answer this question, I analysed (i) the impact of subduction on the conductive thermal field of the overlying continental plate, and (ii) the differences between the transient and steady-state thermal fields of a geodynamically coupled model. Both studies indicate that the assumption of a thermal steady-state is applicable in most parts of the study area. Within the orogenic wedge, where the assumption cannot be applied, I estimated the transient thermal field based on the results of the conducted analyses. Accordingly, the structural model that had been obtained in the first step could be used to obtain a 3D conductive steady-state thermal field. 
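As background, the 3D conductive steady-state thermal field referred to here is, in its generic form, a solution of the steady-state heat conduction equation; the symbols below (thermal conductivity \( \lambda \), radiogenic heat production \( H \), temperature \( T \)) are standard notation and are not taken from the thesis:

\[ \nabla \cdot \big( \lambda(\mathbf{x})\, \nabla T(\mathbf{x}) \big) + H(\mathbf{x}) = 0, \]

solved subject to boundary conditions such as a fixed temperature at the surface and a prescribed temperature or heat flow at the base of the model.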
The rheological assessment based on this thermal field indicates that the lithosphere of the thin-skinned Subandean ranges is characterised by a relatively strong crust and a weak mantle. In contrast, the adjacent foreland basin consists of a fully coupled, very strong lithosphere. Thus, shortening in northern Argentina can only be accommodated within the weak lithosphere of the orogen and the Subandean ranges. The analysis suggests that the d{\´e}collements of the fold-and-thrust belt are the shallow continuation of shear zones that reside in the ductile sections of the orogenic crust. Furthermore, the localisation of the faults that provide strain transfer between the deeper ductile crust and the shallower d{\´e}collement is strongly influenced by crustal weak zones such as foliation. In contrast to the northern foreland, the lithosphere of the thick-skinned Sierras Pampeanas is fully coupled and characterised by a strong crust and mantle. The high overall strength prevents the generation of crustal-scale faults by tectonic stresses. Even inherited crustal-scale discontinuities, such as sutures, cannot sufficiently reduce the strength of the lithosphere in order to be reactivated. Therefore, magmatism, which had been identified as a precursor of basement uplift in the Sierras Pampeanas, is the key factor that leads to the broken foreland of this province. Thermal weakening, and potentially lubrication of the inherited discontinuities, locally weakens the lithosphere such that tectonic stresses can uplift the basement blocks. This hypothesis explains both the spatially disparate character of the broken foreland and the observed temporal delay between volcanism and basement block uplift. This dissertation provides for the first time a data-driven 3D model that is consistent with geophysical data and geological observations, and that is able to causally link the thermo-rheological structure of the lithosphere to the observed variation of surface deformation styles in the retroarc foreland of northern Argentina.}, language = {en} } @phdthesis{Schuerings2019, author = {Sch{\"u}rings, Marco Philipp Hermann}, title = {Synthesis of 1D microgel strands and their motion analysis in solution}, doi = {10.25932/publishup-43953}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439532}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2019}, abstract = {The fabrication of 1D nanostrands composed of stimuli-responsive microgels is demonstrated in this work. Microgels are well-known materials able to respond to various stimuli from their outer environment. Since these microgels respond via a volume change to an external stimulus, a targeted mechanical response can be achieved. Through carefully choosing the right composition of the polymer matrix, microgels can be designed to react precisely to the targeted stimuli (e.g. drug delivery via pH and temperature changes, or selective contractions through changes in electrical current). The aim of this work was to create flexible nano-filaments capable of fast anisotropic contractions similar to muscle filaments. For the fabrication of such filaments or strands, nanostructured templates (PDMS wrinkles) were chosen due to their facile and low-cost fabrication and the versatile tunability of their dimensions. Additionally, wrinkling is a well-known lithography-free method which enables the fabrication of nanostructures in a reproducible manner and with a high long-range periodicity. 
In Chapter 2.1, it was shown for the first time that microgels as soft matter particles can be aligned to densely packed microgel arrays of various lateral dimensions. The alignment of microgels with different compositions (e.g. VCL/AAEM, NIPAAm, NIPAAm/VCL and charged microgels) was shown by using different assembly techniques (e.g. spin-coating, template confined molding). It was chosen to set one experimental parameter constant which was the SiOx surface composition of the templates and substrates (e.g. oxidized PDMS wrinkles, Si-wafers and glass slides). It was shown that the fabrication of nanoarrays was feasible with all tested microgel types. Although the microgels exhibited different deformability when aligned on a flat surface, they retained their thermo-responsivity and swelling behavior. Towards the fabrication of 1D microgel strands interparticle connectivity was aspired. This was achieved via different cross-linking methods (i.e. cross-linking via UV-irradiation and host-guest complexation) discussed in Chapter 2.2. The microgel arrays created by different assembly methods and microgel types were tested for their cross-linking suitability. It was observed that NIPAAm based microgels cannot be cross-linked with UV light. Furthermore, it was found that these microgels exhibit a strong surface-particle-interaction and therefore could not be detached from the given substrates. In contrast to the latter, with VCL/AAEM based microgels it was possible to both UV cross-link them based on the keto-enol tautomerism of the AAEM copolymer, and to detach them from the substrate due to the lower adhesion energy towards SiOx surfaces. With VCL/AAEM microgels long, one-dimensional microgel strands could be re-dispersed in water for further analysis. It has also been shown that at least one lateral dimension of the free dispersed 1D microgel strands is easily controllable by adjusting the wavelength of the wrinkled template. For further work, only VCL/AAEM based microgels were used to focus on the main aim of this work, i.e. the fabrication of 1D microgel nanostrands. As an alternative to the unspecific and harsh UV cross-linking, the host-guest complexation via diazobenzene cross-linkers and cyclodextrin hosts was explored. The idea behind this approach was to give means to a future construction kit-like approach by incorporation of cyclodextrin comonomers in a broad variety of particle systems (e.g. microgels, nanoparticles). For this purpose, VCL/AAEM microgels were copolymerized with different amounts of mono-acrylate functionalized β-cyclodextrin (CD). After successfully testing the cross-linking capability in solution, the cross-linking of aligned VCL/AAEM/CD microgels was tried. Although the cross-linking worked well, once the single arrays came into contact to each other, they agglomerated. As a reason for this behavior residual amounts of mono-complexed diazobenzene linkers were suspected. Thus, end-capping strategies were tried out (e.g. excess amounts of β-cyclodextrin and coverage with azobenzene functionalized AuNPs) but were unsuccessful. With deeper thought, entropy effects were taken into consideration which favor the release of complexed diazobenzene linker leading to agglomerations. To circumvent this entropy driven effect, a multifunctional polymer with 50\% azobenzene groups (Harada polymer) was used. First experiments with this polymer showed promising results regarding a less pronounced agglomeration (Figure 77). Thus, this approach could be pursued in the future. 
In this chapter it was found that, in contrast to pearl-necklace and ribbon-like formations, particle alignment in zigzag formation provided the best compromise in terms of stability in dispersion (see Figure 44a and Figure 51) while maintaining sufficient flexibility. For this reason, microgel strands in zigzag formation were used for the motion analysis described in Chapter 2.3. The aim was to observe the properties of unrestrained microgel strands in solution (e.g. diffusion behavior, rotational properties and ideally, anisotropic contraction after temperature increase). Initially, 1D microgel strands were manipulated via AFM in a liquid cell setup. It could be observed that the strands required a higher load force compared to single microgels to be detached from the surface. However, with the AFM it was not possible to detach the strands in a controllable manner; attempts resulted in the complete removal of single microgel particles or in tearing the strands off the surface. For this reason, to observe the motion behavior of unrestrained microgel strands in solution, confocal microscopy was used. Furthermore, to hinder adsorption of the strands, it was found that coating the surface of the substrates with a repulsive polymer film was beneficial. Confocal and wide-field microscopy videos showed that the microgel strands exhibit translational and rotational diffusive motion in solution without perceptible bending. Unfortunately, with these methods the detection of the anisotropic stimuli-responsive contraction of the free-moving microgel strands was not possible. To summarize, the flexibility of microgel strands is more comparable to the mechanical behavior of a semi-flexible cable than to that of a yarn. The strands studied here consist of dozens or even hundreds of discrete submicron units strung together by cross-linking, having few parallels in nanotechnology. With the insights gained in this work on microgel-surface interactions, in the future, a targeted functionalization of the template and substrate surfaces can be conducted to actively prevent unwanted microgel adsorption for a given microgel system (e.g. PVCL and polystyrene coating). This measure would make the discussed alignment methods more diverse. As shown herein, the assembly methods enable a versatile microgel alignment (e.g. microgel meshes, double and triple strands). To go further, one could use more complex templates (e.g. ceramic rhombs and star-shaped wrinkles, Figure 14) to expand the possibilities of microgel alignment and to precisely control their aspect ratios (e.g. microgel rods with homogeneous size distributions).}, language = {en} } @phdthesis{Kolk2019, author = {Kolk, Jens}, title = {The long-term legacy of historical land cover changes}, doi = {10.25932/publishup-43939}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439398}, school = {Universit{\"a}t Potsdam}, pages = {196}, year = {2019}, abstract = {Over the last years, there has been increasing awareness that historical land cover changes and associated land use legacies may be important drivers of present-day species richness and biodiversity due to time-delayed extinctions or colonizations in response to historical environmental changes. Historically altered habitat patches may therefore exhibit an extinction debt or colonization credit and can be expected to lose or gain species in the future. 
However, extinction debts and colonization credits are difficult to detect and their actual magnitudes or payments have rarely been quantified because species richness patterns and dynamics are also shaped by recent environmental conditions and recent environmental changes. In this thesis we aimed to determine patterns of herb-layer species richness and recent species richness dynamics of forest herb layer plants and link those patterns and dynamics to historical land cover changes and associated land use legacies. The study was conducted in the Prignitz, NE-Germany, where the forest distribution remained stable for the last ca. 100 years but where a) the deciduous forest area had declined by more than 90 per cent (leaving only remnants of "ancient forests"), b) small new forests had been established on former agricultural land ("post-agricultural forests"). Here, we analyzed the relative importance of land use history and associated historical land cover changes for herb layer species richness compared to recent environmental factors and determined magnitudes of extinction debt and colonization credit and their payment in ancient and post-agricultural forests, respectively. We showed that present-day species richness patterns were still shaped by historical land cover changes that reached back more than a century. Although recent environmental conditions were largely comparable, we found significantly more forest specialists, species with short-distance dispersal capabilities and clonals in ancient forests than in post-agricultural forests. Those species richness differences were largely contingent on a colonization credit in post-agricultural forests that ranged up to 9 species (average 4.7), while the extinction debt in ancient forests had almost completely been paid. Environmental legacies from historical agricultural land use played a minor role in species richness differences. Instead, patch connectivity was most important. Species richness in ancient forests was still dependent on historical connectivity, indicating a last glimpse of an extinction debt, and the colonization credit was highest in isolated post-agricultural forests. In post-agricultural forests that were better connected or directly adjacent to ancient forest patches the colonization credit was much smaller and we were able to verify a gradual payment of the colonization credit from 2.7 species to 1.5 species over the last six decades.}, language = {en} } @phdthesis{Perlich2019, author = {Perlich, Anja}, title = {Digital collaborative documentation in mental healthcare}, doi = {10.25932/publishup-44029}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440292}, school = {Universit{\"a}t Potsdam}, pages = {x, 135}, year = {2019}, abstract = {With the growth of information technology, patient attitudes are shifting away from passively receiving care towards actively taking responsibility for their well-being. Handling doctor-patient relationships collaboratively and providing patients access to their health information are crucial steps in empowering patients. In mental healthcare, the implicit consensus amongst practitioners has been that sharing medical records with patients may have an unpredictable, harmful impact on clinical practice. In order to involve patients more actively in mental healthcare processes, Tele-Board MED (TBM) allows for digital collaborative documentation in therapist-patient sessions. 
The TBM software system offers a whiteboard-inspired graphical user interface that allows therapist and patient to jointly take notes during the treatment session. Furthermore, it provides features to automatically reuse the digital treatment session notes for the creation of treatment session summaries and clinical case reports. This thesis presents the development of the TBM system and evaluates its effects on 1) the fulfillment of the therapist's duties of clinical case documentation, 2) patient engagement in care processes, and 3) the therapist-patient relationship. Following the design research methodology, TBM was developed and tested in multiple evaluation studies in the domains of cognitive behavioral psychotherapy and addiction care. The results show that therapists are likely to use TBM with patients if they have a technology-friendly attitude and when its use suits the treatment context. Support in carrying out documentation duties as well as fulfilling legal requirements contributes to therapist acceptance. Furthermore, therapists value TBM as a tool to provide a discussion framework and quick access to worksheets during treatment sessions. Therapists express skepticism, however, regarding technology use in patient sessions and towards complete record transparency in general. Patients expect TBM to improve the communication with their therapist and to offer a better recall of discussed topics when taking a copy of their notes home after the session. Patients are doubtful regarding a possible distraction of the therapist and usage in situations when relationship-building is crucial. When applied in a clinical environment, collaborative note-taking with TBM encourages patient engagement and a team feeling between therapist and patient. Furthermore, it increases the patient's acceptance of their diagnosis, which in turn is an important predictor for therapy success. In summary, TBM has a high potential to deliver more than documentation support and record transparency for patients, but also to contribute to a collaborative doctor-patient relationship. This thesis provides design implications for the development of digital collaborative documentation systems in (mental) healthcare as well as recommendations for a successful implementation in clinical practice.}, language = {en} } @phdthesis{Nikaj2019, author = {Nikaj, Adriatik}, title = {Restful choreographies}, doi = {10.25932/publishup-43890}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-438903}, school = {Universit{\"a}t Potsdam}, pages = {xix, 146}, year = {2019}, abstract = {Business process management has become a key instrument to organize work as many companies represent their operations in business process models. Recently, business process choreography diagrams have been introduced as part of the Business Process Model and Notation standard to represent interactions between business processes, run by different partners. When it comes to the interactions between services on the Web, Representational State Transfer (REST) is one of the primary architectural styles employed by web services today. Ideally, the RESTful interactions between participants should implement the interactions defined at the business choreography level. The problem, however, is the conceptual gap between the business process choreography diagrams and RESTful interactions. 
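As a purely hypothetical illustration of the gap described here, the kind of correspondence that a RESTful realization has to make explicit might look as follows; the interaction names, resource paths, and status codes are invented for illustration and are not taken from the thesis:

# Hypothetical mapping from choreography-level interactions to RESTful request/response pairs.
CHOREOGRAPHY_TO_REST = {
    "Customer submits order to Retailer": ("POST", "/orders", "201 Created"),
    "Retailer sends invoice to Customer": ("POST", "/orders/{id}/invoice", "201 Created"),
    "Customer asks Retailer for status":  ("GET",  "/orders/{id}", "200 OK"),
}

for interaction, (verb, resource, response) in CHOREOGRAPHY_TO_REST.items():
    print(f"{interaction:40s} -> {verb} {resource} ({response})")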
Choreography diagrams, on the one hand, are modeled from business domain experts with the purpose of capturing, communicating and, ideally, driving the business interactions. RESTful interactions, on the other hand, depend on RESTful interfaces that are designed by web engineers with the purpose of facilitating the interaction between participants on the internet. In most cases however, business domain experts are unaware of the technology behind web service interfaces and web engineers tend to overlook the overall business goals of web services. While there is considerable work on using process models during process implementation, there is little work on using choreography models to implement interactions between business processes. This thesis addresses this research gap by raising the following research question: How to close the conceptual gap between business process choreographies and RESTful interactions? This thesis offers several research contributions that jointly answer the research question. The main research contribution is the design of a language that captures RESTful interactions between participants---RESTful choreography modeling language. Formal completeness properties (with respect to REST) are introduced to validate its instances, called RESTful choreographies. A systematic semi-automatic method for deriving RESTful choreographies from business process choreographies is proposed. The method employs natural language processing techniques to translate business interactions into RESTful interactions. The effectiveness of the approach is shown by developing a prototypical tool that evaluates the derivation method over a large number of choreography models. In addition, the thesis proposes solutions towards implementing RESTful choreographies. In particular, two RESTful service specifications are introduced for aiding, respectively, the execution of choreographies' exclusive gateways and the guidance of RESTful interactions.}, language = {en} } @phdthesis{Lewandowski2019, author = {Lewandowski, Max}, title = {Hadamard states for bosonic quantum field theory on globally hyperbolic spacetimes}, doi = {10.25932/publishup-43938}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439381}, school = {Universit{\"a}t Potsdam}, pages = {v, 69}, year = {2019}, abstract = {Quantenfeldtheorie auf gekr{\"u}mmten Raumzeiten ist eine semiklassische N{\"a}herung einer Quantentheorie der Gravitation, im Rahmen derer ein Quantenfeld unter dem Einfluss eines klassisch modellierten Gravitationsfeldes, also einer gekr{\"u}mmten Raumzeit, beschrieben wird. Eine der bemerkenswertesten Vorhersagen dieses Ansatzes ist die Erzeugung von Teilchen durch die gekr{\"u}mmte Raumzeit selbst, wie zum Beispiel durch Hawkings Verdampfen schwarzer L{\"o}cher und den Unruh Effekt. Andererseits deuten diese Aspekte bereits an, dass fundamentale Grundpfeiler der Theorie auf dem Minkowskiraum, insbesondere ein ausgezeichneter Vakuumzustand und damit verbunden der Teilchenbegriff, f{\"u}r allgemeine gekr{\"u}mmte Raumzeiten keine sinnvolle Entsprechung besitzen. Gleichermaßen ben{\"o}tigen wir eine alternative Implementierung von Kovarianz in die Theorie, da gekr{\"u}mmte Raumzeiten im Allgemeinen keine nicht-triviale globale Symmetrie aufweisen. Letztere Problematik konnte im Rahmen lokal-kovarianter Quantenfeldtheorie gel{\"o}st werden, wohingegen die Abwesenheit entsprechender Konzepte f{\"u}r Vakuum und Teilchen in diesem allgemeinen Fall inzwischen sogar in Form von no-go-Aussagen manifestiert wurde. 
Beim algebraischen Ansatz f{\"u}r eine Quantenfeldtheorie werden zun{\"a}chst Observablen eingef{\"u}hrt und erst anschließend Zust{\"a}nde via Zuordnung von Erwartungswerten. Obwohl die Observablen unter physikalischen Gesichtspunkten konstruiert werden, existiert dennoch eine große Anzahl von m{\"o}glichen Zust{\"a}nden, von denen viele, aus physikalischen Blickwinkeln betrachtet, nicht sinnvoll sind. Dieses Konzept von Zust{\"a}nden ist daher noch zu allgemein und bedarf weiterer physikalisch motivierter Einschr{\"a}nkungen. Beispielsweise ist es nat{\"u}rlich, sich im Falle freier Quantenfeldtheorien mit linearen Feldgleichungen auf quasifreie Zust{\"a}nde zu konzentrieren. Dar{\"u}ber hinaus ist die Renormierung von Erwartungswerten f{\"u}r Produkte von Feldern von zentraler Bedeutung. Dies betrifft insbesondere den Energie-Impuls-Tensor, dessen Erwartungswert durch distributionelle Bil{\"o}sungen der Feldgleichungen gegeben ist. Tats{\"a}chlich liefert J. Hadamard Theorie hyperbolischer Differentialgleichungen Bil{\"o}sungen mit festem singul{\"a}ren Anteil, so dass ein geeignetes Renormierungsverfahren definiert werden kann. Die sogenannte Hadamard-Bedingung an Bidistributionen steht f{\"u}r die Forderung einer solchen Singularit{\"a}tenstruktur und sie hat sich etabliert als nat{\"u}rliche Verallgemeinerung der f{\"u}r flache Raumzeiten formulierten Spektralbedingung. Seit Radzikowskis wegweisenden Resultaten l{\"a}sst sie sich außerdem lokal ausdr{\"u}cken, n{\"a}mlich als eine Bedingung an die Wellenfrontenmenge der Bil{\"o}sung. Diese Formulierung schl{\"a}gt eine Br{\"u}cke zu der von Duistermaat und H{\"o}rmander entwickelten mikrolokalen Analysis, die seitdem bei der {\"U}berpr{\"u}fung der Hadamard-Bedingung sowie der Konstruktion von Hadamard Zust{\"a}nden vielfach Verwendung findet und rasante Fortschritte auf diesem Gebiet ausgel{\"o}st hat. Obwohl unverzichtbar f{\"u}r die Analyse der Charakteristiken von Operatoren und ihrer Parametrizen sind die Methoden und Aussagen der mikrolokalen Analysis ungeeignet f{\"u}r die Analyse von nicht-singul{\"a}ren Strukturen und zentrale Aussagen sind typischerweise bis auf glatte Anteile formuliert. Beispielsweise lassen sich aus Radzikowskis Resultaten nahezu direkt Existenzaussagen und sogar ein konkretes Konstruktionsschema f{\"u}r Hadamard Zust{\"a}nde ableiten, die {\"u}brigen Eigenschaften (Bil{\"o}sung, Kausalit{\"a}t, Positivit{\"a}t) k{\"o}nnen jedoch auf diesem Wege nur modulo glatte Funktionen gezeigt werden. Es ist das Ziel dieser Dissertation, diesen Ansatz f{\"u}r lineare Wellenoperatoren auf Schnitten in Vektorb{\"u}ndeln {\"u}ber global-hyperbolischen Lorentz-Mannigfaltigkeiten zu vollenden und, ausgehend von einer lokalen Hadamard Reihe, Hadamard Zust{\"a}nde zu konstruieren. Beruhend auf Wightmans L{\"o}sung f{\"u}r die d'Alembert-Gleichung auf dem Minkowski-Raum und der Herleitung der avancierten und retardierten Fundamentall{\"o}sung konstruieren wir lokal Parametrizen in Form von Hadamard-Reihen und f{\"u}gen sie zu globalen Bil{\"o}sungen zusammen. 
Diese besitzen dann die Hadamard-Eigenschaft und wir zeigen anschließend, dass glatte Bischnitte existieren, die addiert werden k{\"o}nnen, so dass die verbleibenden Bedingungen erf{\"u}llt sind.}, language = {en} } @phdthesis{Schaefer2019, author = {Sch{\"a}fer, Merlin}, title = {Understanding and predicting global change impacts on migratory birds}, doi = {10.25932/publishup-43925}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439256}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 153}, year = {2019}, abstract = {This is a publication-based dissertation comprising three original research studies (one published, one submitted and one ready for submission; status March 2019). The dissertation introduces a generic computer model as a tool to investigate the behaviour and population dynamics of animals in cyclic environments. The model is further employed for analysing how migratory birds respond to various scenarios of altered food supply under global change. Here, ecological and evolutionary time-scales are considered, as well as the biological constraints and trade-offs the individual faces, which ultimately shape response dynamics at the population level. Further, the effect of fine-scale temporal patterns in resource supply is studied, which is challenging to achieve experimentally. My findings predict population declines, altered behavioural timing and negative carry-over effects arising in migratory birds under global change. They thus stress the need for intensified research on how ecological mechanisms are affected by global change and for effective conservation measures for migratory birds. The open-source modelling software created for this dissertation can now be used for other taxa and related research questions. Overall, this thesis improves our mechanistic understanding of the impacts of global change on migratory birds as one prerequisite for comprehending ongoing global biodiversity loss. The research results are discussed in a broader ecological and scientific context in a concluding synthesis chapter.}, language = {en} } @phdthesis{Krentz2019, author = {Krentz, Konrad-Felix}, title = {A Denial-of-Sleep-Resilient Medium Access Control Layer for IEEE 802.15.4 Networks}, doi = {10.25932/publishup-43930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439301}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 187}, year = {2019}, abstract = {With the emergence of the Internet of things (IoT), plenty of battery-powered and energy-harvesting devices are being deployed to fulfill sensing and actuation tasks in a variety of application areas, such as smart homes, precision agriculture, smart cities, and industrial automation. In this context, a critical issue is that of denial-of-sleep attacks. Such attacks temporarily or permanently deprive battery-powered, energy-harvesting, or otherwise energy-constrained devices of entering energy-saving sleep modes, thereby draining their charge. At the very least, a successful denial-of-sleep attack causes a long outage of the victim device. Moreover, to put battery-powered devices back into operation, their batteries have to be replaced. This is tedious and may even be infeasible, e.g., if a battery-powered device is deployed at an inaccessible location. While the research community came up with numerous defenses against denial-of-sleep attacks, most present-day IoT protocols include no denial-of-sleep defenses at all, presumably due to a lack of awareness and unsolved integration problems. 
After all, although there are many denial-of-sleep defenses, effective defenses against certain kinds of denial-of-sleep attacks are yet to be found. The overall contribution of this dissertation is to propose a denial-of-sleep-resilient medium access control (MAC) layer for IoT devices that communicate over IEEE 802.15.4 links. Internally, our MAC layer comprises two main components. The first main component is a denial-of-sleep-resilient protocol for establishing session keys among neighboring IEEE 802.15.4 nodes. The established session keys serve the dual purpose of implementing (i) basic wireless security and (ii) complementary denial-of-sleep defenses that belong to the second main component. The second main component is a denial-of-sleep-resilient MAC protocol. Notably, this MAC protocol not only incorporates novel denial-of-sleep defenses, but also state-of-the-art mechanisms for achieving low energy consumption, high throughput, and high delivery ratios. Altogether, our MAC layer resists, or at least greatly mitigates, all denial-of-sleep attacks against it that we are aware of. Furthermore, our MAC layer is self-contained and thus can act as a drop-in replacement for IEEE 802.15.4-compliant MAC layers. In fact, we implemented our MAC layer in the Contiki-NG operating system, where it seamlessly integrates into an existing protocol stack.}, language = {en} } @phdthesis{Zapata2019, author = {Zapata, Sebastian Henao}, title = {Paleozoic to Pliocene evolution of the Andean retroarc between 26 and 28°S: interactions between tectonics, climate, and upper plate architecture}, doi = {10.25932/publishup-43903}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439036}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2019}, abstract = {Interactions and feedbacks between tectonics, climate, and upper plate architecture control basin geometry, relief, and depositional systems. The Andes is part of a long-lived continental margin characterized by multiple tectonic cycles which have strongly modified the Andean upper plate architecture. In the Andean retroarc, spatiotemporal variations in the structure of the upper plate and tectonic regimes have resulted in marked along-strike variations in basin geometry, stratigraphy, deformational style, and mountain belt morphology. These along-strike variations include high-elevation plateaus (Altiplano and Puna) associated with a thin-skinned fold-and-thrust belt and thick-skinned deformation in broken foreland basins such as the Santa Barbara system and the Sierras Pampeanas. At the confluence of the Puna Plateau, the Santa Barbara system and the Sierras Pampeanas, major along-strike changes in upper plate architecture, mountain belt morphology, basement exhumation, and deformation style can be recognized. I have used a source-to-sink approach to unravel the spatiotemporal tectonic evolution of the Andean retroarc between 26 and 28°S. I obtained a large low-temperature thermochronology data set from basement units which includes apatite fission track, apatite U-Th-Sm/He, and zircon U-Th/He (ZHe) cooling ages. Stratigraphic descriptions of Miocene units were temporally constrained by U-Pb LA-ICP-MS zircon ages from interbedded pyroclastic material. Modeled ZHe ages suggest that the basement of the study area was exhumed during the Famatinian orogeny (550-450 Ma), followed by a period of relative tectonic quiescence during the Paleozoic and the Triassic. The basement experienced horst exhumation during the Cretaceous development of the Salta rift. 
After initial exhumation, deposition of thick Cretaceous syn-rift strata caused reheating of several basement blocks within the Santa Barbara system. During the Eocene-Oligocene, the Andean compressional setting was responsible for the exhumation of several disconnected basement blocks. These exhumed blocks were separated by areas of low relief, in which humid climate and low erosion rates facilitated the development of etchplains on the crystalline basement. The exhumed basement blocks formed an Eocene to Oligocene broken foreland basin in the back-bulge depozone of the Andean foreland. During the Early Miocene, foreland basin strata filled up the preexisting Paleogene topography. The basement blocks in lower relief positions were reheated; associated geothermal gradients were higher than 25°C/km. Miocene volcanism was responsible for lateral variations in the amount of reheating along the Campo-Arenal basin. Around 12 Ma, a new deformational phase modified the drainage network and fragmented the lacustrine system. As deformation and rock uplift continued, the easily eroded sedimentary cover was efficiently removed and reworked by an ephemeral fluvial system, preventing the development of significant relief. After ~6 Ma, the low erodibility of the basement blocks that began to be exposed caused an increase in relief, leading to the development of stable fluvial systems. Progressive relief development modified atmospheric circulation, creating a rainfall gradient. After 3 Ma, orographic rainfall and high relief led to the development of proximal fluvial-gravitational depositional systems in the surrounding basins.}, language = {en} } @phdthesis{Sidarenka2019, author = {Sidarenka, Uladzimir}, title = {Sentiment analysis of German Twitter}, doi = {10.25932/publishup-43742}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437422}, school = {Universit{\"a}t Potsdam}, pages = {vii, 217}, year = {2019}, abstract = {The immense popularity of online communication services in the last decade has not only upended our lives (with news spreading like wildfire on the Web, presidents announcing their decisions on Twitter, and the outcome of political elections being determined on Facebook) but also dramatically increased the amount of data exchanged on these platforms. Therefore, if we wish to understand the needs of modern society better and want to protect it from new threats, we urgently need more robust, higher-quality natural language processing (NLP) applications that can recognize such necessities and menaces automatically, by analyzing uncensored texts. Unfortunately, most NLP programs today have been created for standard language, as we know it from newspapers, or, in the best case, adapted to the specifics of English social media. This thesis reduces the existing deficit by entering the new frontier of German online communication and addressing one of its most prolific forms—users' conversations on Twitter. In particular, it explores the ways and means by which people express their opinions on this service, examines current approaches to automatic mining of these feelings, and proposes novel methods, which outperform state-of-the-art techniques. For this purpose, I introduce a new corpus of German tweets that have been manually annotated with sentiments, their targets and holders, as well as lexical polarity items and their contextual modifiers. 
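To illustrate the kind of lexicon-based scoring that such annotations of polarity items and contextual modifiers support, a minimal sketch follows; the toy lexicon entries, the negator list, and the fixed negation window are illustrative assumptions, not resources from the thesis:

# Toy German polarity lexicon and negation handling (illustrative values only).
TOY_LEXICON = {"gut": 1.0, "super": 1.0, "schlecht": -1.0, "furchtbar": -1.0}
NEGATORS = {"nicht", "kein", "keine", "niemals"}

def score_tweet(tokens, window=3):
    """Sum lexicon polarities; a negator within `window` preceding tokens flips the sign."""
    score = 0.0
    for i, token in enumerate(tokens):
        polarity = TOY_LEXICON.get(token.lower())
        if polarity is None:
            continue
        if any(t.lower() in NEGATORS for t in tokens[max(0, i - window):i]):
            polarity = -polarity  # contextual modifier: negation flips the prior polarity
        score += polarity
    return score

print(score_tweet("der service war nicht gut".split()))  # -1.0
print(score_tweet("das essen war super".split()))        # 1.0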
Using these data, I explore four major areas of sentiment research: (i) generation of sentiment lexicons, (ii) fine-grained opinion mining, (iii) message-level polarity classification, and (iv) discourse-aware sentiment analysis. In the first task, I compare three popular groups of lexicon generation methods: dictionary-, corpus-, and word-embedding-based ones, finding that dictionary-based systems generally yield better polarity lists than the last two groups. Apart from this, I propose a linear projection algorithm, whose results surpass many existing automatically-generated lexicons. Afterwards, in the second task, I examine two common approaches to automatic prediction of sentiment spans, their sources, and targets: conditional random fields (CRFs) and recurrent neural networks, obtaining higher scores with the former model and improving these results even further by redefining the structure of CRF graphs. When dealing with message-level polarity classification, I juxtapose three major sentiment paradigms: lexicon-, machine-learning-, and deep-learning-based systems, and try to unite the first and last of these method groups by introducing a bidirectional neural network with lexicon-based attention. Finally, in order to make the new classifier aware of microblogs' discourse structure, I let it separately analyze the elementary discourse units of each tweet and infer the overall polarity of a message from the scores of its EDUs with the help of two new approaches: latent-marginalized CRFs and Recursive Dirichlet Process.}, language = {en} } @phdthesis{MarimonTarter2019, author = {Marimon Tarter, Mireia}, title = {Word segmentation in German-learning infants and German-speaking adults}, doi = {10.25932/publishup-43740}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437400}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2019}, abstract = {There is evidence that infants start extracting words from fluent speech around 7.5 months of age (e.g., Jusczyk \& Aslin, 1995) and that they use at least two mechanisms to segment word forms from fluent speech: prosodic information (e.g., Jusczyk, Cutler \& Redanz, 1993) and statistical information (e.g., Saffran, Aslin \& Newport, 1996). However, how these two mechanisms interact and whether they change during development is still not fully understood. The main aim of the present work is to understand in what way different cues to word segmentation are exploited by infants when learning the language in their environment, as well as to explore whether this ability is related to later language skills. In Chapter 3 we sought to determine the reliability of the method used in most of the experiments in the present thesis (the Headturn Preference Procedure), as well as to examine correlations and individual differences between infants' performance and later language outcomes. In Chapter 4 we investigated how German-speaking adults weigh statistical and prosodic information for word segmentation. We familiarized adults with an auditory string in which statistical and prosodic information indicated different word boundaries and obtained both behavioral and pupillometry responses. Then, we conducted further experiments to understand in what way different cues to word segmentation are exploited by 9-month-old German-learning infants (Chapter 5) and by 6-month-old German-learning infants (Chapter 6). In addition, we conducted follow-up questionnaires with the infants and obtained language outcomes at later stages of development.
Our findings from this thesis revealed that (1) German-speaking adults assign a strong weight to prosodic cues, at least for the materials used in this study, and that (2) German-learning infants weigh these two kinds of cues differently depending on age and/or language experience. We observed that, unlike English-learning infants, 6-month-old infants relied more strongly on prosodic cues. Nine-month-olds do not show any preference for either of the cues in the word segmentation task. From the present results it remains unclear whether the ability to use prosodic cues for word segmentation relates to later language vocabulary. We speculate that prosody provides infants with their first window into the specific acoustic regularities in the signal, which enables them to master the specific stress pattern of German rapidly. Our findings are a step forward in understanding the early impact of native prosody compared to statistical learning in early word segmentation.}, language = {en} } @phdthesis{LozadaGobilard2019, author = {Lozada Gobilard, Sissi Donna}, title = {From genes to communities: Assessing plant diversity and connectivity in kettle holes as metaecosystems in agricultural landscapes}, doi = {10.25932/publishup-43768}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437684}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 147}, year = {2019}, abstract = {How species assemble from a regional pool into local metacommunities and how they colonize and coexist over time and space is essential to understanding how communities respond to their environment, including abiotic and biotic factors. In highly disturbed landscapes, connectivity of isolated habitat patches is essential to maintain biodiversity and the functioning of entire ecosystems. In northeast Germany, small water bodies called kettle holes occur at high density and are good systems to study metacommunities due to their condition as "aquatic islands" suitable for hygrophilous species, surrounded by an unsuitable matrix of crop fields. The main objective of this thesis was to infer the main ecological processes shaping plant communities and their response to the environment from biodiversity patterns and key life-history traits involved in connectivity, using ecological and genetic approaches, and to provide first insights into the role of kettle holes harboring wild-bee species as important mobile linkers connecting plant communities in this insular system. At the community level, I compared plant diversity patterns and trait composition in ephemeral vs. permanent kettle holes. My results showed that the types of kettle holes act as environmental filters shaping plant diversity, community composition and trait distribution, suggesting species sorting and niche processes in both types of kettle holes. At the population level, I further analyzed the role of dispersal and reproductive strategies of four selected species occurring in permanent kettle holes. Using microsatellites, I found that the breeding system (degree of clonality) is the main factor shaping genetic diversity and genetic divergence. However, higher gene flow and lower genetic differentiation among populations were also found in wind- compared to insect-pollinated species, suggesting that dispersal mechanisms also played a role in gene flow and connectivity. For most flowering plants, pollinators play an important role connecting communities.
Therefore, as a first insight into the potential mobile linkers of these plant communities, I investigated the diversity of wild bees occurring in these kettle holes. My main results showed that local habitat quality (flower resources) had a positive effect on bee diversity, while habitat heterogeneity (number of natural landscape elements within 100-300 m of the kettle holes) was negatively correlated. This thesis ranges from gene flow at the individual and population level to plant community assembly. My results showed how patterns of biodiversity, dispersal and reproduction strategies in plant populations and communities can be used to infer ecological processes. In addition, I showed the importance of life-history traits and the relationship between species and their abiotic and biotic interactions. Furthermore, I included a different level of mobile linkers (pollinators) for a better understanding of another level of the system. This integration is essential to understand how communities respond to their surrounding environment and how disturbances such as agriculture, land use and climate change might affect them. I highlight the need to integrate many scientific areas, ranging from genes to ecosystems at different spatiotemporal scales, for a better understanding, management and conservation of our ecosystems.}, language = {en} } @phdthesis{Brase2019, author = {Brase, Alexa Kristin}, title = {Spiele um Studium und Lehre? Zur mikropolitischen Nutzung von Qualit{\"a}tsmanagementsystemen an Hochschulen in Deutschland}, doi = {10.25932/publishup-43737}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437377}, school = {Universit{\"a}t Potsdam}, pages = {IX, 228}, year = {2019}, abstract = {Academic freedom is a fundamental right whose meaning and interpretation repeatedly give rise to discussion in the course of reforms of the higher education system, not only in the judiciary but also within academia itself, as has been the case with the introduction of so-called quality management of studying and teaching at German higher education institutions. This dissertation presents the results of an empirical study that contributes to this discussion with a sociological examination of the quality management of different higher education institutions. Based on the premise that the course and consequences of an organizational innovation can only be understood if the organization members' everyday handling of the new structures and processes is included in the analysis, the study starts from the question of how actors at German higher education institutions use the quality management systems of their organizations.
The qualitative content analysis of 26 guided interviews with vice-rectors, quality management staff, and deans of studies at nine higher education institutions shows that the strategies of the actor groups at the institutions, in interplay with structural aspects, give rise to different dynamics that carry implications for the freedom of teaching: while the autonomy of teaching staff is supported by quality management at some institutions, at other institutions both autonomy and responsibility for studying and teaching are the subject of ongoing conflicts that also encompass quality management.}, language = {de} } @phdthesis{Numberger2019, author = {Numberger, Daniela}, title = {Urban wastewater and lakes as habitats for bacteria and potential vectors for pathogens}, doi = {10.25932/publishup-43709}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437095}, school = {Universit{\"a}t Potsdam}, pages = {VI, 130}, year = {2019}, abstract = {Water is vital to life and thus an essential resource. However, our freshwater resources are limited, and their preservation is therefore particularly important. Pollution with chemicals and pathogens, which accompanies a growing population and increasing urbanization, degrades the quality of our freshwater. Moreover, water can serve as a transmission vector for pathogens and thus cause waterborne diseases. Within the interdisciplinary research group III - "Water", the Leibniz Research Alliance INFECTIONS'21 investigated water bodies as a central hub for pathogens. The focus was on Clostridioides difficile and avian influenza A viruses, which are assumed to be shed into water bodies. A further aim was to characterize the bacterial communities of a wastewater treatment plant of the German capital Berlin in order to subsequently assess the potential health risk. Bacterial communities of the raw and clarified water from the treatment plant differed significantly from each other. The proportion of intestinal/fecal bacteria was relatively low, and potential intestinal pathogens were largely removed from the raw water. A potential health risk was, however, identified from potentially pathogenic Legionella such as L. lytica, whose relative abundance was higher in the clarified water than in the raw water. In addition, three C. difficile isolates were obtained and sequenced from the raw water of the treatment plant and from an urban bathing lake in Berlin (Weisser See). The two isolates from the treatment plant carry no toxin genes, whereas the isolate from the lake possesses toxin genes. All three isolates are very closely related to human strains, indicating a potential, albeit sporadic, health risk. (Avian) influenza A viruses were detected by PCR in 38.8\% of the investigated sediment samples, but virus isolation failed. An experiment with inoculated water and sediment samples showed that a relatively high virus concentration is required for isolation from sediment samples. In water samples, however, a low titer of influenza A viruses is sufficient to cause an infection.
It was also found that "Madin-Darby Canine Kidney (MDCK)" cell cultures are better suited than embryonated chicken eggs for isolating influenza A viruses from sediment. In summary, this work has revealed possible health risks, for example from Legionella in the investigated Berlin wastewater treatment plant, whose relative abundance is higher in clarified wastewater than in raw water. Furthermore, it indicates that wastewater and water bodies can serve as reservoirs and vectors for pathogenic organisms, even for non-typical waterborne pathogens such as C. difficile.}, language = {en} } @phdthesis{Thater2019, author = {Thater, Sabine}, title = {The interplay between supermassive black holes and their host galaxies}, doi = {10.25932/publishup-43757}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437570}, school = {Universit{\"a}t Potsdam}, pages = {iv, 186}, year = {2019}, abstract = {Supermassive black holes reside in the hearts of almost all massive galaxies. Their evolutionary path seems to be strongly linked to the evolution of their host galaxies, as implied by several empirical relations between the black hole mass (M_BH) and different host galaxy properties. The physical driver of this co-evolution is, however, still not understood. More mass measurements over homogeneous samples and a detailed understanding of systematic uncertainties are required to fathom the origin of the scaling relations. In this thesis, I present the mass estimates of supermassive black holes in the nuclei of one late-type and thirteen early-type galaxies. Our SMASHING sample extends from the intermediate to the massive galaxy mass regime and was selected to fill gaps in the number of galaxies along the scaling relations. All galaxies were observed at high spatial resolution, making use of the adaptive-optics mode of integral field unit (IFU) instruments on state-of-the-art telescopes (SINFONI, NIFS, MUSE). I extracted the stellar kinematics from these observations and constructed dynamical Jeans and Schwarzschild models to estimate the mass of the central black holes robustly. My new mass estimates increase the number of early-type galaxies with measured black hole masses by 15\%. The seven measured galaxies with nuclear light deficits ('cores') augment the sample of cored galaxies with measured black holes by 40\%. Besides determining massive black hole masses, evaluating the accuracy of black hole masses is crucial for understanding the intrinsic scatter of the black hole-host galaxy scaling relations. I tested various sources of systematic uncertainty on my derived mass estimates. The M_BH estimate of the single late-type galaxy of the sample yielded an upper limit, which I could constrain very robustly. I tested the effects of dust, mass-to-light ratio (M/L) variation, and dark matter on my measured M_BH. Based on these tests, the typically assumed constant M/L ratio can be an adequate assumption to account for the small amounts of dark matter in the center of that galaxy. I also tested the effect of a variable M/L on the M_BH measurement of a second galaxy. By considering stellar M/L variations in the dynamical modeling, the measured M_BH decreased by 30\%. In the future, this test should be performed on additional galaxies to learn how assuming a constant M/L biases the estimated black hole masses.
Based on our upper limit mass measurement, I confirm previous suggestions that resolving the predicted BH sphere of influence is not a strict condition to measure black hole masses. Instead, it is only a rough guide for the detection of the black hole if high-quality, high signal-to-noise IFU data are used for the measurement. About half of our sample consists of massive early-type galaxies which show nuclear surface brightness cores and signs of triaxiality. While these types of galaxies are typically modeled with axisymmetric modeling methods, the effects on M_BH are not well studied yet. The massive galaxies of our presented galaxy sample are well suited to test the effect of different stellar dynamical models on the measured black hole mass in evidently triaxial galaxies. I have compared spherical Jeans and axisymmetric Schwarzschild models and will add triaxial Schwarzschild models to this comparison in the future. The constructed Jeans and Schwarzschild models mostly disagree with each other and cannot reproduce many of the triaxial features of the galaxies (e.g., nuclear sub-components, prolate rotation). The consequence of the axisymmetric-triaxial assumption for the accuracy of M_BH and its impact on the black hole-host galaxy relation needs to be carefully examined in the future. In the sample of galaxies with published M_BH, we find measurements based on different dynamical tracers, requiring different observations, assumptions, and methods. Crucially, different tracers do not always give consistent results. I have used two independent tracers (cold molecular gas and stars) to estimate M_BH in a regular galaxy of our sample. While the two estimates are consistent within their errors, the stellar-based measurement is twice as high as the gas-based one. Similar trends have also been found in the literature. Therefore, a rigorous test of the systematics associated with the different modeling methods is required in the future. I caution that the effects of different tracers (and methods) must be taken into account when discussing the scaling relations. I conclude this thesis by comparing my galaxy sample with the compilation of galaxies with measured black holes from the literature, also adding six SMASHING galaxies, which were published outside of this thesis. None of the SMASHING galaxies deviates significantly from the literature measurements. Their inclusion among the published early-type galaxies causes a change towards a shallower slope of the M_BH-effective velocity dispersion relation, which is mainly driven by the massive galaxies of our sample. More unbiased and homogeneous measurements are needed in the future to determine the shape of the relation and understand its physical origin.}, language = {en} } @phdthesis{Batoulis2019, author = {Batoulis, Kimon}, title = {Sound integration of process and decision models}, doi = {10.25932/publishup-43738}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437386}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 155}, year = {2019}, abstract = {Business process management is an established technique for business organizations to manage and support their processes. Those processes are typically represented by graphical models designed with modeling languages, such as the Business Process Model and Notation (BPMN). Since process models do not only serve the purpose of documentation but are also a basis for the implementation and automation of the processes, they have to satisfy certain correctness requirements.
In this regard, the notion of soundness of workflow nets was developed, which can be applied to BPMN process models in order to verify their correctness. Because the original soundness criteria are very restrictive regarding the behavior of the model, different variants of the soundness notion have been developed for situations in which certain violations are not even harmful. However, all of those notions only consider the control-flow structure of a process model. This poses a problem, taking into account the fact that with the recent release and the ongoing development of the Decision Model and Notation (DMN) standard, an increasing number of process models are complemented by respective decision models. DMN is a dedicated modeling language for decision logic and separates the concerns of process and decision logic into two different models, process and decision models respectively. Hence, this thesis is concerned with the development of decision-aware soundness notions, i.e., notions of soundness that build upon the original soundness ideas for process models, but additionally take into account complementary decision models. Similar to the various notions of workflow net soundness, this thesis investigates different notions of decision soundness that can be applied depending on the desired degree of restrictiveness. Since decision tables are a standardized means of DMN to represent decision logic, this thesis also puts special focus on decision tables, discussing how they can be translated into an unambiguous format and how their possible output values can be efficiently determined. Moreover, a prototypical implementation is described that supports checking a basic version of decision soundness. The decision soundness notions were also empirically evaluated on models from participants of an online course on process and decision modeling as well as from a process management project of a large insurance company. The evaluation demonstrates that violations of decision soundness indeed occur and can be detected with our approach.}, language = {en} } @phdthesis{Veh2019, author = {Veh, Georg}, title = {Outburst floods from moraine-dammed lakes in the Himalayas}, doi = {10.25932/publishup-43607}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436071}, school = {Universit{\"a}t Potsdam}, pages = {124}, year = {2019}, abstract = {The Himalayas are a region that strongly depends on meltwater resources but is also frequently exposed to hazards from their change. This mountain belt hosts the highest mountain peaks on earth, has the largest reserve of ice outside the polar regions, and has been home to a rapidly growing population in recent decades. One source of hazard has attracted scientific research in particular in the past two decades: glacial lake outburst floods (GLOFs) occurred rarely, but mostly with fatal and catastrophic consequences for downstream communities and infrastructure. Such GLOFs can suddenly release several million cubic meters of water from naturally impounded meltwater lakes. Glacial lakes have grown in number and size due to ongoing glacial mass losses in the Himalayas. Theory holds that enhanced meltwater production may increase GLOF frequency, but this notion has never been tested so far. The key challenge in testing this notion is the high altitude of >4000 m at which these lakes occur, making field work impractical. Moreover, flood waves can attenuate rapidly in mountain channels downstream, so that many GLOFs have likely gone unnoticed in past decades.
Our knowledge on GLOFs is hence likely biased towards larger, destructive cases, which challenges a detailed quantification of their frequency and their response to atmospheric warming. Robustly quantifying the magnitude and frequency of GLOFs is essential for risk assessment and management along mountain rivers, not least to implement their return periods in building design codes. Motivated by this limited knowledge of GLOF frequency and hazard, I developed an algorithm that efficiently detects GLOFs from satellite images. In essence, this algorithm classifies land cover in 30 years (~1988-2017) of continuously recorded Landsat images over the Himalayas, and calculates likelihoods for rapidly shrinking water bodies in the stack of land cover images. I visually assessed such detected tell-tale sites for sediment fans in the river channel downstream, a second key diagnostic of GLOFs. Rigorous tests and validation with known cases from roughly 10\% of the Himalayas suggested that this algorithm is robust against frequent image noise, and hence capable to identify previously unknown GLOFs. Extending the search radius to the entire Himalayan mountain range revealed some 22 newly detected GLOFs. I thus more than doubled the existing GLOF count from 16 previously known cases since 1988, and found a dominant cluster of GLOFs in the Central and Eastern Himalayas (Bhutan and Eastern Nepal), compared to the rarer affected ranges in the North. Yet, the total of 38 GLOFs showed no change in the annual frequency, so that the activity of GLOFs per unit glacial lake area has decreased in the past 30 years. I discussed possible drivers for this finding, but left a further attribution to distinct GLOF-triggering mechanisms open to future research. This updated GLOF frequency was the key input for assessing GLOF hazard for the entire Himalayan mountain belt and several subregions. I used standard definitions in flood hydrology, describing hazard as the annual exceedance probability of a given flood peak discharge [m3 s-1] or larger at the breach location. I coupled the empirical frequency of GLOFs per region to simulations of physically plausible peak discharges from all existing ~5,000 lakes in the Himalayas. Using an extreme-value model, I could hence calculate flood return periods. I found that the contemporary 100-year GLOF discharge (the flood level that is reached or exceeded on average once in 100 years) is 20,600+2,200/-2,300 m3 s-1 for the entire Himalayas. Given the spatial and temporal distribution of historic GLOFs, contemporary GLOF hazard is highest in the Eastern Himalayas, and lower for regions with rarer GLOF abundance. I also calculated GLOF hazard for some 9,500 overdeepenings, which could expose and fill with water, if all Himalayan glaciers have melted eventually. Assuming that the current GLOF rate remains unchanged, the 100-year GLOF discharge could double (41,700+5,500/-4,700 m3 s-1), while the regional GLOF hazard may increase largest in the Karakoram. To conclude, these three stages-from GLOF detection, to analysing their frequency and estimating regional GLOF hazard-provide a framework for modern GLOF hazard assessment. 
Given the rapidly growing population, infrastructure, and hydropower projects in the Himalayas, this thesis assists in quantifying the purely climate-driven contribution to hazard and risk from GLOFs.}, language = {en} } @phdthesis{Behm2019, author = {Behm, Laura Vera Johanna}, title = {Thermoresponsive Zellkultursubstrate f{\"u}r zeitlich-r{\"a}umlich gesteuertes Auswachsen neuronaler Zellen}, doi = {10.25932/publishup-43619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436196}, school = {Universit{\"a}t Potsdam}, pages = {VII, 105}, year = {2019}, abstract = {An important goal of neuroscience is to understand the complex and at the same time fascinating, highly ordered interconnection of the neurons in the brain, which underlies neuronal processes such as perception and learning as well as neuropathologies. For improved neuronal cell culture models that allow these processes to be studied in detail, the reconstruction of ordered neuronal connections is therefore urgently required. With surface patterns of cell-attractive and cell-repellent coatings, neuronal cells and their neurites can be patterned in vitro. To control the direction of neuronal connections, the outgrowth of axons towards neighboring cells must be controlled dynamically, for example via a switchable accessibility of the surface. This work investigated whether cell culture substrates coated with thermoresponsive polymers (TRP) are suitable for dynamically controlling the outgrowth of neuronal cells. TRP can be switched via temperature from a cell-repellent to a cell-attractive state, so that the accessibility of the surface for cells can be controlled dynamically. The TRP coating was micropatterned in order to first arrange single or few neuronal cells on the surface and to control the outgrowth of the cells and neurites across defined TRP areas temporally and spatially as a function of temperature. The protocol was established with the neuronal cell line SH-SY5Y and transferred to human induced neurons. The arrangement of the cells could be maintained for up to 7 days when cultivated in the cell-repellent state of the TRP. By switching the TRP to the cell-attractive state, the outgrowth of neurites and cells could be induced temporally and spatially. Immunocytochemical staining and patch-clamp recordings of the neurons demonstrated the easy applicability and cell compatibility of the TRP substrates. A more precise spatial control of cell outgrowth was to be achieved by locally switching the TRP coating. For this purpose, microheater chips with microelectrodes for local Joule heating of the substrate surface were developed. To evaluate the generated temperature profiles, a temperature measurement method was developed and the obtained values were compared with numerically simulated values. The temperature measurement method is based on easily applied sol-gel layers containing the temperature-sensitive fluorescent dye rhodamine B. It enables near-surface temperature measurements in dry and aqueous environments with high spatial and temperature resolution. Numerical simulations of the temperature profiles correlated well with the experimental data.
On this basis, the geometry and material of the microelectrodes could be optimized with respect to locally tightly confined heating. Furthermore, a cell culture chamber and a contact board for the electrical contacting of the microelectrodes were created for cultivating the cells on the microheater chips. The presented results demonstrate for the first time the enormous potential of thermoresponsive cell culture substrates for the temporally and spatially controlled formation of ordered neuronal connections in vitro. In the future, this could enable detailed studies of neuronal information processing or of neuropathologies in relevant human cell models.}, language = {de} } @phdthesis{Angwenyi2019, author = {Angwenyi, David}, title = {Time-continuous state and parameter estimation with application to hyperbolic SPDEs}, doi = {10.25932/publishup-43654}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436542}, school = {Universit{\"a}t Potsdam}, pages = {xi, 101}, year = {2019}, abstract = {Data assimilation has been an active area of research in recent years, owing to its wide utility. At the core of data assimilation are filtering, prediction, and smoothing procedures. Filtering entails the incorporation of measurement information into the model to gain more insight into a given state governed by a noisy state space model. Most natural laws are governed by time-continuous nonlinear models. For the most part, the knowledge available about a model is incomplete, and hence uncertainties are approximated by means of probabilities. Time-continuous filtering therefore holds promise for wider usefulness, for it offers a means of combining noisy measurements with an imperfect model to provide more insight into a given state. The solution to the time-continuous nonlinear Gaussian filtering problem is provided by the Kushner-Stratonovich equation. Unfortunately, the Kushner-Stratonovich equation lacks a closed-form solution. Moreover, the numerical approximations based on Taylor expansion above third order are fraught with computational complications. For this reason, numerical methods based on Monte Carlo methods have been resorted to. Chief among these methods are sequential Monte Carlo methods (or particle filters), for they allow for online assimilation of data. Particle filters are not without challenges: they suffer from particle degeneracy, sample impoverishment, and computational costs arising from resampling. The goal of this thesis is to: i) review the derivation of the Kushner-Stratonovich equation from first principles and its extant numerical approximation methods, ii) study feedback particle filters as a way of avoiding resampling in particle filters, iii) study joint state and parameter estimation in time-continuous settings, and iv) apply the notions studied to linear hyperbolic stochastic differential equations. The interconnection between It{\^o} integrals and stochastic partial differential equations and those of Stratonovich is introduced in anticipation of feedback particle filters. With these ideas, and motivated by the variants of ensemble Kalman-Bucy filters founded on the structure of the innovation process, a feedback particle filter with randomly perturbed innovation is proposed. Moreover, feedback particle filters based on coupling of prediction and analysis measures are proposed. They register a better performance than the bootstrap particle filter at lower ensemble sizes.
We study joint state and parameter estimation, both by means of extended state spaces and by use of dual filters. Feedback particle filters seem to perform well in both cases. Finally, we apply joint state and parameter estimation in the advection and wave equation, whose velocity is spatially varying. Two methods are employed: Metropolis Hastings with filter likelihood and a dual filter comprising of Kalman-Bucy filter and ensemble Kalman-Bucy filter. The former performs better than the latter.}, language = {en} } @phdthesis{Noack2019, author = {Noack, Sebastian}, title = {Poly(lactide)-based amphiphilic block copolymers}, doi = {10.25932/publishup-43616}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436168}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 148}, year = {2019}, abstract = {Due to its bioavailability and (bio)degradability, poly(lactide) (PLA) is an interesting polymer that is already being used as packaging material, surgical seam, and drug delivery system. Dependent on various parameters such as polymer composition, amphiphilicity, sample preparation, and the enantiomeric purity of lactide, PLA in an amphiphilic block copolymer can affect the self-assembly behavior dramatically. However, sizes and shapes of aggregates have a critical effect on the interactions between biological and drug delivery systems, where the general understanding of these polymers and their ability to influence self-assembly is of significant interest in science. The first part of this thesis describes the synthesis and study of a series of linear poly(L-lactide) (PLLA) and poly(D-lactide) (PDLA)-based amphiphilic block copolymers with varying PLA (hydrophobic), and poly(ethylene glycol) (PEG) (hydrophilic) chain lengths and different block copolymer sequences (PEG-PLA and PLA-PEG). The PEG-PLA block copolymers were synthesized by ring-opening polymerization of lactide initiated by a PEG-OH macroinitiator. In contrast, the PLA-PEG block copolymers were produced by a Steglich-esterification of modified PLA with PEG-OH. The aqueous self-assembly at room temperature of the enantiomerically pure PLLA-based block copolymers and their stereocomplexed mixtures was investigated by dynamic light scattering (DLS), transmission electron microscopy (TEM), wide-angle X-ray diffraction (WAXD), and differential scanning calorimetry (DSC). Spherical micelles and worm-like structures were produced, whereby the obtained self-assembled morphologies were affected by the lactide weight fraction in the block copolymer and self-assembly time. The formation of worm-like structures increases with decreasing PLA-chain length and arises from spherical micelles, which become colloidally unstable and undergo an epitaxial fusion with other micelles. As shown by DSC experiments, the crystallinity of the corresponding PLA blocks increases within the self-assembly time. However, the stereocomplexed self-assembled structures behave differently from the parent polymers and result in irregular-shaped clusters of spherical micelles. Additionally, time-dependent self-assembly experiments showed a transformation, from already self-assembled morphologies of different shapes to more compact micelles upon stereocomplexation. 
In the second part of this thesis, with the objective of influencing the self-assembly of PLA-based block copolymers and their stereocomplexes, poly(methyl phosphonate) (PMeP) and poly(isopropyl phosphonate) (PiPrP) were produced by ring-opening polymerization to implement an alternative to the hydrophilic block PEG. Although the 1,8-diazabicyclo[5.4.0]undec-7-ene (DBU)- or 1,5,7-triazabicyclo[4.4.0]dec-5-ene (TBD)-mediated synthesis of the corresponding poly(alkyl phosphonate)s was successful, the polymerization of copolymers with PLA-based precursors (PLA homopolymers and PEG-PLA block copolymers) was not. Transesterification between the poly(phosphonate) and PLA blocks, observed by 1H-NMR spectroscopy, caused a high-field-shifted peak splitting of the methine proton in the PLA polymer chain, with splitting intensities depending on the catalyst used (DBU for PMeP and TBD for PiPrP polymerization). An additionally prepared block copolymer, PiPrP-PLLA, whose polymer sequence was not affected, was finally used for self-assembly experiments with PLA-PEG and PEG-PLA mixtures. This work provides a comprehensive study of the self-assembly behavior of PLA-based block copolymers as influenced by various parameters such as polymer block lengths, self-assembly time, and stereocomplexation of block copolymer mixtures.}, language = {en} } @phdthesis{Jakobs2019, author = {Jakobs, Friedrich}, title = {Dubrovin-rings and their connection to Hughes-free skew fields of fractions}, doi = {10.25932/publishup-43556}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435561}, school = {Universit{\"a}t Potsdam}, pages = {ix, 62}, year = {2019}, abstract = {One method of embedding groups into skew fields was introduced by A. I. Mal'tsev and B. H. Neumann (cf. [18, 19]). If G is an ordered group and F is a skew field, the set F((G)) of formal power series over F in G with well-ordered support forms a skew field into which the group ring F[G] can be embedded. Unfortunately it is not sufficient that G is left-ordered, since F((G)) is only an F-vector space in this case, as there is no natural way to define a multiplication on F((G)). One way to extend the original idea to left-ordered groups is to examine the endomorphism ring of F((G)), as explored by N. I. Dubrovin (cf. [5, 6]). It is possible to embed any crossed product ring F[G; η, σ] into the endomorphism ring of F((G)) such that each non-zero element of F[G; η, σ] defines an automorphism of F((G)) (cf. [5, 10]). Thus, the rational closure of F[G; η, σ] in the endomorphism ring of F((G)), which we will call the Dubrovin-ring of F[G; η, σ], is a potential candidate for a skew field of fractions of F[G; η, σ]. The methods of N. I. Dubrovin made it possible to show that specific classes of groups can be embedded into a skew field. For example, N. I. Dubrovin devised special criteria which are applicable to the universal covering group of SL(2, R). These methods have also been explored by J. Gr{\"a}ter and R. P. Sperner (cf. [10]) as well as N.H. Halimi and T. Ito (cf. [11]). Furthermore, it is of interest to know whether skew fields of fractions are unique. For example, left and right Ore domains have unique skew fields of fractions (cf. [2]). This is not the case in general, as, for example, the free group with 2 generators can be embedded into non-isomorphic skew fields of fractions (cf. [12]). It seems likely that Ore domains are the most general case for which unique skew fields of fractions exist.
One approach to gain uniqueness is to restrict the search to skew fields of fractions with additional properties. I. Hughes has defined skew fields of fractions of crossed product rings F[G; η, σ] with locally indicable G which fulfill a special condition. These are called Hughes-free skew fields of fractions, and I. Hughes has proven that they are unique if they exist [13, 14]. This thesis will connect the ideas of N. I. Dubrovin and I. Hughes. The first chapter contains the basic terminology and concepts used in this thesis. We present methods provided by N. I. Dubrovin such as the complexity of elements in rational closures and special properties of endomorphisms of the vector space of formal power series F((G)). To combine the ideas of N.I. Dubrovin and I. Hughes we introduce Conradian left-ordered groups of maximal rank and examine their connection to locally indicable groups. Furthermore we provide notations for crossed product rings, skew fields of fractions as well as Dubrovin-rings and prove some technical statements which are used in later parts. The second chapter focuses on Hughes-free skew fields of fractions and their connection to Dubrovin-rings. For that purpose we introduce series representations to interpret elements of Hughes-free skew fields of fractions as skew formal Laurent series. This allows us to prove that for Conradian left-ordered groups G of maximal rank the statement "F[G; η, σ] has a Hughes-free skew field of fractions" implies "The Dubrovin ring of F[G; η, σ] is a skew field". We will also prove the converse and apply the results to give a new proof of Theorem 1 in [13]. Furthermore we will show how to extend injective ring homomorphisms of some crossed product rings to their Hughes-free skew fields of fractions. Finally, we will be able to answer the open question of whether Hughes-free skew fields are strongly Hughes-free (cf. [17, page 53]).}, language = {en} } @phdthesis{Jantzen2019, author = {Jantzen, Friederike}, title = {Genetic basis and adaptive significance of repeated scent loss in selfing Capsella species}, doi = {10.25932/publishup-43525}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435253}, school = {Universit{\"a}t Potsdam}, pages = {124}, year = {2019}, abstract = {Floral scent is an important way for plants to communicate with insects, but scent emission has been lost or strongly reduced during the transition from pollinator-mediated outbreeding to selfing. The shift from outcrossing to selfing is not only accompanied by scent loss, but also by a reduction in other pollinator-attracting traits like petal size, and has been observed multiple times among angiosperms. These changes are summarized by the term selfing syndrome and represent one of the most prominent examples of convergent evolution within the plant kingdom. In this work the genus Capsella was used as a model to study convergent evolution in two closely related selfers with separate transitions to self-fertilization. Compared to their outbreeding ancestor C. grandiflora, the emission of benzaldehyde as the main compound of floral scent is lacking or strongly reduced in the selfing species C. rubella and C. orientalis. In C. rubella the loss of benzaldehyde was caused by mutations to cinnamate:CoA ligase CNL1, but the biochemical basis and evolutionary history of this loss remained unknown, together with the genetic basis of scent loss in C. orientalis.
Here, a combination of plant transformations, in vitro enzyme assays, population genetics and quantitative genetics has been used to address these questions. The results indicate that CNL1 has been inactivated twice independently by point mutations in C. rubella, leading to a loss of benzaldehyde emission. Both inactivated haplotypes can be found around the Mediterranean Sea, indicating that they arose before the species' geographical spread. This study confirmed CNL1 as a hotspot for mutations that eliminate benzaldehyde emission, as has been suggested by previous studies. In contrast to these findings, CNL1 in C. orientalis remains active. To test whether similar mechanisms underlie the convergent evolution of scent loss in C. orientalis, a QTL mapping approach was used, and the results suggest that this closely related species followed a different evolutionary route to reduce floral scent, possibly reflecting that the convergent evolution of floral scent is driven by ecological rather than genetic factors. In parallel with studying the genetic basis of repeated scent loss, a method for testing the adaptive value of individual selfing syndrome traits was established. The established method allows estimating outcrossing rates with a high throughput of samples and successfully detects insect-mediated outcrossing events, providing major advantages regarding time and effort compared to other approaches. It can be applied to correlate outcrossing rates with differences in individual traits by using quasi-isogenic lines, as demonstrated here, or with environmental or morphological parameters. Convergent evolution can be observed not only for scent loss in Capsella but also for the morphological evolution of petal size. Previous studies detected several QTLs underlying the petal size reduction in C. orientalis and C. rubella, some of them shared among both species. One shared QTL is PAQTL1, which might map to NUBBIN, a growth factor. To better understand the morphological evolution and genetic basis of petal size reduction, this QTL was studied. Mapping this QTL to a gene might identify another example of a hotspot gene, in this case for the convergent evolution of petal size.}, language = {en} } @phdthesis{Trautwein2019, author = {Trautwein, Jutta}, title = {The Mental lexicon in acquisition}, doi = {10.25932/publishup-43431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434314}, school = {Universit{\"a}t Potsdam}, pages = {IV, 177}, year = {2019}, abstract = {The individual's mental lexicon comprises all known words as well as related information on semantics, orthography and phonology. Moreover, entries connect due to similarities in these language domains, building a large network structure. Access to lexical information is crucial for the processing of words and sentences. Thus, a lack of information inhibits retrieval and can cause language processing difficulties. Hence, the composition of the mental lexicon is essential for language skills and its assessment is a central topic of linguistic and educational research. In early childhood, measurement of the mental lexicon is uncomplicated, for example through parental questionnaires or the analysis of speech samples. However, with growing content the measurement becomes more challenging: with more and more words in the mental lexicon, the inclusion of all possible known words into a test or questionnaire becomes impossible. That is why there is a lack of methods to assess the mental lexicon of school children and adults.
For the same reason, there are only a few findings on the course of lexical development during the school years as well as on its specific effect on other language skills. This dissertation is intended to close this gap by pursuing two major goals: First, I wanted to develop a method to assess lexical features, namely lexicon size and lexical structure, for children of different age groups. Second, I aimed to describe the results of this method in terms of the lexical development of size and structure. Findings were intended to help in understanding the mechanisms of lexical acquisition and to inform theories of vocabulary growth. The approach is based on the dictionary method, where a sample of words out of a dictionary is tested and the results are projected onto the whole dictionary to determine an individual's lexicon size. In the present study, the childLex corpus, a written language corpus for children in German, served as the basis for lexicon size estimation. The corpus is assumed to comprise all words children attending primary school could know. Testing a sample of words out of the corpus enables projection of the results onto the whole corpus. For this purpose, a vocabulary test based on the corpus was developed. Afterwards, the test performance of virtual participants was simulated by drawing different lexicon sizes from the corpus and checking whether the test items were included in the lexicon or not. This allowed determination of the relation between test performance and total lexicon size, which could thus be transferred to a sample of real participants. Besides lexicon size, lexical content could be approximated with this approach and analyzed in terms of lexical structure. To pursue the presented aims and establish the sampling method, I conducted three consecutive studies. Study 1 includes the development of a vocabulary test based on the childLex corpus. The testing was based on the yes/no format and included three versions for different age groups. The validation, grounded on the Rasch model, shows that it is a valid instrument to measure vocabulary for primary school children in German. In Study 2, I established the method to estimate lexicon sizes and present results on lexical development during primary school. Plausible results demonstrate that lexical growth follows a quadratic function, starting with about 6,000 words at the beginning of school and reaching about 73,000 words on average for young adults. Moreover, the study revealed large interindividual differences. Study 3 focused on the analysis of network structures and their development in the mental lexicon due to orthographic similarities. It demonstrates that the networks possess small-world characteristics and decrease in interconnectivity with age. Taken together, this dissertation provides an innovative approach for the assessment and description of the development of the mental lexicon from primary school onwards. The studies provide new results on lexical acquisition in different age groups that were missing before. They impressively show the importance of this period and display the existence of extensive interindividual differences in lexical development. One central aim of future research must be to address the causes and prevention of these differences. In addition, the application of the method for further research (e.g. the adaptation for other target groups) and teaching purposes (e.g.
adaptation of texts for different target groups) appears to be promising.}, language = {en} } @phdthesis{Otte2019, author = {Otte, Fabian}, title = {C-Arylglykoside und Chalkone}, doi = {10.25932/publishup-43430}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434305}, school = {Universit{\"a}t Potsdam}, pages = {XI, 201}, year = {2019}, abstract = {In the still ongoing era of scientific medicine, a broad spectrum of active compounds for the treatment of diverse diseases has been assembled. Nevertheless, synthetic organic chemistry has taken on the task of extending this spectrum by new or known routes and for various reasons. On the one hand, the natural occurrence of certain compounds is often limited, so that synthetic methods increasingly take the place of less sustainable extraction from natural sources. On the other hand, derivatization and modification of an active compound can increase its physiological effect or bioavailability. In this work, several representatives of the well-known compound classes of C-aryl glycosides and chalcones were synthesized via the key step of the palladium-catalyzed MATSUDA-HECK reaction. For the C-aryl glycosides, unsaturated carbohydrates (glycals) were first prepared via a ruthenium-catalyzed cyclization reaction. These were subsequently reacted with differently substituted diazonium salts in the above-mentioned palladium-catalyzed coupling reaction. Evaluation of the analytical data showed that the trans diastereomers were formed in all cases. It could then be shown that the double bonds of these compounds can be functionalized by hydrogenation, dihydroxylation or epoxidation. In this way, among others, a compound similar to the diabetes drug Dapagliflozin could be prepared. In the second part of the work, aryl allyl chromanones were prepared by the MATSUDA-HECK reaction of various 8-allylchromanones with diazonium salts. It was observed that a MOM protecting group in the 7-position of the molecules suppresses the formation of product mixtures, so that only one of the possible compounds is formed in each case. The position of the double bond could be localized by 2D NMR studies. In cooperation with theoretical chemistry, calculations were to be used to investigate how the observed compounds are formed; however, due to an interaction occurring within the molecule, no definitive statement could be made. Subsequently, the obtained compounds were to be converted into chalcones by allylic oxidation. The ruthenium-catalyzed methods, among others, proved unsuitable.
However, a metal-free, microwave-assisted method was successfully tested, so that the preparation of several representatives of this physiologically active class of compounds was achieved.}, language = {de} } @phdthesis{Walczak2019, author = {Walczak, Ralf}, title = {Molecular design of nitrogen-doped nanoporous noble carbon materials for gas adsorption}, doi = {10.25932/publishup-43524}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435241}, school = {Universit{\"a}t Potsdam}, pages = {II, 155}, year = {2019}, abstract = {In modern societies, a steadily rising energy demand leads to the increasing consumption of fossil fuels such as coal, oil, and gas. The combustion of these carbon-based fuels inevitably releases greenhouse gases, above all CO2. CO2 capture directly at combustion plants or directly from the air, together with the regulation of CO2-producing energy sectors (e.g., cooling systems), can reduce CO2 emissions. However, particularly in CO2 capture, the low CO2 concentrations and the uptake of competing gases lead to low CO2 capacities and selectivities. The interplay of the guest molecules with porous materials is essential here. Porous carbon materials possess attractive properties, among them electrical conductivity, tunable porosity, and chemical and thermal stability. However, the low polarizability of these materials results in a low affinity towards polar molecules (e.g., CO2, H2O, or NH3). This affinity can be increased by the incorporation of nitrogen. Such materials are often "nobler" than pure carbons, meaning that they tend to act as oxidants rather than being oxidized themselves. The challenge lies in incorporating a high and uniformly distributed nitrogen content into the carbon framework. The objective of this dissertation is to explore new synthesis routes for nitrogen-doped noble carbon materials and to develop a fundamental understanding of their application in gas adsorption and electrochemical energy storage. A template-free synthesis of nitrogen-rich, noble, and microporous carbon materials was developed by direct condensation of a nitrogen-rich organic molecule as precursor. This yielded materials with high adsorption capacities for H2O and CO2 at low concentrations and moderate CO2/N2 selectivities. To improve the CO2/N2 selectivities, the molecular structure and porosity of the carbon materials were controlled by adjusting the degree of condensation. These materials exhibit molecular sieving of CO2 over N2, which leads to outstanding CO2/N2 selectivities. The ultrahydrophilic character of the pore surfaces and the small micropores of these carbon materials enable fundamental investigations of the interactions with molecules that are more polar than CO2, namely H2O and NH3. A further series of nitrogen-doped carbon materials was synthesized by condensation of a conjugated microporous polymer, and its structural features were investigated as an anode material for sodium-ion batteries.
Diese Dissertation leistet einen Beitrag zur Erforschung stickstoffdotierter Kohlenstoffmaterialien und deren Wechselwirkungen mit verschiedenen Gastmolek{\"u}len.}, language = {en} } @phdthesis{Kagel2019, author = {Kagel, Heike}, title = {Light-induced pH cycle}, doi = {10.25932/publishup-43435}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434353}, school = {Universit{\"a}t Potsdam}, pages = {VI, 118}, year = {2019}, abstract = {Background: Many biochemical reactions depend on the pH of their environment and some are strongly accelerated in an acidic environment. A classical approach to control biochemical reactions non-invasively is by changing the temperature. However, if the pH could be controlled by optical means using photo-active chemicals, this would make it possible to accelerate suitable biochemical reactions. Optically switching the pH can be achieved by using photoacids. A photoacid is a molecule with a functional group that releases a proton upon irradiation with a suitable wavelength, acidifying the surrounding aqueous environment. A major goal of this work was to establish a non-invasive method of optically controlling the pH in aqueous solutions, offering the opportunity to enhance the known chemical reactions portfolio. To demonstrate the photo-switchable pH cycling we chose an enzymatic assay using acid phosphatase, which is an enzyme with a strongly pH-dependent activity. Results: In this work we could demonstrate a light-induced, reversible control of the enzymatic activity of acid phosphatase non-invasively. To successfully conduct these experiments, a high-power LED array suitable for a 96-well standard microtiter plate was designed and built, as no such device was commercially available. Heat management and a lateral ventilation system to avoid heat accumulation were established and a stable light intensity was achieved. Different photoacids were characterised and their pH-dependent absorption spectra recorded. By using the reversible photoacid G-acid as a proton donor, the pH can be changed reversibly using high-power UV 365 nm LEDs. To demonstrate the pH cycling, acid phosphatase with hydrolytic activity under acidic conditions was chosen. An assay using the photoacid together with the enzyme was established, which also confirmed that G-acid does not inhibit acid phosphatase. The feasibility of reversibly regulating the enzyme's pH-dependent activity by optical means was demonstrated by controlling the enzymatic activity with light. It was demonstrated that the enzyme activity depends on the light exposure time only. When samples are not illuminated and left in the dark, no enzymatic activity was recorded. The process can be rapidly controlled by simply switching the light on and off and should be applicable to a wide range of enzymes and biochemical reactions. Conclusions: Reversible photoacids offer a light-dependent regulation of pH, making them extremely attractive for miniaturizable, non-invasive and time-resolved control of biochemical reactions. Many enzymes have a sharply pH-dependent activity, so the setup established in this thesis could be used for a versatile enzyme portfolio. The demonstrated photo-switchable strategy could also be used for non-enzymatic assays, greatly facilitating assay establishment. Photoacids have the potential for high-throughput methods and automation. We demonstrated that it is possible to control photoacids using commonly available LEDs, making their use in highly integrated devices and instruments more attractive.
The successfully designed 96-well high-power UV LED array presents an opportunity for general combinatorial analysis in e.g. photochemistry, where a high light intensity is needed for the investigation of various reactions.}, language = {en} } @phdthesis{Korges2019, author = {Korges, Maximilian}, title = {Constraining the hydrology of intrusion-related ore deposits with fluid inclusions and numerical modeling}, doi = {10.25932/publishup-43484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434843}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 99}, year = {2019}, abstract = {Magmatic-hydrothermal fluids are responsible for numerous mineralization types, including porphyry copper and granite-related tin-tungsten (Sn-W) deposits. Ore formation is dependent on various factors, including the pressure and temperature regime of the intrusions, the chemical composition of the magma and hydrothermal fluids, and fluid-rock interaction during the ascent. Fluid inclusions have the potential to provide direct information on the temperature, salinity, pressure and chemical composition of fluids responsible for ore formation. Numerical modeling allows the parametrization of pluton features that cannot be analyzed directly via geological observations. Microthermometry of fluid inclusions from the Zinnwald Sn-W deposit, Erzgebirge, Germany / Czech Republic, provides evidence that the greisen mineralization is associated with a low-salinity (2-10 wt.\% NaCl eq.) fluid with homogenization temperatures between 350°C and 400°C. Quartzes from numerous veins are host to inclusions with the same temperatures and salinities, whereas cassiterite- and wolframite-hosted assemblages show slightly lower temperatures (around 350°C) and higher salinities (ca. 15 wt.\% NaCl eq.). Further, rare quartz samples contained boiling assemblages consisting of coexisting brine and vapor phases. The formation of ore minerals within the greisen is driven by invasive fluid-rock interaction, resulting in the loss of complexing agents (Cl-) and leading to the precipitation of cassiterite. The fluid inclusion record in the veins suggests boiling as the main reason for cassiterite and wolframite mineralization. Ore and coexisting gangue minerals hosted different types of fluid inclusions, with the onset of boiling preserved solely in the ore minerals, emphasizing the importance of microthermometry in ore minerals. Further, the study indicates that boiling as a precipitation mechanism can only occur in mineralization related to shallow intrusions, whereas deeper plutons prevent the fluid from boiling and can therefore form tungsten mineralization in the distal regions. The tin mineralization in the H{\"a}mmerlein deposit, Erzgebirge, Germany, occurs within a skarn horizon and the underlying schist. Cassiterite within the skarn contains highly saline (30-50 wt.\% NaCl eq.) fluid inclusions, with homogenization temperatures up to 500°C, whereas cassiterites from the schist and additional greisen samples contain inclusions of lower salinity (~5 wt.\% NaCl eq.) and temperature (between 350 and 400°C). Inclusions in the gangue minerals (quartz, fluorite) preserve homogenization temperatures below 350°C, and sphalerite showed the lowest homogenization temperatures (ca. 200°C), while all minerals (cassiterite from schist and greisen, gangue minerals and sphalerite) show similar salinity ranges (2-5 wt.\% NaCl eq.). Similar trace element contents and linear trends in the chemistry of the inclusions suggest a common source fluid. 
The inclusion record in the H{\"a}mmerlein deposit documents an early exsolution of hot brines from the underlying granite, which is responsible for the mineralization hosted by the skarn. Cassiterites in schist and greisen mainly form due to fluid-rock interaction at lower temperatures. The low-temperature inclusions documented in the sphalerite mineralization, as well as their generally low trace element composition in comparison to the other minerals, suggest that their formation was induced by mixing with meteoric fluids. Numerical simulations of magma chambers and overlying copper distribution document the importance of incremental growth by sills. We analyzed the cooling behavior at variable injection intervals as well as sill thicknesses. The models suggest that magma accumulation requires volumetric injection rates of at least 4 x 10^-4 km³/y. These injection rates are further needed to form a stable magmatic-hydrothermal fluid plume above the magma chamber to ensure constant copper precipitation and enrichment within a confined location, in order to form high-grade ore shells within a narrow geological timeframe between 50 and 100 kyrs, as suggested for porphyry copper deposits. The highest copper enrichment can be found in regions with steep temperature gradients, typical of regions where the magmatic-hydrothermal fluid meets the cooler ambient fluids.}, language = {en} } @phdthesis{Laudan2019, author = {Laudan, Jonas}, title = {Changing susceptibility of flood-prone residents in Germany}, doi = {10.25932/publishup-43442}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434421}, school = {Universit{\"a}t Potsdam}, pages = {113}, year = {2019}, abstract = {Floods are among the most costly natural hazards that affect Europe and Germany, demanding a continuous adaptation of flood risk management. While social and economic development in recent years altered the flood risk patterns mainly with regard to an increase in flood exposure, different flood events are further expected to increase in frequency and severity in certain European regions due to climate change. As a result of recent major flood events in Germany, German flood risk management shifted to more integrated approaches that include private precaution and preparation to reduce the damage to exposed assets. Yet, detailed insights into the preparedness decisions of flood-prone households remain scarce, especially in connection with mental impacts and individual coping strategies after being affected by different flood types. This thesis aims to gain insights into flash floods as a costly hazard in certain German regions and compares their damage-driving factors to those of river floods. Furthermore, psychological impacts as well as the effects on coping and mitigation behaviour of flood-affected households are assessed. In this context, psychological models such as the Protection Motivation Theory (PMT) and methods such as regressions and Bayesian statistics are used to evaluate factors influencing mental coping after an event and to identify psychological variables that are connected to intended private flood mitigation. The database consists of surveys that were conducted among affected households after major river floods in 2013 and flash floods in 2016. 
The main conclusions that can be drawn from this thesis reveal that the damage patterns and damage-driving factors of strong flash floods differ significantly from those of river floods due to a rapid flow origination process, higher flow velocities and flow forces. However, the mental coping of people who have been affected by flood events appears to be weakly influenced by the flood type, yet shows a clear relation to event severity: frequent thinking about the respective event is pronounced and also connected to a higher mitigation motivation. Mental coping and preparation after floods are further influenced by good information provision and a social environment that encourages a positive attitude towards private mitigation. As an overall recommendation, approaches for integrated flood risk management in Germany should be followed that also take flash floods into account and consider the psychological characteristics of affected households to support and promote private flood mitigation. Targeted information campaigns that address coping options and discuss current flood risks are important to better prepare for future flood hazards in Germany.}, language = {en} } @phdthesis{Krejca2019, author = {Krejca, Martin Stefan}, title = {Theoretical analyses of univariate estimation-of-distribution algorithms}, doi = {10.25932/publishup-43487}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434870}, school = {Universit{\"a}t Potsdam}, pages = {xii, 243}, year = {2019}, abstract = {Optimization is a core part of technological advancement and is usually heavily aided by computers. However, since many optimization problems are hard, it is unrealistic to expect an optimal solution within reasonable time. Hence, heuristics are employed, that is, computer programs that try to produce solutions of high quality quickly. One special class is that of estimation-of-distribution algorithms (EDAs), which are characterized by maintaining a probabilistic model over the problem domain, which they evolve over time. In an iterative fashion, an EDA uses its model in order to generate a set of solutions, which it then uses to refine the model such that the probability of producing good solutions is increased. In this thesis, we theoretically analyze the class of univariate EDAs over the Boolean domain, that is, over the space of all length-n bit strings. In this setting, the probabilistic model of a univariate EDA consists of an n-dimensional probability vector where each component denotes the probability to sample a 1 for that position in order to generate a bit string. My contribution follows two main directions: first, we analyze general inherent properties of univariate EDAs. Second, we determine the expected run times of specific EDAs on benchmark functions from theory. In the first part, we characterize when EDAs are unbiased with respect to the problem encoding. We then consider a setting where all solutions look equally good to an EDA, and we show that the probabilistic model of an EDA quickly evolves into an incorrect model if it is always updated such that it does not change in expectation. In the second part, we first show that the algorithms cGA and MMAS-fp are able to efficiently optimize a noisy version of the classical benchmark function OneMax. We perturb the function by adding Gaussian noise with a variance of σ², and we prove that the algorithms are able to generate the true optimum in a time polynomial in σ² and the problem size n. 
For the MMAS-fp, we generalize this result to linear functions. Further, we prove a run time of Ω(n log(n)) for the algorithm UMDA on (unnoisy) OneMax. Last, we introduce a new algorithm that is able to optimize the benchmark functions OneMax and LeadingOnes both in O(n log(n)), which is a novelty for heuristics in the domain we consider.}, language = {en} } @phdthesis{Arf2019, author = {Arf, Shelan Ali}, title = {Women's everyday reality of social insecurity}, doi = {10.25932/publishup-43433}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434333}, school = {Universit{\"a}t Potsdam}, pages = {334}, year = {2019}, abstract = {Since 1980, Iraq has passed through various wars and conflicts, including the Iraq-Iran war, Saddam Hussein's Anfal and Halabja campaigns against the Kurds and the killing campaigns against Shiites in 1986, Saddam Hussein's invasion of Kuwait in August 1990, the Gulf war in 1990, the Iraq war in 2003 and the fall of Saddam, the conflicts and chaos in the transfer of power after the death of Saddam, and the war against ISIS. All these wars left severe impacts on most households in Iraq, on women and children in particular. The consequences of such long wars could be observed in all sectors, including the economic, social, cultural and religious sectors. The social structure, norms and attitudes have been intensely affected. Many women, specifically divorced women, found themselves facing challenging social as well as economic difficulties. Thus, divorced women in Iraqi Kurdistan are the focus of this research. Considering that there is very little empirical research on this topic, a constructivist grounded theory (CGT) methodology is viewed as reliable for developing a comprehensive picture of the everyday life of divorced women in Iraqi Kurdistan. Data were collected in Sulaimani city in Iraqi Kurdistan. The work of Kathy Charmaz was chosen as the main methodological framework of the research, and the main data collection method was individual intensive narrative interviews with divorced women. Women in general, and divorced women in particular, in Iraqi Kurdistan live in a patriarchal society that is passing through many changes due to the above-mentioned wars, among many other factors. This research studies the everyday life of divorced women in such situations and the forms of social insecurity they experience. The social institutions, starting from the family as a very significant institution for women to the governmental and non-governmental institutions that are working to support women, as well as the women's coping strategies, are in focus in this research. The main research argument is that the family plays ambivalent roles in divorced women's lives. On the one side, families are revealed to be an essential source of security for most respondents; on the other side, families also posed many threats and restrictions on those women. This argument is supported by what Suad Joseph called "the paradox of support and suppression". Another important finding is that the state institutions (laws, constitutions, Offices of combating violence against woman and family) support women to some extent and offer them protection from insecurities, but it is clear that the existence of these laws does not stop violence against women in Iraqi Kurdistan; as explained by Pateman, this is because the law/the contract is a sexual-social contract that upholds the sex rights of males and grants them more privileges than females. 
Political instability and tribal social norms also play a major role in influencing the rule of law. It is noteworthy that the analysis of the interviews in this research showed that, despite living in insecurity and facing difficulties, most of the respondents try to find coping strategies to tackle difficult situations and to deal with the violence they face; these strategies include bargaining, compromising or resisting, among others. Different theories are used to explain these coping strategies, such as Kandiyoti's bargaining with patriarchy, which states that women living under certain restraints struggle to find ways and strategies to enhance their situations. The research findings also revealed that the western liberal feminist view of agency is limited, which agrees with Saba Mahmood and what she explained about Muslim women's agency. For my respondents, who are divorced women, agency reveals itself in different ways: in resisting, compromising with, or even obeying the power of male relatives and the normative system in the society. Agency also explains the behavior of women contacting formal state institutions, such as the police or the Offices of combating violence against woman and family, in cases of violence.}, language = {en} } @phdthesis{Sarhan2019, author = {Sarhan, Radwan Mohamed}, title = {Plasmon-driven photocatalytic reactions monitored by surface-enhanced Raman spectroscopy}, doi = {10.25932/publishup-43330}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-433304}, school = {Universit{\"a}t Potsdam}, year = {2019}, abstract = {Plasmonic metal nanostructures can be tuned to efficiently interact with light, converting the photons into energetic charge carriers and heat. Therefore, plasmonic nanoparticles such as gold and silver nanoparticles act as nano-reactors, where the molecules attached to their surfaces benefit from the enhanced electromagnetic field along with the generated energetic charge carriers and heat for possible chemical transformations. Hence, plasmonic chemistry presents metal nanoparticles as a unique playground for chemical reactions on the nanoscale remotely controlled by light. However, defining the elementary concepts behind these reactions represents the main challenge for understanding their mechanism in the context of plasmonically assisted chemistry. Surface-enhanced Raman scattering (SERS) is a powerful technique employing the plasmon-enhanced electromagnetic field, which can be used for probing the vibrational modes of molecules adsorbed on plasmonic nanoparticles. In this cumulative dissertation, I use SERS to probe the dimerization reaction of 4-nitrothiophenol (4-NTP) as a model example of plasmonic chemistry. I first demonstrate that plasmonic nanostructures such as gold nanotriangles and nanoflowers have a high SERS efficiency, as evidenced by probing the vibrations of the rhodamine dye R6G and of 4-NTP. The high signal enhancement enabled the measurement of SERS spectra with short acquisition times, which allows monitoring the kinetics of chemical reactions in real time. To get insight into the reaction mechanism, several time-dependent SERS measurements of 4-NTP have been performed under different laser and temperature conditions. Analysis of the results within a mechanistic framework has shown that the plasmonic heating significantly enhances the reaction rate, while the reaction is probably initiated by the energetic electrons. 
The reaction was shown to be intensity-dependent, where a certain light intensity is required to drive the reaction. Finally, first attempts to scale up the plasmonic catalysis have been performed showing the necessity to achieve the reaction threshold intensity. Meanwhile, the induced heat needs to quickly dissipate from the reaction substrate, since otherwise the reactants and the reaction platform melt. This study might open the way for further work seeking the possibilities to quickly dissipate the plasmonic heat generated during the reaction and therefore, scaling up the plasmonic catalysis.}, language = {en} } @phdthesis{Ramachandran2019, author = {Ramachandran, Varsha}, title = {Massive star evolution, star formation, and feedback at low metallicity}, doi = {10.25932/publishup-43245}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432455}, school = {Universit{\"a}t Potsdam}, pages = {291}, year = {2019}, abstract = {The goal of this thesis is to broaden the empirical basis for a better, comprehensive understanding of massive star evolution, star formation and feedback at low metallicity. Low metallicity massive stars are a key to understand the early universe. Quantitative information on metal-poor massive stars was sparse before. The quantitative spectroscopic studies of massive star populations associated with large-scale ISM structures were not performed at low metallicity before, but are important to investigate star-formation histories and feedback in detail. Much of this work relies on spectroscopic observations with VLT-FLAMES of ~500 OB stars in the Magellanic Clouds. When available, the optical spectroscopy was complemented by UV spectra from the HST, IUE, and FUSE archives. The two representative young stellar populations that have been studied are associated with the superbubble N 206 in the Large Magellanic Cloud (LMC) and with the supergiant shell SMC-SGS 1 in the Wing of the Small Magellanic Cloud (SMC), respectively. We performed spectroscopic analyses of the massive stars using the nonLTE Potsdam Wolf-Rayet (PoWR) model atmosphere code. We estimated the stellar, wind, and feedback parameters of the individual massive stars and established their statistical distributions. The mass-loss rates of N206 OB stars are consistent with theoretical expectations for LMC metallicity. The most massive and youngest stars show nitrogen enrichment at their surface and are found to be slower rotators than the rest of the sample. The N 206 complex has undergone star formation episodes since more than 30 Myr, with a current star formation rate higher than average in the LMC. The spatial age distribution of stars across the complex possibly indicates triggered star formation due to the expansion of the superbubble. Three very massive, young Of stars in the region dominate the ionizing and mechanical feedback among hundreds of other OB stars in the sample. The current stellar wind feedback rate from the two WR stars in the complex is comparable to that released by the whole OB sample. We see only a minor fraction of this stellar wind feedback converted into X-ray emission. In this LMC complex, stellar winds and supernovae equally contribute to the total energy feedback, which eventually powered the central superbubble. However, the total energy input accumulated over the time scale of the superbubble significantly exceeds the observed energy content of the complex. The lack of energy along with the morphology of the complex suggests a leakage of hot gas from the superbubble. 
With a detailed spectroscopic study of massive stars in SMC-SGS 1, we provide the stellar and wind parameters of a large sample of OB stars at low metallicity, including those in the lower mass-range. The stellar rotation velocities show a broad, tentatively bimodal distribution, with Be stars being among the fastest. A few very luminous O stars are found close to the main sequence, while all other, slightly evolved stars obey a strict luminosity limit. Considering additional massive stars in evolved stages, with published parameters and located all over the SMC, essentially confirms this picture. The comparison with single-star evolutionary tracks suggests a dichotomy in the fate of massive stars in the SMC. Only stars with an initial mass below 30 solar masses seem to evolve from the main sequence to the cool side of the HRD to become a red supergiant and to explode as type II-P supernova. In contrast, more massive stars appear to stay always hot and might evolve quasi chemically homogeneously, finally collapsing to relatively massive black holes. However, we find no indication that chemical mixing is correlated with rapid rotation. We measured the key parameters of stellar feedback and established the links between the rates of star formation and supernovae. Our study demonstrates that in metal-poor environments stellar feedback is dominated by core-collapse supernovae in combination with winds and ionizing radiation supplied by a few of the most massive stars. We found indications of the stochastic mode of star formation, where the resulting stellar population is fully capable of producing large-scale structures such as the supergiant shell SMC-SGS 1 in the Wing. The low level of feedback in metal-poor stellar populations allows star formation episodes to persist over long timescales. Our study showcases the importance of quantitative spectroscopy of massive stars with adequate stellar-atmosphere models in order to understand star-formation, evolution, and feedback. The stellar population analyses in the LMC and SMC make us understand that massive stars and their impact can be very different depending on their environment. Obviously, due to their different metallicity, the massive stars in the LMC and the SMC follow different evolutionary paths. Their winds differ significantly, and the key feedback agents are different. As a consequence, the star formation can proceed in different modes.}, language = {en} } @phdthesis{BernaschinaSchuermann2019, author = {Bernaschina Sch{\"u}rmann, Vicente}, title = {{\´A}ngeles que cantan de continuo}, isbn = {978-3-86956-459-3}, issn = {2629-2548}, doi = {10.25932/publishup-42645}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426450}, school = {Universit{\"a}t Potsdam}, pages = {387}, year = {2019}, abstract = {Objeto de esta investigaci{\´o}n es el auge y ca{\´i}da de una legitimaci{\´o}n teol{\´o}gica de la poes{\´i}a que tuvo lugar en el virreinato del Per{\´u} entre fines del siglo XVI y la segunda mitad del siglo XVII. Su punto c{\´u}lmine est{\´a} marcado por el surgimiento de una "Academia Ant{\´a}rtica" en las primeras d{\´e}cadas del siglo XVII, mientras que su fin, se aprecia a fines del mismo siglo, cuando eruditos de las {\´o}rdenes religiosas, especialmente Juan de Espinosa y Medrano en sus textos en defensa de la poes{\´i}a y las ciencias, negaron a la poes{\´i}a cualquier estatuto teol{\´o}gico, sirvi{\´e}ndose sin embargo de ella para escribir sus sermones y textos. 
A partir del auge y ca{\´i}da de esta legitimaci{\´o}n teol{\´o}gica en el virreinato del Per{\´u}, este estudio muestra la existencia de dos movimientos que forman un quiasmo entre una teologizaci{\´o}n de la poes{\´i}a y una poetizaci{\´o}n de la teolog{\´i}a, en cuyo centro velado se encuentra en disputa el saber te{\´o}rico y pr{\´a}ctico de la poes{\´i}a. Lo que est{\´a} en disputa en este sentido no es la poes{\´i}a, entendida como una cumbre de las bellas letras, sino la posesi{\´o}n leg{\´i}tima de un modo de lectura anal{\´o}gico y tipol{\´o}gico del orden del universo, fundado en las Sagradas Escrituras y en la historia de la salvaci{\´o}n, y un modo po{\´e}tico para doctrinar a todos los miembros de la sociedad virreinal en concordancia con aquel modo de lectura.}, language = {es} } @phdthesis{Gabriel2019, author = {Gabriel, Christina}, title = {Entwicklung neuer St{\"a}rkederivate f{\"u}r die Anwendung als wasserbasierte Filmbildner in Farben und Lacken}, doi = {10.25932/publishup-43294}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432942}, school = {Universit{\"a}t Potsdam}, pages = {XVIII, 160, xxxix, x1}, year = {2019}, abstract = {In den letzten Jahrzehnten fand auch in der Beschichtungsindustrie ein Umdenken hin zu umweltfreundlicheren Farben und Lacken statt. Allerdings basieren auch neue L{\"o}sungen meist nicht auf Biopolymeren und in einem noch geringeren Anteil auf wasserbasierten Beschichtungssystemen aus nachwachsenden Rohstoffen. Dies stellt den Ankn{\"u}pfungspunkt dieser Arbeit dar, in der untersucht wurde, ob das Biopolymer St{\"a}rke das Potenzial zum wasserbasierten Filmbildner f{\"u}r Farben und Lacke besitzt. Dabei m{\"u}ssen angelehnt an etablierte synthetische Marktprodukte die folgenden Kriterien erf{\"u}llt werden: Die w{\"a}ssrige Dispersion muss mindestens einen 30\%igen Feststoffgehalt haben, bei Raumtemperatur verarbeitet werden k{\"o}nnen und Viskosit{\"a}ten zwischen 10^2-10^3 mPa·s aufweisen. Die finale Beschichtung muss einen geschlossenen Film bilden und sehr gute Haftfestigkeiten zu einer spezifischen Oberfl{\"a}che, in dieser Arbeit Glas, besitzen. Als Grundlage f{\"u}r die Modifizierung der St{\"a}rke wurde eine Kombination von molekularem Abbau und chemischer Funktionalisierung ausgew{\"a}hlt. Da nicht bekannt war, welchen Einfluss die St{\"a}rkeart, die gew{\"a}hlte Abbaureaktion als auch verschiedene Substituenten auf die Dispersionsherstellung und deren Eigenschaften sowie die Beschichtungseigenschaften aus{\"u}ben k{\"o}nnten, wurden die strukturellen Parameter getrennt voneinander untersucht. Das erste Themengebiet beinhaltete den oxidativen Abbau von Kartoffel- und Palerbsenst{\"a}rke mittels des Hypochlorit-Abbaus (OCl-) und des ManOx-Abbaus (H2O2, KMnO4). Mit beiden Abbaureaktionen konnten vergleichbare gewichtsmittlere Molmassen (Mw) von 2·10^5-10^6 g/mol (GPC-MALS) hergestellt werden. Allerdings f{\"u}hrten die gew{\"a}hlten Reaktionsbedingungen beim ManOx-Abbau zur Bildung von Gelpartikeln. Diese lagen im µm-Bereich (DLS und Kryo-REM-Messungen) und hatten zur Folge, dass die ManOx-Proben deutlich erh{\"o}hte Viskosit{\"a}ten (c: 7,5 \%; 9-260 mPa·s) im Vergleich zu den OCl--Proben (4-10 mPa·s) bei scherverd{\"u}nnendem Verhalten besaßen und die Eigenschaften von viskoelastischen Gelen (G' > G'') zeigten. Des Weiteren wiesen sie reduzierte Heißwasserl{\"o}slichkeiten (95 °C, vorrangig: 70-99 \%) auf. 
Der OCl--Abbau f{\"u}hrte zu hydrophileren (Carboxylgruppengehalt bis zu 6,1 \%; ManOx: bis zu 3,1 \%), nach 95 °C-Behandlung vollst{\"a}ndig wasserl{\"o}slichen abgebauten St{\"a}rken, die ein Newtonsches Fließverhalten mit Eigenschaften einer viskoelastischen Fl{\"u}ssigkeit (G'' > G') hatten. Die OCl--Proben konnten im Vergleich zu den ManOx-Produkten (10-20 \%) zu konzentrierteren Dispersionen (20-40 \%) verarbeitet werden, die gleichzeitig die Einschr{\"a}nkung von anwendungsrelevanten Mw auf < 7·10^5 g/mol zuließen (Konzentration sollte > 30 \% sein). Außerdem f{\"u}hrten nur die OCl--Proben der Kartoffelst{\"a}rke zu transparenten (alle anderen waren opak) geschlossenen Beschichtungsfilmen. Somit hebt sich die Kombination von OCl--Abbau und Kartoffelst{\"a}rke mit Hinblick auf die Endanwendung ab. Das zweite Themengebiet umfasste Untersuchungen zum Einfluss von Ester- und Hydroxyalkylether-Substituenten auf Basis einer industriell abgebauten Kartoffelst{\"a}rke (Mw: 1,2·10^5 g/mol) vor allem auf die Dispersionsherstellung, die rheologischen Eigenschaften der Dispersionen und die Beschichtungseigenschaften in Kombination mit Glassubstraten. Dazu wurden Ester und Ether mit DS/MS-Werten von 0,07-0,91 synthetisiert. Die Derivate konnten zu wasserbasierten Dispersionen mit Konzentrationen von 30-45 \% verarbeitet werden, wobei bei hydrophoberen Modifikaten ein Co-L{\"o}semittel, Diethylenglycolmonobutylether (DEGBE), eingesetzt werden musste. Die Feststoffgehalte sanken dabei f{\"u}r beide Derivatklassen vor allem mit zunehmender Alkylkettenl{\"a}nge. Die anwendungsrelevanten Viskosit{\"a}ten (323-1240 mPa·s) stiegen auf Grund von Wechselwirkungen tendenziell mit DS/MS und Alkylkettenl{\"a}nge an. Hinsichtlich der Beschichtungseigenschaften erwiesen sich die Ester vergleichend zu den Ethern als die bevorzugte Substituentenklasse, da nur die Ester geschlossene, defektfreie und mehrheitlich transparente Beschichtungsfilme bildeten, die exzellente bis sehr gute Haftfestigkeiten (ISO Klasse: 0 und 1) auf Glas besaßen. Die Ether bildeten mehrheitlich br{\"u}chige Filme. Basierend auf der Kombination der Ergebnisse aus L{\"o}semittelaustausch, den rheologischen Untersuchungen und zus{\"a}tzlichen Oberfl{\"a}chenspannungsmessungen (30-61 mN/m) konnte geschlossen werden, dass wahrscheinlich fehlende oder schlechte Haftfestigkeiten vorrangig akkumuliertem Wasser in den Beschichtungsfilmen (visuell: tr{\"u}b oder weiß) geschuldet sind, w{\"a}hrend die Br{\"u}chigkeit vermutlich auf Wechselwirkungen (H-Br{\"u}cken Wechselwirkungen, hydrophobe Wechselwirkungen) zwischen den Polymeren zur{\"u}ckgef{\"u}hrt werden kann. Insgesamt scheint die Kombination aus Kartoffelst{\"a}rke basierend auf dem OCl--Abbau mit Mw < 7·10^5 g/mol und einem Estersubstituenten eine gute Wahl f{\"u}r wasserbasierte Dispersionen mit hohen Feststoffkonzentrationen (> 30 \%), guter Filmbildung und exzellenten Haftungen auf Glas zu sein.}, language = {de} } @phdthesis{PerezMedrano2019, author = {P{\´e}rez Medrano, Cuauht{\´e}moc}, title = {Ficci{\´o}n her{\´e}tica}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-454-8}, issn = {2629-2548}, doi = {10.25932/publishup-42449}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-424490}, school = {Universit{\"a}t Potsdam}, pages = {226}, year = {2019}, abstract = {Mit der kubanischen Wirtschaftskrise entstanden neue literarische Gestaltungsprozesse nationaler Identit{\"a}tskonstruktionen. 
Die vorliegende Analyse geht der zeitgen{\"o}ssischen Literatur Kubas nach, in der soziale, kulturelle und politische Paradoxe der post-revolution{\"a}ren Weltanschauung zum Vorschein kommen und der Figur der „Insel" bzw. „Insularit{\"a}t" eine große Bedeutung f{\"u}r die Neukonfiguration der Nation zukommt. Die „Insularit{\"a}t" dr{\"u}ckt in literarischen Texten verschiedene Aneignungen des Raumes und somit auch eine Weltanschauungen aus. Die literarische Figur der „Insel" kann hypothetisch als „visuelles Ph{\"a}nomen" (Ette 2002) konzipiert werden, das durch soziokulturelle Umst{\"a}nde konfiguriert wird. Unterschiedliche postkommunistische Erfahrungen bilden sog. „erlebnisweltliche" Repr{\"a}sentationen und somit literarische und soziale Identit{\"a}tsbilder. Im Verlauf der Zeit werden die Metapher der „Insel" bzw. der „Insularit{\"a}t" anders beschrieben. Erste Untersuchungen der Metapher der „Insel" bzw. der „Insularit{\"a}t" finden sich in den Texten Noche insular (Lezama Lima 2000) und La isla en peso (Pi{\~n}era 1998). Die Metapher wird in den Begrifflichkeiten der „Simulation" und „Dissimulation" (Sarduy 1982) oder auch einer „visuellen Erscheinung" als „Grund-Ekphrasis" (Pimentel 2001) beschrieben. Demgegen{\"u}ber stellen sp{\"a}tere Arbeiten die literarische Figur der „Insel" als ein Instrument der Dekonstruktion der Nationalliteratur Kubas dar (Rojas 1998; Nuez 1998, Benitez Rojo 1998). In zeitgen{\"o}ssischen Analysen wird die Metapher in ihrer jeweiligen Beziehungen zur Repr{\"a}sentation und zum „Lebenswissen" (Ette 2010) herausgearbeitet. Die Repr{\"a}sentation der "Insel" bzw. der "Insularit{\"a}t" wird in der vorliegenden Arbeit anhand des soziokulturellen Kontextes in den letzten dreißig Jahren u.a. in den Romanen von Abilio Estevez Tuyo es el reino (1998), Atilio Caballero La {\´u}ltima playa (1999), Daniel D{\´i}az Mantilla Regreso a Utop{\´i}a (2007) sowie den Erz{\"a}hlungen von Ena Lucia Portela Huracan (2000), Antonio Jos{\´e} Ponte Un nuevo arte de hacer ruinas (2005), Emerio Medina Isla (2005), Orlando Lu{\´i}s Pardo Tokionama (2009), Ahmel Echeverria Isla (2014) und Anesly Negr{\´i}n Isla a mediod{\´i}a (2014) untersucht.}, language = {es} } @phdthesis{Schlenter2019, author = {Schlenter, Judith}, title = {Predictive language processing in late bilinguals}, doi = {10.25932/publishup-43249}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432498}, school = {Universit{\"a}t Potsdam}, pages = {251}, year = {2019}, abstract = {The current thesis examined how second language (L2) speakers of German predict upcoming input during language processing. Early research has shown that the predictive abilities of L2 speakers relative to L1 speakers are limited, resulting in the proposal of the Reduced Ability to Generate Expectations (RAGE) hypothesis. Considering that prediction is assumed to facilitate language processing in L1 speakers and probably plays a role in language learning, the assumption that L1/L2 differences can be explained in terms of different processing mechanisms is a particularly interesting approach. However, results from more recent studies on the predictive processing abilities of L2 speakers have indicated that the claim of the RAGE hypothesis is too broad and that prediction in L2 speakers could be selectively limited. In the current thesis, the RAGE hypothesis was systematically put to the test. 
In this thesis, German L1 and highly proficient late L2 learners of German with Russian as L1 were tested on their predictive use of one or more information sources that exist as cues to sentence interpretation in both languages, to test for selective limits. The results showed that, in line with previous findings, L2 speakers can use the lexical-semantics of verbs to predict the upcoming noun. Here the level of prediction was more systematically controlled for than in previous studies by using verbs that restrict the selection of upcoming nouns to the semantic category animate or inanimate. Hence, prediction in L2 processing is possible. At the same time, this experiment showed that the L2 group was slower/less certain than the L1 group. Unlike previous studies, the experiment on case marking demonstrated that L2 speakers can use this morphosyntactic cue for prediction. Here, the use of case marking was tested by manipulating the word order (Dat > Acc vs. Acc > Dat) in double object constructions after a ditransitive verb. Both the L1 and the L2 group showed a difference between the two word order conditions that emerged within the critical time window for an anticipatory effect, indicating their sensitivity towards case. However, the results for the post-critical time window pointed to a higher uncertainty in the L2 group, who needed more time to integrate incoming information and were more affected by the word order variation than the L1 group, indicating that they relied more on surface-level information. A different cue weighting was also found in the experiment testing whether participants predict upcoming reference based on implicit causality information. Here, an additional child L1 group was tested, who had a lower memory capacity than the adult L2 group, as confirmed by a digit span task conducted with both learner groups. Whereas the children were only slightly delayed compared to the adult L1 group and showed the same effect of condition, the L2 speakers showed an over-reliance on surface-level information (first-mention/subjecthood). Hence, the pattern observed resulted more likely from L1/L2 differences than from resource deficits. The reviewed studies and the experiments conducted show that L2 prediction is affected by a range of factors. While some of the factors can be attributed to more individual differences (e.g., language similarity, slower processing) and can be interpreted by L2 processing accounts assuming that L1 and L2 processing are basically the same, certain limits are better explained by accounts that assume more substantial L1/L2 differences. Crucially, the experimental results demonstrate that the RAGE hypothesis should be refined: Although prediction as a fast-operating mechanism is likely to be affected in L2 speakers, there is no indication that prediction is the dominant source of L1/L2 differences. The results rather demonstrate that L2 speakers show a different weighting of cues and rely more on semantic and surface-level information to predict as well as to integrate incoming information.}, language = {en} } @phdthesis{Sablowski2019, author = {Sablowski, Daniel}, title = {Spectroscopic analysis of the benchmark system Alpha Aurigae}, doi = {10.25932/publishup-43239}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432396}, school = {Universit{\"a}t Potsdam}, pages = {169}, year = {2019}, abstract = {Binaries play an important role in observational and theoretical astrophysics. 
Since the mass and the chemical composition are key ingredients for stellar evolution, high-resolution spectroscopy is an important and necessary tool to derive those parameters with high confidence in binaries. This involves carefully measured orbital motion via the determination of radial velocity (RV) shifts, and sophisticated techniques to derive the abundances of elements within the stellar atmosphere. A technique superior to conventional cross-correlation methods for determining RV shifts is known as spectral disentangling. Hence, a major task of this thesis was the design of a sophisticated software package for this approach. Since secondary effects, such as flux and line-profile variations, imprint changes on the spectrum, the behavior of spectral disentangling under such variability is key to understanding the derived values, to improving them, and to obtaining information about the variability itself. Therefore, the spectral disentangling code presented in this thesis and available to the community combines multiple advantages: separation of the spectra for detailed chemical analysis, derivation of orbital elements, derivation of individual RVs in order to investigate distorted systems (either by third-body interaction or relativistic effects), the suppression of telluric contaminations, the derivation of variability, and the possibility to apply the technique to eclipsing binaries (important for orbital inclination) or in general to systems that undergo flux variations. This code, in combination with the spectral synthesis codes MOOG and SME, was used to derive the carbon 12C/13C isotope ratio (CIR) of the benchmark binary Capella. The observational result is set into the context of theoretical evolution by the use of MESA models and resolves the discrepancy between theory and observations that has existed since the first measurement of Capella's CIR in 1976. The spectral disentangling code has been made available to the community, and its applicability to systems with completely different behavior, Wolf-Rayet stars, has also been investigated and resulted in a published article. Additionally, since this technique relies strongly on data quality, the continued development of scientific instruments to achieve the best observational data is of great importance in observational astrophysics. That is the reason why there has also been effort in astronomical instrumentation during the work on this thesis.}, language = {en} } @phdthesis{Brechun2019, author = {Brechun, Katherine E.}, title = {Development and application of genetic networks for engineering photo-controlled proteins}, doi = {10.25932/publishup-43092}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430924}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 195}, year = {2019}, abstract = {Light-switchable proteins are being used increasingly to understand and manipulate complex molecular systems. The success of this approach has fueled the development of tailored photo-switchable proteins, to enable targeted molecular events to be studied using light. The development of novel photo-switchable tools has to date largely relied on rational design. Complementing this approach with directed evolution would be expected to facilitate these efforts. Directed evolution, however, has been relatively infrequently used to develop photo-switchable proteins due to the challenge presented by high-throughput evaluation of switchable protein activity. 
This thesis describes the development of two genetic circuits that can be used to evaluate libraries of switchable proteins, enabling optimization of both the on- and off-states. A screening system is described, which permits detection of DNA-binding activity based on conditional expression of a fluorescent protein. In addition, a tunable selection system is presented, which allows for the targeted selection of protein-protein interactions of a desired affinity range. This thesis additionally describes the development and characterization of a synthetic protein that was designed to investigate chromophore reconstitution in photoactive yellow protein (PYP), a promising scaffold for engineering photo-controlled protein tools.}, language = {en} } @phdthesis{SchulteOsseili2019, author = {Schulte-Osseili, Christine}, title = {Vom Monomer zum Glykopolymer}, doi = {10.25932/publishup-43216}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432169}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 149}, year = {2019}, abstract = {Glykopolymere sind synthetische und nat{\"u}rlich vorkommende Polymere, die eine Glykaneinheit in der Seitenkette des Polymers tragen. Glykane sind durch die Glykan-Protein-Wechselwirkung verantwortlich f{\"u}r viele biologische Prozesse. Die Beteiligung der Glykanen in diesen biologischen Prozessen erm{\"o}glicht das Imitieren und Analysieren der Wechselwirkungen durch geeignete Modellverbindungen, z.B. der Glykopolymere. Dieses System der Glykan-Protein-Wechselwirkung soll durch die Glykopolymere untersucht und studiert werden, um die spezifische und selektive Bindung der Proteine an die Glykopolymere nachzuweisen. Die Proteine, die in der Lage sind, Kohlenhydratstrukturen selektiv zu binden, werden Lektine genannt. In dieser Dissertationsarbeit wurden verschiedene Glykopolymere synthetisiert. Dabei sollte auf einen effizienten und kosteng{\"u}nstigen Syntheseweg geachtet werden. Verschiedene Glykopolymere wurden durch funktionalisierte Monomere mit verschiedenen Zuckern, wie z.B. Mannose, Laktose, Galaktose oder N-Acetyl-Glukosamin als funktionelle Gruppe, hergestellt. Aus diesen funktionalisierten Glykomonomeren wurden {\"u}ber ATRP und RAFT-Polymerisation Glykopolymere synthetisiert. Die erhaltenen Glykopolymere wurden in Diblockcopolymeren als hydrophiler Block angewendet und die Selbstassemblierung in w{\"a}ssriger L{\"o}sung untersucht. Die Polymere formten in w{\"a}ssriger L{\"o}sung Mizellen, bei denen der Zuckerblock an der Oberfl{\"a}che der Mizellen sitzt. Die Mizellen wurden mit einem hydrophoben Fluoreszenzfarbstoff beladen, wodurch die CMC der Mizellenbildung bestimmt werden konnte. Außerdem wurden die Glykopolymere als Oberfl{\"a}chenbeschichtung {\"u}ber „Grafting from" mit SI-ATRP oder {\"u}ber „Grafting to" auf verschiedene Oberfl{\"a}chen gebunden. Durch die glykopolymerbschichteten Oberfl{\"a}chen konnte die Glykan Protein Wechselwirkung {\"u}ber spektroskopische Messmethoden, wie SPR- und Mikroring Resonatoren untersucht werden. Hierbei wurde die spezifische und selektive Bindung der Lektine an die Glykopolymere nachgewiesen und die Bindungsst{\"a}rke untersucht. Die synthetisierten Glykopolymere k{\"o}nnten durch Austausch der Glykaneinheit f{\"u}r andere Lektine adressierbar werden und damit ein weites Feld an anderen Proteinen erschließen. Die biovertr{\"a}glichen Glykopolymere w{\"a}ren alternativen f{\"u}r den Einsatz in biologischen Prozessen als Transporter von Medikamenten oder Farbstoffe in den K{\"o}rper. 
Außerdem k{\"o}nnten die funktionalisierten Oberfl{\"a}chen in der Diagnostik zum Erkennen von Lektinen eingesetzt werden. Die Glykane, die keine selektive und spezifische Bindung zu Proteinen eingehen, k{\"o}nnten als antiadsorptive Oberfl{\"a}chenbeschichtung z.B. in der Zellbiologie eingesetzt werden.}, language = {de} } @phdthesis{AlHalbouni2019, author = {Al-Halbouni, Djamil}, title = {Photogrammetry and distinct element geomechanical modelling of sinkholes and large-scale karstic depressions}, doi = {10.25932/publishup-43215}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432159}, school = {Universit{\"a}t Potsdam}, pages = {137}, year = {2019}, abstract = {Sinkholes and depressions are typical landforms of karst regions. They pose a considerable natural hazard to infrastructure, agriculture, economy and human life in affected areas worldwide. The physio-chemical processes of sinkholes and depression formation are manifold, ranging from dissolution and material erosion in the subsurface to mechanical subsidence/failure of the overburden. This thesis addresses the mechanisms leading to the development of sinkholes and depressions by using complementary methods: remote sensing, distinct element modelling and near-surface geophysics. In the first part, detailed information about the (hydro)-geological background, ground structures, morphologies and spatio-temporal development of sinkholes and depressions at a very active karst area at the Dead Sea are derived from satellite image analysis, photogrammetry and geologic field surveys. There, clusters of an increasing number of sinkholes have been developing since the 1980s within large-scale depressions and are distributed over different kinds of surface materials: clayey mud, sandy-gravel alluvium and lacustrine evaporites (salt). The morphology of sinkholes differs depending in which material they form: Sinkholes in sandy-gravel alluvium and salt are generally deeper and narrower than sinkholes in the interbedded evaporite and mud deposits. From repeated aerial surveys, collapse precursory features like small-scale subsidence, individual holes and cracks are identified in all materials. The analysis sheds light on the ongoing hazardous subsidence process, which is driven by the base-level fall of the Dead Sea and by the dynamic formation of subsurface water channels. In the second part of this thesis, a novel, 2D distinct element geomechanical modelling approach with the software PFC2D-V5 to simulating individual and multiple cavity growth and sinkhole and large-scale depression development is presented. The approach involves a stepwise material removal technique in void spaces of arbitrarily shaped geometries and is benchmarked by analytical and boundary element method solutions for circular cavities. Simulated compression and tension tests are used to calibrate model parameters with bulk rock properties for the materials of the field site. The simulations show that cavity and sinkhole evolution is controlled by material strength of both overburden and cavity host material, the depth and relative speed of the cavity growth and the developed stress pattern in the subsurface. Major findings are: (1) A progressively deepening differential subrosion with variable growth speed yields a more fragmented stress pattern with stress interaction between the cavities. It favours multiple sinkhole collapses and nesting within large-scale depressions. 
(2) Low-strength materials do not support large cavities in the material removal zone, and subsidence is mainly characterised by gradual sagging into the material removal zone with synclinal bending. (3) High-strength materials support large cavity formation, leading to sinkhole formation by sudden collapse of the overburden. (4) Large-scale depression formation happens either by coalescence of collapsing holes, block-wise brittle failure, or gradual sagging and lateral widening. The distinct element based approach is compared to results from remote sensing and geophysics at the field site. The numerical simulation outcomes are generally in good agreement with derived morphometrics, documented surface and subsurface structures as well as seismic velocities. Complementary findings on the subrosion process are provided from electric and seismic measurements in the area. Based on the novel combination of methods in this thesis, a generic model of karst landform evolution with focus on sinkhole and depression formation is developed. A deepening subrosion system related to preferential flow paths evolves and creates void spaces and subsurface conduits. This subsequently leads to hazardous subsidence, and the formation of sinkholes within large-scale depressions. Finally, a monitoring system for shallow natural hazard phenomena consisting of geodetic and geophysical observations is proposed for similarly affected areas.}, language = {en} } @phdthesis{Voss2019, author = {Voß, Amira}, title = {F{\"u}r eine Reformierung des irakischen internationalen Privatrechts}, doi = {10.25932/publishup-43019}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430190}, school = {Universit{\"a}t Potsdam}, pages = {XIII; 272}, year = {2019}, abstract = {Seit 2003 hat sich das politische Bild des Irak stark ver{\"a}ndert. Dadurch begann der Prozess der Neugestaltung der irakischen Rechtsordnung. Die irakische Verfassung von 2005 legt erstmalig in der Geschichte des Irak den Islam und die Demokratie als zwei nebeneinander zu beachtende Grundprinzipien bei der Gesetzgebung fest. Trotz dieser signifikanten Ver{\"a}nderung im irakischen Rechtssystem und erheblicher Entwicklungen im internationalen Privat- und Zivilverfahrensrecht (IPR/IZVR) im internationalen Vergleich gilt die haupts{\"a}chlich im irakischen Zivilgesetzbuch (ZGB) von 1951 enthaltene gesetzliche Regelung des IPR/IZVR im Irak weiterhin. Deshalb entstand diese Arbeit f{\"u}r eine Reformierung des irakischen IPR/IZVR. Die Arbeit gilt als erste umfassende wissenschaftliche Untersuchung, die sich mit dem jetzigen Inhalt und der zuk{\"u}nftigen Reformierung des irakischen internationalen Privatrecht- und Zivilverfahrensrechts (IPR/IZVR) besch{\"a}ftigt. Die Verfasserin vermittelt einen Gesamt{\"u}berblick {\"u}ber das jetzt geltende irakische internationale Privat- und Zivilverfahrensrecht mit gelegentlicher punktueller und stichwortartiger Heranziehung des deutschen, islamischen, t{\"u}rkischen und tunesischen Rechts, zeigt dessen Schwachstellen auf und unterbreitet entsprechende Reformvorschl{\"a}ge. Wegen der besonderen Bedeutung des internationalen Vertragsrechts f{\"u}r die Wirtschaft im Irak und auch zum Teil f{\"u}r Deutschland gibt die Verfasserin einen genaueren {\"U}berblick {\"u}ber das irakische internationale Vertragsrecht und bekr{\"a}ftigt gleichzeitig dessen Reformbed{\"u}rftigkeit. 
Die Darstellung der wichtigen Entwicklungen im deutsch-europ{\"a}ischen, im traditionellen islamischen Recht und im t{\"u}rkischen und tunesischen internationalen Privat- und Zivilverfahrensrecht im zweiten Kapitel dienen als Grundlage, auf die bei der Reformierung des irakischen IPR/ IZVR zur{\"u}ck gegriffen werden kann. Da die Kenntnisse des islamischen Rechts nicht zwingend zum Rechtsstudium geh{\"o}ren, wird das islamische Recht dazu in Bezug auf seine Entstehung und die Rechtsquellen dargestellt. Am Ende der Arbeit wird ein Entwurf eines f{\"o}deralen Gesetzes zum internationalen Privatrecht im Irak katalogisiert, der sich im Rahmen der irakischen Verfassung gleichzeitig mit dem Islam und der Demokratie vereinbaren l{\"a}sst.}, language = {de} } @phdthesis{Teckentrup2019, author = {Teckentrup, Lisa}, title = {Understanding predator-prey interactions}, doi = {10.25932/publishup-43162}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431624}, school = {Universit{\"a}t Potsdam}, pages = {ix, 133}, year = {2019}, abstract = {Predators can have numerical and behavioral effects on prey animals. While numerical effects are well explored, the impact of behavioral effects is unclear. Furthermore, behavioral effects are generally either analyzed with a focus on single individuals or with a focus on consequences for other trophic levels. Thereby, the impact of fear on the level of prey communities is overlooked, despite potential consequences for conservation and nature management. In order to improve our understanding of predator-prey interactions, an assessment of the consequences of fear in shaping prey community structures is crucial. In this thesis, I evaluated how fear alters prey space use, community structure and composition, focusing on terrestrial mammals. By integrating landscapes of fear in an existing individual-based and spatially-explicit model, I simulated community assembly of prey animals via individual home range formation. The model comprises multiple hierarchical levels from individual home range behavior to patterns of prey community structure and composition. The mechanistic approach of the model allowed for the identification of underlying mechanism driving prey community responses under fear. My results show that fear modified prey space use and community patterns. Under fear, prey animals shifted their home ranges towards safer areas of the landscape. Furthermore, fear decreased the total biomass and the diversity of the prey community and reinforced shifts in community composition towards smaller animals. These effects could be mediated by an increasing availability of refuges in the landscape. Under landscape changes, such as habitat loss and fragmentation, fear intensified negative effects on prey communities. Prey communities in risky environments were subject to a non-proportional diversity loss of up to 30\% if fear was taken into account. Regarding habitat properties, I found that well-connected, large safe patches can reduce the negative consequences of habitat loss and fragmentation on prey communities. Including variation in risk perception between prey animals had consequences on prey space use. Animals with a high risk perception predominantly used safe areas of the landscape, while animals with a low risk perception preferred areas with a high food availability. 
On the community level, prey diversity was higher in heterogeneous landscapes of fear if individuals varied in their risk perception compared to scenarios in which all individuals had the same risk perception. Overall, my findings give a first, comprehensive assessment of the role of fear in shaping prey communities. The linkage between individual home range behavior and patterns at the community level allows for a mechanistic understanding of the underlying processes. My results underline the importance of the structure of the landscape of fear as a key driver of prey community responses, especially if the habitat is threatened by landscape changes. Furthermore, I show that individual landscapes of fear can improve our understanding of the consequences of trait variation for community structures. Regarding conservation and nature management, my results support calls for modern conservation approaches that go beyond single species and address the protection of biotic interactions.}, language = {en} } @phdthesis{RomeroMujalli2019, author = {Romero Mujalli, Daniel}, title = {Ecological modeling of adaptive evolutionary responses to rapid climate change}, doi = {10.25932/publishup-43062}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430627}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2019}, abstract = {A contemporary challenge in Ecology and Evolutionary Biology is to anticipate the fate of populations of organisms in the context of a changing world. Climate change and landscape changes due to anthropic activities have been of major concern in contemporary history. Organisms facing these threats are expected to respond by local adaptation (i.e., genetic changes or phenotypic plasticity) or by shifting their distributional range (migration). However, there are limits to their responses. For example, isolated populations will have more difficulties in developing adaptive innovations by means of genetic changes than interconnected metapopulations. Similarly, the topography of the environment can limit dispersal opportunities for crawling organisms as compared to those that rely on wind. Thus, populations of species with different life history strategies may differ in their ability to cope with changing environmental conditions. However, depending on the taxon, empirical studies investigating organisms' responses to environmental change may become too complex, long, and expensive, in addition to the complications arising from dealing with endangered species. In consequence, eco-evolutionary modeling offers an opportunity to overcome these limitations and complement empirical studies, understand the action and limitations of underlying mechanisms, and project into possible future scenarios. In this work I take a modeling approach and investigate the effect and relative importance of evolutionary mechanisms (including phenotypic plasticity) on the ability for local adaptation of populations with different life strategies experiencing climate change scenarios. For this, I performed a review of the state of the art of eco-evolutionary Individual-Based Models (IBMs) and identified gaps for future research. Then, I used the results from the review to develop an eco-evolutionary individual-based modeling tool to study the role of genetic and plastic mechanisms in promoting local adaptation of populations of organisms with different life strategies experiencing scenarios of climate change and environmental stochasticity.
The environment was simulated through a climate variable (e.g., temperature) defining a phenotypic optimum moving at a given rate of change. The rate of change was varied to simulate different scenarios of climate change (no change, slow, medium, and rapid climate change). Several scenarios of stochastic noise color resembling different climatic conditions were explored. Results show that populations of sexual species will rely mainly on standing genetic variation and phenotypic plasticity for local adaptation. Populations of species with relatively slow growth rates (e.g., large mammals) - especially those of small size - are the most vulnerable, particularly if their plasticity is limited (i.e., specialist species). In addition, whenever organisms from these populations are capable of adaptive plasticity, they can buffer fitness losses in reddish climatic conditions. Likewise, whenever they can adjust their plastic response (e.g., bet-hedging strategy), they will cope with bluish environmental conditions as well. In contrast, life strategies of high fecundity can rely on non-adaptive plasticity for their local adaptation to novel environmental conditions, unless the rate of change is too rapid. A recommended management measure is to guarantee the interconnection of isolated populations into metapopulations, such that the supply of useful genetic variation can be increased, and, at the same time, to provide them with movement opportunities to follow their preferred niche when local adaptation becomes problematic. This is particularly important for bluish and reddish climatic conditions, when the rate of change is slow, or for any climatic condition when the level of stress (rate of change) is relatively high.}, language = {en} } @phdthesis{Rabe2019, author = {Rabe, Sophie}, title = {Wirksamkeit einer telemedizinisch assistierten Bewegungstherapie f{\"u}r die postrehabilitative Versorgung von Patienten mit Knie- oder H{\"u}ft-Totalendoprothese im berufsf{\"a}higen Alter}, doi = {10.25932/publishup-43055}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430556}, school = {Universit{\"a}t Potsdam}, pages = {x, 78}, year = {2019}, abstract = {Einleitung Die Implantation einer Knie- oder H{\"u}ft-Totalendoprothese (TEP) ist einer der h{\"a}ufigsten operativen Eingriffe. Im Anschluss an die Operation und die postoperative Rehabilitation stellt die Bewegungstherapie einen wesentlichen Bestandteil der Behandlung zur Verbesserung der Gelenkfunktion und der Lebensqualit{\"a}t dar. In strukturschwachen Gebieten werden entsprechende Angebote nur in unzureichender Dichte vorgehalten. Zudem zeichnet sich ein fl{\"a}chendeckender Fachkr{\"a}ftemangel im Bereich der Physiotherapie ab. Die Tele-Nachsorge bietet daher einen innovativen Ansatz f{\"u}r die postrehabilitative Versorgung der Patienten. Das Ziel der vorliegenden Untersuchung war die {\"U}berpr{\"u}fung der Wirksamkeit einer interaktiven Tele-Nachsorgeintervention f{\"u}r Patienten mit Knie- oder H{\"u}ft-TEP im Vergleich zur herk{\"o}mmlichen Versorgung (usual care). Dazu wurden die Funktionalit{\"a}t und die berufliche Wiedereingliederung untersucht. Methode Zwischen August 2016 und August 2017 wurden 111 Patienten (54,9 ± 6,8 Jahre, 54,3 \% weiblich) zu Beginn ihrer station{\"a}ren Anschlussheilbehandlung nach Implantation einer Knie- oder H{\"u}ft-TEP in diese randomisierte, kontrollierte, multizentrische Studie eingeschlossen.
Nach Entlassung aus der orthop{\"a}dischen Anschlussrehabilitation (Baseline) f{\"u}hrte die Interventionsgruppe (IG) ein dreimonatiges interaktives Training {\"u}ber ein Telerehabilitationssystem durch. Hierf{\"u}r erstellte ein betreuender Physiotherapeut einen individuellen Trainingsplan aus 38 {\"U}bungen zur Verbesserung der Kraft sowie der posturalen Kontrolle. Zur Anpassung des Trainingsplans {\"u}bermittelte das System dem Physiotherapeuten Daten zur Quantit{\"a}t sowie zur Qualit{\"a}t des Trainings. Die Kontrollgruppe (KG) konnte die herk{\"o}mmlichen Versorgungsangebote nutzen. Zur Beurteilung der Wirksamkeit der Intervention wurde die Differenz der Verbesserung im 6MWT zwischen der IG und der KG nach drei Monaten als prim{\"a}rer Endpunkt definiert. Als sekund{\"a}re Endpunkte wurden die Return-to-Work-Rate sowie die funktionelle Mobilit{\"a}t mittels des Stair Ascend Tests, des Five-Times-Sit-to-Stand Test und des Timed Up and Go Tests untersucht. Weiterhin wurden die gesundheitsbezogene Lebensqualit{\"a}t mit dem Short-Form 36 (SF-36) und die gelenkbezogenen Einschr{\"a}nkungen mit dem Western Ontario and McMaster Universities Osteoarthritis Index (WOMAC) evaluiert. Der prim{\"a}re und die sekund{\"a}ren Endpunkte wurden anhand von baseline-adjustierten Kovarianzanalysen im intention-to-treat-Ansatz ausgewertet. Zus{\"a}tzlich wurde die Teilnahme an Nachsorgeangeboten und die Adh{\"a}renz der Interventionsgruppe an der Tele-Nachsorge erfasst und evaluiert. Ergebnisse Zum Ende der Intervention wiesen beide Gruppen einen statistisch signifikanten Anstieg ihrer 6MWT Strecke auf (p < 0,001). Zu diesem Zeitpunkt legten die Teilnehmer der IG im Mittel 530,8 ± 79,7 m, die der KG 514,2 ± 71,2 m zur{\"u}ck. Dabei betrug die Differenz der Verbesserung der Gehstrecke in der IG 88,3 ± 57,7 m und in der KG 79,6 ± 48,7 m. Damit zeigt der prim{\"a}re Endpunkt keine signifikanten Gruppenunterschiede (p = 0,951). Bez{\"u}glich der beruflichen Wiedereingliederung konnte jedoch eine signifikant h{\"o}here Rate in der IG (64,6 \% versus 46,2 \%; p = 0,014) festgestellt werden. F{\"u}r die sekund{\"a}ren Endpunkte der funktionellen Mobilit{\"a}t, der Lebensqualit{\"a}t und der gelenkbezogenen Beschwerden belegen die Ergebnisse eine Gleichwertigkeit beider Gruppen zum Ende der Intervention. Schlussfolgerung Die telemedizinisch assistierte Bewegungstherapie f{\"u}r Knie- oder H{\"u}ft-TEP Patienten ist der herk{\"o}mmlichen Versorgung zur Nachsorge hinsichtlich der erzielten Verbesserungen der funktionellen Mobilit{\"a}t, der gesundheitsbezogenen Lebensqualit{\"a}t und der gelenkbezogenen Beschwerden gleichwertig. In dieser Patientenpopulation ließen sich klinisch relevante Verbesserungen unabh{\"a}ngig von der Form der Bewegungstherapie erzielen. Im Hinblick auf die berufliche Wiedereingliederung zeigte sich eine signifikant h{\"o}here Rate in der Interventionsgruppe. Die telemedizinisch assistierte Bewegungstherapie scheint eine geeignete Versorgungsform der Nachsorge zu sein, die orts- und zeitunabh{\"a}ngig durchgef{\"u}hrt werden kann und somit den Bed{\"u}rfnissen berufst{\"a}tiger Patienten entgegenkommt und in den Alltag der Patienten integriert werden kann. Die Tele-Nachsorge sollte daher als optionale und komplement{\"a}re Form der postrehabilitativen Nachsorge angeboten werden. 
Auch im Hinblick auf den zunehmenden Fachkr{\"a}ftemangel im Bereich der Physiotherapie und bestehende Versorgungsl{\"u}cken in strukturschwachen Gebieten kann der Einsatz der Tele-Nachsorge innovative und bedarfsgerechte L{\"o}sungsans{\"a}tze bieten.}, language = {de} } @phdthesis{Yan2019, author = {Yan, Runyu}, title = {Nitrogen-doped and porous carbons towards new energy storage mechanisms for supercapacitors with high energy density}, doi = {10.25932/publishup-43141}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431413}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2019}, abstract = {Supercapacitors are electrochemical energy storage devices with rapid charge/discharge rates and long cycle life. Their biggest challenge is the inferior energy density compared to other electrochemical energy storage devices such as batteries. Being the most widespread type of supercapacitor, electrochemical double-layer capacitors (EDLCs) store energy by electrosorption of electrolyte ions on the surface of charged electrodes. As a more recent development, Na-ion capacitors (NICs) are expected to be a more promising approach to tackling the inferior energy density due to their higher-capacity electrodes and larger operating voltage. Charges are stored simultaneously by ion adsorption on the surface of the capacitive-type cathode and via a faradaic process in the battery-type anode. Porous carbon electrodes are of great importance in these devices, but the paramount problems are the lack of facile synthetic routes for high-performance carbons and the lack of a fundamental understanding of the energy storage mechanisms. Therefore, the aim of the present dissertation is to develop novel synthetic methods for (nitrogen-doped) porous carbon materials with superior performance, and to reach a deeper understanding of the energy storage mechanisms of EDLCs and NICs. The first part introduces a novel synthetic method towards hierarchically ordered meso-microporous carbon electrode materials for EDLCs. The large number of micropores and the highly ordered mesopores provide abundant sites for charge storage and efficient electrolyte transport, respectively, giving rise to superior EDLC performance in different electrolytes. More importantly, the controversial energy storage mechanism of EDLCs employing ionic liquid (IL) electrolytes is investigated using a series of porous model carbons as electrodes. The results not only allow conclusions about the relation between porosity and ion transport dynamics, but also deliver deeper insights into the energy storage mechanism of IL-based EDLCs, which is different from the one usually dominating in solvent-based electrolytes leading to compression double-layers. The other part focuses on the anodes of NICs, where the novel synthesis of nitrogen-rich porous carbon electrodes and their sodium storage mechanism are investigated. Free-standing fibrous nitrogen-doped carbon materials are synthesized by electrospinning using the nitrogen-rich monomer (hexaazatriphenylene-hexacarbonitrile, C18N12) as the precursor, followed by condensation at high temperature. These fibers provide superior capacity and desirable charge/discharge rates for sodium storage. This work also allows insights into the sodium storage mechanism in nitrogen-doped carbons. Based on this mechanism, further optimization is done by designing a composite material composed of nitrogen-rich carbon nanoparticles embedded in a conductive carbon matrix for better charge/discharge rates.
The energy density of the assembled NICs significantly exceeds that of common EDLCs while maintaining the high power density and long cycle life.}, language = {en} } @phdthesis{Damour2019, author = {Damour, Jean-Claude}, title = {Hegel und Wittgenstein {\"u}ber den Sinn der Sprache}, doi = {10.25932/publishup-43103}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431036}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2019}, abstract = {The key objectives of this dissertation are to justify the use of dialectic methodology in the realm of the philosophy of language and to conduct a systematic treatment of a limited part of this field. In order to explain and determine this approach, which is found rarely, if ever, in contemporary research, I will begin by referring to two philosophical authors: Hegel and Wittgenstein. Although Hegel and Wittgenstein are, prima facie, two authors who have very little in common, the primary supposition of this dissertation regarding the history of ideas is that Hegel's concept of "Spirit" and Wittgenstein's concept of "Form of life" are nevertheless both approaches to, and results of, a philosophical effort that implies the necessity of solving a sceptical challenge. Wittgenstein actually developed an argument in his Philosophical Investigations that has been described as the "rule-following paradox" and has been considered in the secondary literature (especially by Kripke) as the main tenet of a sceptical argument. Consequently, Wittgenstein's theory of language as developed in the Philosophical Investigations has been interpreted by various authors either as a solution to this scepticism or as a sceptical, or "aporetic", text in itself (Brandom). The first section of my dissertation aims to demonstrate that dealing with this paradox does not constitute a full sceptical argument and can be considered as the first moment of a higher form of sceptical challenge, an antinomy. A full sceptical challenge implies that both the theory corresponding to the unique solution of the paradox, namely the negation of any explicit normativity ("dispositionalism"), and the negation of the principle of this solution can be proved. I therefore attempt to establish an antinomy of the concept of normativity with respect to the rule of language, similar to Kant's exposition of his cosmological antinomy (thesis cum antithesis). The second aim of my dissertation is to show: that Kant's approach to solving his antinomy is ineffective concerning the antinomy of normativity; that this antinomy implies a confrontation with radical scepticism in the sense that we are committed not simply to challenging or reconsidering some theories, but to engaging in a deep revision of our methodology (this in turn entails a deep revision of the current norms of rationality); that the Hegelian dialectic emerges as the solution to such a radical sceptical challenge, as the true solution to the antinomy. A further goal of this dissertation is to use this methodological result to gain new knowledge of language, consisting of two contradictory moments of cognition that are constructively combined: normativity by means of disposition, and normativity by means of explicit rule-following.
The tangible benefit of such a methodological approach is the possibility of building a systematic philosophy of language that enables the establishment of a dialectical deduction of the moments of the concept of language as moments of the concept of the spirit, in other words, of establishing the sense of language. Nonetheless, I must limit myself to an exposition of the doctrine of imagination, which encompasses general semiotics and the system of grammar.}, language = {de} } @phdthesis{Desanois2019, author = {Desanois, Louis}, title = {On the origin of epithermal Sn-Ag-Zn mineralization at the Pirquitas mine, NW Argentina}, doi = {10.25932/publishup-43082}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430822}, school = {Universit{\"a}t Potsdam}, pages = {104}, year = {2019}, abstract = {The Central Andes host large reserves of base and precious metals. In 2017, the region represented an important part of worldwide mining activity. Three principal types of deposits have been identified and studied: 1) porphyry-type deposits extending from central Chile and Argentina to Bolivia and Northern Peru, 2) iron oxide-copper-gold (IOCG) deposits extending from central Peru to central Chile, and 3) epithermal tin polymetallic deposits extending from Southern Peru to Northern Argentina, which compose a large part of the deposits of the Bolivian Tin Belt (BTB). Deposits in the BTB can be divided into two major types: (1) tin-tungsten-zinc pluton-related polymetallic deposits, and (2) tin-silver-lead-zinc epithermal polymetallic vein deposits. Mina Pirquitas is a tin-silver-lead-zinc epithermal polymetallic vein deposit, located in north-west Argentina, that used to be one of the most important tin-silver producing mines in the country. It was interpreted to be part of the BTB, and it shares similar mineral associations with southern pluton-related BTB epithermal deposits. Two major mineralization events related to three pulses of magmatic fluids mixed with meteoric water have been identified. The first event can be divided into two stages: 1) stage I-1, with quartz, pyrite, and cassiterite precipitating from fluids between 233 and 370 °C with salinities between 0 and 7.5 wt\%, corresponding to a first pulse of fluids, and 2) stage I-2, with sphalerite and tin-silver-lead-antimony sulfosalts precipitating from fluids between 213 and 274 °C with salinities up to 10.6 wt\%, corresponding to a new pulse of magmatic fluids in the hydrothermal system. Mineralization event II deposited the richest silver ores at Pirquitas. Event II fluid temperatures and salinities range between 190 and 252 °C and between 0.9 and 4.3 wt\%, respectively. This corresponds to the waning supply of magmatic fluids. Noble gas isotopic compositions and concentrations in ore-hosted fluid inclusions demonstrate a significant contribution of magmatic fluids to the Pirquitas mineralization, although no intrusive rocks are exposed in the mine area. Lead and sulfur isotopic measurements on ore minerals show that Pirquitas shares a similar signature with southern pluton-related polymetallic deposits in the BTB. Furthermore, most of the sulfur isotopic values of sulfide and sulfosalt minerals from Pirquitas fall within the field for sulfur derived from igneous rocks. This suggests that the main contribution of sulfur to the hydrothermal system at Pirquitas is likely magma-derived.
The precise age of the deposit is still unknown, but the results of wolframite dating of 2.9 ± 9.1 Ma and local structural observations suggest that the late mineralization event is younger than 12 Ma.}, language = {en} } @phdthesis{Ehrlich2019, author = {Ehrlich, Elias}, title = {On the role of trade-offs in predator-prey interactions}, doi = {10.25932/publishup-43063}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430631}, school = {Universit{\"a}t Potsdam}, pages = {192}, year = {2019}, abstract = {Predation drives coexistence, evolution and population dynamics of species in food webs, and has strong impacts on related ecosystem functions (e.g. primary production). The effect of predation on these processes largely depends on the trade-offs between functional traits in the predator and prey community. Trade-offs between defence against predation and competitive ability, for example, allow for prey speciation and predator-mediated coexistence of prey species with different strategies (defended or competitive), which may stabilize the overall food web dynamics. While the importance of such trade-offs for coexistence is widely known, we lack an understanding of, and empirical evidence for, how the variety of differently shaped trade-offs at multiple trophic levels affects biodiversity, trait adaptation and biomass dynamics in food webs. Such mechanistic understanding is crucial for predictions and management decisions that aim to maintain biodiversity and the capability of communities to adapt to environmental change, ensuring their persistence. In this dissertation, after a general introduction to predator-prey interactions and trade-offs, I first focus on trade-offs in the prey between qualitatively different types of defence (e.g. camouflage or escape behaviour) and their costs. Using a simple predator-prey model, I show that these different types lead to different patterns of predator-mediated coexistence and population dynamics. In a second step, I elaborate on quantitative aspects of trade-offs and demonstrate that the shape of the trade-off curve in combination with trait-fitness relationships strongly affects competition among different prey types: either specialized species with extreme trait combinations (undefended or completely defended) coexist, or a species with an intermediate defence level dominates. The developed theory on trade-off shapes and coexistence is kept general, allowing for applications beyond defence-competitiveness trade-offs. Thirdly, I test the theory on trade-off shapes against a long-term field data set of phytoplankton from Lake Constance. The measured concave trade-off between defence and growth governs seasonal trait changes of phytoplankton in response to changing grazing pressure by zooplankton, and affects the maintenance of trait variation in the community. In a fourth step, I analyse the interplay of different trade-offs at multiple trophic levels with plankton data from Lake Constance and a corresponding tritrophic food web model. The results show that the trait and biomass dynamics of the three different trophic levels are interrelated in a trophic biomass-trait cascade, leading to unintuitive patterns of trait changes that are reversed in comparison to predictions from bitrophic systems.
Finally, in the general discussion, I extract the main ideas on trade-offs in multitrophic systems, develop a graphical theory on trade-off-based coexistence, discuss the interplay of intra- and interspecific trade-offs, and end with a management-oriented view on the results of the dissertation, describing how food webs may respond to future global changes, given their trade-offs.}, language = {en} } @phdthesis{Liu2019, author = {Liu, Jiabo}, title = {Dynamics of the geomagnetic field during the last glacial}, doi = {10.25932/publishup-42946}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429461}, school = {Universit{\"a}t Potsdam}, pages = {xv, 158}, year = {2019}, abstract = {Geomagnetic paleosecular variations (PSVs) are an expression of geodynamo processes inside the Earth's liquid outer core. These paleomagnetic time series provide insights into the properties of the Earth's magnetic field, from normal behavior with a dominating dipolar geometry, through field crises such as pronounced intensity lows and geomagnetic excursions with a distorted field geometry, to the complete reversal of the dominating dipole contribution. In particular, long-term high-resolution and high-quality PSV time series are needed for properly reconstructing the higher-frequency components in the spectrum of geomagnetic field variations and for a better understanding of the effects of smoothing during the recording of such paleomagnetic records by sedimentary archives. In this doctoral study, full vector paleomagnetic records were derived from 16 sediment cores recovered from the southeastern Black Sea. Age models are based on radiocarbon dating and on correlations of warming/cooling cycles, monitored by high-resolution X-ray fluorescence (XRF) elemental ratios as well as ice-rafted debris (IRD) in Black Sea sediments, to the sequence of 'Dansgaard-Oeschger' (DO) events defined from Greenland ice core oxygen isotope stratigraphy. In order to identify the carriers of magnetization in Black Sea sediments, core MSM33-55-1, recovered from the southeast Black Sea, was subjected to detailed rock magnetic and electron microscopy investigations. The younger part of core MSM33-55-1 has been deposited continuously since 41 ka. Before 17.5 ka, the magnetic minerals were dominated by a mixture of greigite (Fe3S4) and titanomagnetite (Fe3-xTixO4) in samples with SIRM/κLF >10 kAm-1, or exclusively by titanomagnetite in samples with SIRM/κLF ≤10 kAm-1. It was found that greigite is generally present as crustal aggregates in locally reducing micro-environments. During the transition phase from 17.5 ka to 8.3 ka, the dominant magnetic mineral changed from greigite (17.5 to ~10.0 ka) to probably silicate-hosted titanomagnetite (~10.0 to 8.3 ka). After 8.3 ka, the anoxic Black Sea was a favorable environment for the formation of non-magnetic pyrite (FeS2) framboids. To avoid compromising the paleomagnetic data with erroneous directions carried by greigite, data from samples with SIRM/κLF >10 kAm-1, shown by various methods to contain greigite, were removed from the obtained records. Consequently, full vector paleomagnetic records, comprising directional data and relative paleointensity (rPI), were derived only from samples with SIRM/κLF ≤10 kAm-1 from the 16 Black Sea sediment cores. The obtained data sets were used to create a stack covering the time window between 68.9 and 14.5 ka with a temporal resolution between 40 and 100 years, depending on sedimentation rates.
According to the results obtained from Black Sea sediments, the second deepest minimum in relative paleointensity during the past 69 ka occurred at 64.5 ka. The field minimum during MIS 4 is associated with large declination swings beginning about 3 ka before the minimum. While a swing to 50°E is associated with steep inclinations (50-60°) for the coring site at 42°N, the subsequent declination swing to 30°W is associated with shallow inclinations of down to 40°. Nevertheless, these large deviations from the direction of a geocentric axial dipole field (I=61°, D=0°) still cannot be termed 'excursional', since the latitudes of the corresponding VGPs only reach down to 51.5°N (120°E) and 61.5°N (75°W), respectively. However, these VGP positions at opposite sides of the globe are linked with VGP drift rates of up to 0.2° per year in between. These extreme secular variations might be the mid-latitude expression of the Norwegian-Greenland Sea excursion found at several sites much further north in Arctic marine sediments between 69°N and 81°N. At about 34.5 ka, the Mono Lake excursion is evidenced in the stacked Black Sea PSV record by both an rPI minimum and directional shifts. Associated VGPs from the stacked Black Sea data migrated from Alaska, via central Asia and the Tibetan Plateau, to Greenland, performing a clockwise loop. This agrees with data recorded in the Wilson Creek Formation, USA, and in Arctic sediment core PS2644-5 from the Iceland Sea, suggesting a dominant dipole field. On the other hand, the Auckland lava flows, New Zealand, Summer Lake, USA, and the Arctic sediment core from ODP Site-919 yield distinct VGPs located in the central Pacific Ocean due to a presumably non-dipole (multi-pole) field configuration. A directional anomaly at 18.5 ka, associated with pronounced swings in inclination and declination as well as a low in rPI, is probably contemporaneous with the Hilina Pali excursion, originally reported from Hawaiian lava flows. However, virtual geomagnetic poles (VGPs) calculated from Black Sea sediments are not located at latitudes lower than 60°N, which denotes normal, though pronounced, secular variations. During the postulated Hilina Pali excursion, the VGPs calculated from Black Sea data migrated clockwise only along the coasts of the Arctic Ocean, from NE Canada (20.0 ka), via Alaska (18.6 ka) and NE Siberia (18.0 ka), to Svalbard (17.0 ka), then looping clockwise through the Eastern Arctic Ocean. In addition to the Mono Lake and the Norwegian-Greenland Sea excursions, the Laschamp excursion was evidenced in the Black Sea PSV record by the lowest paleointensities at about 41.6 ka and a short-term (~500 years) full reversal centered at 41 ka. These excursions are further evidenced by an abnormal PSV index, though only the Laschamp and the Mono Lake excursions exhibit excursional VGP positions. The stacked Black Sea paleomagnetic record was also converted into one component parallel to the direction expected from a geocentric axial dipole (GAD) and two components perpendicular to it, the latter representing only non-GAD components of the geomagnetic field. The Laschamp and the Norwegian-Greenland Sea excursions are characterized by extremely low GAD components, while the Mono Lake excursion is marked by large non-GAD contributions. Notably, negative values of the GAD component, indicating a fully reversed geomagnetic field, are observed only during the Laschamp excursion.
In summary, this doctoral thesis reconstructed high-resolution and high-fidelity PSV records from SE Black Sea sediments. The obtained record comprises three geomagnetic excursions: the Norwegian-Greenland Sea excursion, the Laschamp excursion, and the Mono Lake excursion. They are characterized by abnormal secular variations of different amplitudes centered at about 64.5 ka, 41.0 ka and 34.5 ka, respectively. In addition, the obtained PSV record from the Black Sea does not provide evidence for the postulated 'Hilina Pali excursion' at about 18.5 ka. In any case, the obtained Black Sea paleomagnetic record, covering field fluctuations from normal secular variations, through excursions, to a short but full reversal, points to a geomagnetic field characterized by a large dynamic range in intensity and a highly variable superposition of dipole and non-dipole contributions from the geodynamo during the past 68.9 to 14.5 ka.}, language = {en} }