@phdthesis{RomeroMujalli2019, author = {Romero Mujalli, Daniel}, title = {Ecological modeling of adaptive evolutionary responses to rapid climate change}, doi = {10.25932/publishup-43062}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430627}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2019}, abstract = {A contemporary challenge in Ecology and Evolutionary Biology is to anticipate the fate of populations of organisms in the context of a changing world. Climate change and landscape changes due to anthropic activities have been of major concern in the contemporary history. Organisms facing these threats are expected to respond by local adaptation (i.e., genetic changes or phenotypic plasticity) or by shifting their distributional range (migration). However, there are limits to their responses. For example, isolated populations will have more difficulties in developing adaptive innovations by means of genetic changes than interconnected metapopulations. Similarly, the topography of the environment can limit dispersal opportunities for crawling organisms as compared to those that rely on wind. Thus, populations of species with different life history strategy may differ in their ability to cope with changing environmental conditions. However, depending on the taxon, empirical studies investigating organisms' responses to environmental change may become too complex, long and expensive; plus, complications arising from dealing with endangered species. In consequence, eco-evolutionary modeling offers an opportunity to overcome these limitations and complement empirical studies, understand the action and limitations of underlying mechanisms, and project into possible future scenarios. 
In this work I take a modeling approach and investigate the effect and relative importance of evolutionary mechanisms (including phenotypic plasticity) on the ability for local adaptation of populations with different life strategies experiencing climate change scenarios. For this, I performed a review on the state of the art of eco-evolutionary Individual-Based Models (IBMs) and identified gaps for future research. Then, I used the results from the review to develop an eco-evolutionary individual-based modeling tool to study the role of genetic and plastic mechanisms in promoting local adaptation of populations of organisms with different life strategies experiencing scenarios of climate change and environmental stochasticity. The environment was simulated through a climate variable (e.g., temperature) defining a phenotypic optimum moving at a given rate of change. The rate of change was changed to simulate different scenarios of climate change (no change, slow, medium, rapid climate change). Several scenarios of stochastic noise color resembling different climatic conditions were explored. Results show that populations of sexual species will rely mainly on standing genetic variation and phenotypic plasticity for local adaptation. Populations of species with relatively slow growth rate (e.g., large mammals) - especially those of small size - are the most vulnerable, particularly if their plasticity is limited (i.e., specialist species). In addition, whenever organisms from these populations are capable of adaptive plasticity, they can buffer fitness losses in reddish climatic conditions. Likewise, whenever they can adjust their plastic response (e.g., bet-hedging strategy) they will cope with bluish environmental conditions as well. In contrast, life strategies of high fecundity can rely on non-adaptive plasticity for their local adaptation to novel environmental conditions, unless the rate of change is too rapid. 
A recommended management measure is to guarantee interconnection of isolated populations into metapopulations, such that the supply of useful genetic variation can be increased, and, at the same time, provide them with movement opportunities to follow their preferred niche, when local adaptation becomes problematic. This is particularly important for bluish and reddish climatic conditions, when the rate of change is slow, or for any climatic condition when the level of stress (rate of change) is relatively high.}, language = {en} } @phdthesis{Ruschel2019, author = {Ruschel, Matthias}, title = {Das Preußische Erbrecht in der Judikatur des Berliner Obertribunals in den Jahren 1836 bis 1865}, doi = {10.25932/publishup-52779}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-527798}, school = {Universit{\"a}t Potsdam}, pages = {283}, year = {2019}, abstract = {Die Dissertation befasst sich mit dem Allgemeinen Preußischen Landrecht von 1794 und der hierzu ergangenen Rechtsprechung des Berliner Obertribunals. Im Fokus der Untersuchung stehen die erbrechtlichen Regelungen des Landrechts und deren Anwendung sowie Auslegung in der Judikatur des h{\"o}chsten preußischen Gerichts. Der Forschungsgegenstand ergibt sich aus dem im Landrecht kodifizierten speziellen Gesetzesverst{\"a}ndnis. Nach diesem sollte die Gesetzesauslegung durch die Rechtsprechung auf ein Minimum, n{\"a}mlich die Auslegung allein anhand des Wortlauts der Regelung reduziert werden, um dem absolutistischen Regierungsanspruch der preußischen Monarchen, namentlich Friedrich des Großen, hinreichend Rechnung zu tragen. In diesem Kontext wird der Frage nachgegangen, inwieweit das preußische Obertribunal das im Landrecht statuierte „Auslegungsverbot" beachtet hat und in welchen F{\"a}llen sich das Gericht von der Vorgabe emanzipierte und weitere Auslegungsmethoden anwendete und sich so eine unabh{\"a}ngige Rechtsprechung entwickeln konnte. Die Arbeit gliedert sich in drei Hauptabschnitte. 
Im Anschluss an die Einleitung, in der zun{\"a}chst die rechtshistorische Bedeutung des Landrechts und des Erbrechts sowie der Untersuchungsgegenstand umrissen werden, folgt die Darstellung der Entstehungsgeschichte des Landrechts und des Berliner Obertribunals. Hieran schließt sich in einem dritten Abschnitt eine Analyse der erbrechtlichen Vorschriften des Landrechts an. In dieser wird auf die Entstehungsgeschichte der verschiedenen erbrechtlichen Institute wie beispielsweise der gesetzlichen und gewillk{\"u}rten Erbfolge, dem Pflichtteilsrecht etc., unter Ber{\"u}cksichtigung des zeitgen{\"o}ssischen wissenschaftlichen Diskurses eingegangen. Im vierten Abschnitt geht es um die Judikate des Berliner Obertribunals aus den Jahren 1836-1865 in denen die zuvor dargestellten erbrechtlichen Regelungen entscheidungserheblich waren. Dabei wird der Forschungsfrage, inwieweit das Obertribunal das im Landrecht statuierte Auslegungsverbot beachtet hat und in welchen F{\"a}llen es von diesem abwich bzw. weitere Auslegungsmethoden anwendete, konkret nachgegangen. Insgesamt werden 26 Entscheidungen des Obertribunals unter dem Aspekt der Auslegungspraxis, der Kontinuit{\"a}t und der Beschleunigung der Rechtsprechung analysiert und ausgewertet.}, language = {de} } @phdthesis{Sablowski2019, author = {Sablowski, Daniel}, title = {Spectroscopic analysis of the benchmark system Alpha Aurigae}, doi = {10.25932/publishup-43239}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432396}, school = {Universit{\"a}t Potsdam}, pages = {169}, year = {2019}, abstract = {Binaries play an important role in observational and theoretical astrophysics. Since the mass and the chemical composition are key ingredients for stellar evolution, high-resolution spectroscopy is an important and necessary tool to derive those parameters to high confidence in binaries. 
This involves carefully measured orbital motion by the determination of radial velocity (RV) shifts and sophisticated techniques to derive the abundances of elements within the stellar atmosphere. A technique superior to conventional cross-correlation methods to determine RV shifts is known as spectral disentangling. Hence, a major task of this thesis was the design of a sophisticated software package for this approach. In order to investigate secondary effects, such as flux and line-profile variations, imprinting changes on the spectrum, the behavior of spectral disentangling on such variability is a key to understand the derived values, to improve them, and to get information about the variability itself. Therefore, the spectral disentangling code presented in this thesis and available to the community combines multiple advantages: separation of the spectra for detailed chemical analysis, derivation of orbital elements, derivation of individual RVs in order to investigate distorted systems (either by third body interaction or relativistic effects), the suppression of telluric contaminations, the derivation of variability, and the possibility to apply the technique to eclipsing binaries (important for orbital inclination) or in general to systems that undergo flux-variations. This code in combination with the spectral synthesis codes MOOG and SME was used in order to derive the carbon 12C/13C isotope ratio (CIR) of the benchmark binary Capella. The observational result will be set into context with theoretical evolution by the use of MESA models and resolves the discrepancy of theory and observations existing since the first measurement of Capella's CIR in 1976. The spectral disentangling code has been made available to the community and its applicability to completely different behaving systems, Wolf-Rayet stars, has also been investigated and resulted in a published article. 
Additionally, since this technique relies strongly on data quality, continuous development of scientific instruments to achieve best observational data is of great importance in observational astrophysics. That is the reason why there has also been effort in astronomical instrumentation during the work on this thesis.}, language = {en} } @phdthesis{Sarhan2019, author = {Sarhan, Radwan Mohamed}, title = {Plasmon-driven photocatalytic reactions monitored by surface-enhanced Raman spectroscopy}, doi = {10.25932/publishup-43330}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-433304}, school = {Universit{\"a}t Potsdam}, year = {2019}, abstract = {Plasmonic metal nanostructures can be tuned to efficiently interact with light, converting the photons into energetic charge carriers and heat. Therefore, the plasmonic nanoparticles such as gold and silver nanoparticles act as nano-reactors, where the molecules attached to their surfaces benefit from the enhanced electromagnetic field along with the generated energetic charge carriers and heat for possible chemical transformations. Hence, plasmonic chemistry presents metal nanoparticles as a unique playground for chemical reactions on the nanoscale remotely controlled by light. However, defining the elementary concepts behind these reactions represents the main challenge for understanding their mechanism in the context of the plasmonically assisted chemistry. Surface-enhanced Raman scattering (SERS) is a powerful technique employing the plasmon-enhanced electromagnetic field, which can be used for probing the vibrational modes of molecules adsorbed on plasmonic nanoparticles. In this cumulative dissertation, I use SERS to probe the dimerization reaction of 4-nitrothiophenol (4-NTP) as a model example of plasmonic chemistry. 
I first demonstrate that plasmonic nanostructures such as gold nanotriangles and nanoflowers have a high SERS efficiency, as evidenced by probing the vibrations of the rhodamine dye R6G and the 4-nitrothiophenol 4-NTP. The high signal enhancement enabled the measurements of SERS spectra with a short acquisition time, which allows monitoring the kinetics of chemical reactions in real time. To get insight into the reaction mechanism, several time-dependent SERS measurements of the 4-NTP have been performed under different laser and temperature conditions. Analysis of the results within a mechanistic framework has shown that the plasmonic heating significantly enhances the reaction rate, while the reaction is probably initiated by the energetic electrons. The reaction was shown to be intensity-dependent, where a certain light intensity is required to drive the reaction. Finally, first attempts to scale up the plasmonic catalysis have been performed showing the necessity to achieve the reaction threshold intensity. Meanwhile, the induced heat needs to quickly dissipate from the reaction substrate, since otherwise the reactants and the reaction platform melt. This study might open the way for further work seeking the possibilities to quickly dissipate the plasmonic heat generated during the reaction and therefore, scaling up the plasmonic catalysis.}, language = {en} } @phdthesis{Schlenter2019, author = {Schlenter, Judith}, title = {Predictive language processing in late bilinguals}, doi = {10.25932/publishup-43249}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432498}, school = {Universit{\"a}t Potsdam}, pages = {251}, year = {2019}, abstract = {The current thesis examined how second language (L2) speakers of German predict upcoming input during language processing. 
Early research has shown that the predictive abilities of L2 speakers relative to L1 speakers are limited, resulting in the proposal of the Reduced Ability to Generate Expectations (RAGE) hypothesis. Considering that prediction is assumed to facilitate language processing in L1 speakers and probably plays a role in language learning, the assumption that L1/L2 differences can be explained in terms of different processing mechanisms is a particularly interesting approach. However, results from more recent studies on the predictive processing abilities of L2 speakers have indicated that the claim of the RAGE hypothesis is too broad and that prediction in L2 speakers could be selectively limited. In the current thesis, the RAGE hypothesis was systematically put to the test. In this thesis, German L1 and highly proficient late L2 learners of German with Russian as L1 were tested on their predictive use of one or more information sources that exist as cues to sentence interpretation in both languages, to test for selective limits. The results showed that, in line with previous findings, L2 speakers can use the lexical-semantics of verbs to predict the upcoming noun. Here the level of prediction was more systematically controlled for than in previous studies by using verbs that restrict the selection of upcoming nouns to the semantic category animate or inanimate. Hence, prediction in L2 processing is possible. At the same time, this experiment showed that the L2 group was slower/less certain than the L1 group. Unlike previous studies, the experiment on case marking demonstrated that L2 speakers can use this morphosyntactic cue for prediction. Here, the use of case marking was tested by manipulating the word order (Dat > Acc vs. Acc > Dat) in double object constructions after a ditransitive verb. 
Both the L1 and the L2 group showed a difference between the two word order conditions that emerged within the critical time window for an anticipatory effect, indicating their sensitivity towards case. However, the results for the post-critical time window pointed to a higher uncertainty in the L2 group, who needed more time to integrate incoming information and were more affected by the word order variation than the L1 group, indicating that they relied more on surface-level information. A different cue weighting was also found in the experiment testing whether participants predict upcoming reference based on implicit causality information. Here, an additional child L1 group was tested, who had a lower memory capacity than the adult L2 group, as confirmed by a digit span task conducted with both learner groups. Whereas the children were only slightly delayed compared to the adult L1 group and showed the same effect of condition, the L2 speakers showed an over-reliance on surface-level information (first-mention/subjecthood). Hence, the pattern observed resulted more likely from L1/L2 differences than from resource deficits. The reviewed studies and the experiments conducted show that L2 prediction is affected by a range of factors. While some of the factors can be attributed to more individual differences (e.g., language similarity, slower processing) and can be interpreted by L2 processing accounts assuming that L1 and L2 processing are basically the same, certain limits are better explained by accounts that assume more substantial L1/L2 differences. Crucially, the experimental results demonstrate that the RAGE hypothesis should be refined: Although prediction as a fast-operating mechanism is likely to be affected in L2 speakers, there is no indication that prediction is the dominant source of L1/L2 differences. 
The results rather demonstrate that L2 speakers show a different weighting of cues and rely more on semantic and surface-level information to predict as well as to integrate incoming information.}, language = {en} } @phdthesis{Schmidt2019, author = {Schmidt, Martin}, title = {Fragmentation of landscapes: modelling ecosystem services of transition zones}, doi = {10.25932/publishup-44294}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442942}, school = {Universit{\"a}t Potsdam}, pages = {XV, 103}, year = {2019}, abstract = {For millennia, humans have affected landscapes all over the world. Due to horizontal expansion, agriculture plays a major role in the process of fragmentation. This process is caused by a substitution of natural habitats by agricultural land leading to agricultural landscapes. These landscapes are characterized by an alternation of agriculture and other land use like forests. In addition, there are landscape elements of natural origin like small water bodies. Areas of different land use are beside each other like patches, or fragments. They are physically distinguishable which makes them look like a patchwork from an aerial perspective. These fragments are each an own ecosystem with conditions and properties that differ from their adjacent fragments. As open systems, they are in exchange of information, matter and energy across their boundaries. These boundary areas are called transition zones. Here, the habitat properties and environmental conditions are altered compared to the interior of the fragments. This changes the abundance and the composition of species in the transition zones, which in turn has a feedback effect on the environmental conditions. The literature mainly offers information and insights on species abundance and composition in forested transition zones. Abiotic effects, the gradual changes in energy and matter, received less attention. In addition, little is known about non-forested transition zones. 
For example, the effects on agricultural yield in transition zones of an altered microclimate, matter dynamics or different light regimes are hardly researched or understood. The processes in transition zones are closely connected with altered provisioning and regulating ecosystem services. To disentangle the mechanisms and to upscale the effects, models can be used. My thesis provides insights into these topics: literature was reviewed and a conceptual framework for the quantitative description of gradients of matter and energy in transition zones was introduced. The results of measurements of environmental gradients like microclimate, aboveground biomass and soil carbon and nitrogen content are presented that span from within the forest into arable land. Both the measurements and the literature review could not validate a transition zone of 100 m for abiotic effects. Although this value is often reported and used in the literature, it is likely to be smaller. Further, the measurements suggest that on the one hand trees in transition zones are smaller compared to those in the interior of the fragments, while on the other hand less biomass was measured in the arable lands' transition zone. These results support the hypothesis that less carbon is stored in the aboveground biomass in transition zones. The soil at the edge (zero line) between adjacent forest and arable land contains more nitrogen and carbon content compared to the interior of the fragments. One-year measurements in the transition zone also provided evidence that microclimate is different compared to the fragments' interior. To predict the possible yield decreases that transition zones might cause, a modelling approach was developed. Using a small virtual landscape, I modelled the effect of a forest fragment shading the adjacent arable land and the effects of this on yield using the MONICA crop growth model. In the transition zone yield was less compared to the interior due to shading. 
The results of the simulations were upscaled to the landscape level and exemplarily calculated for the arable land of a whole region in Brandenburg, Germany. The major findings of my thesis are: (1) Transition zones are likely to be much smaller than assumed in the scientific literature; (2) transition zones aren't solely a phenomenon of forested ecosystems, but significantly extend into arable land as well; (3) empirical and modelling results show that transition zones encompass biotic and abiotic changes that are likely to be important to a variety of agricultural landscape ecosystem services.}, language = {en} } @phdthesis{Schneider2019, author = {Schneider, Jan Niklas}, title = {Computational approaches for emotion research}, doi = {10.25932/publishup-45927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459275}, school = {Universit{\"a}t Potsdam}, pages = {xv, 145}, year = {2019}, abstract = {Emotionen sind ein zentrales Element menschlichen Erlebens und spielen eine wichtige Rolle bei der Entscheidungsfindung. Diese Dissertation identifiziert drei methodische Probleme der aktuellen Emotionsforschung und zeigt auf, wie diese mittels computergest{\"u}tzter Methoden gel{\"o}st werden k{\"o}nnen. Dieser Ansatz wird in drei Forschungsprojekten demonstriert, die die Entwicklung solcher Methoden sowie deren Anwendung auf konkrete Forschungsfragen beschreiben. Das erste Projekt beschreibt ein Paradigma welches es erm{\"o}glicht, die subjektive und objektive Schwierigkeit der Emotionswahrnehmung zu messen. Dar{\"u}ber hinaus erm{\"o}glicht es die Verwendung einer beliebigen Anzahl von Emotionskategorien im Vergleich zu den {\"u}blichen sechs Kategorien der Basisemotionen. 
Die Ergebnisse deuten auf eine Zunahme der Schwierigkeiten bei der Wahrnehmung von Emotionen mit zunehmendem Alter der Darsteller hin und liefern Hinweise darauf, dass junge Erwachsene, {\"a}ltere Menschen und M{\"a}nner ihre Schwierigkeit bei der Wahrnehmung von Emotionen untersch{\"a}tzen. Weitere Analysen zeigten eine geringe Relevanz personenbezogener Variablen und deuteten darauf hin, dass die Schwierigkeit der Emotionswahrnehmung vornehmlich durch die Auspr{\"a}gung der Wertigkeit des Ausdrucks bestimmt wird. Das zweite Projekt zeigt am Beispiel von Arousal, einem etablierten, aber vagen Konstrukt der Emotionsforschung, wie Face-Tracking-Daten dazu genutzt werden k{\"o}nnen solche Konstrukte zu sch{\"a}rfen. Es beschreibt, wie aus Face-Tracking-Daten Maße f{\"u}r die Entfernung, Geschwindigkeit und Beschleunigung von Gesichtsausdr{\"u}cken berechnet werden k{\"o}nnen. Das Projekt untersuchte wie diese Maße mit der Arousal-Wahrnehmung in Menschen mit und ohne Autismus zusammenh{\"a}ngen. Der Abstand zum Neutralgesicht war pr{\"a}diktiv f{\"u}r die Arousal-Bewertungen in beiden Gruppen. Die Ergebnisse deuten auf eine qualitativ {\"a}hnliche Wahrnehmung von Arousal f{\"u}r Menschen mit und ohne Autismus hin. Im dritten Projekt stellen wir die Partial-Least-Squares-Analyse als allgemeine Methode vor, um eine optimale Repr{\"a}sentation zur Verkn{\"u}pfung zweier hochdimensionaler Datens{\"a}tze zu finden. Das Projekt demonstriert die Anwendbarkeit dieser Methode in der Emotionsforschung anhand der Frage nach Unterschieden in der Emotionswahrnehmung zwischen M{\"a}nnern und Frauen. Wir konnten zeigen, dass die emotionale Wahrnehmung von Frauen systematisch mehr Varianz der Gesichtsausdr{\"u}cke erfasst und dass signifikante Unterschiede in der Art und Weise bestehen, wie Frauen und M{\"a}nner einige Gesichtsausdr{\"u}cke wahrnehmen. Diese konnten wir als dynamische Gesichtsausdr{\"u}cke visualisieren. 
Um die Anwendung der entwickelten Methode f{\"u}r die Forschungsgemeinschaft zu erleichtern, wurde ein Software-Paket f{\"u}r die Statistikumgebung R geschrieben. Zudem wurde eine Website entwickelt (thisemotiondoesnotexist.com), die es Besuchern erlaubt, ein Partial-Least-Squares-Modell von Emotionsbewertungen und Face-Tracking-Daten interaktiv zu erkunden, um die entwickelte Methode zu verbreiten und ihren Nutzen f{\"u}r die Emotionsforschung zu illustrieren.}, language = {en} } @phdthesis{SchulteOsseili2019, author = {Schulte-Osseili, Christine}, title = {Vom Monomer zum Glykopolymer}, doi = {10.25932/publishup-43216}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432169}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 149}, year = {2019}, abstract = {Glykopolymere sind synthetische und nat{\"u}rlich vorkommende Polymere, die eine Glykaneinheit in der Seitenkette des Polymers tragen. Glykane sind durch die Glykan-Protein-Wechselwirkung verantwortlich f{\"u}r viele biologische Prozesse. Die Beteiligung der Glykanen in diesen biologischen Prozessen erm{\"o}glicht das Imitieren und Analysieren der Wechselwirkungen durch geeignete Modellverbindungen, z.B. der Glykopolymere. Dieses System der Glykan-Protein-Wechselwirkung soll durch die Glykopolymere untersucht und studiert werden, um die spezifische und selektive Bindung der Proteine an die Glykopolymere nachzuweisen. Die Proteine, die in der Lage sind, Kohlenhydratstrukturen selektiv zu binden, werden Lektine genannt. In dieser Dissertationsarbeit wurden verschiedene Glykopolymere synthetisiert. Dabei sollte auf einen effizienten und kosteng{\"u}nstigen Syntheseweg geachtet werden. Verschiedene Glykopolymere wurden durch funktionalisierte Monomere mit verschiedenen Zuckern, wie z.B. Mannose, Laktose, Galaktose oder N-Acetyl-Glukosamin als funktionelle Gruppe, hergestellt. Aus diesen funktionalisierten Glykomonomeren wurden {\"u}ber ATRP und RAFT-Polymerisation Glykopolymere synthetisiert. 
Die erhaltenen Glykopolymere wurden in Diblockcopolymeren als hydrophiler Block angewendet und die Selbstassemblierung in w{\"a}ssriger L{\"o}sung untersucht. Die Polymere formten in w{\"a}ssriger L{\"o}sung Mizellen, bei denen der Zuckerblock an der Oberfl{\"a}che der Mizellen sitzt. Die Mizellen wurden mit einem hydrophoben Fluoreszenzfarbstoff beladen, wodurch die CMC der Mizellenbildung bestimmt werden konnte. Außerdem wurden die Glykopolymere als Oberfl{\"a}chenbeschichtung {\"u}ber „Grafting from" mit SI-ATRP oder {\"u}ber „Grafting to" auf verschiedene Oberfl{\"a}chen gebunden. Durch die glykopolymerbeschichteten Oberfl{\"a}chen konnte die Glykan-Protein-Wechselwirkung {\"u}ber spektroskopische Messmethoden, wie SPR- und Mikroring Resonatoren untersucht werden. Hierbei wurde die spezifische und selektive Bindung der Lektine an die Glykopolymere nachgewiesen und die Bindungsst{\"a}rke untersucht. Die synthetisierten Glykopolymere k{\"o}nnten durch Austausch der Glykaneinheit f{\"u}r andere Lektine adressierbar werden und damit ein weites Feld an anderen Proteinen erschließen. Die biovertr{\"a}glichen Glykopolymere w{\"a}ren Alternativen f{\"u}r den Einsatz in biologischen Prozessen als Transporter von Medikamenten oder Farbstoffe in den K{\"o}rper. Außerdem k{\"o}nnten die funktionalisierten Oberfl{\"a}chen in der Diagnostik zum Erkennen von Lektinen eingesetzt werden. Die Glykane, die keine selektive und spezifische Bindung zu Proteinen eingehen, k{\"o}nnten als antiadsorptive Oberfl{\"a}chenbeschichtung z.B. 
in der Zellbiologie eingesetzt werden.}, language = {de} } @phdthesis{Schaefer2019, author = {Sch{\"a}fer, Merlin}, title = {Understanding and predicting global change impacts on migratory birds}, doi = {10.25932/publishup-43925}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439256}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 153}, year = {2019}, abstract = {This is a publication-based dissertation comprising three original research studies (one published, one submitted and one ready for submission; status March 2019). The dissertation introduces a generic computer model as a tool to investigate the behaviour and population dynamics of animals in cyclic environments. The model is further employed for analysing how migratory birds respond to various scenarios of altered food supply under global change. Here, ecological and evolutionary time-scales are considered, as well as the biological constraints and trade-offs the individual faces, which ultimately shape response dynamics at the population level. Further, the effect of fine-scale temporal patterns in resource supply is studied, which is challenging to achieve experimentally. My findings predict population declines, altered behavioural timing and negative carry-over effects arising in migratory birds under global change. They thus stress the need for intensified research on how ecological mechanisms are affected by global change and for effective conservation measures for migratory birds. The open-source modelling software created for this dissertation can now be used for other taxa and related research questions. Overall, this thesis improves our mechanistic understanding of the impacts of global change on migratory birds as one prerequisite to comprehend ongoing global biodiversity loss. 
The research results are discussed in a broader ecological and scientific context in a concluding synthesis chapter.}, language = {en} } @phdthesis{Schuerings2019, author = {Sch{\"u}rings, Marco Philipp Hermann}, title = {Synthesis of 1D microgel strands and their motion analysis in solution}, doi = {10.25932/publishup-43953}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439532}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2019}, abstract = {The fabrication of 1D nanostrands composed of stimuli responsive microgels has been shown in this work. Microgels are well known materials able to respond to various stimuli from outer environment. Since these microgels respond via a volume change to an external stimulus, a targeted mechanical response can be achieved. Through carefully choosing the right composition of the polymer matrix, microgels can be designed to react precisely to the targeted stimuli (e.g. drug delivery via pH and temperature changes, or selective contractions through changes in electrical current125). In this work, it was aimed to create flexible nano-filaments which are capable of fast anisotropic contractions similar to muscle filaments. For the fabrication of such filaments or strands, nanostructured templates (PDMS wrinkles) were chosen due to a facile and low-cost fabrication and versatile tunability of their dimensions. Additionally, wrinkling is a well-known lithography-free method which enables the fabrication of nanostructures in a reproducible manner and with a high long-range periodicity. In Chapter 2.1, it was shown for the first time that microgels as soft matter particles can be aligned to densely packed microgel arrays of various lateral dimensions. The alignment of microgels with different compositions (e.g. VCL/AAEM, NIPAAm, NIPAAm/VCL and charged microgels) was shown by using different assembly techniques (e.g. spin-coating, template confined molding). 
It was chosen to set one experimental parameter constant which was the SiOx surface composition of the templates and substrates (e.g. oxidized PDMS wrinkles, Si-wafers and glass slides). It was shown that the fabrication of nanoarrays was feasible with all tested microgel types. Although the microgels exhibited different deformability when aligned on a flat surface, they retained their thermo-responsivity and swelling behavior. Towards the fabrication of 1D microgel strands interparticle connectivity was aspired. This was achieved via different cross-linking methods (i.e. cross-linking via UV-irradiation and host-guest complexation) discussed in Chapter 2.2. The microgel arrays created by different assembly methods and microgel types were tested for their cross-linking suitability. It was observed that NIPAAm based microgels cannot be cross-linked with UV light. Furthermore, it was found that these microgels exhibit a strong surface-particle-interaction and therefore could not be detached from the given substrates. In contrast to the latter, with VCL/AAEM based microgels it was possible to both UV cross-link them based on the keto-enol tautomerism of the AAEM copolymer, and to detach them from the substrate due to the lower adhesion energy towards SiOx surfaces. With VCL/AAEM microgels long, one-dimensional microgel strands could be re-dispersed in water for further analysis. It has also been shown that at least one lateral dimension of the free dispersed 1D microgel strands is easily controllable by adjusting the wavelength of the wrinkled template. For further work, only VCL/AAEM based microgels were used to focus on the main aim of this work, i.e. the fabrication of 1D microgel nanostrands. As an alternative to the unspecific and harsh UV cross-linking, the host-guest complexation via diazobenzene cross-linkers and cyclodextrin hosts was explored. 
The idea behind this approach was to give means to a future construction kit-like approach by incorporation of cyclodextrin comonomers in a broad variety of particle systems (e.g. microgels, nanoparticles). For this purpose, VCL/AAEM microgels were copolymerized with different amounts of mono-acrylate functionalized β-cyclodextrin (CD). After successfully testing the cross-linking capability in solution, the cross-linking of aligned VCL/AAEM/CD microgels was tried. Although the cross-linking worked well, once the single arrays came into contact to each other, they agglomerated. As a reason for this behavior residual amounts of mono-complexed diazobenzene linkers were suspected. Thus, end-capping strategies were tried out (e.g. excess amounts of β-cyclodextrin and coverage with azobenzene functionalized AuNPs) but were unsuccessful. With deeper thought, entropy effects were taken into consideration which favor the release of complexed diazobenzene linker leading to agglomerations. To circumvent this entropy driven effect, a multifunctional polymer with 50\% azobenzene groups (Harada polymer) was used. First experiments with this polymer showed promising results regarding a less pronounced agglomeration (Figure 77). Thus, this approach could be pursued in the future. In this chapter it was found out that in contrast to pearl necklace and ribbon like formations, particle alignment in zigzag formation provided the best compromise in terms of stability in dispersion (see Figure 44a and Figure 51) while maintaining sufficient flexibility. For this reason, microgel strands in zigzag formation were used for the motion analysis described in Chapter 2.3. The aim was to observe the properties of unrestrained microgel strands in solution (e.g. diffusion behavior, rotational properties and ideally, anisotropic contraction after temperature increase). Initially, 1D microgel strands were manipulated via AFM in a liquid cell setup. 
It could be observed that the strands required a higher load force compared to single microgels to be detached from the surface. However, with the AFM it was not possible to detach the strands in a controllable manner but resulted in a complete removal of single microgel particles and a tearing off of the strands from the surface, respectively. For this reason, to observe the motion behavior of unrestrained microgel strands in solution, confocal microscopy was used. Furthermore, to hinder an adsorption of the strands, it was found out that coating the surface of the substrates with a repulsive polymer film was beneficial. Confocal and wide-field microscopy videos showed that the microgel strands exhibit translational and rotational diffusive motion in solution without perceptible bending. Unfortunately, with these methods the detection of the anisotropic stimuli responsive contraction of the free moving microgel strands was not possible. To summarize, the flexibility of microgel strands is more comparable to the mechanical behavior of a semi flexible cable than to a yarn. The strands studied here consist of dozens or even hundreds of discrete submicron units strung together by cross-linking, having few parallels in nanotechnology. With the insights gained in this work on microgel-surface interactions, in the future, a targeted functionalization of the template and substrate surfaces can be conducted to actively prevent unwanted microgel adsorption for a given microgel system (e.g. PVCL and polystyrene coating [235]). This measure would make the discussed alignment methods more diverse. As shown herein, the assembly methods enable a versatile microgel alignment (e.g. microgel meshes, double and triple strands). To go further, one could use more complex templates (e.g. ceramic rhombs and star shaped wrinkles (Figure 14)) to expand the possibilities of microgel alignment and to precisely control their aspect ratios (e.g. 
microgel rods with homogeneous size distributions).}, language = {en} } @phdthesis{Sidarenka2019, author = {Sidarenka, Uladzimir}, title = {Sentiment analysis of German Twitter}, doi = {10.25932/publishup-43742}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437422}, school = {Universit{\"a}t Potsdam}, pages = {vii, 217}, year = {2019}, abstract = {The immense popularity of online communication services in the last decade has not only upended our lives (with news spreading like wildfire on the Web, presidents announcing their decisions on Twitter, and the outcome of political elections being determined on Facebook) but also dramatically increased the amount of data exchanged on these platforms. Therefore, if we wish to understand the needs of modern society better and want to protect it from new threats, we urgently need more robust, higher-quality natural language processing (NLP) applications that can recognize such necessities and menaces automatically, by analyzing uncensored texts. Unfortunately, most NLP programs today have been created for standard language, as we know it from newspapers, or, in the best case, adapted to the specifics of English social media. This thesis reduces the existing deficit by entering the new frontier of German online communication and addressing one of its most prolific forms—users' conversations on Twitter. In particular, it explores the ways and means by how people express their opinions on this service, examines current approaches to automatic mining of these feelings, and proposes novel methods, which outperform state-of-the-art techniques. For this purpose, I introduce a new corpus of German tweets that have been manually annotated with sentiments, their targets and holders, as well as lexical polarity items and their contextual modifiers. 
Using these data, I explore four major areas of sentiment research: (i) generation of sentiment lexicons, (ii) fine-grained opinion mining, (iii) message-level polarity classification, and (iv) discourse-aware sentiment analysis. In the first task, I compare three popular groups of lexicon generation methods: dictionary-, corpus-, and word-embedding-based ones, finding that dictionary-based systems generally yield better polarity lists than the last two groups. Apart from this, I propose a linear projection algorithm, whose results surpass many existing automatically-generated lexicons. Afterwards, in the second task, I examine two common approaches to automatic prediction of sentiment spans, their sources, and targets: conditional random fields (CRFs) and recurrent neural networks, obtaining higher scores with the former model and improving these results even further by redefining the structure of CRF graphs. When dealing with message-level polarity classification, I juxtapose three major sentiment paradigms: lexicon-, machine-learning-, and deep-learning-based systems, and try to unite the first and last of these method groups by introducing a bidirectional neural network with lexicon-based attention. Finally, in order to make the new classifier aware of microblogs' discourse structure, I let it separately analyze the elementary discourse units of each tweet and infer the overall polarity of a message from the scores of its EDUs with the help of two new approaches: latent-marginalized CRFs and Recursive Dirichlet Process.}, language = {en} } @phdthesis{Solopow2019, author = {Solopow, Sergej}, title = {Wavelength dependent demagnetization dynamics in Co2MnGa Heusler-alloy}, doi = {10.25932/publishup-42786}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427860}, school = {Universit{\"a}t Potsdam}, pages = {91}, year = {2019}, abstract = {In dieser Arbeit haben wir ultraschnelle Entmagnetisierung an einer Heusler-Legierung untersucht. 
Es handelt sich um ein Halbmetall, das sich in einer ferromagnetischen Phase befindet. Die Besonderheit dieses Materials besteht im Aufbau einer Bandstruktur. Diese bildet Zustandsdichten, in der die Majorit{\"a}tselektronen eine metallische B{\"a}nderbildung aufweisen und die Minorit{\"a}tselektronen eine Bandl{\"u}cke in der N{\"a}he des Fermi-Niveaus aufweisen, das dem Aufbau eines Halbleiters entspricht. Mit Hilfe der Pump-Probe-Experimente haben wir zeitaufgel{\"o}ste Messungen durchgef{\"u}hrt. F{\"u}r das Pumpen wurden ultrakurze Laserpulse mit einer Pulsdauer von 100 fs benutzt. Wir haben dabei zwei verschiedene Wellenl{\"a}ngen mit 400 nm und 1240 nm benutzt, um den Effekt der Prim{\"a}ranregung und der Bandl{\"u}cke in den Minorit{\"a}tszust{\"a}nden zu untersuchen. Dabei wurde zum ersten Mal OPA (Optical Parametrical Amplifier) f{\"u}r die Erzeugung der langwelligen Pulse an der FEMTOSPEX-Beamline getestet und erfolgreich bei den Experimenten verwendet. Wir haben Wellenl{\"a}ngen bedingte Unterschiede in der Entmagnetisierungszeit gemessen. Mit der Erh{\"o}hung der Photonenenergie ist der Prozess der Entmagnetisierung deutlich schneller als bei einer niedrigeren Photonenenergie. Wir verkn{\"u}pften diese Ergebnisse mit der Existenz der Energiel{\"u}cke f{\"u}r Minorit{\"a}tselektronen. Mit Hilfe lokaler Elliott-Yafet-Streuprozesse k{\"o}nnen die beobachteten Zeiten gut erkl{\"a}rt werden. Wir haben in dieser Arbeit auch eine neue Probe-Methode f{\"u}r die Magnetisierung angewandt und somit experimentell deren Effektivit{\"a}t, n{\"a}mlich XMCD in Reflection geometry, best{\"a}tigen k{\"o}nnen. Statische Experimente liefern somit deutliche Indizien daf{\"u}r, dass eine magnetische von einer rein elektronischen Antwort des Systems getrennt werden kann. 
Unter der Voraussetzung, dass die Photonenenergie der R{\"o}ntgenstrahlung auf die L3 Kante des entsprechenden Elements eingestellt, ein geeigneter Einfallswinkel gew{\"a}hlt und die zirkulare Polarisation fixiert wird, ist es m{\"o}glich, diese Methode zur Analyse magnetischer und elektronischer Respons anzuwenden.}, language = {en} } @phdthesis{Sotiropoulou2019, author = {Sotiropoulou, Stavroula}, title = {Pleiotropy of phonetic indices in the expression of syllabic organization}, doi = {10.25932/publishup-54639}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-546399}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2019}, abstract = {This dissertation is concerned with the relation between qualitative phonological organization in the form of syllabic structure and continuous phonetics, that is, the spatial and temporal dimensions of vocal tract action that express syllabic structure. The main claim of the dissertation is twofold. First, we argue that syllabic organization exerts multiple effects on the spatio-temporal properties of the segments that partake in that organization. That is, there is no unique or privileged exponent of syllabic organization. Rather, syllabic organization is expressed in a pleiotropy of phonetic indices. Second, we claim that a better understanding of the relation between qualitative phonological organization and continuous phonetics is reached when one considers how the string of segments (over which the nature of the phonological organization is assessed) responds to perturbations (scaling of phonetic variables) of localized properties (such as durations) within that string. Specifically, variation in phonetic variables and more specifically prosodic variation is a crucial key to understanding the nature of the link between (phonological) syllabic organization and the phonetic spatio-temporal manifestation of that organization. 
The effects of prosodic variation on segmental properties and on the overlap between the segments, we argue, offer the right pathway to discover patterns related to syllabic organization. In our approach, to uncover evidence for global organization, the sequence of segments partaking in that organization as well as properties of these segments or their relations with one another must be somehow locally varied. The consequences of such variation on the rest of the sequence can then be used to unveil the span of organization. When local perturbations to segments or relations between adjacent segments have effects that ripple through the rest of the sequence, this is evidence that organization is global. If instead local perturbations stay local with no consequences for the rest of the whole, this indicates that organization is local.}, language = {en} } @phdthesis{Sterzel2019, author = {Sterzel, Till}, title = {Analyzing global typologies of socio-ecological vulnerability}, doi = {10.25932/publishup-42883}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-428837}, school = {Universit{\"a}t Potsdam}, pages = {137}, year = {2019}, abstract = {On a planetary scale human populations need to adapt to both socio-economic and environmental problems amidst rapid global change. This holds true for coupled human-environment (socio-ecological) systems in rural and urban settings alike. Two examples are drylands and urban coasts. Such socio-ecological systems have a global distribution. Therefore, advancing the knowledge base for identifying socio-ecological adaptation needs with local vulnerability assessments alone is infeasible: The systems cover vast areas, while funding, time, and human resources for local assessments are limited. They are lacking in low and middle-income countries (LICs and MICs) in particular. But places in a specific socio-ecological system are not only unique and complex - they also exhibit similarities. 
A global patchwork of local rural drylands vulnerability assessments of human populations to socio-ecological and environmental problems has already been reduced to a limited number of problem structures, which typically cause vulnerability. However, the question arises whether this is also possible in urban socio-ecological systems. The question also arises whether these typologies provide added value in research beyond global change. Finally, the methodology employed for drylands needs refining and standardizing to increase its uptake in the scientific community. In this dissertation, I set out to fill these three gaps in research. The geographical focus in my dissertation is on LICs and MICs, which generally have lower capacities to adapt, and greater adaptation needs, regarding rapid global change. Using a spatially explicit indicator-based methodology, I combine geospatial and clustering methods to identify typical configurations of key factors in case studies causing vulnerability to human populations in two specific socio-ecological systems. Then I use statistical and analytical methods to interpret and appraise both the typical configurations and the global typologies they constitute. First, I improve the indicator-based methodology and then reanalyze typical global problem structures of socio-ecological drylands vulnerability with seven indicator datasets. The reanalysis confirms the key tenets and produces a more realistic and nuanced typology of eight spatially explicit problem structures, or vulnerability profiles: Two new profiles with typically high natural resource endowment emerge, in which overpopulation has led to medium or high soil erosion. Second, I determine whether the new drylands typology and its socio-ecological vulnerability concept advance a thematically linked scientific debate in human security studies: what drives violent conflict in drylands? 
The typology is a much better predictor for conflict distribution and incidence in drylands than regression models typically used in peace research. Third, I analyze global problem structures typically causing vulnerability in an urban socio-ecological system - the rapidly urbanizing coastal fringe (RUCF) - with eleven indicator datasets. The RUCF also shows a robust typology, and its seven profiles show huge asymmetries in vulnerability and adaptive capacity. The fastest population increase, lowest income, most ineffective governments, most prevalent poverty, and lowest adaptive capacity are all typically stacked in two profiles in LICs. This shows that beyond local case studies tropical cyclones and/or coastal flooding are neither stalling rapid population growth, nor urban expansion, in the RUCF. I propose entry points for scaling up successful vulnerability reduction strategies in coastal cities within the same vulnerability profile. This dissertation shows that patchworks of local vulnerability assessments can be generalized to structure global socio-ecological vulnerabilities in both rural and urban socio-ecological systems according to typical problems. In terms of climate-related extreme events in the RUCF, conflicting problem structures and means to deal with them are threatening to widen the development gap between LICs and high-income countries unless successful vulnerability reduction measures are comprehensively scaled up. The explanatory power for human security in drylands warrants further applications of the methodology beyond global environmental change research in the future. Thus, analyzing spatially explicit global typologies of socio-ecological vulnerability is a useful complement to local assessments: The typologies provide entry points for where to consider which generic measures to reduce typical problem structures - including the countless places without local assessments. 
This can save limited time and financial resources for adaptation under rapid global change.}, language = {en} } @phdthesis{StutterGarcia2019, author = {Stutter Garcia, Ana}, title = {The use of grammatical knowledge in an additional language}, doi = {10.25932/publishup-46932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469326}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 340}, year = {2019}, abstract = {This thesis investigates whether multilingual speakers' use of grammatical constraints in an additional language (La) is affected by the native (L1) and non-native grammars (L2) of their linguistic repertoire. Previous studies have used untimed measures of grammatical performance to show that L1 and L2 grammars affect the initial stages of La acquisition. This thesis extends this work by examining whether speakers at intermediate levels of La proficiency, who demonstrate mature untimed/offline knowledge of the target La constraints, are differentially affected by their L1 and L2 knowledge when they comprehend sentences under processing pressure. With this purpose, several groups of La German speakers were tested on word order and agreement phenomena using online/timed measures of grammatical knowledge. Participants had mirror distributions of their prior languages and they were either L1English/L2Spanish speakers or L1Spanish/L2English speakers. Crucially, in half of the phenomena the target La constraint aligned with English but not with Spanish, while in the other half it aligned with Spanish but not with English. Results show that the L1 grammar plays a major role in the use of La constraints under processing pressure, as participants displayed increased sensitivity to La constraints when they aligned with their L1, and reduced sensitivity when they did not. Further, in specific phenomena in which the L2 and La constraints aligned, increased L2 proficiency resulted in an enhanced sensitivity to the La constraint. 
These findings suggest that both native and non-native grammars affect how speakers use La grammatical constraints under processing pressure. However, L1 and L2 grammars differentially influence participants' performance: While L1 constraints seem to be reliably recruited to cope with the processing demands of real-time La use, proficiency in an L2 can enhance sensitivity to La constraints only in specific circumstances, namely when L2 and La constraints align.}, language = {en} } @phdthesis{Teckentrup2019, author = {Teckentrup, Lisa}, title = {Understanding predator-prey interactions}, doi = {10.25932/publishup-43162}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431624}, school = {Universit{\"a}t Potsdam}, pages = {ix, 133}, year = {2019}, abstract = {Predators can have numerical and behavioral effects on prey animals. While numerical effects are well explored, the impact of behavioral effects is unclear. Furthermore, behavioral effects are generally either analyzed with a focus on single individuals or with a focus on consequences for other trophic levels. Thereby, the impact of fear on the level of prey communities is overlooked, despite potential consequences for conservation and nature management. In order to improve our understanding of predator-prey interactions, an assessment of the consequences of fear in shaping prey community structures is crucial. In this thesis, I evaluated how fear alters prey space use, community structure and composition, focusing on terrestrial mammals. By integrating landscapes of fear in an existing individual-based and spatially-explicit model, I simulated community assembly of prey animals via individual home range formation. The model comprises multiple hierarchical levels from individual home range behavior to patterns of prey community structure and composition. The mechanistic approach of the model allowed for the identification of underlying mechanisms driving prey community responses under fear. 
My results show that fear modified prey space use and community patterns. Under fear, prey animals shifted their home ranges towards safer areas of the landscape. Furthermore, fear decreased the total biomass and the diversity of the prey community and reinforced shifts in community composition towards smaller animals. These effects could be mediated by an increasing availability of refuges in the landscape. Under landscape changes, such as habitat loss and fragmentation, fear intensified negative effects on prey communities. Prey communities in risky environments were subject to a non-proportional diversity loss of up to 30\% if fear was taken into account. Regarding habitat properties, I found that well-connected, large safe patches can reduce the negative consequences of habitat loss and fragmentation on prey communities. Including variation in risk perception between prey animals had consequences on prey space use. Animals with a high risk perception predominantly used safe areas of the landscape, while animals with a low risk perception preferred areas with a high food availability. On the community level, prey diversity was higher in heterogeneous landscapes of fear if individuals varied in their risk perception compared to scenarios in which all individuals had the same risk perception. Overall, my findings give a first, comprehensive assessment of the role of fear in shaping prey communities. The linkage between individual home range behavior and patterns at the community level allows for a mechanistic understanding of the underlying processes. My results underline the importance of the structure of the landscape of fear as a key driver of prey community responses, especially if the habitat is threatened by landscape changes. Furthermore, I show that individual landscapes of fear can improve our understanding of the consequences of trait variation on community structures. 
Regarding conservation and nature management, my results support calls for modern conservation approaches that go beyond single species and address the protection of biotic interactions.}, language = {en} } @phdthesis{Thater2019, author = {Thater, Sabine}, title = {The interplay between supermassive black holes and their host galaxies}, doi = {10.25932/publishup-43757}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437570}, school = {Universit{\"a}t Potsdam}, pages = {iv, 186}, year = {2019}, abstract = {Supermassive black holes reside in the hearts of almost all massive galaxies. Their evolutionary path seems to be strongly linked to the evolution of their host galaxies, as implied by several empirical relations between the black hole mass (M BH ) and different host galaxy properties. The physical driver of this co-evolution is, however, still not understood. More mass measurements over homogeneous samples and a detailed understanding of systematic uncertainties are required to fathom the origin of the scaling relations. In this thesis, I present the mass estimations of supermassive black holes in the nuclei of one late-type and thirteen early-type galaxies. Our SMASHING sample extends from the intermediate to the massive galaxy mass regime and was selected to fill in gaps in number of galaxies along the scaling relations. All galaxies were observed at high spatial resolution, making use of the adaptive-optics mode of integral field unit (IFU) instruments on state-of-the-art telescopes (SINFONI, NIFS, MUSE). I extracted the stellar kinematics from these observations and constructed dynamical Jeans and Schwarzschild models to estimate the mass of the central black holes robustly. My new mass estimates increase the number of early-type galaxies with measured black hole masses by 15\%. The seven measured galaxies with nuclear light deficits ('cores') augment the sample of cored galaxies with measured black holes by 40\%. 
Next to determining massive black hole masses, evaluating the accuracy of black hole masses is crucial for understanding the intrinsic scatter of the black hole- host galaxy scaling relations. I tested various sources of systematic uncertainty on my derived mass estimates. The M BH estimate of the single late-type galaxy of the sample yielded an upper limit, which I could constrain very robustly. I tested the effects of dust, mass-to-light ratio (M/L) variation, and dark matter on my measured M BH . Based on these tests, the typically assumed constant M/L ratio can be an adequate assumption to account for the small amounts of dark matter in the center of that galaxy. I also tested the effect of a variable M/L variation on the M BH measurement on a second galaxy. By considering stellar M/L variations in the dynamical modeling, the measured M BH decreased by 30\%. In the future, this test should be performed on additional galaxies to learn how an as constant assumed M/L flaws the estimated black hole masses. Based on our upper limit mass measurement, I confirm previous suggestions that resolving the predicted BH sphere-of-influence is not a strict condition to measure black hole masses. Instead, it is only a rough guide for the detection of the black hole if high-quality, and high signal-to-noise IFU data are used for the measurement. About half of our sample consists of massive early-type galaxies which show nuclear surface brightness cores and signs of triaxiality. While these types of galaxies are typically modeled with axisymmetric modeling methods, the effects on M BH are not well studied yet. The massive galaxies of our presented galaxy sample are well suited to test the effect of different stellar dynamical models on the measured black hole mass in evidently triaxial galaxies. I have compared spherical Jeans and axisymmetric Schwarzschild models and will add triaxial Schwarzschild models to this comparison in the future. 
The constructed Jeans and Schwarzschild models mostly disagree with each other and cannot reproduce many of the triaxial features of the galaxies (e.g., nuclear sub-components, prolate rotation). The consequence of the axisymmetric-triaxial assumption on the accuracy of M BH and its impact on the black hole - host galaxy relation needs to be carefully examined in the future. In the sample of galaxies with published M BH , we find measurements based on different dynamical tracers, requiring different observations, assumptions, and methods. Crucially, different tracers do not always give consistent results. I have used two independent tracers (cold molecular gas and stars) to estimate M BH in a regular galaxy of our sample. While the two estimates are consistent within their errors, the stellar-based measurement is twice as high as the gas-based. Similar trends have also been found in the literature. Therefore, a rigorous test of the systematics associated with the different modeling methods is required in the future. I caution to take the effects of different tracers (and methods) into account when discussing the scaling relations. I conclude this thesis by comparing my galaxy sample with the compilation of galaxies with measured black holes from the literature, also adding six SMASHING galaxies, which were published outside of this thesis. None of the SMASHING galaxies deviates significantly from the literature measurements. Their inclusion to the published early-type galaxies causes a change towards a shallower slope for the M BH - effective velocity dispersion relation, which is mainly driven by the massive galaxies of our sample. 
More unbiased and homogeneous measurements are needed in the future to determine the shape of the relation and understand its physical origin.}, language = {en} } @phdthesis{Thiede2019, author = {Thiede, Tobias}, title = {A multiscale analysis of additively manufactured lattice structures}, doi = {10.25932/publishup-47041}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470418}, school = {Universit{\"a}t Potsdam}, pages = {xi, 97, LIII}, year = {2019}, abstract = {Additive Manufacturing (AM) in terms of laser powder-bed fusion (L-PBF) offers new prospects regarding the design of parts and enables therefore the production of lattice structures. These lattice structures shall be implemented in various industrial applications (e.g. gas turbines) for reasons of material savings or cooling channels. However, internal defects, residual stress, and structural deviations from the nominal geometry are unavoidable. In this work, the structural integrity of lattice structures manufactured by means of L-PBF was non-destructively investigated with a multiscale approach. A workflow for quantitative 3D powder analysis in terms of particle size, particle shape, particle porosity, inter-particle distance and packing density was established. Synchrotron computed tomography (CT) was used to correlate the packing density with the particle size and particle shape. It was also observed that at least about 50\% of the powder porosity was released during production of the struts. Struts are the component of lattice structures and were investigated by means of laboratory CT. The focus was on the influence of the build angle on part porosity and surface quality. The surface topography analysis was advanced by the quantitative characterisation of re-entrant surface features. This characterisation was compared with conventional surface parameters showing their complementary information, but also the need for AM specific surface parameters. 
The mechanical behaviour of the lattice structure was investigated with in-situ CT under compression and successive digital volume correlation (DVC). The deformation was found to be knot-dominated, and therefore the lattice folds unit cell layer wise. The residual stress was determined experimentally for the first time in such lattice structures. Neutron diffraction was used for the non-destructive 3D stress investigation. The principal stress directions and values were determined in dependence of the number of measured directions. While a significant uni-axial stress state was found in the strut, a more hydrostatic stress state was found in the knot. In both cases, strut and knot, seven directions were at least needed to find reliable principal stress directions.}, language = {en} } @phdthesis{Trautwein2019, author = {Trautwein, Jutta}, title = {The Mental lexicon in acquisition}, doi = {10.25932/publishup-43431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434314}, school = {Universit{\"a}t Potsdam}, pages = {IV, 177}, year = {2019}, abstract = {The individual's mental lexicon comprises all known words as well as related information on semantics, orthography and phonology. Moreover, entries connect due to similarities in these language domains building a large network structure. The access to lexical information is crucial for processing of words and sentences. Thus, a lack of information inhibits the retrieval and can cause language processing difficulties. Hence, the composition of the mental lexicon is essential for language skills and its assessment is a central topic of linguistic and educational research. In early childhood, measurement of the mental lexicon is uncomplicated, for example through parental questionnaires or the analysis of speech samples. 
However, with growing content the measurement becomes more challenging: With more and more words in the mental lexicon, the inclusion of all possible known words into a test or questionnaire becomes impossible. That is why there is a lack of methods to assess the mental lexicon for school children and adults. For the same reason, there are only few findings on the courses of lexical development during school years as well as its specific effect on other language skills. This dissertation is supposed to close this gap by pursuing two major goals: First, I wanted to develop a method to assess lexical features, namely lexicon size and lexical structure, for children of different age groups. Second, I aimed to describe the results of this method in terms of lexical development of size and structure. Findings were intended to help understanding mechanisms of lexical acquisition and inform theories on vocabulary growth. The approach is based on the dictionary method where a sample of words out of a dictionary is tested and results are projected on the whole dictionary to determine an individual's lexicon size. In the present study, the childLex corpus, a written language corpus for children in German, served as the basis for lexicon size estimation. The corpus is assumed to comprise all words children attending primary school could know. Testing a sample of words out of the corpus enables projection of the results on the whole corpus. For this purpose, a vocabulary test based on the corpus was developed. Afterwards, test performance of virtual participants was simulated by drawing different lexicon sizes from the corpus and comparing whether the test items were included in the lexicon or not. This allowed determination of the relation between test performance and total lexicon size and thus could be transferred to a sample of real participants. Besides lexicon size, lexical content could be approximated with this approach and analyzed in terms of lexical structure. 
To pursue the presented aims and establish the sampling method, I conducted three consecutive studies. Study 1 includes the development of a vocabulary test based on the childLex corpus. The testing was based on the yes/no format and included three versions for different age groups. The validation grounded on the Rasch Model shows that it is a valid instrument to measure vocabulary for primary school children in German. In Study 2, I established the method to estimate lexicon sizes and present results on lexical development during primary school. Plausible results demonstrate that lexical growth follows a quadratic function starting with about 6,000 words at the beginning of school and about 73,000 words on average for young adults. Moreover, the study revealed large interindividual differences. Study 3 focused on the analysis of network structures and their development in the mental lexicon due to orthographic similarities. It demonstrates that networks possess small-world characteristics and decrease in interconnectivity with age. Taken together, this dissertation provides an innovative approach for the assessment and description of the development of the mental lexicon from primary school onwards. The studies determine recent results on lexical acquisition in different age groups that were missing before. They impressively show the importance of this period and display the existence of extensive interindividual differences in lexical development. One central aim of future research needs to address the causes and prevention of these differences. In addition, the application of the method for further research (e.g. the adaptation for other target groups) and teaching purposes (e.g. 
adaptation of texts for different target groups) appears to be promising.}, language = {en} } @phdthesis{Veh2019, author = {Veh, Georg}, title = {Outburst floods from moraine-dammed lakes in the Himalayas}, doi = {10.25932/publishup-43607}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436071}, school = {Universit{\"a}t Potsdam}, pages = {124}, year = {2019}, abstract = {The Himalayas are a region that is most dependent, but also frequently prone to hazards from changing meltwater resources. This mountain belt hosts the highest mountain peaks on earth, has the largest reserve of ice outside the polar regions, and is home to a rapidly growing population in recent decades. One source of hazard has attracted scientific research in particular in the past two decades: glacial lake outburst floods (GLOFs) occurred rarely, but mostly with fatal and catastrophic consequences for downstream communities and infrastructure. Such GLOFs can suddenly release several million cubic meters of water from naturally impounded meltwater lakes. Glacial lakes have grown in number and size by ongoing glacial mass losses in the Himalayas. Theory holds that enhanced meltwater production may increase GLOF frequency, but has never been tested so far. The key challenge to test this notion are the high altitudes of >4000 m, at which lakes occur, making field work impractical. Moreover, flood waves can attenuate rapidly in mountain channels downstream, so that many GLOFs have likely gone unnoticed in past decades. Our knowledge on GLOFs is hence likely biased towards larger, destructive cases, which challenges a detailed quantification of their frequency and their response to atmospheric warming. Robustly quantifying the magnitude and frequency of GLOFs is essential for risk assessment and management along mountain rivers, not least to implement their return periods in building design codes. 
Motivated by this limited knowledge of GLOF frequency and hazard, I developed an algorithm that efficiently detects GLOFs from satellite images. In essence, this algorithm classifies land cover in 30 years (~1988-2017) of continuously recorded Landsat images over the Himalayas, and calculates likelihoods for rapidly shrinking water bodies in the stack of land cover images. I visually assessed such detected tell-tale sites for sediment fans in the river channel downstream, a second key diagnostic of GLOFs. Rigorous tests and validation with known cases from roughly 10\% of the Himalayas suggested that this algorithm is robust against frequent image noise, and hence capable to identify previously unknown GLOFs. Extending the search radius to the entire Himalayan mountain range revealed some 22 newly detected GLOFs. I thus more than doubled the existing GLOF count from 16 previously known cases since 1988, and found a dominant cluster of GLOFs in the Central and Eastern Himalayas (Bhutan and Eastern Nepal), compared to the rarer affected ranges in the North. Yet, the total of 38 GLOFs showed no change in the annual frequency, so that the activity of GLOFs per unit glacial lake area has decreased in the past 30 years. I discussed possible drivers for this finding, but left a further attribution to distinct GLOF-triggering mechanisms open to future research. This updated GLOF frequency was the key input for assessing GLOF hazard for the entire Himalayan mountain belt and several subregions. I used standard definitions in flood hydrology, describing hazard as the annual exceedance probability of a given flood peak discharge [m3 s-1] or larger at the breach location. I coupled the empirical frequency of GLOFs per region to simulations of physically plausible peak discharges from all existing ~5,000 lakes in the Himalayas. Using an extreme-value model, I could hence calculate flood return periods. 
I found that the contemporary 100-year GLOF discharge (the flood level that is reached or exceeded on average once in 100 years) is 20,600+2,200/-2,300 m3 s-1 for the entire Himalayas. Given the spatial and temporal distribution of historic GLOFs, contemporary GLOF hazard is highest in the Eastern Himalayas, and lower for regions with rarer GLOF abundance. I also calculated GLOF hazard for some 9,500 overdeepenings, which could expose and fill with water, if all Himalayan glaciers have melted eventually. Assuming that the current GLOF rate remains unchanged, the 100-year GLOF discharge could double (41,700+5,500/-4,700 m3 s-1), while the regional GLOF hazard may increase most in the Karakoram. To conclude, these three stages—from GLOF detection, to analysing their frequency and estimating regional GLOF hazard—provide a framework for modern GLOF hazard assessment. Given the rapidly growing population, infrastructure, and hydropower projects in the Himalayas, this thesis assists in quantifying the purely climate-driven contribution to hazard and risk from GLOFs.}, language = {en} } @phdthesis{vonKaphengst2019, author = {von Kaphengst, Dragana}, title = {Project's management quality in development cooperation}, doi = {10.25932/publishup-43099}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430992}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 237}, year = {2019}, abstract = {In light of the debate on the consequences of competitive contracting out of traditionally public services, this research compares two mechanisms used to allocate funds in development cooperation—direct awarding and competitive contracting out—aiming to identify their potential advantages and disadvantages. 
The agency theory is applied within the framework of rational-choice institutionalism to study the institutional arrangements that surround two different money allocation mechanisms, identify the incentives they create for the behavior of individual actors in the field, and examine how these then transfer into measurable differences in managerial quality of development aid projects. In this work, project management quality is seen as an important determinant of the overall project success. For data-gathering purposes, the German development agency, the Gesellschaft f{\"u}r Internationale Zusammenarbeit (GIZ), is used due to its unique way of work. Whereas the majority of projects receive funds via direct-award mechanism, there is a commercial department, GIZ International Services (GIZ IS) that has to compete for project funds. The data concerning project management practices on the GIZ and GIZ IS projects was gathered via a web-based, self-administered survey of project team leaders. Principal component analysis was applied to reduce the dimensionality of the independent variable to total of five components of project management. Furthermore, multiple regression analysis identified the differences between the separate components on these two project types. Enriched by qualitative data gathered via interviews, this thesis offers insights into everyday managerial practices in development cooperation and identifies the advantages and disadvantages of the two allocation mechanisms. The thesis first reiterates the responsibility of donors and implementers for overall aid effectiveness. It shows that the mechanism of competitive contracting out leads to better oversight and control of implementers, fosters deeper cooperation between the implementers and beneficiaries, and has a potential to strengthen ownership of recipient countries. 
On the other hand, it shows that the evaluation quality does not tremendously benefit from the competitive allocation mechanism and that the quality of the component knowledge management and learning is better when direct-award mechanisms are used. This raises questions about the lacking possibilities of actors in the field to learn about past mistakes and incorporate the findings into the future interventions, which is one of the fundamental issues of aid effectiveness. Finally, the findings show immense deficiencies in regard to oversight and control of individual projects in German development cooperation.}, language = {en} } @phdthesis{Voss2019, author = {Voß, Amira}, title = {F{\"u}r eine Reformierung des irakischen internationalen Privatrechts}, doi = {10.25932/publishup-43019}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430190}, school = {Universit{\"a}t Potsdam}, pages = {XIII; 272}, year = {2019}, abstract = {Seit 2003 hat sich das politische Bild des Irak stark ver{\"a}ndert. Dadurch begann der Prozess der Neugestaltung der irakischen Rechtsordnung. Die irakische Verfassung von 2005 legt erstmalig in der Geschichte des Irak den Islam und die Demokratie als zwei nebeneinander zu beachtende Grundprinzipien bei der Gesetzgebung fest. Trotz dieser signifikanten Ver{\"a}nderung im irakischen Rechtssystem und erheblicher Entwicklungen im internationalen Privat- und Zivilverfahrensrecht (IPR/IZVR) im internationalen Vergleich gilt die haupts{\"a}chlich im irakischen Zivilgesetzbuch (ZGB) von 1951 enthaltene gesetzliche Regelung des IPR/IZVR im Irak weiterhin. Deshalb entstand diese Arbeit f{\"u}r eine Reformierung des irakischen IPR/IZVR. Die Arbeit gilt als erste umfassende wissenschaftliche Untersuchung, die sich mit dem jetzigen Inhalt und der zuk{\"u}nftigen Reformierung des irakischen internationalen Privatrecht- und Zivilverfahrensrechts (IPR/IZVR) besch{\"a}ftigt. 
Die Verfasserin vermittelt einen Gesamt{\"u}berblick {\"u}ber das jetzt geltende irakische internationale Privat- und Zivilverfahrensrecht mit gelegentlicher punktueller und stichwortartiger Heranziehung des deutschen, islamischen, t{\"u}rkischen und tunesischen Rechts, zeigt dessen Schwachstellen auf und unterbreitet entsprechende Reformvorschl{\"a}ge. Wegen der besonderen Bedeutung des internationalen Vertragsrechts f{\"u}r die Wirtschaft im Irak und auch zum Teil f{\"u}r Deutschland gibt die Verfasserin einen genaueren {\"U}berblick {\"u}ber das irakische internationale Vertragsrecht und bekr{\"a}ftigt gleichzeitig dessen Reformbed{\"u}rftigkeit. Die Darstellung der wichtigen Entwicklungen im deutsch-europ{\"a}ischen, im traditionellen islamischen Recht und im t{\"u}rkischen und tunesischen internationalen Privat- und Zivilverfahrensrecht im zweiten Kapitel dienen als Grundlage, auf die bei der Reformierung des irakischen IPR/ IZVR zur{\"u}ck gegriffen werden kann. Da die Kenntnisse des islamischen Rechts nicht zwingend zum Rechtsstudium geh{\"o}ren, wird das islamische Recht dazu in Bezug auf seine Entstehung und die Rechtsquellen dargestellt. Am Ende der Arbeit wird ein Entwurf eines f{\"o}deralen Gesetzes zum internationalen Privatrecht im Irak katalogisiert, der sich im Rahmen der irakischen Verfassung gleichzeitig mit dem Islam und der Demokratie vereinbaren l{\"a}sst.}, language = {de} } @phdthesis{Vranic2019, author = {Vranic, Marija}, title = {3D Structure of the biomarker hepcidin-25 in its native state}, doi = {10.25932/publishup-45929}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459295}, school = {Universit{\"a}t Potsdam}, pages = {xii, 135}, year = {2019}, abstract = {Hepcidin-25 (Hep-25) plays a crucial role in the control of iron homeostasis. 
Since the dysfunction of the hepcidin pathway leads to multiple diseases as a result of iron imbalance, hepcidin represents a potential target for the diagnosis and treatment of disorders of iron metabolism. Despite intense research in the last decade targeted at developing a selective immunoassay for iron disorder diagnosis and treatment and better understanding the ferroportin-hepcidin interaction, questions remain. The key to resolving these underlying questions is acquiring exact knowledge of the 3D structure of native Hep-25. Since it was determined that the N-terminus, which is responsible for the bioactivity of Hep-25, contains a small Cu(II)-binding site known as the ATCUN motif, it was assumed that the Hep-25-Cu(II) complex is the native, bioactive form of the hepcidin. This structure has thus far not been elucidated in detail. Owing to the lack of structural information on metal-bound Hep-25, little is known about its possible biological role in iron metabolism. Therefore, this work is focused on structurally characterizing the metal-bound Hep-25 by NMR spectroscopy and molecular dynamics simulations. For the present work, a protocol was developed to prepare and purify properly folded Hep-25 in high quantities. In order to overcome the low solubility of Hep-25 at neutral pH, we introduced the C-terminal DEDEDE solubility tag. The metal binding was investigated through a series of NMR spectroscopic experiments to identify the most affected amino acids that mediate metal coordination. Based on the obtained NMR data, a structural calculation was performed in order to generate a model structure of the Hep-25-Ni(II) complex. The DEDEDE tag was excluded from the structural calculation due to a lack of NMR restraints. The dynamic nature and fast exchange of some of the amide protons with solvent reduced the overall number of NMR restraints needed for a high-quality structure. 
The NMR data revealed that the 20 C-terminal Hep-25 amino acids experienced no significant conformational changes, compared to published results, as a result of a pH change from pH 3 to pH 7 and metal binding. A 3D model of the Hep-25-Ni(II) complex was constructed from NMR data recorded for the hexapeptide-Ni(II) complex and Hep-25-DEDEDE-Ni(II) complex in combination with the fixed conformation of 19 C-terminal amino acids. The NMR data of the Hep-25-DEDEDE-Ni(II) complex indicates that the ATCUN motif moves independently from the rest of the structure. The 3D model structure of the metal-bound Hep-25 allows for future works to elucidate hepcidin's interaction with its receptor ferroportin and should serve as a starting point for the development of antibodies with improved selectivity.}, language = {en} } @phdthesis{Walczak2019, author = {Walczak, Ralf}, title = {Molecular design of nitrogen-doped nanoporous noble carbon materials for gas adsorption}, doi = {10.25932/publishup-43524}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435241}, school = {Universit{\"a}t Potsdam}, pages = {II, 155}, year = {2019}, abstract = {In den modernen Gesellschaften f{\"u}hrt ein stetig steigender Energiebedarf zu dem zunehmenden Verbrauch fossiler Brennstoffe wie Kohle, {\"O}l, und Gas. Die Verbrennung dieser kohlenstoffbasierten Brennstoffe f{\"u}hrt unweigerlich zur Freisetzung von Treibhausgasen, vor allem von CO2. Die CO2 Aufnahme unmittelbar bei den Verbrennungsanlagen oder direkt aus der Luft, zusammen mit Regulierung von CO2 produzierenden Energiesektoren (z.B. K{\"u}hlanlagen), k{\"o}nnen den CO2 Ausstoß reduzieren. Allerdings f{\"u}hren insbesondere bei der CO2 Aufnahme die geringen CO2 Konzentrationen und die Aufnahme konkurrierender Gase zu niedrigen CO2 Kapazit{\"a}ten und Selektivit{\"a}ten. Das Zusammenspiel der Gastmolek{\"u}le mit por{\"o}sen Materialien ist dabei essentiell. 
Por{\"o}se Kohlenstoffmaterialien besitzen attraktive Eigenschaften, unter anderem elektrische Leitf{\"a}higkeit, einstellbare Porosit{\"a}t, als auch chemische und thermische Stabilit{\"a}t. Allerdings f{\"u}hrt die zu geringe Polarisierbarkeit dieser Materialien zu einer geringen Affinit{\"a}t zu polaren Molek{\"u}len (z.B. CO2, H2O, oder NH3). Diese Affinit{\"a}t kann durch den Einbau von Stickstoff erh{\"o}ht werden. Solche Materialien sind oft „edler" als reine Kohlenstoffe, dies bedeutet, dass sie eher oxidierend wirken, als selbst oxidiert zu werden. Die Problematik besteht darin, einen hohen und gleichm{\"a}ßig verteilten Stickstoffgehalt in das Kohlenstoffger{\"u}st einzubauen. Die Zielsetzung dieser Dissertation ist die Erforschung neuer Synthesewege f{\"u}r stickstoffdotierte edle Kohlenstoffmaterialien und die Entwicklung eines grundlegenden Verst{\"a}ndnisses f{\"u}r deren Anwendung in Gasadsorption und elektrochemischer Energiespeicherung. Es wurde eine templatfreie Synthese f{\"u}r stickstoffreiche, edle, und mikropor{\"o}se Kohlenstoffmaterialien durch direkte Kondensation eines stickstoffreichen organischen Molek{\"u}ls als Vorl{\"a}ufer erarbeitet. Dadurch konnten Materialien mit hohen Adsorptionskapazit{\"a}ten f{\"u}r H2O und CO2 bei niedrigen Konzentrationen und moderate CO2/N2 Selektivit{\"a}ten erzielt werden. Um die CO2/N2 Selektivit{\"a}ten zu verbessern, wurden mittels der Einstellung des Kondensationsgrades die molekulare Struktur und Porosit{\"a}t der Kohlenstoffmaterialien kontrolliert. Diese Materialien besitzen die Eigenschaften eines molekularen Siebs f{\"u}r CO2 {\"u}ber N2, das zu herausragenden CO2/N2 Selektivit{\"a}ten f{\"u}hrt. Der ultrahydrophile Charakter der Porenoberfl{\"a}chen und die kleinen Mikroporen dieser Kohlenstoffmaterialien erm{\"o}glichen grundlegende Untersuchungen f{\"u}r die Wechselwirkungen mit Molek{\"u}len die polarer sind als CO2, n{\"a}mlich H2O und NH3. 
Eine weitere Reihe stickstoffdotierter Kohlenstoffmaterialien wurde durch Kondensation eines konjugierten mikropor{\"o}sen Polymers synthetisiert und deren strukturelle Besonderheiten als Anodenmaterial f{\"u}r die Natriumionen Batterie untersucht. Diese Dissertation leistet einen Beitrag zur Erforschung stickstoffdotierter Kohlenstoffmaterialien und deren Wechselwirkungen mit verschiedenen Gastmolek{\"u}len.}, language = {en} } @phdthesis{Willig2019, author = {Willig, Lisa}, title = {Ultrafast magneto-optical studies of remagnetisation dynamics in transition metals}, doi = {10.25932/publishup-44194}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441942}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 113, XVII}, year = {2019}, abstract = {Ultrafast magnetisation dynamics have been investigated intensely for two decades. The recovery process after demagnetisation, however, was rarely studied experimentally and discussed in detail. The focus of this work lies on the investigation of the magnetisation on long timescales after laser excitation. It combines two ultrafast time resolved methods to study the relaxation of the magnetic and lattice system after excitation with a high fluence ultrashort laser pulse. The magnetic system is investigated by time resolved measurements of the magneto-optical Kerr effect. The experimental setup has been implemented in the scope of this work. The lattice dynamics were obtained with ultrafast X-ray diffraction. The combination of both techniques leads to a better understanding of the mechanisms involved in magnetisation recovery from a non-equilibrium condition. Three different groups of samples are investigated in this work: Thin Nickel layers capped with nonmagnetic materials, a continuous sample of the ordered L10 phase of Iron Platinum and a sample consisting of Iron Platinum nanoparticles embedded in a carbon matrix. 
The study of the remagnetisation reveals a general trend for all of the samples: The remagnetisation process can be described by two time dependences. A first exponential recovery that slows down with an increasing amount of energy absorbed in the system until an approximately linear time dependence is observed. This is followed by a second exponential recovery. In case of low fluence excitation, the first recovery is faster than the second. With increasing fluence the first recovery is slowed down and can be described as a linear function. If the pump-induced temperature increase in the sample is sufficiently high, a phase transition to a paramagnetic state is observed. In the remagnetisation process, the transition into the ferromagnetic state is characterised by a distinct transition between the linear and exponential recovery. From the combination of the transient lattice temperature Tp(t) obtained from ultrafast X-ray measurements and magnetisation M(t) gained from magneto-optical measurements we construct the transient magnetisation versus temperature relations M(Tp). If the lattice temperature remains below the Curie temperature the remagnetisation curve M(Tp) is linear and stays below the M(T) curve in equilibrium in the continuous transition metal layers. When the sample is heated above phase transition, the remagnetisation converges towards the static temperature dependence. For the granular Iron Platinum sample the M(Tp) curves for different fluences coincide, i.e. 
the remagnetisation follows a similar path irrespective of the initial laser-induced temperature jump.}, language = {en} } @phdthesis{Wolf2019, author = {Wolf, Mathias Johannes}, title = {The role of partial melting on trace element and isotope systematics of granitic melts}, doi = {10.25932/publishup-42370}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423702}, school = {Universit{\"a}t Potsdam}, pages = {iv, 129}, year = {2019}, abstract = {Partial melting is a first order process for the chemical differentiation of the crust (Vielzeuf et al., 1990). Redistribution of chemical elements during melt generation crucially influences the composition of the lower and upper crust and provides a mechanism to concentrate and transport chemical elements that may also be of economic interest. Understanding of the diverse processes and their controlling factors is therefore not only of scientific interest but also of high economic importance to cover the demand for rare metals. The redistribution of major and trace elements during partial melting represents a central step for the understanding how granite-bound mineralization develops (Hedenquist and Lowenstern, 1994). The partial melt generation and mobilization of ore elements (e.g. Sn, W, Nb, Ta) into the melt depends on the composition of the sedimentary source and melting conditions. Distinct source rocks have different compositions reflecting their deposition and alteration histories. This specific chemical "memory" results in different mineral assemblages and melting reactions for different protolith compositions during prograde metamorphism (Brown and Fyfe, 1970; Thompson, 1982; Vielzeuf and Holloway, 1988). These factors do not only exert an important influence on the distribution of chemical elements during melt generation, they also influence the volume of melt that is produced, extraction of the melt from its source, and its ascent through the crust (Le Breton and Thompson, 1988). 
On a larger scale, protolith distribution and chemical alteration (weathering), prograde metamorphism with partial melting, melt extraction, and granite emplacement are ultimately depending on a (plate-)tectonic control (Romer and Kroner, 2016). Comprehension of the individual stages and their interaction is crucial in understanding how granite-related mineralization forms, thereby allowing estimation of the mineralization potential of certain areas. Partial melting also influences the isotope systematics of melt and restite. Radiogenic and stable isotopes of magmatic rocks are commonly used to trace back the source of intrusions or to quantify mixing of magmas from different sources with distinct isotopic signatures (DePaolo and Wasserburg, 1979; Lesher, 1990; Chappell, 1996). These applications are based on the fundamental requirement that the isotopic signature in the melt reflects that of the bulk source from which it is derived. Different minerals in a protolith may have isotopic compositions of radiogenic isotopes that deviate from their whole rock signature (Ayres and Harris, 1997; Knesel and Davidson, 2002). In particular, old minerals with a distinct parent-to-daughter (P/D) ratio are expected to have a specific radiogenic isotope signature. As the partial melting reaction only involves selective phases in a protolith, the isotopic signature of the melt reflects that of the minerals involved in the melting reaction and, therefore, should be different from the bulk source signature. Similar considerations hold true for stable isotopes.}, language = {en} } @phdthesis{Wozny2019, author = {Wozny, Florian}, title = {Three empirical essays in health economics}, doi = {10.25932/publishup-46991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469910}, school = {Universit{\"a}t Potsdam}, pages = {200}, year = {2019}, abstract = {Modern health care systems are characterized by pronounced prevention and cost-optimized treatments. 
This dissertation offers novel empirical evidence on how useful such measures can be. The first chapter analyzes how radiation, a main pollutant in health care, can negatively affect cognitive health. The second chapter focuses on the effect of Low Emission Zones on public health, as air quality is the major external source of health problems. Both chapters point out potentials for preventive measures. Finally, chapter three studies how changes in treatment prices affect the reallocation of hospital resources. In the following, I briefly summarize each chapter and discuss implications for health care systems as well as other policy areas. Based on the National Educational Panel Study that is linked to data on radiation, chapter one shows that radiation can have negative long-term effects on cognitive skills, even at subclinical doses. Exploiting arguably exogenous variation in soil contamination in Germany due to the Chernobyl disaster in 1986, the findings show that people exposed to higher radiation perform significantly worse in cognitive tests 25 years later. Identification is ensured by abnormal rainfall within a critical period of ten days. The results show that the effect is stronger among older cohorts than younger cohorts, which is consistent with radiation accelerating cognitive decline as people get older. On average, a one-standard-deviation increase in the initial level of CS137 (around 30 chest x-rays) is associated with a decrease in the cognitive skills by 4.1 percent of a standard deviation (around 0.05 school years). Chapter one shows that sub-clinical levels of radiation can have negative consequences even after early childhood. This is of particular importance because most of the literature focuses on exposure very early in life, often during pregnancy. However, population exposed after birth is over 100 times larger. These results point to substantial external human capital costs of radiation which can be reduced by choices of medical procedures. 
There is a large potential for reductions because about one-third of all CT scans are assumed to be not medically justified (Brenner and Hall, 2007). If people receive unnecessary CT scans because of economic incentives, this chapter points to additional external costs of health care policies. Furthermore, the results can inform the cost-benefit trade-off for medically indicated procedures. Chapter two provides evidence about the effectiveness of Low Emission Zones. Low Emission Zones are typically justified by improvements in population health. However, there is little evidence about the potential health benefits from policy interventions aiming at improving air quality in inner-cities. The chapter asks how the coverage of Low Emission Zones affects air pollution and hospitalization by exploiting variation in the roll out of Low Emission Zones in Germany. It combines information on the geographic coverage of Low Emission Zones with rich panel data on the universe of German hospitals over the period from 2006 to 2016 with precise information on hospital locations and the annual frequency of detailed diagnoses. In order to establish that our estimates of Low Emission Zones' health impacts can indeed be attributed to improvements in local air quality, we use data from Germany's official air pollution monitoring system and assign monitor locations to Low Emission Zones and test whether measures of air pollution are affected by the coverage of a Low Emission Zone. Results in chapter two confirm former results showing that the introduction of Low Emission Zones improved air quality significantly by reducing NO2 and PM10 concentrations. Furthermore, the chapter shows that hospitals whose catchment areas are covered by a Low Emission Zone, diagnose significantly less air pollution related diseases, in particular by reducing the incidents of chronic diseases of the circulatory and the respiratory system. 
The effect is stronger before 2012, which is consistent with a general improvement in the vehicle fleet's emission standards. Depending on the disease, a one-standard-deviation increase in the coverage of a hospital's catchment area covered by a Low Emission Zone reduces the yearly number of diagnoses up to 5 percent. These findings have strong implications for policy makers. In 2015, overall costs for health care in Germany were around 340 billion euros, of which 46 billion euros for diseases of the circulatory system, making it the most expensive type of disease caused by 2.9 million cases (Statistisches Bundesamt, 2017b). Hence, reductions in the incidence of diseases of the circulatory system may directly reduce society's health care costs. Whereas chapter one and two study the demand-side in health care markets and thus preventive potential, chapter three analyzes the supply-side. By exploiting the same hospital panel data set as in chapter two, chapter three studies the effect of treatment price shocks on the reallocation of hospital resources in Germany. Starting in 2005, the implementation of the German-DRG-System led to general idiosyncratic treatment price shocks for individual hospitals. Thus far there is little evidence of the impact of general price shocks on the reallocation of hospital resources. Additionally, I add to the existing literature by showing that price shocks can have persistent effects on hospital resources even when these shocks vanish. However, simple OLS regressions would underestimate the true effect, due to endogenous treatment price shocks. I implement a novel instrumental variable strategy that exploits the exogenous variation in the number of days of snow in hospital catchment areas. A peculiarity of the reform allowed variation in days of snow to have a persistent impact on treatment prices. 
I find that treatment price increases lead to increases in input factors such as nursing staff, physicians and the range of treatments offered but to decreases in the treatment volume. This indicates supplier-induced demand. Furthermore, the probability of hospital mergers and privatization decreases. Structural differences in pre-treatment characteristics between hospitals enhance these effects. For instance, private and larger hospitals are more affected. IV estimates reveal that OLS results are biased towards zero in almost all dimensions because structural hospital differences are correlated with the reallocation of hospital resources. These results are important for several reasons. The G-DRG-Reform led to a persistent polarization of hospital resources, as some hospitals were exposed to treatment price increases, while others experienced reductions. If hospitals increase the treatment volume as a response to price reductions by offering unnecessary therapies, it has a negative impact on population wellbeing and public spending. However, results show a decrease in the range of treatments if prices decrease. Hospitals might specialize more, thus attracting more patients. From a policy perspective it is important to evaluate if such changes in the range of treatments jeopardize an adequate nationwide provision of treatments. Furthermore, the results show a decrease in the number of nurses and physicians if prices decrease. This could partly explain the nursing crisis in German hospitals. However, since hospitals specialize more they might be able to realize efficiency gains which justify reductions in input factors without losses in quality. Further research is necessary to provide evidence for the impact of the G-DRG-Reform on health care quality. Another important aspect is the change in the organizational structure. Many public hospitals have been privatized or merged. The findings show that this is at least partly driven by the G-DRG-Reform. 
This can again lead to a lack of services offered in some regions if merged hospitals specialize more or if hospitals are taken over by ecclesiastical organizations which do not provide all treatments due to moral conviction. Overall, this dissertation reveals large potential for preventive health care measures and helps to explain reallocation processes in the hospital sector if treatment prices change. Furthermore, its findings have potentially relevant implications for other areas of public policy. Chapter one identifies an effect of low dose radiation on cognitive health. As mankind is searching for new energy sources, nuclear power is becoming popular again. However, results of chapter one point to substantial costs of nuclear energy which have not been accounted for yet. Chapter two finds strong evidence that air quality improvements by Low Emission Zones translate into health improvements, even at relatively low levels of air pollution. These findings may, for instance, be of relevance to design further policies targeted at air pollution such as diesel bans. As pointed out in chapter three, the implementation of DRG-Systems may have unintended side-effects on the reallocation of hospital resources. This may also apply to other providers in the health care sector such as resident doctors.}, language = {en} } @phdthesis{Yan2019, author = {Yan, Runyu}, title = {Nitrogen-doped and porous carbons towards new energy storage mechanisms for supercapacitors with high energy density}, doi = {10.25932/publishup-43141}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431413}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2019}, abstract = {Supercapacitors are electrochemical energy storage devices with rapid charge/discharge rate and long cycle life. Their biggest challenge is the inferior energy density compared to other electrochemical energy storage devices such as batteries. 
Being the most widely spread type of supercapacitors, electrochemical double-layer capacitors (EDLCs) store energy by electrosorption of electrolyte ions on the surface of charged electrodes. As a more recent development, Na-ion capacitors (NICs) are expected to be a more promising tactic to tackle the inferior energy density due to their higher-capacity electrodes and larger operating voltage. The charges are simultaneously stored by ion adsorption on the capacitive-type cathode surface and via faradic process in the battery-type anode, respectively. Porous carbon electrodes are of great importance in these devices, but the paramount problems are the facile synthetic routes for high-performance carbons and the lack of fundamental understanding of the energy storage mechanisms. Therefore, the aim of the present dissertation is to develop novel synthetic methods for (nitrogen-doped) porous carbon materials with superior performance, and to reveal a deeper understanding of the energy storage mechanisms of EDLCs and NICs. The first part introduces a novel synthetic method towards hierarchical ordered meso-microporous carbon electrode materials for EDLCs. The large amount of micropores and highly ordered mesopores endow abundant sites for charge storage and efficient electrolyte transport, respectively, giving rise to superior EDLC performance in different electrolytes. More importantly, the controversial energy storage mechanism of EDLCs employing ionic liquid (IL) electrolytes is investigated by employing a series of porous model carbons as electrodes. The results not only allow conclusions on the relations between the porosity and ion transport dynamics, but also deliver deeper insights into the energy storage mechanism of IL-based EDLCs which is different from the one usually dominating in solvent-based electrolytes leading to compression double-layers. 
The other part focuses on anodes of NICs, where novel synthesis of nitrogen-rich porous carbon electrodes and their sodium storage mechanism are investigated. Free-standing fibrous nitrogen-doped carbon materials are synthesized by electrospinning using the nitrogen-rich monomer (hexaazatriphenylene-hexacarbonitrile, C18N12) as the precursor followed by condensation at high temperature. These fibers provide superior capacity and desirable charge/discharge rate for sodium storage. This work also allows insights into the sodium storage mechanism in nitrogen-doped carbons. Based on this mechanism, further optimization is done by designing a composite material composed of nitrogen-rich carbon nanoparticles embedded in conductive carbon matrix for a better charge/discharge rate. The energy density of the assembled NICs significantly exceeds that of common EDLCs while maintaining the high power density and long cycle life.}, language = {en} } @phdthesis{Zapata2019, author = {Zapata, Sebastian Henao}, title = {Paleozoic to Pliocene evolution of the Andean retroarc between 26 and 28°S: interactions between tectonics, climate, and upper plate architecture}, doi = {10.25932/publishup-43903}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439036}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2019}, abstract = {Interactions and feedbacks between tectonics, climate, and upper plate architecture control basin geometry, relief, and depositional systems. The Andes are part of a long-lived continental margin characterized by multiple tectonic cycles which have strongly modified the Andean upper plate architecture. In the Andean retroarc, spatiotemporal variations in the structure of the upper plate and tectonic regimes have resulted in marked along-strike variations in basin geometry, stratigraphy, deformational style, and mountain belt morphology. 
These along-strike variations include high-elevation plateaus (Altiplano and Puna) associated with a thin-skinned fold-and-thrust belt and thick-skinned deformation in broken foreland basins such as the Santa Barbara system and the Sierras Pampeanas. At the confluence of the Puna Plateau, the Santa Barbara system and the Sierras Pampeanas, major along-strike changes in upper plate architecture, mountain belt morphology, basement exhumation, and deformation style can be recognized. I have used a source-to-sink approach to unravel the spatiotemporal tectonic evolution of the Andean retroarc between 26 and 28°S. I obtained a large low-temperature thermochronology data set from basement units which includes apatite fission track, apatite U-Th-Sm/He, and zircon U-Th/He (ZHe) cooling ages. Stratigraphic descriptions of Miocene units were temporally constrained by U-Pb LA-ICP-MS zircon ages from interbedded pyroclastic material. Modeled ZHe ages suggest that the basement of the study area was exhumed during the Famatinian orogeny (550-450 Ma), followed by a period of relative tectonic quiescence during the Paleozoic and the Triassic. The basement experienced horst exhumation during the Cretaceous development of the Salta rift. After initial exhumation, deposition of thick Cretaceous syn-rift strata caused reheating of several basement blocks within the Santa Barbara system. During the Eocene-Oligocene, the Andean compressional setting was responsible for the exhumation of several disconnected basement blocks. These exhumed blocks were separated by areas of low relief, in which humid climate and low erosion rates facilitated the development of etchplains on the crystalline basement. The exhumed basement blocks formed an Eocene to Oligocene broken foreland basin in the back-bulge depozone of the Andean foreland. During the Early Miocene, foreland basin strata filled up the preexisting Paleogene topography. 
The basement blocks in lower relief positions were reheated; associated geothermal gradients were higher than 25°C/km. Miocene volcanism was responsible for lateral variations in the amount of reheating along the Campo-Arenal basin. Around 12 Ma, a new deformational phase modified the drainage network and fragmented the lacustrine system. As deformation and rock uplift continued, the easily eroded sedimentary cover was efficiently removed and reworked by an ephemeral fluvial system, preventing the development of significant relief. After ~6 Ma, the low erodibility of the basement blocks which began to be exposed caused relief increase, leading to the development of stable fluvial systems. Progressive relief development modified atmospheric circulation, creating a rainfall gradient. After 3 Ma, orographic rainfall and high relief led to the development of proximal fluvial-gravitational depositional systems in the surrounding basins.}, language = {en} } @phdthesis{Zemella2019, author = {Zemella, Anne}, title = {Fluoreszenzmarkierung und Modifizierung von komplexen Proteinen in eukaryotischen zellfreien Systemen durch die Etablierung von orthogonalen tRNA/Aminoacyl-tRNA-Synthetase-Paaren}, doi = {10.25932/publishup-44236}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442361}, school = {Universit{\"a}t Potsdam}, pages = {XI, 141}, year = {2019}, abstract = {Die funktionelle Charakterisierung von therapeutisch relevanten Proteinen kann bereits durch die Bereitstellung des Zielproteins in ad{\"a}quaten Mengen limitierend sein. Dies trifft besonders auf Membranproteine zu, die aufgrund von zytotoxischen Effekten auf die Produktionszelllinie und der Tendenz Aggregate zu bilden, in niedrigen Ausbeuten an aktivem Protein resultieren k{\"o}nnen. Der lebende Organismus kann durch die Verwendung von translationsaktiven Zelllysaten umgangen werden - die Grundlage der zellfreien Proteinsynthese. 
Zu Beginn der Arbeit wurde die ATP-abh{\"a}ngige Translation eines Lysates auf der Basis von kultivierten Insektenzellen (Sf21) analysiert. F{\"u}r diesen Zweck wurde ein ATP-bindendes Aptamer eingesetzt, durch welches die Translation der Nanoluziferase reguliert werden konnte. Durch die dargestellte Applizierung von Aptameren k{\"o}nnten diese zuk{\"u}nftig in zellfreien Systemen f{\"u}r die Visualisierung der Transkription und Translation eingesetzt werden, wodurch zum Beispiel komplexe Prozesse validiert werden k{\"o}nnen. Neben der reinen Proteinherstellung k{\"o}nnen Faktoren wie posttranslationale Modifikationen sowie eine Integration in eine lipidische Membran essentiell f{\"u}r die Funktionalit{\"a}t des Membranproteins sein. Im zweiten Abschnitt konnten, im zellfreien Sf21-System, f{\"u}r den G-Protein-gekoppelten Rezeptor Endothelin B sowohl eine Integration in die endogen vorhandenen Endoplasmatisch Retikulum-basierten Membranstrukturen als auch Glykosylierungen identifiziert werden. Auf der Grundlage der erfolgreichen Synthese des ET-B-Rezeptors wurden verschiedene Methoden zur Fluoreszenzmarkierung des Adenosin-Rezeptors A2a (Adora2a) angewandt und optimiert. Im dritten Abschnitt wurde der Adora2a mit Hilfe einer vorbeladenen tRNA, welche an eine fluoreszierende Aminos{\"a}ure gekoppelt war, im zellfreien Chinesischen Zwerghamster Ovarien (CHO)-System markiert. Zus{\"a}tzlich konnte durch den Einsatz eines modifizierten tRNA/Aminoacyl-tRNA-Synthetase-Paares eine nicht-kanonische Aminos{\"a}ure an Position eines integrierten Amber-Stopcodons in die Polypeptidkette eingebaut und die funktionelle Gruppe im Anschluss an einen Fluoreszenzfarbstoff gekoppelt werden. Aufgrund des offenen Charakters eignen sich zellfreie Proteinsynthesesysteme besonders f{\"u}r eine Integration von exogenen Komponenten in den Translationsprozess. 
Mit Hilfe der Fluoreszenzmarkierung wurde eine ligandvermittelte Konformations{\"a}nderung im Adora2a {\"u}ber einen Biolumineszenz-Resonanzenergietransfer detektiert. Durch die Etablierung der Amber-Suppression wurde dar{\"u}ber hinaus das Hormon Erythropoetin pegyliert, wodurch Eigenschaften wie Stabilit{\"a}t und Halbwertszeit des Proteins ver{\"a}ndert wurden. Zu guter Letzt wurde ein neues tRNA/Aminoacyl-tRNA-Synthetase-Paar auf Basis der Methanosarcina mazei Pyrrolysin-Synthetase etabliert, um das Repertoire an nicht-kanonischen Aminos{\"a}uren und den damit verbundenen Kopplungsreaktionen zu erweitern. Zusammenfassend wurden die Potenziale zellfreier Systeme in Bezug auf die Herstellung von komplexen Membranproteinen und die Charakterisierung dieser durch die Einbringung einer positionsspezifischen Fluoreszenzmarkierung verdeutlicht, wodurch neue M{\"o}glichkeiten f{\"u}r die Analyse und Funktionalisierung von komplexen Proteinen geschaffen wurden.}, language = {de} }