@phdthesis{Brandt2019, author = {Brandt, Christopher Georg}, title = {"… when the legend becomes fact …"}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-429-6}, doi = {10.25932/publishup-41044}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410441}, school = {Universit{\"a}t Potsdam}, pages = {377}, year = {2019}, abstract = {Der historische Spielfilm z{\"a}hlt zu den popul{\"a}rsten Formen geschichtskultureller Artikulation. Als solche ist er Gegenstand kontroverser Diskussionen {\"u}ber einen angemessenen didaktischen Umgang. Vor diesem Hintergrund ist es das Ziel der vorliegenden Arbeit, ein integratives, theoretisch und empirisch abgesichertes Analysemodell zu entwickeln, das nach den Tiefenstrukturen historischen Erz{\"a}hlens im Medium des Spielfilms fragt und dabei unterschiedliche Erscheinungsformen historischer Spielfilme ber{\"u}cksichtigt. Die {\"U}berlegungen bewegen sich deshalb in einem interdisziplin{\"a}ren Spannungsfeld von Theorien zum historischen Erz{\"a}hlen und Konzepten der Literatur- und Filmwissenschaft. Die Diskussion und Synthese dieser unterschiedlichen Konzepte geht dabei - auf der Grundlage einer großen Materialbasis - vom Gegenstand aus und ist induktiv angelegt. Als Orientierung f{\"u}r die praktische Arbeit werden am Ende der einzelnen Kapitel Toolkits entwickelt, die zu einer vertieften Auseinandersetzung mit historischen Spielfilmen anregen sollen.}, language = {de} } @phdthesis{BernaschinaSchuermann2019, author = {Bernaschina Sch{\"u}rmann, Vicente}, title = {{\'A}ngeles que cantan de continuo}, isbn = {978-3-86956-459-3}, issn = {2629-2548}, doi = {10.25932/publishup-42645}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426450}, school = {Universit{\"a}t Potsdam}, pages = {387}, year = {2019}, abstract = {Objeto de esta investigaci{\'o}n es el auge y ca{\'i}da de una legitimaci{\'o}n teol{\'o}gica de la poes{\'i}a que tuvo lugar en el virreinato del Per{\'u} entre fines del siglo XVI y la segunda mitad del siglo XVII. Su punto c{\'u}lmine est{\'a} marcado por el surgimiento de una "Academia Ant{\'a}rtica" en las primeras d{\'e}cadas del siglo XVII, mientras que su fin, se aprecia a fines del mismo siglo, cuando eruditos de las {\'o}rdenes religiosas, especialmente Juan de Espinosa y Medrano en sus textos en defensa de la poes{\'i}a y las ciencias, negaron a la poes{\'i}a cualquier estatuto teol{\'o}gico, sirvi{\'e}ndose sin embargo de ella para escribir sus sermones y textos. A partir del auge y ca{\'i}da de esta legitimaci{\'o}n teol{\'o}gica en el virreinato del Per{\'u}, este estudio muestra la existencia de dos movimientos que forman un quiasmo entre una teologizaci{\'o}n de la poes{\'i}a y una poetizaci{\'o}n de la teolog{\'i}a, en cuyo centro velado se encuentra en disputa el saber te{\'o}rico y pr{\'a}ctico de la poes{\'i}a.
Lo que est{\'a} en disputa en este sentido no es la poes{\'i}a, entendida como una cumbre de las bellas letras, sino la posesi{\'o}n leg{\'i}tima de un modo de lectura anal{\'o}gico y tipol{\'o}gico del orden del universo, fundado en las Sagradas Escrituras y en la historia de la salvaci{\'o}n, y un modo po{\'e}tico para doctrinar a todos los miembros de la sociedad virreinal en concordancia con aquel modo de lectura.}, language = {es} } @phdthesis{MarimonTarter2019, author = {Marimon Tarter, Mireia}, title = {Word segmentation in German-learning infants and German-speaking adults}, doi = {10.25932/publishup-43740}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437400}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2019}, abstract = {There is evidence that infants start extracting words from fluent speech around 7.5 months of age (e.g., Jusczyk \& Aslin, 1995) and that they use at least two mechanisms to segment word forms from fluent speech: prosodic information (e.g., Jusczyk, Cutler \& Redanz, 1993) and statistical information (e.g., Saffran, Aslin \& Newport, 1996). However, how these two mechanisms interact and whether they change during development is still not fully understood. The main aim of the present work is to understand in what way different cues to word segmentation are exploited by infants when learning the language in their environment, as well as to explore whether this ability is related to later language skills. In Chapter 3 we sought to determine the reliability of the method used in most of the experiments in the present thesis (the Headturn Preference Procedure), as well as to examine correlations and individual differences between infants' performance and later language outcomes. In Chapter 4 we investigated how German-speaking adults weigh statistical and prosodic information for word segmentation. We familiarized adults with an auditory string in which statistical and prosodic information indicated different word boundaries and obtained both behavioral and pupillometry responses. Then, we conducted further experiments to understand in what way different cues to word segmentation are exploited by 9-month-old German-learning infants (Chapter 5) and by 6-month-old German-learning infants (Chapter 6). In addition, we conducted follow-up questionnaires with the infants and obtained language outcomes at later stages of development. Our findings from this thesis revealed that (1) German-speaking adults show a strong weighting of prosodic cues, at least for the materials used in this study, and that (2) German-learning infants weigh these two kinds of cues differently depending on age and/or language experience. We observed that, unlike English-learning infants, 6-month-old infants relied more strongly on prosodic cues. Nine-month-olds did not show any preference for either of the cues in the word segmentation task. From the present results it remains unclear whether the ability to use prosodic cues to word segmentation relates to later language vocabulary. We speculate that prosody provides infants with their first window into the specific acoustic regularities in the signal, which enables them to master the specific stress pattern of German rapidly.
Our findings are a step forward in the understanding of an early impact of native prosody compared to statistical learning in early word segmentation.}, language = {en} } @phdthesis{Arf2019, author = {Arf, Shelan Ali}, title = {Women's everyday reality of social insecurity}, doi = {10.25932/publishup-43433}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434333}, school = {Universit{\"a}t Potsdam}, pages = {334}, year = {2019}, abstract = {Since 1980 Iraq has passed through various wars and conflicts, including the Iraq-Iran war, Saddam Hussein's Anfal and Halabja campaigns against the Kurds and the killing campaigns against Shiites in 1986, Saddam Hussein's invasion of Kuwait in August 1990, the Gulf war in 1990, the Iraq war in 2003 and the fall of Saddam, the conflicts and chaos in the transmission of power after the death of Saddam, and the war against ISIS. All these wars left severe impacts on most households in Iraq, on women and children in particular. The consequences of such long wars could be observed in all sectors, including the economic, social, cultural and religious sectors. The social structure, norms and attitudes are intensely affected. Many women, specifically divorced women, found themselves in challenging social as well as economic situations. Thus divorced women in Iraqi Kurdistan are the focus of this research. Considering the fact that there is very little empirical research on this topic, a constructivist grounded theory (CGT) methodology is viewed as reliable in order to come up with a comprehensive picture of the everyday life of divorced women in Iraqi Kurdistan. Data were collected in Sulaimani city in Iraqi Kurdistan. The work of Kathy Charmaz was chosen as the main methodological context of the research, and the main data collection method was individual intensive narrative interviews with divorced women. Women generally, and divorced women specifically, in Iraqi Kurdistan are living in a patriarchal society that is passing through many changes due to the above-mentioned wars, among many other factors. This research studies the everyday life of divorced women in such situations and the forms of social insecurity they are experiencing. The social institutions, from the family as a very significant institution for women to the governmental and non-governmental institutions that are working to support women, as well as the coping strategies, are in focus in this research. The main research argument is that the family plays ambivalent roles in divorced women's lives. For instance, on one side families are revealed to be an essential source of security for most respondents; on the other side families also posed many threats and restrictions on those women. This argument is supported by what Suad Joseph calls "the paradox of support and suppression". Another important finding is that the state institutions (laws, constitutions, Offices of combating violence against woman and family) are supporting women to some extent and offering them protection from insecurities, but it is clear that the existence of the laws does not stop the violence against women in Iraqi Kurdistan. As explained by Pateman, this is because the law/the contract is a sexual-social contract that upholds the sex rights of males and grants them more privileges than females. Political instability and tribal social norms also play a major role in influencing the rule of law.
It is noteworthy that the analysis of the interviews in this research showed that, in spite of living in insecurity and facing difficulties, most of the respondents try to find coping strategies to tackle difficult situations and to deal with the violence they face; these strategies include bargaining, sometimes compromising or resisting, etc. Different theories are used to explain these coping strategies, such as bargaining with patriarchy: Kandiyoti stated that women living under certain restraints struggle to find ways and strategies to enhance their situations. The research findings also revealed that the western liberal feminist view of agency is limited; this agrees with Saba Mahmood and what she explained about Muslim women's agency. For my respondents, who are divorced women, their agency reveals itself in different ways, in resisting or compromising with or even obeying the power of male relatives and the normative system in the society. Agency also explains the behavior of women contacting formal state institutions, such as the police or the Offices of combating violence against woman and family, in cases of violence.}, language = {en} } @phdthesis{Rabe2019, author = {Rabe, Sophie}, title = {Wirksamkeit einer telemedizinisch assistierten Bewegungstherapie f{\"u}r die postrehabilitative Versorgung von Patienten mit Knie- oder H{\"u}ft-Totalendoprothese im berufsf{\"a}higen Alter}, doi = {10.25932/publishup-43055}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430556}, school = {Universit{\"a}t Potsdam}, pages = {x, 78}, year = {2019}, abstract = {Einleitung Die Implantation einer Knie- oder H{\"u}ft-Totalendoprothese (TEP) ist einer der h{\"a}ufigsten operativen Eingriffe. Im Anschluss an die Operation und die postoperative Rehabilitation stellt die Bewegungstherapie einen wesentlichen Bestandteil der Behandlung zur Verbesserung der Gelenkfunktion und der Lebensqualit{\"a}t dar. In strukturschwachen Gebieten werden entsprechende Angebote nur in unzureichender Dichte vorgehalten. Zudem zeichnet sich ein fl{\"a}chendeckender Fachkr{\"a}ftemangel im Bereich der Physiotherapie ab. Die Tele-Nachsorge bietet daher einen innovativen Ansatz f{\"u}r die postrehabilitative Versorgung der Patienten. Das Ziel der vorliegenden Untersuchung war die {\"U}berpr{\"u}fung der Wirksamkeit einer interaktiven Tele-Nachsorgeintervention f{\"u}r Patienten mit Knie- oder H{\"u}ft-TEP im Vergleich zur herk{\"o}mmlichen Versorgung (usual care). Dazu wurden die Funktionalit{\"a}t und die berufliche Wiedereingliederung untersucht. Methode Zwischen August 2016 und August 2017 wurden 111 Patienten (54,9 ± 6,8 Jahre, 54,3 \% weiblich) zu Beginn ihrer station{\"a}ren Anschlussheilbehandlung nach Implantation einer Knie- oder H{\"u}ft-TEP in diese randomisierte, kontrollierte, multizentrische Studie eingeschlossen. Nach Entlassung aus der orthop{\"a}dischen Anschlussrehabilitation (Baseline) f{\"u}hrte die Interventionsgruppe (IG) ein dreimonatiges interaktives Training {\"u}ber ein Telerehabilitationssystem durch. Hierf{\"u}r erstellte ein betreuender Physiotherapeut einen individuellen Trainingsplan aus 38 {\"U}bungen zur Verbesserung der Kraft sowie der posturalen Kontrolle. Zur Anpassung des Trainingsplans {\"u}bermittelte das System dem Physiotherapeuten Daten zur Quantit{\"a}t sowie zur Qualit{\"a}t des Trainings. Die Kontrollgruppe (KG) konnte die herk{\"o}mmlichen Versorgungsangebote nutzen.
Zur Beurteilung der Wirksamkeit der Intervention wurde die Differenz der Verbesserung im 6MWT zwischen der IG und der KG nach drei Monaten als prim{\"a}rer Endpunkt definiert. Als sekund{\"a}re Endpunkte wurden die Return-to-Work-Rate sowie die funktionelle Mobilit{\"a}t mittels des Stair Ascend Tests, des Five-Times-Sit-to-Stand Test und des Timed Up and Go Tests untersucht. Weiterhin wurden die gesundheitsbezogene Lebensqualit{\"a}t mit dem Short-Form 36 (SF-36) und die gelenkbezogenen Einschr{\"a}nkungen mit dem Western Ontario and McMaster Universities Osteoarthritis Index (WOMAC) evaluiert. Der prim{\"a}re und die sekund{\"a}ren Endpunkte wurden anhand von baseline-adjustierten Kovarianzanalysen im intention-to-treat-Ansatz ausgewertet. Zus{\"a}tzlich wurde die Teilnahme an Nachsorgeangeboten und die Adh{\"a}renz der Interventionsgruppe an der Tele-Nachsorge erfasst und evaluiert. Ergebnisse Zum Ende der Intervention wiesen beide Gruppen einen statistisch signifikanten Anstieg ihrer 6MWT Strecke auf (p < 0,001). Zu diesem Zeitpunkt legten die Teilnehmer der IG im Mittel 530,8 ± 79,7 m, die der KG 514,2 ± 71,2 m zur{\"u}ck. Dabei betrug die Differenz der Verbesserung der Gehstrecke in der IG 88,3 ± 57,7 m und in der KG 79,6 ± 48,7 m. Damit zeigt der prim{\"a}re Endpunkt keine signifikanten Gruppenunterschiede (p = 0,951). Bez{\"u}glich der beruflichen Wiedereingliederung konnte jedoch eine signifikant h{\"o}here Rate in der IG (64,6 \% versus 46,2 \%; p = 0,014) festgestellt werden. F{\"u}r die sekund{\"a}ren Endpunkte der funktionellen Mobilit{\"a}t, der Lebensqualit{\"a}t und der gelenkbezogenen Beschwerden belegen die Ergebnisse eine Gleichwertigkeit beider Gruppen zum Ende der Intervention. Schlussfolgerung Die telemedizinisch assistierte Bewegungstherapie f{\"u}r Knie- oder H{\"u}ft-TEP Patienten ist der herk{\"o}mmlichen Versorgung zur Nachsorge hinsichtlich der erzielten Verbesserungen der funktionellen Mobilit{\"a}t, der gesundheitsbezogenen Lebensqualit{\"a}t und der gelenkbezogenen Beschwerden gleichwertig. In dieser Patientenpopulation ließen sich klinisch relevante Verbesserungen unabh{\"a}ngig von der Form der Bewegungstherapie erzielen. Im Hinblick auf die berufliche Wiedereingliederung zeigte sich eine signifikant h{\"o}here Rate in der Interventionsgruppe. Die telemedizinisch assistierte Bewegungstherapie scheint eine geeignete Versorgungsform der Nachsorge zu sein, die orts- und zeitunabh{\"a}ngig durchgef{\"u}hrt werden kann und somit den Bed{\"u}rfnissen berufst{\"a}tiger Patienten entgegenkommt und in den Alltag der Patienten integriert werden kann. Die Tele-Nachsorge sollte daher als optionale und komplement{\"a}re Form der postrehabilitativen Nachsorge angeboten werden. Auch im Hinblick auf den zunehmenden Fachkr{\"a}ftemangel im Bereich der Physiotherapie und bestehende Versorgungsl{\"u}cken in strukturschwachen Gebieten kann der Einsatz der Tele-Nachsorge innovative und bedarfsgerechte L{\"o}sungsans{\"a}tze bieten.}, language = {de} } @phdthesis{Solopow2019, author = {Solopow, Sergej}, title = {Wavelength dependent demagnetization dynamics in Co2MnGa Heusler-alloy}, doi = {10.25932/publishup-42786}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427860}, school = {Universit{\"a}t Potsdam}, pages = {91}, year = {2019}, abstract = {In dieser Arbeit haben wir ultraschnelle Entmagnetisierung an einer Heusler-Legierung untersucht. Es handelt sich um ein Halbmetall, das sich in einer ferromagnetischen Phase befindet. 
Die Besonderheit dieses Materials besteht im Aufbau einer Bandstruktur. Diese bildet Zustandsdichten, in denen die Majorit{\"a}tselektronen eine metallische B{\"a}nderbildung aufweisen und die Minorit{\"a}tselektronen eine Bandl{\"u}cke in der N{\"a}he des Fermi-Niveaus zeigen, was dem Aufbau eines Halbleiters entspricht. Mit Hilfe der Pump-Probe-Experimente haben wir zeitaufgel{\"o}ste Messungen durchgef{\"u}hrt. F{\"u}r das Pumpen wurden ultrakurze Laserpulse mit einer Pulsdauer von 100 fs benutzt. Wir haben dabei zwei verschiedene Wellenl{\"a}ngen von 400 nm und 1240 nm benutzt, um den Effekt der Prim{\"a}ranregung und der Bandl{\"u}cke in den Minorit{\"a}tszust{\"a}nden zu untersuchen. Dabei wurde zum ersten Mal ein OPA (Optical Parametric Amplifier) f{\"u}r die Erzeugung der langwelligen Pulse an der FEMTOSPEX-Beamline getestet und erfolgreich bei den Experimenten verwendet. Wir haben wellenl{\"a}ngenbedingte Unterschiede in der Entmagnetisierungszeit gemessen. Mit der Erh{\"o}hung der Photonenenergie ist der Prozess der Entmagnetisierung deutlich schneller als bei einer niedrigeren Photonenenergie. Wir verkn{\"u}pften diese Ergebnisse mit der Existenz der Energiel{\"u}cke f{\"u}r Minorit{\"a}tselektronen. Mit Hilfe lokaler Elliott-Yafet-Streuprozesse k{\"o}nnen die beobachteten Zeiten gut erkl{\"a}rt werden. Wir haben in dieser Arbeit auch eine neue Probe-Methode f{\"u}r die Magnetisierung, n{\"a}mlich XMCD in Reflexionsgeometrie, angewandt und somit deren Effektivit{\"a}t experimentell best{\"a}tigen k{\"o}nnen. Statische Experimente liefern somit deutliche Indizien daf{\"u}r, dass eine magnetische von einer rein elektronischen Antwort des Systems getrennt werden kann. Unter der Voraussetzung, dass die Photonenenergie der R{\"o}ntgenstrahlung auf die L3-Kante des entsprechenden Elements eingestellt, ein geeigneter Einfallswinkel gew{\"a}hlt und die zirkulare Polarisation fixiert wird, ist es m{\"o}glich, diese Methode zur Analyse der magnetischen und elektronischen Response anzuwenden.}, language = {en} } @phdthesis{SchulteOsseili2019, author = {Schulte-Osseili, Christine}, title = {Vom Monomer zum Glykopolymer}, doi = {10.25932/publishup-43216}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432169}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 149}, year = {2019}, abstract = {Glykopolymere sind synthetische und nat{\"u}rlich vorkommende Polymere, die eine Glykaneinheit in der Seitenkette des Polymers tragen. Glykane sind durch die Glykan-Protein-Wechselwirkung verantwortlich f{\"u}r viele biologische Prozesse. Die Beteiligung der Glykane an diesen biologischen Prozessen erm{\"o}glicht das Imitieren und Analysieren der Wechselwirkungen durch geeignete Modellverbindungen, z.B. die Glykopolymere. Dieses System der Glykan-Protein-Wechselwirkung soll durch die Glykopolymere untersucht und studiert werden, um die spezifische und selektive Bindung der Proteine an die Glykopolymere nachzuweisen. Die Proteine, die in der Lage sind, Kohlenhydratstrukturen selektiv zu binden, werden Lektine genannt. In dieser Dissertationsarbeit wurden verschiedene Glykopolymere synthetisiert. Dabei sollte auf einen effizienten und kosteng{\"u}nstigen Syntheseweg geachtet werden. Verschiedene Glykopolymere wurden durch funktionalisierte Monomere mit verschiedenen Zuckern, wie z.B. Mannose, Laktose, Galaktose oder N-Acetyl-Glukosamin als funktionelle Gruppe, hergestellt. Aus diesen funktionalisierten Glykomonomeren wurden {\"u}ber ATRP und RAFT-Polymerisation Glykopolymere synthetisiert.
Die erhaltenen Glykopolymere wurden in Diblockcopolymeren als hydrophiler Block angewendet und die Selbstassemblierung in w{\"a}ssriger L{\"o}sung untersucht. Die Polymere formten in w{\"a}ssriger L{\"o}sung Mizellen, bei denen der Zuckerblock an der Oberfl{\"a}che der Mizellen sitzt. Die Mizellen wurden mit einem hydrophoben Fluoreszenzfarbstoff beladen, wodurch die CMC der Mizellenbildung bestimmt werden konnte. Außerdem wurden die Glykopolymere als Oberfl{\"a}chenbeschichtung {\"u}ber „Grafting from" mit SI-ATRP oder {\"u}ber „Grafting to" auf verschiedene Oberfl{\"a}chen gebunden. Durch die glykopolymerbeschichteten Oberfl{\"a}chen konnte die Glykan-Protein-Wechselwirkung {\"u}ber spektroskopische Messmethoden, wie SPR- und Mikroring-Resonatoren, untersucht werden. Hierbei wurde die spezifische und selektive Bindung der Lektine an die Glykopolymere nachgewiesen und die Bindungsst{\"a}rke untersucht. Die synthetisierten Glykopolymere k{\"o}nnten durch Austausch der Glykaneinheit f{\"u}r andere Lektine adressierbar werden und damit ein weites Feld an anderen Proteinen erschließen. Die biovertr{\"a}glichen Glykopolymere w{\"a}ren Alternativen f{\"u}r den Einsatz in biologischen Prozessen als Transporter von Medikamenten oder Farbstoffen in den K{\"o}rper. Außerdem k{\"o}nnten die funktionalisierten Oberfl{\"a}chen in der Diagnostik zum Erkennen von Lektinen eingesetzt werden. Die Glykane, die keine selektive und spezifische Bindung zu Proteinen eingehen, k{\"o}nnten als antiadsorptive Oberfl{\"a}chenbeschichtung z.B. in der Zellbiologie eingesetzt werden.}, language = {de} } @phdthesis{Crisologo2019, author = {Crisologo, Irene}, title = {Using spaceborne radar platforms to enhance the homogeneity of weather radar calibration}, doi = {10.25932/publishup-44570}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445704}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 61}, year = {2019}, abstract = {Accurate weather observations are the keystone to many quantitative applications, such as precipitation monitoring and nowcasting, hydrological modelling and forecasting, climate studies, as well as understanding precipitation-driven natural hazards (i.e. floods, landslides, debris flow). Weather radars have been an increasingly popular tool since the 1940s to provide high spatial and temporal resolution precipitation data at the mesoscale, bridging the gap between synoptic and point scale observations. Yet, many institutions still struggle to tap the potential of the large archives of reflectivity, as there is still much to understand about factors that contribute to measurement errors, one of which is calibration. Calibration represents a substantial source of uncertainty in quantitative precipitation estimation (QPE). A miscalibration of a few dBZ can easily deteriorate the accuracy of precipitation estimates by an order of magnitude. Instances where rain cells carrying torrential rains are misidentified by the radar as moderate rain could mean the difference between a timely warning and a devastating flood. Since 2012, the Philippine Atmospheric, Geophysical, and Astronomical Services Administration (PAGASA) has been expanding the country's ground radar network. We had a first look into the dataset from one of the longest-running radars (the Subic radar) after devastating week-long torrential rains and thunderstorms in August 2012 caused by the annual southwest monsoon and enhanced by the north-passing Typhoon Haikui.
The analysis of the rainfall spatial distribution revealed the added value of radar-based QPE in comparison to interpolated rain gauge observations. However, when compared with local gauge measurements, severe miscalibration of the Subic radar was found. As a consequence, the radar-based QPE would have underestimated the rainfall amount by up to 60\% if it had not been adjusted by rain gauge observations—a technique that is not only affected by other uncertainties, but which is also not feasible in other regions of the country with very sparse rain gauge coverage. Relative calibration techniques, or the assessment of bias from the reflectivity of two radars, have been steadily gaining popularity. Previous studies have demonstrated that reflectivity observations from the Tropical Rainfall Measuring Mission (TRMM) and its successor, the Global Precipitation Measurement (GPM), are accurate enough to serve as a calibration reference for ground radars over low-to-mid-latitudes (± 35 deg for TRMM; ± 65 deg for GPM). Comparing spaceborne radars (SR) and ground radars (GR) requires cautious consideration of differences in measurement geometry and instrument specifications, as well as temporal coincidence. For this purpose, we implement a 3-D volume matching method developed by Schwaller and Morris (2011) and extended by Warren et al. (2018) to 5 years' worth of observations from the Subic radar. In this method, only the volumetric intersections of the SR and GR beams are considered. Calibration bias affects reflectivity observations homogeneously across the entire radar domain. Yet, other sources of systematic measurement errors are highly heterogeneous in space, and can either enhance or balance the bias introduced by miscalibration. In order to account for such heterogeneous errors, and thus isolate the calibration bias, we assign a quality index to each matching SR-GR volume, and thus compute the GR calibration bias as a quality-weighted average of reflectivity differences in any sample of matching SR-GR volumes. We exemplify the idea of quality-weighted averaging by using beam blockage fraction (BBF) as a quality variable. Quality-weighted averaging is able to increase the consistency of SR and GR observations by decreasing the standard deviation of the SR-GR differences, and thus increasing the precision of the bias estimates. To extend this framework further, the SR-GR quality-weighted bias estimation is applied to the neighboring Tagaytay radar, but this time focusing on path-integrated attenuation (PIA) as the source of uncertainty. Tagaytay is a C-band radar operating at a shorter wavelength and is therefore more affected by attenuation. Applying the same method used for the Subic radar, a time series of calibration bias is also established for the Tagaytay radar. Tagaytay radar sits at a higher altitude than the Subic radar and is surrounded by a gentler terrain, so beam blockage is negligible, especially in the overlapping region. Conversely, Subic radar is largely affected by beam blockage in the overlapping region, but being an S-band radar, attenuation is considered negligible. These coincidentally independent uncertainty contributions of each radar in the region of overlap provide an ideal environment to experiment with the different scenarios of quality filtering when comparing reflectivities from the two ground radars. The standard deviation of the GR-GR differences already decreases if we consider either BBF or PIA to compute the quality index and thus the weights.
However, combining them multiplicatively resulted in the largest decrease in standard deviation, suggesting that taking both factors into account increases the consistency between the matched samples. The overlap between the two radars and the instances of the SR passing over the two radars at the same time allow for verification of the SR-GR quality-weighted bias estimation method. In this regard, the consistency between the two ground radars is analyzed before and after bias correction is applied. For cases when all three radars are coincident during a significant rainfall event, the correction of GR reflectivities with calibration bias estimates from SR overpasses dramatically improves the consistency between the two ground radars, which had shown incoherent observations before correction. We also show that for cases where adequate SR coverage is unavailable, interpolating the calibration biases using a moving average can be used to correct the GR observations for any point in time to some extent. By using the interpolated biases to correct GR observations, we demonstrate that bias correction reduces the absolute value of the mean difference in most cases, and therefore improves the consistency between the two ground radars. This thesis demonstrates that in general, taking into account systematic sources of uncertainty that are heterogeneous in space (e.g. BBF) and time (e.g. PIA) allows for a more consistent estimation of calibration bias, a homogeneous quantity. The bias still exhibits an unexpected variability in time, which hints that there are still other sources of errors that remain unexplored. Nevertheless, the increase in consistency between SR and GR as well as between the two ground radars suggests that considering BBF and PIA in a weighted-averaging approach is a step in the right direction. Despite the ample room for improvement, the approach that combines volume matching between radars (either SR-GR or GR-GR) and quality-weighted comparison is readily available for application or further scrutiny. As a step towards reproducibility and transparency in atmospheric science, the 3D matching procedure and the analysis workflows as well as sample data are made available in public repositories. Open-source software such as Python and wradlib is used for all radar data processing in this thesis. This approach towards open science provides both research institutions and weather services with a valuable tool that can be applied to radar calibration, from monitoring to a posteriori correction of archived data.}, language = {en} } @phdthesis{Numberger2019, author = {Numberger, Daniela}, title = {Urban wastewater and lakes as habitats for bacteria and potential vectors for pathogens}, doi = {10.25932/publishup-43709}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437095}, school = {Universit{\"a}t Potsdam}, pages = {VI, 130}, year = {2019}, abstract = {Wasser ist lebensnotwendig und somit eine essentielle Ressource. Jedoch sind unsere S{\"u}ßwasser-Ressourcen begrenzt und ihre Erhaltung daher besonders wichtig. Verschmutzungen mit Chemikalien und Krankheitserregern, die mit einer wachsenden Bev{\"o}lkerung und Urbanisierung einhergehen, verschlechtern die Qualit{\"a}t unseres S{\"u}ßwassers. Außerdem kann Wasser als {\"U}bertragungsvektor f{\"u}r Krankheitserreger dienen und daher wasserb{\"u}rtige Krankheiten verursachen.
Der Leibniz-Forschungsverbund INFECTIONS'21 untersuchte innerhalb der interdisziplin{\"a}ren Forschungsgruppe III - „Wasser" - Gew{\"a}sser als zentralen Mittelpunkt f{\"u}r Krankheitserreger. Dabei konzentrierte man sich auf Clostridioides difficile sowie avi{\"a}re Influenza A-Viren, von denen angenommen wird, dass sie in die Gew{\"a}sser ausgeschieden werden. Ein weiteres Ziel bestand darin, die bakteriellen Gemeinschaften eines Kl{\"a}rwerkes der deutschen Hauptstadt Berlin zu charakterisieren, um anschließend eine Bewertung des potentiellen Gesundheitsrisikos geben zu k{\"o}nnen. Bakterielle Gemeinschaften des Roh- und Klarwassers aus dem Kl{\"a}rwerk unterschieden sich signifikant voneinander. Der Anteil an Darm-/F{\"a}kalbakterien war relativ niedrig und potentielle Darmpathogene wurden gr{\"o}ßtenteils aus dem Rohwasser entfernt. Ein potentielles Gesundheitsrisiko konnte allerdings von potentiell pathogenen Legionellen wie L. lytica festgestellt werden, deren relative Abundanz im Klarwasser h{\"o}her war als im Rohwasser. Es wurden außerdem drei C. difficile-Isolate aus dem Kl{\"a}rwerk-Rohwasser und einem st{\"a}dtischen Badesee in Berlin (Weisser See) gewonnen und sequenziert. Die beiden Isolate aus dem Kl{\"a}rwerk tragen keine Toxin-Gene, wohingegen das Isolat aus dem See Toxin-Gene besitzt. Alle drei Isolate sind sehr nah mit humanen St{\"a}mmen verwandt. Dies deutet auf ein potentielles, wenn auch sporadisches Gesundheitsrisiko hin. (Avi{\"a}re) Influenza A-Viren wurden in 38,8 \% der untersuchten Sedimentproben mittels PCR detektiert, aber die Virusisolierung schlug fehl. Ein Experiment mit beimpften Wasser- und Sedimentproben zeigte, dass f{\"u}r die Isolierung aus Sedimentproben eine relativ hohe Viruskonzentration n{\"o}tig ist. In Wasserproben ist jedoch ein niedriger Titer an Influenza A-Viren ausreichend, um eine Infektion auszul{\"o}sen. Es konnte zudem auch festgestellt werden, dass sich „Madin-Darby Canine Kidney (MDCK)"-Zellkulturen im Gegensatz zu embryonierten H{\"u}hnereiern besser eignen, um Influenza A-Viren aus Sediment zu isolieren. Zusammenfassend l{\"a}sst sich sagen, dass diese Arbeit m{\"o}gliche Gesundheitsrisiken aufgedeckt hat, wie etwa durch Legionellen im untersuchten Berliner Kl{\"a}rwerk, deren relative Abundanz in gekl{\"a}rtem Abwasser h{\"o}her ist als im Rohwasser. Des Weiteren wird indiziert, dass Abwasser und Gew{\"a}sser als Reservoir und Vektor f{\"u}r pathogene Organismen dienen k{\"o}nnen, selbst f{\"u}r nicht-typische Wasser-Pathogene wie C. difficile.}, language = {en} } @phdthesis{Bauch2019, author = {Bauch, Marcel}, title = {Untersuchungen an neuartigen sauerstoffsubstituierten Donoren und Akzeptoren f{\"u}r Singulettsauerstoff}, doi = {10.25932/publishup-42514}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-425140}, school = {Universit{\"a}t Potsdam}, pages = {VI, 196, xiv, A-27}, year = {2019}, abstract = {Im Verlauf dieser Arbeit wurden Aromaten wie Naphthaline und Anthracene mit Singulettsauerstoff, einer reaktiven Form des gew{\"o}hnlichen Sauerstoffs, zu sogenannten Endoperoxiden umgesetzt. Die hier eingesetzten Systeme wurden mit funktionellen Gruppen modifiziert, die {\"u}ber eine Sauerstoffbr{\"u}cke mit dem Aromaten verkn{\"u}pft sind. Die daraus entstandenen Endoperoxide sind meist besonders labil und konnten in dieser Arbeit isoliert und umfassend untersucht werden. Hierbei wurde zum einen das Reaktionsverhalten untersucht.
Es konnte gezeigt werden, dass die Aromaten in Abh{\"a}ngigkeit ihrer funktionellen Gruppen unterschiedlich schnell mit Singulettsauerstoff reagieren. Die so ermittelten Reaktivit{\"a}ten wurden zus{\"a}tzlich durch theoretische Berechnungen gest{\"u}tzt. Die resultierenden Endoperoxide wurden unter verschiedenen Bedingungen wie erh{\"o}hter Temperatur oder einem sauren bzw. basischen Milieu auf ihre Stabilit{\"a}t hin untersucht. Dabei konnte gezeigt werden, dass die auf Naphthalinen basierenden Endoperoxide den gebundenen Singulettsauerstoff in guten Ausbeuten oft schon bei sehr niedrigen Temperaturen (-40 bis 0 °C) freisetzen. Diese Verbindungen k{\"o}nnen daher als milde Quellen dieser reaktiven Sauerstoffspezies eingesetzt werden. Weiterhin konnten bei den Anthracenendoperoxiden Zerfallsmechanismen aufgekl{\"a}rt und andere reaktive Sauerstoffspezies wie Wasserstoffperoxid oder Pers{\"a}uren nachgewiesen werden. Zu den Modifikationen der Aromaten geh{\"o}ren auch Glucosereste. Dadurch k{\"o}nnten sich die hier hergestellten Endoperoxide als vielversprechende Verbindungen in der Krebstherapie herausstellen, da Krebszellen deutlich st{\"a}rker als gesunde Zellen kohlenhydratreiche Verbindungen f{\"u}r ihren Stoffwechsel ben{\"o}tigen. Bei der Spaltung von Endoperoxiden mit Glucosesubstituenten werden ebenfalls reaktive Sauerstoffspezies frei, die so zum Zelltod f{\"u}hren k{\"o}nnten.}, language = {de} } @phdthesis{Farhy2019, author = {Farhy, Yael}, title = {Universals and particulars in morphology}, doi = {10.25932/publishup-47003}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470033}, school = {Universit{\"a}t Potsdam}, pages = {VI, 167}, year = {2019}, abstract = {For many years, psycholinguistic evidence has been predominantly based on findings from native speakers of Indo-European languages, primarily English, thus providing a rather limited perspective into the human language system. In recent years a growing body of experimental research has been devoted to broadening this picture, testing a wide range of speakers and languages, aiming to understand the factors that lead to variability in linguistic performance. The present dissertation investigates sources of variability within the morphological domain, examining how and to what extent morphological processes and representations are shaped by specific properties of languages and speakers. Firstly, the present work focuses on a less explored language, Hebrew, to investigate how the unique non-concatenative morphological structure of Hebrew, namely a non-linear combination of consonantal roots and vowel patterns to form lexical entries (L-M-D + CiCeC = limed 'teach'), affects morphological processes and representations in the Hebrew lexicon. Secondly, a less investigated population was tested: late learners of a second language. We directly compare native (L1) and non-native (L2) speakers, specifically highly proficient and immersed late learners of Hebrew. Throughout all publications, we have focused on the morphological phenomenon of inflectional classes (called binyanim; singular: binyan), comparing productive (class Piel, e.g., limed 'teach') and unproductive (class Paal, e.g., lamad 'learn') verbal inflectional classes.
By using this test case, two psycholinguistic aspects of morphology were examined: (i) how morphological structure affects online recognition of complex words, using masked priming (Publications I and II) and cross-modal priming (Publication III) techniques, and (ii) what type of cues are used when extending morpho-phonological patterns to novel complex forms, a process referred to as morphological generalization, using an elicited production task (Publication IV). The findings obtained in the four manuscripts, either published or under review, provide significant insights into the role of productivity in Hebrew morphological processing and generalization in L1 and L2 speakers. Firstly, the present L1 data revealed a close relationship between productivity of Hebrew verbal classes and recognition process, as revealed in both priming techniques. The consonantal root was accessed only in the productive class (Piel) but not the unproductive class (Paal). Another dissociation between the two classes was revealed in the cross-modal priming, yielding a semantic relatedness effect only for Paal but not Piel primes. These findings are taken to reflect that the Hebrew mental representations display a balance between stored undecomposable unstructured stems (Paal) and decomposed structured stems (Piel), in a similar manner to a typical dual-route architecture, showing that the Hebrew mental lexicon is less unique than previously claimed in psycholinguistic research. The results of the generalization study, however, indicate that there are still substantial differences between inflectional classes of Hebrew and other Indo-European classes, particularly in the type of information they rely on in generalization to novel forms. Hebrew binyan generalization relies more on cues of argument structure and less on phonological cues. Secondly, clear L1/L2 differences were observed in the sensitivity to abstract morphological and morpho-syntactic information during complex word recognition and generalization. While L1 Hebrew speakers were sensitive to the binyan information during recognition, expressed by the contrast in root priming, L2 speakers showed similar root priming effects for both classes, but only when the primes were presented in an infinitive form. A root priming effect was not obtained for primes in a finite form. These patterns are interpreted as evidence for a reduced sensitivity of L2 speakers to morphological information, such as information about inflectional classes, and evidence for processing costs in recognition of forms carrying complex morpho-syntactic information. Reduced reliance on structural information cues was found in production of novel verbal forms, when the L2 group displayed a weaker effect of argument structure for Piel responses, in comparison to the L1 group. Given the L2 results, we suggest that morphological and morphosyntactic information remains challenging for late bilinguals, even at high proficiency levels.}, language = {en} } @phdthesis{Teckentrup2019, author = {Teckentrup, Lisa}, title = {Understanding predator-prey interactions}, doi = {10.25932/publishup-43162}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431624}, school = {Universit{\"a}t Potsdam}, pages = {ix, 133}, year = {2019}, abstract = {Predators can have numerical and behavioral effects on prey animals. While numerical effects are well explored, the impact of behavioral effects is unclear. 
Furthermore, behavioral effects are generally either analyzed with a focus on single individuals or with a focus on consequences for other trophic levels. As a result, the impact of fear at the level of prey communities is overlooked, despite potential consequences for conservation and nature management. In order to improve our understanding of predator-prey interactions, an assessment of the consequences of fear in shaping prey community structures is crucial. In this thesis, I evaluated how fear alters prey space use, community structure and composition, focusing on terrestrial mammals. By integrating landscapes of fear into an existing individual-based and spatially-explicit model, I simulated community assembly of prey animals via individual home range formation. The model comprises multiple hierarchical levels from individual home range behavior to patterns of prey community structure and composition. The mechanistic approach of the model allowed for the identification of the underlying mechanisms driving prey community responses under fear. My results show that fear modified prey space use and community patterns. Under fear, prey animals shifted their home ranges towards safer areas of the landscape. Furthermore, fear decreased the total biomass and the diversity of the prey community and reinforced shifts in community composition towards smaller animals. These effects could be mediated by an increasing availability of refuges in the landscape. Under landscape changes, such as habitat loss and fragmentation, fear intensified negative effects on prey communities. Prey communities in risky environments were subject to a non-proportional diversity loss of up to 30\% if fear was taken into account. Regarding habitat properties, I found that well-connected, large safe patches can reduce the negative consequences of habitat loss and fragmentation on prey communities. Including variation in risk perception between prey animals had consequences for prey space use. Animals with a high risk perception predominantly used safe areas of the landscape, while animals with a low risk perception preferred areas with a high food availability. On the community level, prey diversity was higher in heterogeneous landscapes of fear if individuals varied in their risk perception compared to scenarios in which all individuals had the same risk perception. Overall, my findings give a first, comprehensive assessment of the role of fear in shaping prey communities. The linkage between individual home range behavior and patterns at the community level allows for a mechanistic understanding of the underlying processes. My results underline the importance of the structure of the landscape of fear as a key driver of prey community responses, especially if the habitat is threatened by landscape changes. Furthermore, I show that individual landscapes of fear can improve our understanding of the consequences of trait variation on community structures.
Regarding conservation and nature management, my results support calls for modern conservation approaches that go beyond single species and address the protection of biotic interactions.}, language = {en} } @phdthesis{Schaefer2019, author = {Sch{\"a}fer, Merlin}, title = {Understanding and predicting global change impacts on migratory birds}, doi = {10.25932/publishup-43925}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439256}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 153}, year = {2019}, abstract = {This is a publication-based dissertation comprising three original research studies (one published, one submitted and one ready for submission; status March 2019). The dissertation introduces a generic computer model as a tool to investigate the behaviour and population dynamics of animals in cyclic environments. The model is further employed for analysing how migratory birds respond to various scenarios of altered food supply under global change. Here, ecological and evolutionary time-scales are considered, as well as the biological constraints and trade-offs the individual faces, which ultimately shape response dynamics at the population level. Further, the effect of fine-scale temporal patterns in resource supply is studied, which is challenging to achieve experimentally. My findings predict population declines, altered behavioural timing and negative carry-over effects arising in migratory birds under global change. They thus stress the need for intensified research on how ecological mechanisms are affected by global change and for effective conservation measures for migratory birds. The open-source modelling software created for this dissertation can now be used for other taxa and related research questions. Overall, this thesis improves our mechanistic understanding of the impacts of global change on migratory birds as one prerequisite to comprehend ongoing global biodiversity loss. The research results are discussed in a broader ecological and scientific context in a concluding synthesis chapter.}, language = {en} } @phdthesis{Willig2019, author = {Willig, Lisa}, title = {Ultrafast magneto-optical studies of remagnetisation dynamics in transition metals}, doi = {10.25932/publishup-44194}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441942}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 113, XVII}, year = {2019}, abstract = {Ultrafast magnetisation dynamics have been investigated intensely for two decades. The recovery process after demagnetisation, however, was rarely studied experimentally and discussed in detail. The focus of this work lies on the investigation of the magnetisation on long timescales after laser excitation. It combines two ultrafast time-resolved methods to study the relaxation of the magnetic and lattice system after excitation with a high-fluence ultrashort laser pulse. The magnetic system is investigated by time-resolved measurements of the magneto-optical Kerr effect. The experimental setup has been implemented in the scope of this work. The lattice dynamics were obtained with ultrafast X-ray diffraction. The combination of both techniques leads to a better understanding of the mechanisms involved in magnetisation recovery from a non-equilibrium condition. Three different groups of samples are investigated in this work: Thin Nickel layers capped with nonmagnetic materials, a continuous sample of the ordered L10 phase of Iron Platinum and a sample consisting of Iron Platinum nanoparticles embedded in a carbon matrix.
The study of the remagnetisation reveals a general trend for all of the samples: The remagnetisation process can be described by two time dependences. A first exponential recovery slows down with an increasing amount of energy absorbed in the system until an approximately linear time dependence is observed. This is followed by a second exponential recovery. In the case of low-fluence excitation, the first recovery is faster than the second. With increasing fluence the first recovery is slowed down and can be described as a linear function. If the pump-induced temperature increase in the sample is sufficiently high, a phase transition to a paramagnetic state is observed. In the remagnetisation process, the transition into the ferromagnetic state is characterised by a distinct transition between the linear and exponential recovery. From the combination of the transient lattice temperature Tp(t) obtained from ultrafast X-ray measurements and magnetisation M(t) gained from magneto-optical measurements we construct the transient magnetisation versus temperature relations M(Tp). If the lattice temperature remains below the Curie temperature, the remagnetisation curve M(Tp) is linear and stays below the equilibrium M(T) curve in the continuous transition metal layers. When the sample is heated above the phase transition, the remagnetisation converges towards the static temperature dependence. For the granular Iron Platinum sample the M(Tp) curves for different fluences coincide, i.e. the remagnetisation follows a similar path irrespective of the initial laser-induced temperature jump.}, language = {en} } @phdthesis{Lefebvre2019, author = {Lefebvre, Marie G.}, title = {Two stages of skarn formation - two tin enrichments}, doi = {10.25932/publishup-42717}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427178}, school = {Universit{\"a}t Potsdam}, pages = {87}, year = {2019}, abstract = {Skarn deposits are found on every continent and were formed at different times from the Precambrian to the Tertiary. Typically, the formation of a skarn is induced by a granitic intrusion in carbonate-rich sedimentary rocks. During contact metamorphism, fluids derived from the granite interact with the sedimentary host rocks, which results in the formation of calc-silicate minerals at the expense of carbonates. Those newly formed minerals generally develop in a zoned metamorphic aureole with garnet in the proximal and pyroxene in the distal zone. Ore elements contained in magmatic fluids are precipitated due to the change in fluid composition. The temperature decrease of the entire system, due to the cooling of magmatic fluids and the entering of meteoric water, allows retrogression of some prograde minerals. The H{\"a}mmerlein skarn deposit has a multi-stage history with a skarn formation during regional metamorphism and a retrogression of primary skarn minerals during the granitic intrusion. Tin was mobilized during both events. The 340 Ma old tin-bearing skarn minerals show that tin was present in the sediments before the granite intrusion, and that the first Sn enrichment occurred during the skarn formation by regional metamorphic fluids. In a second step at ca. 320 Ma, tin-bearing fluids were produced with the intrusion of the Eibenstock granite. Tin, which had been added by the granite and remobilized from skarn calc-silicates, precipitated as cassiterite. Compared to clay or marl, the skarn is enriched in Sn, W, In, Zn, and Cu.
These metals have been supplied during both regional metamorphism and granite emplacement. In addition, the isotopic and chemical data of the skarn samples show that the granite selectively added elements such as Sn, and that there was no visible granitic contribution to the sedimentary signature of the skarn. The example of H{\"a}mmerlein shows that it is possible to form a tin-rich skarn without an associated granite when tin has already been transported from tin-bearing sediments during regional metamorphism by aqueous metamorphic fluids. These skarns are not economically interesting if tin is only contained in the skarn minerals. Later alteration of the skarn (the heat and fluid source is not necessarily a granite), however, can lead to the formation of secondary cassiterite (SnO2), with which the skarn can become economically highly interesting.}, language = {en} } @phdthesis{Peter2019, author = {Peter, Franziska}, title = {Transition to synchrony in finite Kuramoto ensembles}, doi = {10.25932/publishup-42916}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429168}, school = {Universit{\"a}t Potsdam}, pages = {vi, 93}, year = {2019}, abstract = {Synchronisation - die Ann{\"a}herung der Rhythmen gekoppelter selbst oszillierender Systeme - ist ein faszinierendes dynamisches Ph{\"a}nomen, das in vielen biologischen, sozialen und technischen Systemen auftritt. Die vorliegende Arbeit befasst sich mit Synchronisation in endlichen Ensembles schwach gekoppelter selbst-erhaltender Oszillatoren mit unterschiedlichen nat{\"u}rlichen Frequenzen. Das Standardmodell f{\"u}r dieses kollektive Ph{\"a}nomen ist das Kuramoto-Modell - unter anderem aufgrund seiner L{\"o}sbarkeit im thermodynamischen Limes unendlich vieler Oszillatoren. {\"A}hnlich einem thermodynamischen Phasen{\"u}bergang zeigt im Fall unendlich vieler Oszillatoren ein Ordnungsparameter den {\"U}bergang von Inkoh{\"a}renz zu einem partiell synchronen Zustand an, in dem ein Teil der Oszillatoren mit einer gemeinsamen Frequenz rotiert. Im endlichen Fall treten Fluktuationen auf. In dieser Arbeit betrachten wir den bisher wenig beachteten Fall von bis zu wenigen hundert Oszillatoren, unter denen vergleichbar starke Fluktuationen auftreten, bei denen aber ein Vergleich zu Frequenzverteilungen im unendlichen Fall m{\"o}glich ist. Zun{\"a}chst definieren wir einen alternativen Ordnungsparameter zur Feststellung einer kollektiven Mode im endlichen Kuramoto-Modell. Dann pr{\"u}fen wir die Abh{\"a}ngigkeit des Synchronisationsgrades und der mittleren Rotationsfrequenz der kollektiven Mode von Eigenschaften der nat{\"u}rlichen Frequenzverteilung f{\"u}r verschiedene Kopplungsst{\"a}rken. Wir stellen dabei zun{\"a}chst numerisch fest, dass der Synchronisationsgrad stark von der Form der Verteilung (gemessen durch die Kurtosis) und die Rotationsfrequenz der kollektiven Mode stark von der Asymmetrie der Verteilung (gemessen durch die Schiefe) der nat{\"u}rlichen Frequenzen abh{\"a}ngt. Beides k{\"o}nnen wir im thermodynamischen Limes analytisch verifizieren. Mit diesen Ergebnissen k{\"o}nnen wir Erkenntnisse anderer Autoren besser verstehen und verallgemeinern. Etwas abseits des roten Fadens dieser Arbeit finden wir außerdem einen analytischen Ausdruck f{\"u}r die Volumenkontraktion im Phasenraum. Der zweite Teil der Arbeit konzentriert sich auf den ordnenden Effekt von Fluktuationen, die durch die Endlichkeit des Systems zustande kommen.
Im unendlichen Modell sind die Oszillatoren eindeutig in koh{\"a}rent und inkoh{\"a}rent und damit in geordnet und ungeordnet getrennt. Im endlichen Fall k{\"o}nnen die auftretenden Fluktuationen zus{\"a}tzliche Ordnung unter den asynchronen Oszillatoren erzeugen. Das grundlegende Prinzip, die rauschinduzierte Synchronisation, ist aus einer Reihe von Publikationen bekannt. Unter den gekoppelten Oszillatoren n{\"a}hern sich die Phasen aufgrund der Fluktuationen des Ordnungsparameters an, wie wir einerseits direkt numerisch zeigen und andererseits mit einem Synchronisationsmaß aus der gerichteten Statistik zwischen Paaren passiver Oszillatoren nachweisen. Wir bestimmen die Abh{\"a}ngigkeit dieses Synchronisationsmaßes vom Verh{\"a}ltnis von paarweiser nat{\"u}rlicher Frequenzdifferenz zur Varianz der Fluktuationen. Dabei finden wir eine gute {\"U}bereinstimmung mit einem einfachen analytischen Modell, in welchem wir die deterministischen Fluktuationen des Ordnungsparameters durch weißes Rauschen ersetzen.}, language = {en} } @phdthesis{Gawron2019, author = {Gawron, Marian}, title = {Towards automated advanced vulnerability analysis}, doi = {10.25932/publishup-42635}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426352}, school = {Universit{\"a}t Potsdam}, pages = {149}, year = {2019}, abstract = {The identification of vulnerabilities in IT infrastructures is a crucial problem in enhancing the security, because many incidents resulted from already known vulnerabilities, which could have been resolved. Thus, the initial identification of vulnerabilities has to be used to directly resolve the related weaknesses and mitigate attack possibilities. The nature of vulnerability information requires a collection and normalization of the information prior to any utilization, because the information is widely distributed in different sources with their unique formats. Therefore, the comprehensive vulnerability model was defined and different sources have been integrated into one database. Furthermore, different analytic approaches have been designed and implemented into the HPI-VDB, which directly benefit from the comprehensive vulnerability model and especially from the logical preconditions and postconditions. Firstly, different approaches to detect vulnerabilities in both IT systems of average users and corporate networks of large companies are presented. Therefore, the approaches mainly focus on the identification of all installed applications, since it is a fundamental step in the detection. This detection is realized differently depending on the target use-case. Thus, the experience of the user, as well as the layout and possibilities of the target infrastructure are considered. Furthermore, a passive lightweight detection approach was invented that utilizes existing information on corporate networks to identify applications. In addition, two different approaches to represent the results using attack graphs are illustrated in the comparison between traditional attack graphs and a simplistic graph version, which was integrated into the database as well. The implementation of those use-cases for vulnerability information especially considers the usability. Beside the analytic approaches, the high data quality of the vulnerability information had to be achieved and guaranteed. The different problems of receiving incomplete or unreliable information for the vulnerabilities are addressed with different correction mechanisms. 
The corrections can be carried out with correlation or lookup mechanisms in reliable sources or identifier dictionaries. Furthermore, a machine learning based verification procedure was presented that allows an automatic derivation of important characteristics from the textual description of the vulnerabilities.}, language = {en} } @phdthesis{Bieri2019, author = {Bieri, Pascal}, title = {Topmanager im Kreuzfeuer ihrer Dilemmata}, doi = {10.25932/publishup-44106}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441062}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 301}, year = {2019}, abstract = {Die vorliegende Forschungsarbeit untersucht den Umgang mit Dilemmata von Topmanagern. Dilemmata sind ein allt{\"a}gliches Gesch{\"a}ft im Topmanagement. Die entsprechenden Akteure sind daher immer wieder mit diesen konfrontiert und mit ihnen umzugehen, geh{\"o}rt gewissermaßen zu ihrer Berufsbeschreibung. Hinzu kommen Dilemmata im nicht direkt gesch{\"a}ftlichen Bereich, wie zum Beispiel jene zwischen Familien- und Arbeitszeit. Doch stellt dieses Feld ein kaum untersuchtes Forschungsgebiet dar. W{\"a}hrend Dilemmata in anderen Bereichen eine zunehmende Aufmerksamkeit erfuhren, wurden deren Besonderheiten im Topmanagement genauso wenig differenziert betrachtet wie zugeh{\"o}rige Umgangsweisen. Theorie und Praxis stellen bez{\"u}glich Dilemmata von Topmanagern vor allem einen Gegensatz dar, beziehungsweise fehlt es an einer theoretischen Fundierung der Empirie. Diesem Umstand wird mittels dieser Studie begegnet. Auf der Grundlage einer differenzierten und breiten Erfassung von Theorien zu Dilemmata, so diese auch noch nicht auf Topmanager bezogen wurden, und einer empirischen Erhebung, die im Mittelpunkt dieser Arbeit stehen, soll das Feld Dilemmata von Topmanagern der Forschung ge{\"o}ffnet werden. Empirische Grundlage sind vor allem narrative Interviews mit Topmanagern {\"u}ber ihre Dilemmata-Wahrnehmung, ausgemachte Ursachen, Umgangsweisen und Resultate. Dies erlaubt es, Topmanagertypen sowie Dilemmata-Arten, mit denen sie konfrontiert sind oder waren, analytisch herauszuarbeiten. Angesichts der Praxisrelevanz von Dilemmata von Topmanagern wird jedoch nicht nur ein theoretisches Modell zu dieser Thematik erarbeitet, es werden auch Reflexionen auf die Praxis in Form von Handlungsempfehlungen vorgenommen. Schließlich gilt es, die allgemeine Theorie zu Dilemmata, ohne konkreten Bezug zu Topmanagern, mit den theoretischen Erkenntnissen dieser Studie auf empirischer Basis zu kontrastieren. Dabei wird im Rahmen der empirischen Erfassung und Auswertung dem Ansatz der Grounded-Theory-Methodologie gefolgt.}, language = {de} } @phdthesis{Angwenyi2019, author = {Angwenyi, David}, title = {Time-continuous state and parameter estimation with application to hyperbolic SPDEs}, doi = {10.25932/publishup-43654}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436542}, school = {Universit{\"a}t Potsdam}, pages = {xi, 101}, year = {2019}, abstract = {Data assimilation has been an active area of research in recent years, owing to its wide utility. At the core of data assimilation are filtering, prediction, and smoothing procedures. Filtering entails incorporation of measurements' information into the model to gain more insight into a given state governed by a noisy state space model. Most natural laws are governed by time-continuous nonlinear models. For the most part, the knowledge available about a model is incomplete; and hence uncertainties are approximated by means of probabilities. 
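A minimal sketch of the filtering idea described above, namely incorporating noisy measurements into a noisy state-space model, using a bootstrap particle filter on a toy scalar model; the model, noise levels and parameters are illustrative assumptions and not the setup used in the thesis:

import numpy as np

rng = np.random.default_rng(0)

def propagate(x):
    # hypothetical state transition: mild nonlinear drift plus process noise
    return 0.9 * x + 0.1 * np.sin(x) + 0.3 * rng.standard_normal(x.shape)

def likelihood(y, x, r=0.5):
    # Gaussian measurement model y = x + noise with standard deviation r
    return np.exp(-0.5 * (y - x) ** 2 / r ** 2)

def bootstrap_filter(observations, n_particles=500):
    particles = rng.standard_normal(n_particles)
    estimates = []
    for y in observations:
        particles = propagate(particles)                        # prediction step
        weights = likelihood(y, particles)                      # weight by data fit
        weights /= weights.sum()
        idx = rng.choice(n_particles, n_particles, p=weights)   # resampling
        particles = particles[idx]
        estimates.append(particles.mean())                      # filtered mean
    return np.array(estimates)

truth = np.cumsum(0.2 * rng.standard_normal(50))                # hidden trajectory
observations = truth + 0.5 * rng.standard_normal(50)            # noisy measurements
print(bootstrap_filter(observations)[:5])

The resampling step in this sketch is precisely what the feedback particle filters discussed in this abstract are designed to avoid.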
Time-continuous filtering, therefore, holds promise for wider usefulness, for it offers a means of combining noisy measurements with an imperfect model to provide more insight into a given state. The solution to the time-continuous nonlinear Gaussian filtering problem is provided by the Kushner-Stratonovich equation. Unfortunately, the Kushner-Stratonovich equation lacks a closed-form solution. Moreover, the numerical approximations based on Taylor expansion above third order are fraught with computational complications. For this reason, numerical methods based on Monte Carlo sampling have been resorted to. Chief among these methods are sequential Monte Carlo methods (or particle filters), for they allow for online assimilation of data. Particle filters are not without challenges: they suffer from particle degeneracy, sample impoverishment, and computational costs arising from resampling. The goals of this thesis are to: i) review the derivation of the Kushner-Stratonovich equation from first principles and its extant numerical approximation methods, ii) study feedback particle filters as a way of avoiding resampling in particle filters, iii) study joint state and parameter estimation in time-continuous settings, and iv) apply the notions studied to linear hyperbolic stochastic differential equations. The interconnection between It{\^o} integrals and stochastic partial differential equations and those of Stratonovich is introduced in anticipation of feedback particle filters. With these ideas and motivated by the variants of ensemble Kalman-Bucy filters founded on the structure of the innovation process, a feedback particle filter with randomly perturbed innovation is proposed. Moreover, feedback particle filters based on coupling of prediction and analysis measures are proposed. They register a better performance than the bootstrap particle filter at lower ensemble sizes. We study joint state and parameter estimation, both by means of extended state spaces and by use of dual filters. Feedback particle filters seem to perform well in both cases. Finally, we apply joint state and parameter estimation to the advection and wave equations, whose velocity is spatially varying. Two methods are employed: Metropolis-Hastings with filter likelihood, and a dual filter comprising a Kalman-Bucy filter and an ensemble Kalman-Bucy filter. The former performs better than the latter.}, language = {en} } @phdthesis{Wozny2019, author = {Wozny, Florian}, title = {Three empirical essays in health economics}, doi = {10.25932/publishup-46991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469910}, school = {Universit{\"a}t Potsdam}, pages = {200}, year = {2019}, abstract = {Modern health care systems are characterized by pronounced prevention and cost-optimized treatments. This dissertation offers novel empirical evidence on how useful such measures can be. The first chapter analyzes how radiation, a main pollutant in health care, can negatively affect cognitive health. The second chapter focuses on the effect of Low Emission Zones on public health, as air quality is the major external source of health problems. Both chapters point out the potential for preventive measures. Finally, chapter three studies how changes in treatment prices affect the reallocation of hospital resources. In the following, I briefly summarize each chapter and discuss implications for health care systems as well as other policy areas.
Based on the National Educational Panel Study that is linked to data on radiation, chapter one shows that radiation can have negative long-term effects on cognitive skills, even at subclinical doses. Exploiting arguably exogenous variation in soil contamination in Germany due to the Chernobyl disaster in 1986, the findings show that people exposed to higher radiation perform significantly worse in cognitive tests 25 years later. Identification is ensured by abnormal rainfall within a critical period of ten days. The results show that the effect is stronger among older cohorts than younger cohorts, which is consistent with radiation accelerating cognitive decline as people get older. On average, a one-standard-deviation increase in the initial level of CS137 (around 30 chest x-rays) is associated with a decrease in cognitive skills by 4.1 percent of a standard deviation (around 0.05 school years). Chapter one shows that sub-clinical levels of radiation can have negative consequences even after early childhood. This is of particular importance because most of the literature focuses on exposure very early in life, often during pregnancy. However, the population exposed after birth is over 100 times larger. These results point to substantial external human capital costs of radiation which can be reduced by choices of medical procedures. There is a large potential for reductions because about one-third of all CT scans are assumed not to be medically justified (Brenner and Hall, 2007). If people receive unnecessary CT scans because of economic incentives, this chapter points to additional external costs of health care policies. Furthermore, the results can inform the cost-benefit trade-off for medically indicated procedures. Chapter two provides evidence about the effectiveness of Low Emission Zones. Low Emission Zones are typically justified by improvements in population health. However, there is little evidence about the potential health benefits from policy interventions aiming at improving air quality in inner cities. The chapter asks how the coverage of Low Emission Zones affects air pollution and hospitalization, by exploiting variation in the roll-out of Low Emission Zones in Germany. It combines information on the geographic coverage of Low Emission Zones with rich panel data on the universe of German hospitals over the period from 2006 to 2016, with precise information on hospital locations and the annual frequency of detailed diagnoses. In order to establish that our estimates of Low Emission Zones' health impacts can indeed be attributed to improvements in local air quality, we use data from Germany's official air pollution monitoring system, assign monitor locations to Low Emission Zones, and test whether measures of air pollution are affected by the coverage of a Low Emission Zone. Results in chapter two confirm former results showing that the introduction of Low Emission Zones improved air quality significantly by reducing NO2 and PM10 concentrations. Furthermore, the chapter shows that hospitals whose catchment areas are covered by a Low Emission Zone diagnose significantly fewer air-pollution-related diseases; in particular, the incidence of chronic diseases of the circulatory and the respiratory system is reduced. The effect is stronger before 2012, which is consistent with a general improvement in the vehicle fleet's emission standards.
Depending on the disease, a one-standard-deviation increase in the share of a hospital's catchment area covered by a Low Emission Zone reduces the yearly number of diagnoses by up to 5 percent. These findings have strong implications for policy makers. In 2015, overall costs for health care in Germany were around 340 billion euros, of which 46 billion euros were spent on diseases of the circulatory system, making them the most expensive type of disease, caused by 2.9 million cases (Statistisches Bundesamt, 2017b). Hence, reductions in the incidence of diseases of the circulatory system may directly reduce society's health care costs. Whereas chapters one and two study the demand side in health care markets and thus the preventive potential, chapter three analyzes the supply side. By exploiting the same hospital panel data set as in chapter two, chapter three studies the effect of treatment price shocks on the reallocation of hospital resources in Germany. Starting in 2005, the implementation of the German-DRG-System led to general idiosyncratic treatment price shocks for individual hospitals. Thus far there is little evidence of the impact of general price shocks on the reallocation of hospital resources. Additionally, I add to the existing literature by showing that price shocks can have persistent effects on hospital resources even when these shocks vanish. However, simple OLS regressions would underestimate the true effect, due to endogenous treatment price shocks. I implement a novel instrumental variable strategy that exploits the exogenous variation in the number of days of snow in hospital catchment areas. A peculiarity of the reform allowed variation in days of snow to have a persistent impact on treatment prices. I find that treatment price increases lead to increases in input factors such as nursing staff, physicians and the range of treatments offered but to decreases in the treatment volume. This indicates supplier-induced demand. Furthermore, the probability of hospital mergers and privatization decreases. Structural differences in pre-treatment characteristics between hospitals enhance these effects. For instance, private and larger hospitals are more affected. IV estimates reveal that OLS results are biased towards zero in almost all dimensions because structural hospital differences are correlated with the reallocation of hospital resources. These results are important for several reasons. The G-DRG-Reform led to a persistent polarization of hospital resources, as some hospitals were exposed to treatment price increases, while others experienced reductions. If hospitals increase the treatment volume as a response to price reductions by offering unnecessary therapies, this has a negative impact on population wellbeing and public spending. However, results show a decrease in the range of treatments if prices decrease. Hospitals might specialize more, thus attracting more patients. From a policy perspective it is important to evaluate whether such changes in the range of treatments jeopardize an adequate nationwide provision of treatments. Furthermore, the results show a decrease in the number of nurses and physicians if prices decrease. This could partly explain the nursing crisis in German hospitals. However, since hospitals specialize more they might be able to realize efficiency gains that justify reductions in input factors without losses in quality. Further research is necessary to provide evidence for the impact of the G-DRG-Reform on health care quality.
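A compact sketch of the two-stage least squares logic behind such an instrumental variable strategy, run on simulated data; the variable names (snow days instrumenting treatment prices, nursing staff as the outcome) are hypothetical stand-ins and not the chapter's actual data or specification:

import numpy as np

rng = np.random.default_rng(1)
n = 1000

snow_days = rng.poisson(10, n).astype(float)                      # instrument Z
confounder = rng.standard_normal(n)                               # unobserved hospital quality
price = 0.5 * snow_days + confounder + rng.standard_normal(n)     # endogenous treatment price X
staff = 2.0 * price - 1.5 * confounder + rng.standard_normal(n)   # outcome Y, true effect = 2.0

def ols(y, x):
    # least-squares fit of y on a constant and x, returns [intercept, slope]
    X = np.column_stack([np.ones(len(x)), x])
    return np.linalg.lstsq(X, y, rcond=None)[0]

print("OLS slope (biased by the confounder):", ols(staff, price)[1])

# stage 1: project the endogenous regressor on the instrument
price_hat = np.column_stack([np.ones(n), snow_days]) @ ols(price, snow_days)
# stage 2: regress the outcome on the fitted values
print("2SLS slope (close to the true 2.0):  ", ols(staff, price_hat)[1])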
Another important aspect is the change in organizational structure. Many public hospitals have been privatized or merged. The findings show that this is at least partly driven by the G-DRG-Reform. This can again lead to a lack of services offered in some regions if merged hospitals specialize more or if hospitals are taken over by ecclesiastical organizations that do not provide all treatments due to moral conviction. Overall, this dissertation reveals large potential for preventive health care measures and helps to explain reallocation processes in the hospital sector if treatment prices change. Furthermore, its findings have potentially relevant implications for other areas of public policy. Chapter one identifies an effect of low-dose radiation on cognitive health. As mankind is searching for new energy sources, nuclear power is becoming popular again. However, results of chapter one point to substantial costs of nuclear energy which have not yet been accounted for. Chapter two finds strong evidence that air quality improvements by Low Emission Zones translate into health improvements, even at relatively low levels of air pollution. These findings may, for instance, be of relevance for the design of further policies targeted at air pollution, such as diesel bans. As pointed out in chapter three, the implementation of DRG-Systems may have unintended side-effects on the reallocation of hospital resources. This may also apply to other providers in the health care sector such as resident doctors.}, language = {en} } @phdthesis{Behm2019, author = {Behm, Laura Vera Johanna}, title = {Thermoresponsive Zellkultursubstrate f{\"u}r zeitlich-r{\"a}umlich gesteuertes Auswachsen neuronaler Zellen}, doi = {10.25932/publishup-43619}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436196}, school = {Universit{\"a}t Potsdam}, pages = {VII, 105}, year = {2019}, abstract = {Ein wichtiges Ziel der Neurowissenschaften ist das Verst{\"a}ndnis der komplexen und zugleich faszinierenden, hochgeordneten Vernetzung der Neurone im Gehirn, welche neuronalen Prozessen, wie zum Beispiel dem Wahrnehmen oder Lernen wie auch Neuropathologien zu Grunde liegt. F{\"u}r verbesserte neuronale Zellkulturmodelle zur detaillierten Untersuchung dieser Prozesse ist daher die Rekonstruktion von geordneten neuronalen Verbindungen dringend erforderlich. Mit Oberfl{\"a}chenstrukturen aus zellattraktiven und zellabweisenden Beschichtungen k{\"o}nnen neuronale Zellen und ihre Neuriten in vitro strukturiert werden. Zur Kontrolle der neuronalen Verbindungsrichtung muss das Auswachsen der Axone zu benachbarten Zellen dynamisch gesteuert werden, zum Beispiel {\"u}ber eine ver{\"a}nderliche Zug{\"a}nglichkeit der Oberfl{\"a}che. In dieser Arbeit wurde untersucht, ob mit thermoresponsiven Polymeren (TRP) beschichtete Zellkultursubstrate f{\"u}r eine dynamische Kontrolle des Auswachsens neuronaler Zellen geeignet sind. TRP k{\"o}nnen {\"u}ber die Temperatur von einem zellabweisenden in einen zellattraktiven Zustand geschaltet werden, womit die Zug{\"a}nglichkeit der Oberfl{\"a}che f{\"u}r Zellen dynamisch gesteuert werden kann. Die TRP-Beschichtung wurde mikrostrukturiert, um einzelne oder wenige neuronale Zellen zun{\"a}chst auf der Oberfl{\"a}che anzuordnen und das Auswachsen der Zellen und Neuriten {\"u}ber definierte TRP-Bereiche in Abh{\"a}ngigkeit der Temperatur zeitlich und r{\"a}umlich zu kontrollieren. Das Protokoll wurde mit der neuronalen Zelllinie SH-SY5Y etabliert und auf humane induzierte Neurone {\"u}bertragen.
Die Anordnung der Zellen konnte bei Kultivierung im zellabweisenden Zustand des TRPs f{\"u}r bis zu 7 Tage aufrecht erhalten werden. Durch Schalten des TRPs in den zellattraktiven Zustand konnte das Auswachsen der Neuriten und Zellen zeitlich und r{\"a}umlich induziert werden. Immunozytochemische F{\"a}rbungen und Patch-Clamp-Ableitungen der Neurone demonstrierten die einfache Anwendbarkeit und Zellkompatibilit{\"a}t der TRP-Substrate. Eine pr{\"a}zisere r{\"a}umliche Kontrolle des Auswachsens der Zellen sollte durch lokales Schalten der TRP-Beschichtung erreicht werden. Daf{\"u}r wurden Mikroheizchips mit Mikroelektroden zur lokalen Jouleschen Erw{\"a}rmung der Substratoberfl{\"a}che entwickelt. Zur Evaluierung der generierten Temperaturprofile wurde eine Temperaturmessmethode entwickelt und die erhobenen Messwerte mit numerisch simulierten Werten abgeglichen. Die Temperaturmessmethode basiert auf einfach zu applizierenden Sol-Gel-Schichten, die den temperatursensitiven Fluoreszenzfarbstoff Rhodamin B enthalten. Sie erm{\"o}glicht oberfl{\"a}chennahe Temperaturmessungen in trockener und w{\"a}ssriger Umgebung mit hoher Orts- und Temperaturaufl{\"o}sung. Numerische Simulationen der Temperaturprofile korrelierten gut mit den experimentellen Daten. Auf dieser Basis konnten Geometrie und Material der Mikroelektroden hinsichtlich einer lokal stark begrenzten Temperierung optimiert werden. Ferner wurden f{\"u}r die Kultvierung der Zellen auf den Mikroheizchips eine Zellkulturkammer und Kontaktboard f{\"u}r die elektrische Kontaktierung der Mikroelektroden geschaffen. Die vorgestellten Ergebnisse demonstrieren erstmalig das enorme Potential thermoresponsiver Zellkultursubstrate f{\"u}r die zeitlich und r{\"a}umlich gesteuerte Formation geordneter neuronaler Verbindungen in vitro. Zuk{\"u}nftig k{\"o}nnte dies detaillierte Studien zur neuronalen Informationsverarbeitung oder zu Neuropathologien an relevanten, humanen Zellmodellen erm{\"o}glichen.}, language = {de} } @phdthesis{Krejca2019, author = {Krejca, Martin Stefan}, title = {Theoretical analyses of univariate estimation-of-distribution algorithms}, doi = {10.25932/publishup-43487}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434870}, school = {Universit{\"a}t Potsdam}, pages = {xii, 243}, year = {2019}, abstract = {Optimization is a core part of technological advancement and is usually heavily aided by computers. However, since many optimization problems are hard, it is unrealistic to expect an optimal solution within reasonable time. Hence, heuristics are employed, that is, computer programs that try to produce solutions of high quality quickly. One special class are estimation-of-distribution algorithms (EDAs), which are characterized by maintaining a probabilistic model over the problem domain, which they evolve over time. In an iterative fashion, an EDA uses its model in order to generate a set of solutions, which it then uses to refine the model such that the probability of producing good solutions is increased. In this thesis, we theoretically analyze the class of univariate EDAs over the Boolean domain, that is, over the space of all length-n bit strings. In this setting, the probabilistic model of a univariate EDA consists of an n-dimensional probability vector where each component denotes the probability to sample a 1 for that position in order to generate a bit string. My contribution follows two main directions: first, we analyze general inherent properties of univariate EDAs. 
Second, we determine the expected run times of specific EDAs on benchmark functions from theory. In the first part, we characterize when EDAs are unbiased with respect to the problem encoding. We then consider a setting where all solutions look equally good to an EDA, and we show that the probabilistic model of an EDA quickly evolves into an incorrect model if it is always updated such that it does not change in expectation. In the second part, we first show that the algorithms cGA and MMAS-fp are able to efficiently optimize a noisy version of the classical benchmark function OneMax. We perturb the function by adding Gaussian noise with a variance of σ², and we prove that the algorithms are able to generate the true optimum in a time polynomial in σ² and the problem size n. For the MMAS-fp, we generalize this result to linear functions. Further, we prove a run time of Ω(n log(n)) for the algorithm UMDA on (unnoisy) OneMax. Last, we introduce a new algorithm that is able to optimize the benchmark functions OneMax and LeadingOnes both in O(n log(n)), which is a novelty for heuristics in the domain we consider.}, language = {en} } @phdthesis{StutterGarcia2019, author = {Stutter Garcia, Ana}, title = {The use of grammatical knowledge in an additional language}, doi = {10.25932/publishup-46932}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469326}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 340}, year = {2019}, abstract = {This thesis investigates whether multilingual speakers' use of grammatical constraints in an additional language (La) is affected by the native (L1) and non-native grammars (L2) of their linguistic repertoire. Previous studies have used untimed measures of grammatical performance to show that L1 and L2 grammars affect the initial stages of La acquisition. This thesis extends this work by examining whether speakers at intermediate levels of La proficiency, who demonstrate mature untimed/offline knowledge of the target La constraints, are differentially affected by their L1 and L2 knowledge when they comprehend sentences under processing pressure. With this purpose, several groups of La German speakers were tested on word order and agreement phenomena using online/timed measures of grammatical knowledge. Participants had mirror distributions of their prior languages and they were either L1English/L2Spanish speakers or L1Spanish/L2English speakers. Crucially, in half of the phenomena the target La constraint aligned with English but not with Spanish, while in the other half it aligned with Spanish but not with English. Results show that the L1 grammar plays a major role in the use of La constraints under processing pressure, as participants displayed increased sensitivity to La constraints when they aligned with their L1, and reduced sensitivity when they did not. Further, in specific phenomena in which the L2 and La constraints aligned, increased L2 proficiency resulted in an enhanced sensitivity to the La constraint. These findings suggest that both native and non-native grammars affect how speakers use La grammatical constraints under processing pressure. 
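Returning to the univariate EDAs characterized in the Krejca (2019) abstract above, the following is a minimal sketch of one of the algorithms analyzed there, the compact genetic algorithm (cGA), maximizing OneMax; the parameter choices below are illustrative and not those studied in the thesis:

import numpy as np

rng = np.random.default_rng(2)

def onemax(x):
    # fitness: number of 1-bits
    return int(x.sum())

def cga(n=50, K=100, max_iters=20000):
    p = np.full(n, 0.5)                         # probabilistic model: one frequency per bit
    for _ in range(max_iters):
        x = (rng.random(n) < p).astype(int)     # sample two offspring from the model
        y = (rng.random(n) < p).astype(int)
        if onemax(x) < onemax(y):
            x, y = y, x                         # x is now the winner
        p += (x - y) / K                        # shift frequencies towards the winner by 1/K
        p = np.clip(p, 1.0 / n, 1.0 - 1.0 / n)  # margins keep frequencies away from 0 and 1
        if onemax((p > 0.5).astype(int)) == n:
            break                               # the model's most likely string is the optimum
    return p

print("final frequencies of the first 10 bits:", np.round(cga()[:10], 2))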
However, L1 and L2 grammars influence participants' performance differently: while L1 constraints seem to be reliably recruited to cope with the processing demands of real-time La use, proficiency in an L2 can enhance sensitivity to La constraints only in specific circumstances, namely when L2 and La constraints align.}, language = {en} } @phdthesis{Meessen2019, author = {Meeßen, Christian}, title = {The thermal and rheological state of the Northern Argentinian foreland basins}, doi = {10.25932/publishup-43994}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439945}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 151}, year = {2019}, abstract = {The foreland of the Andes in South America is characterised by distinct along-strike changes in surface deformational styles. These styles are classified into two end-members, the thin-skinned and the thick-skinned style. The superficial expression of thin-skinned deformation is a succession of narrowly spaced hills and valleys that form laterally continuous ranges on the foreland-facing side of the orogen. Each of the hills is defined by a reverse fault that roots in a basal d{\´e}collement surface within the sedimentary cover, and acted as a thrusting ramp to stack the sedimentary pile. Thick-skinned deformation is morphologically characterised by spatially disparate, basement-cored mountain ranges. These mountain ranges are uplifted along reactivated high-angle crustal-scale discontinuities, such as suture zones between different tectonic terranes. Amongst the proposed causes for the observed variation are variations in the dip angle of the Nazca plate, variations in sediment thickness, lithospheric thickening, volcanism, and compositional differences. The proposed mechanisms are predominantly based on geological observations or numerical thermomechanical modelling, but there has been no attempt to understand the mechanisms from the perspective of data-integrative 3D modelling. The aim of this dissertation is therefore to understand how lithospheric structure controls the deformational behaviour. The integration of independent data into a consistent model of the lithosphere allows additional evidence to be obtained that helps to understand the causes of the different deformational styles. Northern Argentina encompasses the transition from the thin-skinned fold-and-thrust belt in Bolivia to the thick-skinned Sierras Pampeanas province, which makes this area a well-suited location for such a study. The general workflow followed in this study first involves data-constrained structural and density modelling in order to obtain a model of the study area. This model was then used to predict the steady-state thermal field, which in turn was used to assess the present-day rheological state of northern Argentina. The structural configuration of the lithosphere in northern Argentina was determined by means of data-integrative, 3D density modelling verified by Bouguer gravity. The model delineates the first-order density contrasts in the lithosphere in the uppermost 200 km, and discriminates bodies for the sediments, the crystalline crust, the lithospheric mantle and the subducting Nazca plate. To obtain the intra-crustal density structure, an automated inversion approach was developed and applied to a starting structural model that assumed a homogeneously dense crust. The resulting final structural model indicates that the crustal structure can be represented by an upper crust with a density of 2800 kg/m³, and a lower crust of 3100 kg/m³.
The Transbrazilian Lineament, which separates the Pampia terrane from the R{\´i}o de la Plata craton, is expressed as a zone of low average crustal densities. In an excursion, we demonstrate in another study that the gravity inversion method developed to obtain intra-crustal density structures is also applicable to obtaining density variations in the uppermost lithospheric mantle. Densities at such sub-crustal depths are difficult to constrain from seismic tomographic models due to smearing of crustal velocities. With the application to the uppermost lithospheric mantle in the north Atlantic, we demonstrate in Tan et al. (2018) that lateral density trends of at least 125\,km width are robustly recovered by the inversion method, thereby providing an important tool for the delineation of subcrustal density trends. Due to the genetic link between subduction, orogenesis and retroarc foreland basins, the question arises whether the steady-state assumption is valid in such a dynamic setting. To answer this question, I analysed (i) the impact of subduction on the conductive thermal field of the overlying continental plate, and (ii) the differences between the transient and steady-state thermal fields of a coupled geodynamic model. Both studies indicate that the assumption of a thermal steady-state is applicable in most parts of the study area. Within the orogenic wedge, where the assumption cannot be applied, I estimated the transient thermal field based on the results of the conducted analyses. Accordingly, the structural model that had been obtained in the first step could be used to obtain a 3D conductive steady-state thermal field. The rheological assessment based on this thermal field indicates that the lithosphere of the thin-skinned Subandean ranges is characterised by a relatively strong crust and a weak mantle. In contrast, the adjacent foreland basin consists of a fully coupled, very strong lithosphere. Thus, shortening in northern Argentina can only be accommodated within the weak lithosphere of the orogen and the Subandean ranges. The analysis suggests that the d{\´e}collements of the fold-and-thrust belt are the shallow continuation of shear zones that reside in the ductile sections of the orogenic crust. Furthermore, the localisation of the faults that provide strain transfer between the deeper ductile crust and the shallower d{\´e}collement is strongly influenced by crustal weak zones such as foliation. In contrast to the northern foreland, the lithosphere of the thick-skinned Sierras Pampeanas is fully coupled and characterised by a strong crust and mantle. The high overall strength prevents the generation of crustal-scale faults by tectonic stresses. Even inherited crustal-scale discontinuities, such as sutures, cannot sufficiently reduce the strength of the lithosphere in order to be reactivated. Therefore, magmatism, which had been identified as a precursor of basement uplift in the Sierras Pampeanas, is the key factor that leads to the broken foreland of this province. Due to thermal weakening, and potentially lubrication of the inherited discontinuities, the lithosphere is locally weakened such that tectonic stresses can uplift the basement blocks. This hypothesis explains both the spatially disparate character of the broken foreland and the observed temporal delay between volcanism and basement block uplift.
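The 3D conductive steady-state thermal field used here is commonly obtained by solving the stationary heat conduction equation, given below in its standard form; the boundary conditions and parameterisation used in the thesis are not reproduced:

\[ \nabla \cdot \bigl( \lambda(\mathbf{x})\, \nabla T(\mathbf{x}) \bigr) + H(\mathbf{x}) = 0, \]

with temperature \( T \), thermal conductivity \( \lambda \) and radiogenic heat production \( H \).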
This dissertation provides for the first time a data-driven 3D model that is consistent with geophysical data and geological observations, and that is able to causally link the thermo-rheological structure of the lithosphere to the observed variation of surface deformation styles in the retroarc foreland of northern Argentina.}, language = {en} } @phdthesis{Nasery2019, author = {Nasery, Mustafa}, title = {The success and failure of civil service reforms in Afghanistan}, doi = {10.25932/publishup-44473}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444738}, school = {Universit{\"a}t Potsdam}, pages = {viii, 258}, year = {2019}, abstract = {The Government will create a motivated, merit-based, performance-driven, and professional civil service that is resistant to temptations of corruption and which provides efficient, effective and transparent public services that do not force customers to pay bribes. — (GoIRA, 2006, p. 106) We were in a black hole! We had an empty glass and had nothing from our side to fill it with! Thus, we accepted anything anybody offered; that is how our glass was filled; that is how we reformed our civil service. — (Former Advisor to IARCSC, personal communication, August 2015) How and under what conditions were the post-Taleban Civil Service Reforms of Afghanistan initiated? What were the main components of the reforms? What were their objectives and to what extent were they achieved? Who were the leading domestic and foreign actors involved in the process? Finally, what specific factors influenced the success and failure of Afghanistan's Civil Service Reforms since 2002? Guided by such fundamental questions, this research studies the wicked process of reforming the Afghan civil service in an environment where a variety of contextual, programmatic, and external factors affected the design and implementation of reforms that were entirely funded and technically assisted by the international community. Focusing on the core components of reforms—recruitment, remuneration, and appraisal of civil servants—the qualitative study provides a detailed picture of the pre-reform civil service and its major human resources developments in the past. Following discussions on the content and purposes of the main reform programs, it will then analyze the extent of changes in policies and practices by examining the outputs and effects of these reforms. Moreover, the study defines the specific factors that led the reforms toward a situation where most of the intended objectives remain unachieved. In doing so, it explores and explains how an overwhelming influence of international actors with conflicting interests, large-scale corruption, political interference, networks of patronage, institutionalized nepotism, culturally accepted cronyism and widespread ethnic favoritism created a very complex environment and prevented the reforms from transforming Afghanistan's patrimonial civil service into a professional civil service driven by performance and merit.}, language = {en} } @phdthesis{Wolf2019, author = {Wolf, Mathias Johannes}, title = {The role of partial melting on trace element and isotope systematics of granitic melts}, doi = {10.25932/publishup-42370}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423702}, school = {Universit{\"a}t Potsdam}, pages = {iv, 129}, year = {2019}, abstract = {Partial melting is a first order process for the chemical differentiation of the crust (Vielzeuf et al., 1990).
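As an illustration of how the redistribution of a trace element between melt and residue during partial melting is commonly quantified, the standard modal batch melting relation is (a textbook relation, not necessarily the formulation used in this thesis):

\[ \frac{C_\mathrm{L}}{C_0} = \frac{1}{D + F\,(1 - D)}, \]

where \( C_\mathrm{L} \) is the concentration in the melt, \( C_0 \) the concentration in the source, \( F \) the melt fraction and \( D \) the bulk partition coefficient; strongly incompatible elements (\( D \ll 1 \)) are enriched in small melt fractions.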
Redistribution of chemical elements during melt generation crucially influences the composition of the lower and upper crust and provides a mechanism to concentrate and transport chemical elements that may also be of economic interest. Understanding of the diverse processes and their controlling factors is therefore not only of scientific interest but also of high economic importance to cover the demand for rare metals. The redistribution of major and trace elements during partial melting represents a central step for the understanding how granite-bound mineralization develops (Hedenquist and Lowenstern, 1994). The partial melt generation and mobilization of ore elements (e.g. Sn, W, Nb, Ta) into the melt depends on the composition of the sedimentary source and melting conditions. Distinct source rocks have different compositions reflecting their deposition and alteration histories. This specific chemical "memory" results in different mineral assemblages and melting reactions for different protolith compositions during prograde metamorphism (Brown and Fyfe, 1970; Thompson, 1982; Vielzeuf and Holloway, 1988). These factors do not only exert an important influence on the distribution of chemical elements during melt generation, they also influence the volume of melt that is produced, extraction of the melt from its source, and its ascent through the crust (Le Breton and Thompson, 1988). On a larger scale, protolith distribution and chemical alteration (weathering), prograde metamorphism with partial melting, melt extraction, and granite emplacement are ultimately depending on a (plate-)tectonic control (Romer and Kroner, 2016). Comprehension of the individual stages and their interaction is crucial in understanding how granite-related mineralization forms, thereby allowing estimation of the mineralization potential of certain areas. Partial melting also influences the isotope systematics of melt and restite. Radiogenic and stable isotopes of magmatic rocks are commonly used to trace back the source of intrusions or to quantify mixing of magmas from different sources with distinct isotopic signatures (DePaolo and Wasserburg, 1979; Lesher, 1990; Chappell, 1996). These applications are based on the fundamental requirement that the isotopic signature in the melt reflects that of the bulk source from which it is derived. Different minerals in a protolith may have isotopic compositions of radiogenic isotopes that deviate from their whole rock signature (Ayres and Harris, 1997; Knesel and Davidson, 2002). In particular, old minerals with a distinct parent-to-daughter (P/D) ratio are expected to have a specific radiogenic isotope signature. As the partial melting reaction only involves selective phases in a protolith, the isotopic signature of the melt reflects that of the minerals involved in the melting reaction and, therefore, should be different from the bulk source signature. Similar considerations hold true for stable isotopes.}, language = {en} } @phdthesis{Doering2019, author = {D{\"o}ring, Matthias}, title = {The public encounter}, doi = {10.25932/publishup-50227}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502276}, school = {Universit{\"a}t Potsdam}, pages = {xi, 115}, year = {2019}, abstract = {This thesis puts the citizen-state interaction at its center. Building on a comprehensive model incorporating various perspectives on this interaction, I derive selected research gaps. The three articles, comprising this thesis, tackle these gaps. 
A focal role is played by the citizens' administrative literacy, the relevant competences and knowledge necessary to successfully interact with public organizations. The first article elaborates on the different dimensions of administrative literacy and develops a survey instrument to assess these. The second study shows that public employees change their behavior according to the competences that citizens display during public encounters. They give preferential treatment to citizens who are well prepared and able to persuade them of their application's potential. Such citizens signal a higher success potential with regard to bureaucratic success criteria, which leads to the employees' cream-skimming behavior. The third article examines the dynamics of employees' communication strategies when recovering from a service failure. The study finds that different explanation strategies yield different effects on the client's frustration. While accepting responsibility and explaining the reasons for a failure alleviates frustration and anger, refusing responsibility has no effect or even reinforces the client's frustration. The results emphasize the different dynamics that characterize the nature of citizen-state interactions and how they establish their short- and long-term outcomes.}, language = {en} } @phdthesis{Trautwein2019, author = {Trautwein, Jutta}, title = {The Mental lexicon in acquisition}, doi = {10.25932/publishup-43431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434314}, school = {Universit{\"a}t Potsdam}, pages = {IV, 177}, year = {2019}, abstract = {The individual's mental lexicon comprises all known words as well as related information on semantics, orthography and phonology. Moreover, entries connect due to similarities in these language domains, building a large network structure. The access to lexical information is crucial for processing of words and sentences. Thus, a lack of information inhibits the retrieval and can cause language processing difficulties. Hence, the composition of the mental lexicon is essential for language skills and its assessment is a central topic of linguistic and educational research. In early childhood, measurement of the mental lexicon is uncomplicated, for example through parental questionnaires or the analysis of speech samples. However, with growing content the measurement becomes more challenging: with more and more words in the mental lexicon, the inclusion of all possible known words into a test or questionnaire becomes impossible. That is why there is a lack of methods to assess the mental lexicon for school children and adults. For the same reason, there are only few findings on the course of lexical development during the school years as well as its specific effect on other language skills. This dissertation is supposed to close this gap by pursuing two major goals: First, I wanted to develop a method to assess lexical features, namely lexicon size and lexical structure, for children of different age groups. Second, I aimed to describe the results of this method in terms of lexical development of size and structure. Findings were intended to help understand mechanisms of lexical acquisition and inform theories of vocabulary growth. The approach is based on the dictionary method, where a sample of words out of a dictionary is tested and results are projected onto the whole dictionary to determine an individual's lexicon size.
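A minimal sketch of the sampling and projection idea just described: virtual participants with known lexicon sizes are drawn from a word list, their expected score on a fixed test sample is simulated, and the resulting curve is inverted to map an observed test score to an estimated lexicon size. The word list, test length and size grid below are placeholders, not childLex or the thesis's test:

import numpy as np

rng = np.random.default_rng(3)

corpus = [f"word{i}" for i in range(20000)]                 # stand-in for the corpus
test_items = rng.choice(corpus, 60, replace=False)          # fixed vocabulary test sample

def expected_score(lexicon_size, n_sim=100):
    # simulate virtual participants who know a random subset of the corpus
    # (a real corpus would weight draws by word frequency rather than uniformly)
    hits = 0
    for _ in range(n_sim):
        lexicon = set(rng.choice(corpus, lexicon_size, replace=False))
        hits += sum(word in lexicon for word in test_items)
    return hits / (n_sim * len(test_items))

sizes = np.arange(1000, 20001, 1000)
curve = np.array([expected_score(int(s)) for s in sizes])   # score as a function of lexicon size

def estimate_size(observed_score):
    # invert the simulated performance curve by nearest match
    return int(sizes[np.abs(curve - observed_score).argmin()])

print(estimate_size(0.35))   # lexicon size implied by 35 percent correct answers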
In the present study, the childLex corpus, a written language corpus for children in German, served as the basis for lexicon size estimation. The corpus is assumed to comprise all words children attending primary school could know. Testing a sample of words out of the corpus enables projection of the results onto the whole corpus. For this purpose, a vocabulary test based on the corpus was developed. Afterwards, test performance of virtual participants was simulated by drawing different lexicon sizes from the corpus and checking whether the test items were included in the lexicon or not. This allowed determination of the relation between test performance and total lexicon size and thus could be transferred to a sample of real participants. Besides lexicon size, lexical content could be approximated with this approach and analyzed in terms of lexical structure. To pursue the presented aims and establish the sampling method, I conducted three consecutive studies. Study 1 includes the development of a vocabulary test based on the childLex corpus. The testing was based on the yes/no format and included three versions for different age groups. The validation grounded on the Rasch Model shows that it is a valid instrument to measure vocabulary for primary school children in German. In Study 2, I established the method to estimate lexicon sizes and present results on lexical development during primary school. Plausible results demonstrate that lexical growth follows a quadratic function, starting with about 6,000 words at the beginning of school and reaching about 73,000 words on average for young adults. Moreover, the study revealed large interindividual differences. Study 3 focused on the analysis of network structures and their development in the mental lexicon due to orthographic similarities. It demonstrates that networks possess small-world characteristics and decrease in interconnectivity with age. Taken together, this dissertation provides an innovative approach for the assessment and description of the development of the mental lexicon from primary school onwards. The studies provide recent results on lexical acquisition in different age groups that were missing before. They impressively show the importance of this period and display the existence of extensive interindividual differences in lexical development. One central aim of future research is to address the causes and prevention of these differences. In addition, the application of the method for further research (e.g. the adaptation for other target groups) and teaching purposes (e.g. adaptation of texts for different target groups) appears to be promising.}, language = {en} } @phdthesis{Herrmann2019, author = {Herrmann, Johannes}, title = {The mechanical behavior of shales}, doi = {10.25932/publishup-42968}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429683}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 156}, year = {2019}, abstract = {The thesis comprises three experimental studies, which were carried out to unravel the short- as well as the long-term mechanical properties of shale rocks. Short-term mechanical properties such as compressive strength and Young's modulus were taken from recorded stress-strain curves of constant strain rate tests. Long-term mechanical properties are represented by the time-dependent creep behavior of shales. This was obtained from constant stress experiments, where the test duration ranged from a couple of minutes up to two weeks.
A profound knowledge of the mechanical behavior of shales is crucial to reliably estimate the potential of a shale reservoir for an economical and sustainable extraction of hydrocarbons (HC). In addition, healing of clay-rich forming cap rocks involving creep and compaction is important for underground storage of carbon dioxide and nuclear waste. Chapter 1 introduces general aspects of the research topic at hand and highlights the motivation for conducting this study. At present, a shift from energy recovered from conventional resources e.g., coal towards energy provided by renewable resources such as wind or water is a big challenge. Gas recovered from unconventional reservoirs (shale plays) is considered a potential bridge technology. In Chapter 2, short-term mechanical properties of two European mature shale rocks are presented, which were determined from constant strain rate experiments performed at ambient and in situ deformation conditions (confining pressure, pc ≤ 100 MPa, temperature, T ≤ 125 °C, representing pc, T - conditions at < 4 km depth) using a Paterson- type gas deformation apparatus. The investigated shales were mainly from drill core material of Posidonia (Germany) shale and weathered material of Bowland (United Kingdom) shale. The results are compared with mechanical properties of North American shales. Triaxial compression tests performed perpendicular to bedding revealed semibrittle deformation behavior of Posidonia shale with pronounced inelastic deformation. This is in contrast to Bowland shale samples that deformed brittle and displayed predominantly elastic deformation. The static Young's modulus, E, and triaxial compressive strength, σTCS, determined from recorded stress-strain curves strongly depended on the applied confining pressure and sample composition, whereas the influence of temperature and strain rate on E and σTCS was minor. Shales with larger amounts of weak minerals (clay, mica, total organic carbon) yielded decreasing E and σTCS. This may be related to a shift from deformation supported by a load-bearing framework of hard phases (e.g., quartz) towards deformation of interconnected weak minerals, particularly for higher fractions of about 25 - 30 vol\% weak phases. Comparing mechanical properties determined at reservoir conditions with mechanical data applying effective medium theories revealed that E and σTCS of Posidonia and Bowland shale are close to the lower (Reuss) bound. Brittleness B is often quoted as a measure indicating the response of a shale formation to stimulation and economic production. The brittleness, B, of Posidonia and Bowland shale, estimated from E, is in good agreement with the experimental results. This correlation may be useful to predict B from sonic logs, from which the (dynamic) Young's modulus can be retrieved. Chapter 3 presents a study of the long-term creep properties of an immature Posidonia shale. Constant stress experiments (σ = const.) were performed at elevated confining pressures (pc = 50 - 200 MPa) and temperatures (T = 50 - 200 °C) to simulate reservoir pc, T - conditions. The Posidonia shale samples were acquired from a quarry in South Germany. At stresses below ≈ 84 \% compressive strength of Posidonia shale, at high temperature and low confining pressure, samples showed pronounced transient (primary) creep with high deformation rates in the semibrittle regime. Sample deformation was mainly accommodated by creep of weak sample constituents and pore space reduction. 
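For illustration, transient (primary) creep of the kind described above is often captured by an empirical strain-time power law of the generic form below; the actual relation and parameters fitted in the thesis are not reproduced here:

\[ \varepsilon(t) = \varepsilon_0 + A\,t^{m}, \qquad 0 < m < 1, \]

so that the creep strain rate \( \dot{\varepsilon} = A\,m\,t^{m-1} \) decays with time; in a relation of the kind formulated in this work, the prefactor \( A \) additionally depends on the applied stress \( \sigma \), the confining pressure \( p_c \) and the temperature \( T \).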
An empirical power law relation between strain and time, which also accounts for the influence of pc, T and σ on creep strain was formulated to describe the primary creep phase. Extrapolation of the results to a creep period of several years, which is the typical time interval for a large production decline, suggest that fracture closure is unlikely at low stresses. At high stresses as expected for example at the contact between the fracture surfaces and proppants added during stimulation measures, subcritical crack growth may lead to secondary and tertiary creep. An empirical power law is suggested to describe secondary creep of shale rocks as a function of stress, pressure and temperature. The predicted closure rates agree with typical production decline curves recorded during the extraction of hydrocarbons. At the investigated conditions, the creep behavior of Posidonia shale was found to correlate with brittleness, calculated from sample composition. In Chapter 4 the creep properties of mature Posidonia and Bowland shales are presented. The observed long-term creep behavior is compared to the short-term behavior determined in Chapter 2. Creep experiments were performed at simulated reservoir conditions of pc = 50 - 115 MPa and T = 75 - 150 °C. Similar to the mechanical response of immature Posidonia shale samples investigated in Chapter 3, creep strain rates of mature Bowland and Posidonia shales were enhanced with increasing stress and temperature and decreasing confining pressures. Depending on applied deformation conditions, samples displayed either only a primary (decelerating) or in addition also a secondary (quasi-steady state) and subsequently a tertiary (accelerating) creep phase before failure. At the same deformation conditions, creep strain of Posidonia shale, which is rich in weak constituents, is tremendously higher than of quartz-rich Bowland shale. Typically, primary creep strain is again mostly accommodated by deformation of weak minerals and local pore space reduction. At the onset of tertiary creep most of the deformation was accommodated by micro crack growth. A power law was used to characterize the primary creep phase of Posidonia and Bowland shale. Primary creep strain of shale rocks is inversely correlated to triaxial compressive strength and brittleness, as described in Chapter 2. Chapter 5 provides a synthesis of the experimental findings and summarizes the major results of the studies presented in Chapters 2 - 4 and potential applications in the Exploration \& Production industry. Chapter 6 gives a brief outlook on potential future experimental research that would help to further improve our understanding of processes leading to fracture closure involving proppant embedment in unconventional shale gas reservoirs. Such insights may allow to improve stimulation techniques aimed at maintaining economical extraction of hydrocarbons over several years.}, language = {en} } @phdthesis{Kolk2019, author = {Kolk, Jens}, title = {The long-term legacy of historical land cover changes}, doi = {10.25932/publishup-43939}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439398}, school = {Universit{\"a}t Potsdam}, pages = {196}, year = {2019}, abstract = {Over the last years there is an increasing awareness that historical land cover changes and associated land use legacies may be important drivers for present-day species richness and biodiversity due to time-delayed extinctions or colonizations in response to historical environmental changes. 
Historically altered habitat patches may therefore exhibit an extinction debt or colonization credit and can be expected to lose or gain species in the future. However, extinction debts and colonization credits are difficult to detect and their actual magnitudes or payments have rarely been quantified because species richness patterns and dynamics are also shaped by recent environmental conditions and recent environmental changes. In this thesis we aimed to determine patterns of herb-layer species richness and recent species richness dynamics of forest herb layer plants and link those patterns and dynamics to historical land cover changes and associated land use legacies. The study was conducted in the Prignitz, NE-Germany, where the forest distribution remained stable for the last ca. 100 years but where a) the deciduous forest area had declined by more than 90 per cent (leaving only remnants of "ancient forests"), and b) small new forests had been established on former agricultural land ("post-agricultural forests"). Here, we analyzed the relative importance of land use history and associated historical land cover changes for herb layer species richness compared to recent environmental factors and determined magnitudes of extinction debt and colonization credit and their payment in ancient and post-agricultural forests, respectively. We showed that present-day species richness patterns were still shaped by historical land cover changes that date back more than a century. Although recent environmental conditions were largely comparable, we found significantly more forest specialists, species with short-distance dispersal capabilities and clonals in ancient forests than in post-agricultural forests. Those species richness differences were largely contingent on a colonization credit in post-agricultural forests that ranged up to 9 species (average 4.7), while the extinction debt in ancient forests had almost completely been paid. Environmental legacies from historical agricultural land use played a minor role for species richness differences. Instead, patch connectivity was most important. Species richness in ancient forests was still dependent on historical connectivity, indicating a last glimpse of an extinction debt, and the colonization credit was highest in isolated post-agricultural forests. In post-agricultural forests that were better connected or directly adjacent to ancient forest patches, the colonization credit was much smaller, and we were able to verify a gradual payment of the colonization credit from 2.7 species to 1.5 species over the last six decades.}, language = {en} } @phdthesis{Thater2019, author = {Thater, Sabine}, title = {The interplay between supermassive black holes and their host galaxies}, doi = {10.25932/publishup-43757}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437570}, school = {Universit{\"a}t Potsdam}, pages = {iv, 186}, year = {2019}, abstract = {Supermassive black holes reside in the hearts of almost all massive galaxies. Their evolutionary path seems to be strongly linked to the evolution of their host galaxies, as implied by several empirical relations between the black hole mass (M_BH) and different host galaxy properties. The physical driver of this co-evolution is, however, still not understood. More mass measurements over homogeneous samples and a detailed understanding of systematic uncertainties are required to fathom the origin of the scaling relations.
In this thesis, I present the mass estimations of supermassive black holes in the nuclei of one late-type and thirteen early-type galaxies. Our SMASHING sample extends from the intermediate to the massive galaxy mass regime and was selected to fill in gaps in the number of galaxies along the scaling relations. All galaxies were observed at high spatial resolution, making use of the adaptive-optics mode of integral field unit (IFU) instruments on state-of-the-art telescopes (SINFONI, NIFS, MUSE). I extracted the stellar kinematics from these observations and constructed dynamical Jeans and Schwarzschild models to estimate the mass of the central black holes robustly. My new mass estimates increase the number of early-type galaxies with measured black hole masses by 15\%. The seven measured galaxies with nuclear light deficits ('cores') augment the sample of cored galaxies with measured black holes by 40\%. In addition to determining massive black hole masses, evaluating the accuracy of black hole masses is crucial for understanding the intrinsic scatter of the black hole-host galaxy scaling relations. I tested various sources of systematic uncertainty on my derived mass estimates. The M_BH estimate of the single late-type galaxy of the sample yielded an upper limit, which I could constrain very robustly. I tested the effects of dust, mass-to-light ratio (M/L) variation, and dark matter on my measured M_BH. Based on these tests, the typically assumed constant M/L ratio can be an adequate assumption to account for the small amounts of dark matter in the center of that galaxy. I also tested the effect of a variable M/L on the M_BH measurement of a second galaxy. By considering stellar M/L variations in the dynamical modeling, the measured M_BH decreased by 30\%. In the future, this test should be performed on additional galaxies to learn how an M/L that is assumed to be constant biases the estimated black hole masses. Based on our upper limit mass measurement, I confirm previous suggestions that resolving the predicted BH sphere-of-influence is not a strict condition to measure black hole masses. Instead, it is only a rough guide for the detection of the black hole if high-quality, high signal-to-noise IFU data are used for the measurement. About half of our sample consists of massive early-type galaxies which show nuclear surface brightness cores and signs of triaxiality. While these types of galaxies are typically modeled with axisymmetric modeling methods, the effects on M_BH are not well studied yet. The massive galaxies of our presented galaxy sample are well suited to test the effect of different stellar dynamical models on the measured black hole mass in evidently triaxial galaxies. I have compared spherical Jeans and axisymmetric Schwarzschild models and will add triaxial Schwarzschild models to this comparison in the future. The constructed Jeans and Schwarzschild models mostly disagree with each other and cannot reproduce many of the triaxial features of the galaxies (e.g., nuclear sub-components, prolate rotation). The consequence of the axisymmetric-triaxial assumption for the accuracy of M_BH and its impact on the black hole-host galaxy relation needs to be carefully examined in the future. In the sample of galaxies with published M_BH, we find measurements based on different dynamical tracers, requiring different observations, assumptions, and methods. Crucially, different tracers do not always give consistent results.
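For reference, the black hole sphere of influence mentioned above is conventionally defined as the region within which the black hole dominates the gravitational potential over the host's stars, with radius (standard definition):

\[ r_\mathrm{SOI} = \frac{G\,M_\mathrm{BH}}{\sigma_\star^{2}}, \]

where \( G \) is the gravitational constant, \( M_\mathrm{BH} \) the black hole mass and \( \sigma_\star \) the stellar velocity dispersion of the host galaxy.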
I have used two independent tracers (cold molecular gas and stars) to estimate M_BH in a regular galaxy of our sample. While the two estimates are consistent within their errors, the stellar-based measurement is twice as high as the gas-based one. Similar trends have also been found in the literature. Therefore, a rigorous test of the systematics associated with the different modeling methods is required in the future. I caution that the effects of different tracers (and methods) should be taken into account when discussing the scaling relations. I conclude this thesis by comparing my galaxy sample with the compilation of galaxies with measured black holes from the literature, also adding six SMASHING galaxies, which were published outside of this thesis. None of the SMASHING galaxies deviates significantly from the literature measurements. Their inclusion among the published early-type galaxies shifts the M_BH - effective velocity dispersion relation towards a shallower slope, which is mainly driven by the massive galaxies of our sample. More unbiased and homogeneous measurements are needed in the future to determine the shape of the relation and understand its physical origin.}, language = {en} } @phdthesis{Kehm2019, author = {Kehm, Richard}, title = {The impact of metabolic stress and aging on functionality and integrity of pancreatic islets and beta-cells}, doi = {10.25932/publishup-44109}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441099}, school = {Universit{\"a}t Potsdam}, pages = {VI, 138}, year = {2019}, abstract = {The increasing age of the worldwide population is a major contributor to the rising prevalence of major pathologies such as type 2 diabetes, which is mediated by massive insulin resistance and a decline in functional beta-cell mass and is highly associated with an elevated incidence of obesity. Thus, the impact of aging under physiological conditions and in combination with diet-induced metabolic stress on characteristics of pancreatic islets and beta-cells, with the focus on functionality and structural integrity, was investigated in the present dissertation. Obesity, primarily induced by malnutrition through chronic and excessive intake of high-caloric diets containing large amounts of carbohydrates and fats, is followed over time by systemic inflammation and peripheral insulin resistance, initiating metabolic stress conditions. Elevated insulin demands initiate an adaptive response by beta-cell mass expansion due to increased proliferation, but prolonged stress conditions drive beta-cell failure and loss. Aging has also been shown to affect beta-cell functionality and morphology, in particular by proliferative limitations. However, most studies in rodents were performed under beta-cell challenging conditions, such as high-fat diet interventions. Thus, in the first part of the thesis (publication I), a characterization of age-related alterations in pancreatic islets and beta-cells was performed using plasma samples and pancreatic tissue sections of standard diet-fed C57BL/6J wild-type mice in several age groups (2.5, 5, 10, 15 and 21 months). Aging was accompanied by decreased but sustained islet proliferative potential as well as an induction of cellular senescence. This was associated with a progressive islet expansion to maintain normoglycemia throughout the lifespan.
Moreover, beta-cell function and mass were not impaired although the formation and accumulation of AGEs occurred, located predominantly in the islet vasculature, accompanied by an induction of oxidative and nitrosative (redox) stress. The nutritional behavior throughout the human lifespan, however, is not restricted to a balanced diet. This emphasizes the importance of investigating malnutrition through the intake of high-energy diets, which induces metabolic stress conditions that, synergistically with aging, might amplify the detrimental effects on the endocrine pancreas. Diabetes-prone NZO mice aged 7 weeks were fed a dietary regimen of carbohydrate restriction for different periods (young mice: 11 weeks; middle-aged mice: 32 weeks) followed by a carbohydrate intervention for 3 weeks, which offered the opportunity to distinguish the effects of diet-induced metabolic stress at different ages on the functionality and integrity of pancreatic islets and their beta-cells (publication II, manuscript). Interestingly, while young NZO mice exhibited massive hyperglycemia in response to diet-induced metabolic stress accompanied by beta-cell dysfunction and apoptosis, middle-aged animals revealed only moderate hyperglycemia owing to the maintenance of functional beta-cells. The loss of functional beta-cell mass in islets of young mice was associated with reduced expression of the PDX1 transcription factor, increased endocrine AGE formation and related redox stress as well as TXNIP-dependent induction of the mitochondrial death pathway. Although the amounts of secreted insulin and the proliferative potential were comparable in both age groups, islets of middle-aged mice exhibited sustained PDX1 expression, almost regular insulin secretory function, increased capacity for cell cycle progression as well as maintained redox potential. The results of the present thesis indicate a loss of functional beta-cell mass in young diabetes-prone NZO mice, occurring through redox imbalance and induction of apoptotic signaling pathways. In contrast, aging under physiological conditions in C57BL/6J mice and in combination with diet-induced metabolic stress in NZO mice does not appear to have adverse effects on the functionality and structural integrity of pancreatic islets and beta-cells, associated with adaptive responses to changing metabolic demands. However, considering the detrimental effects of aging, it has to be assumed that the compensatory potential of mice might be exhausted at a later point in time, finally leading to a loss of functional beta-cell mass and the onset and progression of type 2 diabetes. The polygenic, diabetes-prone NZO mouse is a suitable model for the investigation of human obesity-associated type 2 diabetes. However, mice at an advanced age show an attenuated diabetic phenotype or do not respond to the dietary stimuli. This might be explained by the middle age of the mice, corresponding to a human age of about 38-40 years, at which the compensatory mechanisms of pancreatic islets and beta-cells towards metabolic stress conditions are presumably more active.}, language = {en} } @phdthesis{Kerutt2019, author = {Kerutt, Josephine Victoria}, title = {The high-redshift voyage of Lyman alpha and Lyman continuum emission as told by MUSE}, doi = {10.25932/publishup-47881}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-478816}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2019}, abstract = {Most of the matter in the universe consists of hydrogen.
The hydrogen in the intergalactic medium (IGM), the matter between the galaxies, underwent a change of its ionisation state at the epoch of reionisation, at a redshift roughly between z = 6 and z = 10, or ~10^8 years after the Big Bang. At this time, the mostly neutral hydrogen in the IGM was ionised, but the source of the responsible hydrogen ionising emission remains unclear. In this thesis I discuss the most likely candidates for the emission of this ionising radiation, which are a type of galaxy called Lyman alpha emitters (LAEs). As implied by their name, they emit Lyman alpha radiation, produced after a hydrogen atom has been ionised and recombines with a free electron. The ionising radiation itself (also called Lyman continuum emission), which is needed for this process inside the LAEs, could also be responsible for ionising the IGM around those galaxies at the epoch of reionisation, given that enough Lyman continuum escapes. Through this mechanism, Lyman alpha and Lyman continuum radiation are closely linked and are both studied to better understand the properties of high redshift galaxies and the reionisation state of the universe. Before I can analyse their Lyman alpha emission lines and the escape of Lyman continuum emission from them, the first step is the detection and correct classification of LAEs in integral field spectroscopic data, specifically taken with the Multi-Unit Spectroscopic Explorer (MUSE). After detecting emission line objects in the MUSE data, the task of classifying them and determining their redshift is performed with the graphical user interface QtClassify, which I developed during the work on this thesis. It exploits the combination of spectroscopic and photometric information that integral field spectroscopy offers to enable the user to quickly identify the nature of the detected emission lines. The reliable classification of LAEs and determination of their redshifts is a crucial first step towards an analysis of their properties. Through radiative transfer processes, the properties of the neutral hydrogen clouds in and around LAEs are imprinted on the shape of the Lyman alpha line. Thus, after identifying the LAEs in the MUSE data, I analyse the properties of the Lyman alpha emission line, such as the equivalent width (EW) distribution, the asymmetry and width of the line as well as the double peak fraction. I challenge the common method of displaying EW distributions as histograms without taking the limits of the survey into account and construct a more independent EW distribution function that better reflects the properties of the underlying population of galaxies. I illustrate this by comparing the fraction of high EW objects between the two surveys MUSE-Wide and MUSE-Deep, both consisting of MUSE pointings (each with the size of one square arcminute) of different depths. In the 60 MUSE-Wide fields of one hour exposure time I find a fraction of ~20\% of objects with extreme EWs above EW_0>240A, while in the MUSE-Deep fields (9 fields with an exposure time of 10 hours and one with an exposure time of 31 hours) I find a fraction of only ~1\%, which is due to the differences in the limiting line flux of the surveys. The highest EW I measure is EW_0 = 600.63 +- 110A, which hints at an unusual underlying stellar population, possibly with a very low metallicity.
With the knowledge of the redshifts and positions of the LAEs detected in the MUSE-Wide survey, I also look for Lyman continuum emission coming from these galaxies and analyse the connection between Lyman continuum emission and Lyman alpha emission. I use ancillary Hubble Space Telescope (HST) broadband photometry in the bands that contain the Lyman continuum and find six Lyman continuum leaker candidates. To test whether the Lyman continuum emission of LAEs is coming only from those individual objects or from the whole population, I select LAEs that are most promising for the detection of Lyman continuum emission, based on their rest-frame UV continuum and Lyman alpha line shape properties. After this selection, I stack the broadband data of the resulting sample and detect a signal in Lyman continuum with a significance of S/N = 5.5, pointing towards a Lyman continuum escape fraction of ~80\%. If the signal is reliable, it strongly favours LAEs as the providers of the hydrogen ionising emission at the epoch of reionisation and beyond.}, language = {en} } @phdthesis{Rector2019, author = {Rector, Michael V.}, title = {The acute effect of exercise on flow-mediated dilation in young people with cystic fibrosis}, doi = {10.25932/publishup-43893}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-438938}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2019}, abstract = {Introduction: Cystic fibrosis (CF) is a genetic disease which disrupts the function of an epithelial surface anion channel, CFTR (cystic fibrosis transmembrane conductance regulator). Impairment of this channel leads to inflammation and infection in the lung, causing the majority of morbidity and mortality. However, CF is a multiorgan disease affecting many tissues, including vascular smooth muscle. Studies have revealed that young people with cystic fibrosis who lack inflammation and infection still demonstrate vascular endothelial dysfunction, measured via flow-mediated dilation (FMD). In other disease cohorts, i.e. diabetic and obese patients, endurance exercise interventions have been shown to improve or attenuate this impairment. However, long-term exercise interventions are risky, as well as costly in terms of time and resources. Nevertheless, emerging research has correlated the acute effects of exercise with its long-term benefits and advocates the study of acute exercise effects on FMD prior to longitudinal studies. The acute effects of exercise on FMD have previously not been examined in young people with CF, but could yield insights into the potential benefits of long-term exercise interventions. The aims of these studies were to 1) develop and test the reliability of the FMD method and its applicability to study acute exercise effects; 2) compare baseline FMD and the acute exercise effect on FMD between young people with and without CF; and 3) explore associations between the acute effects of exercise on FMD and demographic characteristics, physical activity levels, lung function, maximal exercise capacity or inflammatory hsCRP levels. Methods: Thirty young volunteers (10 people with CF, 10 non-CF and 10 non-CF active matched controls) between the ages of 10 and 30 years completed blood draws, pulmonary function tests, maximal exercise capacity tests and baseline FMD measurements, before returning approximately 1 week later and performing a 30-min constant load training at 75\% HRmax. FMD measurements were taken prior to, immediately after, 30 minutes after and 1 hour after the constant load training.
ANOVAs and repeated measures ANOVAs were employed to explore differences between groups and timepoints, respectively. Linear regression was implemented and evaluated to assess correlations between FMD and demographic characteristics, physical activity levels, lung function, maximal exercise capacity or inflammatory hsCRP levels. For all comparisons, statistical significance was set at α = 0.05. Results: Young people with CF presented with decreased lung function and maximal exercise capacity compared to matched controls. Baseline FMD was also significantly decreased in the CF group (CF: 5.23\% vs. non-CF: 8.27\% vs. non-CF active: 9.12\%). Immediately post-training, FMD was significantly attenuated (approximately 40\%) in all groups, with the CF group still demonstrating the lowest FMD. Follow-up measurements of FMD revealed a slow recovery towards baseline values 30 min post-training and improvements in the CF and non-CF active groups 60 min post-training. Linear regression revealed significant correlations between maximal exercise capacity (VO2 peak), BMI and FMD immediately post-training. Conclusion: These new findings confirm that CF vascular endothelial dysfunction can be acutely modified by exercise and will aid in underlining the importance of exercise in CF populations. The potential benefits of long-term exercise interventions on vascular endothelial dysfunction in young people with CF warrant further investigation.}, language = {en} } @phdthesis{Civitillo2019, author = {Civitillo, Sauro}, title = {Teachers' cultural diversity beliefs and culturally responsive practices}, doi = {10.25932/publishup-42776}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427763}, school = {Universit{\"a}t Potsdam}, pages = {161}, year = {2019}, abstract = {The present dissertation about teachers' cultural diversity beliefs and culturally responsive practices includes a general introduction (Chapter 1), a systematic literature review (Chapter 2), three empirical studies (Chapters 3, 4, and 5), and it ends with a general discussion and conclusion (Chapter 6). The major focus of investigation lay in creating a deeper understanding of teachers' beliefs about cultural diversity and how those beliefs are related to teaching practices, which may or may not be considered culturally responsive. In this dissertation, I relied on insights from theoretical perspectives derived from the field of psychology, such as social cognitive theory and intergroup ideologies, as well as from the field of multicultural education, such as culturally responsive teaching. In Chapter 1, I provide the background of this dissertation, with contextual information regarding the German educational system, the theoretical framework used and the main research objectives of each study. In Chapter 2, I conducted a systematic review of the existing international studies on trainings addressing cultural diversity beliefs with pre-service teachers. More specifically, the aims of the systematic literature review were (1) to provide a description of the main components and contextual characteristics of teacher trainings targeting cultural diversity beliefs, (2) report the training effects, and (3) detail the methodological strengths and weaknesses of these studies.
By examining the main components and contextual characteristics of teacher trainings, the effects on beliefs about cultural diversity as well as the methodological strengths and weaknesses of these studies in a single review, I took an integrated approach to these three processes. To review the final pool of studies (N = 36), I used a descriptive and narrative approach, relying primarily on the use of words and text to summarise and explain the findings of the synthesis. The three empirical studies that follow all highlight aspects of to what extent and how teacher beliefs about cultural diversity translate into real-world practices in schools. In Chapter 3, to expand the validity of culturally responsive teaching to the German context, I aimed at verifying the dimensional structure of the German version of the Culturally Responsive Classroom Management Self-Efficacy Scale (CRCMSES; Siwatu, Putman, Starker-Glass, \& Lewis, 2015). I conducted exploratory and confirmatory factor analyses and ran correlations between the subscales of the CRCMSES and a measure of cultural diversity-related stress. Data (n = 504) used for the first empirical study (Chapter 3) were collected in the InTePP-project (Inclusive Teaching Professionalization Panel), in which pre-service teachers' competencies and beliefs were assessed longitudinally at two universities: the University of Potsdam and the University of Cologne. In the second empirical study, which forms Chapter 4, the focus is on teachers' practices resembling school approaches to cultural diversity. In this study, I investigated two research questions: (1a) What types of descriptive norms regarding cultural diversity are perceived by teachers and students with and without an immigrant background, and (1b) what is their degree of congruence? Additionally, I was also interested in how teachers' and students' perceptions of descriptive norms about cultural diversity are related to practices and artefacts in the physical and virtual school environment. Data for the second empirical study (Chapter 4) were previously collected in a dissertation project of Dr. Maja Schachner, funded by the federal program "ProExzellenz" of the Free State of Thuringia. Adopting a mixed-methods research design, I conducted a secondary analysis of data from teachers (n = 207) and students (n = 1,644) gathered in 22 secondary schools in south-west Germany. Additional sources of data in this study were based on pictures of school interiors (halls and corridors) and sixth-grade classrooms' walls (n = 2,995), and screenshots from each school website (n = 6,499). Chapter 5 addresses the question of how culturally responsive teaching, teacher cultural diversity beliefs, and self-reflection on one's own teaching are related. More specifically, in this study I addressed two research questions: (1) How does CRT relate to teachers' beliefs about incorporating cultural diversity content into daily teaching and learning activities? And (2) how does the level of teachers' self-reflection on their own teaching relate to CRT? For this last empirical chapter, I conducted a multiple case study with four ethnic German teachers who work in one culturally and ethnically diverse high school in Berlin, using classroom video observations and post-observation interviews. In the final chapter (Chapter 6), I summarise the main findings of the systematic literature review and the three empirical studies and discuss their scientific and practical implications.
This dissertation makes a significant contribution to the field of educational science by deepening the understanding of culturally responsive teaching in terms of its measurement, its focus on both beliefs and practices and the link between the two, and its theoretical, practical, and future research implications.}, language = {en} } @phdthesis{Schuerings2019, author = {Sch{\"u}rings, Marco Philipp Hermann}, title = {Synthesis of 1D microgel strands and their motion analysis in solution}, doi = {10.25932/publishup-43953}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439532}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2019}, abstract = {The fabrication of 1D nanostrands composed of stimuli-responsive microgels has been shown in this work. Microgels are well-known materials able to respond to various stimuli from their outer environment. Since these microgels respond via a volume change to an external stimulus, a targeted mechanical response can be achieved. Through carefully choosing the right composition of the polymer matrix, microgels can be designed to react precisely to the targeted stimuli (e.g. drug delivery via pH and temperature changes, or selective contractions through changes in electrical current [125]). This work aimed to create flexible nano-filaments which are capable of fast anisotropic contractions similar to muscle filaments. For the fabrication of such filaments or strands, nanostructured templates (PDMS wrinkles) were chosen due to their facile and low-cost fabrication and the versatile tunability of their dimensions. Additionally, wrinkling is a well-known lithography-free method which enables the fabrication of nanostructures in a reproducible manner and with a high long-range periodicity. In Chapter 2.1, it was shown for the first time that microgels as soft matter particles can be aligned into densely packed microgel arrays of various lateral dimensions. The alignment of microgels with different compositions (e.g. VCL/AAEM, NIPAAm, NIPAAm/VCL and charged microgels) was shown by using different assembly techniques (e.g. spin-coating, template-confined molding). One experimental parameter was kept constant: the SiOx surface composition of the templates and substrates (e.g. oxidized PDMS wrinkles, Si-wafers and glass slides). It was shown that the fabrication of nanoarrays was feasible with all tested microgel types. Although the microgels exhibited different deformability when aligned on a flat surface, they retained their thermo-responsivity and swelling behavior. Towards the fabrication of 1D microgel strands, interparticle connectivity was pursued. This was achieved via different cross-linking methods (i.e. cross-linking via UV-irradiation and host-guest complexation) discussed in Chapter 2.2. The microgel arrays created by different assembly methods and microgel types were tested for their cross-linking suitability. It was observed that NIPAAm-based microgels cannot be cross-linked with UV light. Furthermore, it was found that these microgels exhibit a strong surface-particle interaction and therefore could not be detached from the given substrates. In contrast, VCL/AAEM-based microgels could both be UV cross-linked, based on the keto-enol tautomerism of the AAEM copolymer, and be detached from the substrate due to the lower adhesion energy towards SiOx surfaces. With VCL/AAEM microgels, long one-dimensional microgel strands could be re-dispersed in water for further analysis.
It has also been shown that at least one lateral dimension of the freely dispersed 1D microgel strands is easily controllable by adjusting the wavelength of the wrinkled template. For further work, only VCL/AAEM-based microgels were used to focus on the main aim of this work, i.e. the fabrication of 1D microgel nanostrands. As an alternative to the unspecific and harsh UV cross-linking, the host-guest complexation via diazobenzene cross-linkers and cyclodextrin hosts was explored. The idea behind this approach was to enable a future construction-kit-like approach by incorporating cyclodextrin comonomers in a broad variety of particle systems (e.g. microgels, nanoparticles). For this purpose, VCL/AAEM microgels were copolymerized with different amounts of mono-acrylate functionalized β-cyclodextrin (CD). After successfully testing the cross-linking capability in solution, the cross-linking of aligned VCL/AAEM/CD microgels was attempted. Although the cross-linking worked well, once the single arrays came into contact with each other, they agglomerated. As a reason for this behavior, residual amounts of mono-complexed diazobenzene linkers were suspected. Thus, end-capping strategies were tried out (e.g. excess amounts of β-cyclodextrin and coverage with azobenzene-functionalized AuNPs) but were unsuccessful. On closer consideration, entropy effects were taken into account, which favor the release of the complexed diazobenzene linker and lead to agglomeration. To circumvent this entropy-driven effect, a multifunctional polymer with 50\% azobenzene groups (Harada polymer) was used. First experiments with this polymer showed promising results regarding a less pronounced agglomeration (Figure 77). Thus, this approach could be pursued in the future. In this chapter, it was found that, in contrast to pearl-necklace and ribbon-like formations, particle alignment in zigzag formation provided the best compromise in terms of stability in dispersion (see Figure 44a and Figure 51) while maintaining sufficient flexibility. For this reason, microgel strands in zigzag formation were used for the motion analysis described in Chapter 2.3. The aim was to observe the properties of unrestrained microgel strands in solution (e.g. diffusion behavior, rotational properties and, ideally, anisotropic contraction after a temperature increase). Initially, 1D microgel strands were manipulated via AFM in a liquid cell setup. It could be observed that the strands required a higher load force compared to single microgels to be detached from the surface. However, with the AFM it was not possible to detach the strands in a controllable manner; attempts resulted either in the complete removal of single microgel particles or in tearing the strands off the surface. For this reason, to observe the motion behavior of unrestrained microgel strands in solution, confocal microscopy was used. Furthermore, to hinder adsorption of the strands, coating the surface of the substrates with a repulsive polymer film was found to be beneficial. Confocal and wide-field microscopy videos showed that the microgel strands exhibit translational and rotational diffusive motion in solution without perceptible bending. Unfortunately, with these methods the detection of the anisotropic stimuli-responsive contraction of the freely moving microgel strands was not possible. To summarize, the flexibility of microgel strands is more comparable to the mechanical behavior of a semi-flexible cable than to that of a yarn.
The strands studied here consist of dozens or even hundreds of discrete submicron units strung together by cross-linking, having few parallels in nanotechnology. With the insights gained in this work on microgel-surface interactions, in the future, a targeted functionalization of the template and substrate surfaces can be conducted to actively prevent unwanted microgel adsorption for a given microgel system (e.g. PVCL and polystyrene coating235). This measure would make the discussed alignment methods more diverse. As shown herein, the assembly methods enable a versatile microgel alignment (e.g. microgel meshes, double and triple strands). To go further, one could use more complex templates (e.g. ceramic rhombs and star shaped wrinkles (Figure 14) to expand the possibilities of microgel alignment and to precisely control their aspect ratios (e.g. microgel rods with homogeneous size distributions).}, language = {en} } @phdthesis{Riemer2019, author = {Riemer, Janine}, title = {Synthese und Charakterisierung selektiver Fluoroionophore f{\"u}r intra- und extrazellul{\"a}re Bestimmungen von Kalium- und Natrium-Ionen}, doi = {10.25932/publishup-44193}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441932}, school = {Universit{\"a}t Potsdam}, pages = {IV, 165}, year = {2019}, abstract = {Im Rahmen dieser Dissertation konnten neue Kalium- und Natrium-Ionen Fluoreszenzfarbstoffe von der Klasse der Fluoroionophore synthetisiert und charakterisiert werden. Sie bestehen aus einem N Phenylazakronenether als Ionophor und unterschiedlichen Fluorophoren und sind {\"u}ber einen π-konjugierten 1,2,3-Triazol-1,4-diyl Spacer verbunden. Dabei lag der Fokus w{\"a}hrend ihrer Entwicklung darauf, diese in ihrer Sensitivit{\"a}t, Selektivit{\"a}t und in ihren photophysikalischen Eigenschaften so zu funktionalisieren, dass sie f{\"u}r intra- bzw. extrazellul{\"a}re Konzentrationsbestimmungen geeignet sind. Durch Variation der in ortho Position der N-Phenylazakronenether befindlichen Alkoxy-Gruppen und der fluorophoren Gruppe der Fluoroionophore konnte festgestellt werden, dass die Sensitivit{\"a}t und Selektivit{\"a}t f{\"u}r Kalium- bzw. Natrium-Ionen jeweils durch eine bestimmte Isomerie der 1,2,3-Triazol-1,4-diyl-Einheit erh{\"o}ht wird. Des Weiteren wurde gezeigt, dass durch eine erh{\"o}hte Einschr{\"a}nkung der N,N-Diethylamino-Gruppe des Fluorophors eine Steigerung der Fluoreszenzquantenausbeute und eine Verschiebung des Emissionsmaximums auf {\"u}ber 500 nm erreicht werden konnte. Die Einf{\"u}hrung einer Isopropoxy-Gruppe an einem N-Phenylaza-[18]krone-6-ethers resultierte dabei in einem hoch selektiven Kalium-Ionen Fluoroionophor und erm{\"o}glichte eine in vitro {\"U}berwachung von 10 - 80 mM Kalium-Ionen. Die Substitution einer Methoxy-Gruppe an einem N-Phenylaza-[15]krone-5-ether kombiniert mit unterschiedlich N,N-Diethylamino-Coumarinen lieferte hingegen zwei Natrium-Ionen Fluoroionophore, die f{\"u}r die {\"U}berwachung von intra- bzw. extrazellul{\"a}ren Natrium-Ionen Konzentrationen geeignet sind. In einem weiteren Schritt wurden N-Phenylaza-[18]krone-6-ether mit einem Fluorophor, basierend auf einem [1,3]-Dioxolo[4,5-f][1,3]benzodioxol-(DBD)-Grundger{\"u}st, funktionalisiert. 
Die im Anschluss durchgef{\"u}hrten spektroskopischen Untersuchungen ergaben, dass die Isopropoxy-Gruppe in ortho Position des N-Phenylaza-[18]krone-6-ether in einen f{\"u}r extrazellul{\"a}re Kalium-Ionen Konzentrationen selektiven Fluoroionophor resultierte, der die Konzentrationsbestimmungen {\"u}ber die Fluoreszenzintensit{\"a}t und -lebensdauer erm{\"o}glicht. In einem abschließenden Schritt konnte unter Verwendung eines Pyrens als fluorophore Gruppe ein weiterer f{\"u}r extrazellul{\"a}re Kalium-Ionen Konzentrationen geeigneter Fluoroionophor entwickelt werden. Die Bestimmung der Kalium-Ionen Konzentration erfolgte hierbei anhand der Fluoreszenzintensit{\"a}tsverh{\"a}ltnisse bei zwei Emissionswellenl{\"a}ngen. Insgesamt konnten 17 verschiedene neue Fluoroionophore f{\"u}r die Bestimmung von Kalium- bzw. Natrium-Ionen synthetisiert und charakterisiert werden. Sechs dieser neuen Molek{\"u}le erm{\"o}glichen in vitro Messungen der intra- oder extrazellul{\"a}ren Kalium- und Natrium-Ionen Konzentrationen und k{\"o}nnten zuk{\"u}nftig f{\"u}r in vivo Konzentrationsmessungen verwendet werden.}, language = {de} } @phdthesis{Gong2019, author = {Gong, Chen Chris}, title = {Synchronization of coupled phase oscillators}, doi = {10.25932/publishup-48752}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487522}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 115}, year = {2019}, abstract = {Oscillatory systems under weak coupling can be described by the Kuramoto model of phase oscillators. Kuramoto phase oscillators have diverse applications ranging from phenomena such as communication between neurons and collective influences of political opinions, to engineered systems such as Josephson Junctions and synchronized electric power grids. This thesis includes the author's contribution to the theoretical framework of coupled Kuramoto oscillators and to the understanding of non-trivial N-body dynamical systems via their reduced mean-field dynamics. The main content of this thesis is composed of four parts. First, a partially integrable theory of globally coupled identical Kuramoto oscillators is extended to include pure higher-mode coupling. The extended theory is then applied to a non-trivial higher-mode coupled model, which has been found to exhibit asymmetric clustering. Using the developed theory, we could predict a number of features of the asymmetric clustering with only information of the initial state provided. The second part consists of an iterated discrete-map approach to simulate phase dynamics. The proposed map --- a Moebius map --- not only provides fast computation of phase synchronization, it also precisely reflects the underlying group structure of the dynamics. We then compare the iterated-map dynamics and various analogous continuous-time dynamics. We are able to replicate known phenomena such as the synchronization transition of the Kuramoto-Sakaguchi model of oscillators with distributed natural frequencies, and chimera states for identical oscillators under non-local coupling. The third part entails a particular model of repulsively coupled identical Kuramoto-Sakaguchi oscillators under common random forcing, which can be shown to be partially integrable. Via both numerical simulations and theoretical analysis, we determine that such a model cannot exhibit stationary multi-cluster states, contrary to the numerical findings in previous literature. 
Through further investigation, we find that the multi-clustering states reported previously occur due to the accumulation of discretization errors inherent in the integration algorithms, which introduce higher-mode couplings into the model. As a result, the partial integrability condition is violated. Lastly, we derive the microscopic cross-correlation of globally coupled non-identical Kuramoto oscillators under common fluctuating forcing. The effect of correlation arises naturally in finite populations, due to the non-trivial fluctuations of the meanfield. In an idealized model, we approximate the finite-sized fluctuation by a Gaussian white noise. The analytical approximation qualitatively matches the measurements in numerical experiments, however, due to other periodic components inherent in the fluctuations of the mean-field there still exist significant inconsistencies.}, language = {en} } @phdthesis{Pohlenz2019, author = {Pohlenz, Julia}, title = {Structural insights into sodium-rich silicate - carbonate glasses and melts}, doi = {10.25932/publishup-42382}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423826}, school = {Universit{\"a}t Potsdam}, pages = {XXII, 117}, year = {2019}, abstract = {Carbonate-rich silicate and carbonate melts play a crucial role in deep Earth magmatic processes and their melt structure is a key parameter, as it controls physical and chemical properties. Carbonate-rich melts can be strongly enriched in geochemically important trace elements. The structural incorporation mechanisms of these elements are difficult to study because such melts generally cannot be quenched to glasses, which are usually employed for structural investigations. This thesis investigates the influence of CO2 on the local environments of trace elements contained in silicate glasses with variable CO2 concentrations as well as in silicate and carbonate melts. The compositions studied include sodium-rich peralkaline silicate melts and glasses and carbonate melts similar to those occurring naturally at Oldoinyo Lengai volcano, Tanzania. The local environments of the three elements yttrium (Y), lanthanum (La) and strontium (Sr) were investigated in synthesized glasses and melts using X-ray absorption fine structure (XAFS) spectroscopy. Especially extended X-ray absorption fine structure spectroscopy (EXAFS) provides element specific information on local structure, such as bond lengths, coordination numbers and the degree of disorder. To cope with the enhanced structural disorder present in glasses and melts, EXAFS analysis was based on fitting approaches using an asymmetric distribution function as well as a correlation model according to bond valence theory. Firstly, silicate glasses quenched from high pressure/temperature melts with up to 7.6 wt \% CO2 were investigated. In strongly and extremely peralkaline glasses the local structure of Y is unaffected by the CO2 content (with oxygen bond lengths of ~ 2.29 {\AA}). Contrary, the bond lengths for Sr-O and La-O increase with increasing CO2 content in the strongly peralkaline glasses from ~ 2.53 to ~ 2.57 {\AA} and from ~ 2.52 to ~ 2.54 {\AA}, respectively, while they remain constant in extremely peralkaline glasses (at ~ 2.55 {\AA} and 2.54 {\AA}, respectively). Furthermore, silicate and unquenchable carbonate melts were investigated in-situ at high pressure/temperature conditions (2.2 to 2.6 GPa, 1200 to 1500 °C) using a Paris-Edinburgh press. 
A novel design of the pressure medium assembly for this press was developed, which features increased mechanical stability as well as enhanced transmittance at relevant energies to allow for EXAFS of low-content elements in transmission. Compared to the glasses, the bond lengths of Y-O, La-O and Sr-O are elongated by up to + 3 \% in the melt and exhibit more asymmetric pair distributions. For all investigated silicate melt compositions, Y-O bond lengths were found to be constant at ~ 2.37 {\AA}, while in the carbonate melt the Y-O length increases slightly to 2.41 {\AA}. The La-O bond lengths, in turn, increase systematically over the whole silicate-carbonate melt join from 2.55 to 2.60 {\AA}. Sr-O bond lengths in melts increase from ~ 2.60 to 2.64 {\AA} from the pure silicate to the silicate-bearing carbonate composition, with a constant elevated bond length within the carbonate region. For comparison and deeper insight, glass and melt structures of Y- and Sr-bearing sodium-rich silicate to carbonate compositions were simulated in an explorative ab initio molecular dynamics (MD) study. The simulations confirm the observed patterns of CO2-dependent local changes around Y and Sr and additionally provide further insights into the detailed incorporation mechanisms of the trace elements and CO2. Principal findings include that in sodium-rich silicate compositions carbon is mainly incorporated either as a free carbonate group or shares one oxygen with a network former (Si or [4]Al) to form a non-bridging carbonate. Of minor importance are bridging carbonates between two network formers. Here, a clear preference for two [4]Al as adjacent network formers occurs, compared to what a statistical distribution would suggest. In C-bearing silicate melts, minor amounts of molecular CO2 are present, which is almost totally dissolved as carbonate in the quenched glasses. The combination of experiment and simulation provides extraordinary insights into glass and melt structures. The new data are interpreted on the basis of bond valence theory and are used to deduce potential mechanisms for the structural incorporation of the investigated elements, which allow for predictions of their partitioning behavior in natural melts. Furthermore, it provides unique insights into the dissolution mechanisms of CO2 in silicate melts and into the carbonate melt structure. For the latter, a structural model is suggested, which is based on planar CO3-groups linking 7- to 9-fold cation polyhedra, in accordance with structural units found in the Na-Ca carbonate nyerereite. Ultimately, the outcome of this study contributes to rationalizing the unique physical properties and geological phenomena related to carbonated silicate-carbonate melts.}, language = {en} } @phdthesis{Ghani2019, author = {Ghani, Humaad}, title = {Structural evolution of the Kohat and Potwar fold and thrust belts of Pakistan}, doi = {10.25932/publishup-44077}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440775}, school = {Universit{\"a}t Potsdam}, pages = {viii, 121}, year = {2019}, abstract = {Fold and thrust belts are characteristic features of collisional orogens that grow laterally through time by deforming the upper crust in response to stresses caused by convergence. The deformation propagation in the upper crust is accommodated by shortening along major folds and thrusts. The formation of these structures is influenced by the mechanical strength of d{\´e}collements, basement architecture, presence of preexisting structures and taper of the wedge.
These factors control not only the sequence of deformation but also cause differences in the structural style. The Himalayan fold and thrust belt exhibits significant differences in the structural style from east to west. The external zone of the Himalayan fold and thrust belt, also called the Subhimalaya, has been extensively studied to understand the temporal development and differences in the structural style in Bhutan, Nepal and India; however, the Subhimalaya in Pakistan remains poorly studied. The Kohat and Potwar fold and thrust belts (herein called Kohat and Potwar) represent the Subhimalaya in Pakistan. The Main Boundary Thrust (MBT) marks the northern boundary of both Kohat and Potwar, showing that these belts are genetically linked to foreland-vergent deformation within the Himalayan orogen, despite the pronounced contrast in structural style. This contrast becomes more pronounced toward south, where the active strike-slip Kalabagh Fault Zone links with the Kohat and Potwar range fronts, known as the Surghar Range and the Salt Range, respectively. The Surghar and Salt Ranges developed above the Surghar Thrust (SGT) and Main Frontal Thrust (MFT). In order to understand the structural style and spatiotemporal development of the major structures in Kohat and Potwar, I have used structural modeling and low temperature thermochronolgy methods in this study. The structural modeling is based on construction of balanced cross-sections by integrating surface geology, seismic reflection profiles and well data. In order to constrain the timing and magnitude of exhumation, I used apatite (U-Th-Sm)/He (AHe) and apatite fission track (AFT) dating. The results obtained from both methods are combined to document the Paleozoic to Recent history of Kohat and Potwar. The results of this research suggest two major events in the deformation history. The first major deformation event is related to Late Paleozoic rifting associated with the development of the Neo-Tethys Ocean. The second major deformation event is related to the Late Miocene to Pliocene development of the Himalayan fold and thrust belt in the Kohat and Potwar. The Late Paleozoic rifting is deciphered by inverse thermal modelling of detrital AFT and AHe ages from the Salt Range. The process of rifting in this area created normal faulting that resulted in the exhumation/erosion of Early to Middle Paleozoic strata, forming a major unconformity between Cambrian and Permian strata that is exposed today in the Salt Range. The normal faults formed in Late Paleozoic time played an important role in localizing the Miocene-Pliocene deformation in this area. The combination of structural reconstructions and thermochronologic data suggest that deformation initiated at 15±2 Ma on the SGT ramp in the southern part of Kohat. The early movement on the SGT accreted the foreland into the Kohat deforming wedge, forming the range front. The development of the MBT at 12±2 Ma formed the northern boundary of Kohat and Potwar. Deformation propagated south of the MBT in the Kohat on double d{\´e}collements and in the Potwar on a single basal d{\´e}collement. The double d{\´e}collement in the Kohat adopted an active roof-thrust deformation style that resulted in the disharmonic structural style in the upper and lower parts of the stratigraphic section. Incremental shortening resulted in the development of duplexes in the subsurface between two d{\´e}collements and imbrication above the roof thrust. 
Tectonic thickening caused by duplexes resulted in cooling and exhumation above the roof thrust by removal of a thick sequence of molasse strata. The structural modelling shows that the ramps on which duplexes formed in Kohat continue as tip lines of fault propagation folds in the Potwar. The absence of a double d{\´e}collement in the Potwar resulted in the preservation of a thick sequence of molasse strata there. The temporal data suggest that deformation propagated in-sequence from ~ 8 to 3 Ma in the northern part of Kohat and Potwar; however, internal deformation in the Kohat was more intense, probably required for maintaining a critical taper after a significant load was removed above the upper d{\´e}collement. In the southern part of Potwar, a steeper basement slope (β≥3°) and the presence of salt at the base of the stratigraphic section allowed for the complete preservation of the stratigraphic wedge, evidenced by very little internal deformation. Activation of the MFT at ~4 Ma allowed the Salt Range to become the range front of the Potwar. The removal of a large amount of molasse strata above the MFT ramp enhanced the role of salt in shaping the structural style of the Salt Range and Kalabagh Fault Zone. Salt accumulation and migration resulted in the formation of normal faults in both areas. Salt migration in the Kalabagh Fault Zone has triggered out-of-sequence movement on ramps in the Kohat. The amount of shortening calculated between the MBT and the SGT in Kohat is 75±5 km and between the MBT and the MFT in Potwar is 65±5 km. A comparable amount of shortening is accommodated in the Kohat and Potwar despite their different widths: 70 km for Kohat and 150 km for Potwar. In summary, this research suggests that deformation switched between different structures during the last ~15 Ma through different modes of fault propagation, resulting in different structural styles and the out-of-sequence development of Kohat and Potwar.}, language = {en} } @phdthesis{Raatz2019, author = {Raatz, Michael}, title = {Strategies within predator-prey interactions - from individuals to ecosystems}, doi = {10.25932/publishup-42658}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426587}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2019}, abstract = {Predator-prey interactions provide central links in food webs. These interactions are directly or indirectly impacted by a number of factors. These factors range from physiological characteristics of individual organisms, through specifics of their interaction, to impacts of the environment. They may generate the potential for the application of different strategies by predators and prey. Within this thesis, I modelled predator-prey interactions and investigated a broad range of different factors driving the application of certain strategies that affect the individuals or their populations. In doing so, I focused on phytoplankton-zooplankton systems as established model systems of predator-prey interactions. At the level of predator physiology, I proposed, and partly confirmed, adaptations to fluctuating availability of co-limiting nutrients as beneficial strategies. These may allow organisms to store ingested nutrients or to regulate the effort put into nutrient assimilation. We found that these two strategies are beneficial at different fluctuation frequencies of the nutrients, but may positively interact at intermediate frequencies. The corresponding experiments supported our model results.
We found that the temporal structure of nutrient fluctuations indeed has strong effects on the juvenile somatic growth rate of {\itshape Daphnia}. Predator colimitation by energy and essential biochemical nutrients gave rise to another physiological strategy. High-quality prey species may render themselves indispensable in a scenario of predator-mediated coexistence by being the only source of essential biochemical nutrients, such as cholesterol. Thereby, the high-quality prey may even compensate for a lacking defense and ensure its persistence in competition with other more defended prey species. We found a similar effect in a model where algae and bacteria compete for nutrients. Now, being the only source of a compound that is required by the competitor (bacteria) prevented the competitive exclusion of the algae. In this case, the essential compounds were the organic carbon provided by the algae. Here again, being indispensable served as a prey strategy that ensured its coexistence. The latter scenario also gave rise to the application of the two metabolic strategies of autotrophy and heterotrophy by algae and bacteria, respectively. We found that their coexistence allowed the recycling of resources in a microbial loop that would otherwise be lost. Instead, these resources were made available to higher trophic levels, increasing the trophic transfer efficiency in food webs. The predation process comprises the next higher level of factors shaping the predator-prey interaction, besides these factors that originated from the functioning or composition of individuals. Here, I focused on defensive mechanisms and investigated multiple scenarios of static or adaptive combinations of prey defense and predator offense. I confirmed and extended earlier reports on the coexistence-promoting effects of partially lower palatability of the prey community. When bacteria and algae are coexisting, a higher palatability of bacteria may increase the average predator biomass, with the side effect of making the population dynamics more regular. This may facilitate experimental investigations and interpretations. If defense and offense are adaptive, this allows organisms to maximize their growth rate. Besides this fitness-enhancing effect, I found that co-adaptation may provide the predator-prey system with the flexibility to buffer external perturbations. On top of these rather internal factors, environmental drivers also affect predator-prey interactions. I showed that environmental nutrient fluctuations may create a spatio-temporal resource heterogeneity that selects for different predator strategies. I hypothesized that this might favour either storage or acclimation specialists, depending on the frequency of the environmental fluctuations. We found that many of these factors promote the coexistence of different strategies and may therefore support and sustain biodiversity. Thus, they might be relevant for the maintenance of crucial ecosystem functions that also affect us humans. Besides this, the richness of factors that impact predator-prey interactions might explain why so many species, especially in the planktonic regime, are able to coexist.}, language = {en} } @phdthesis{Hanschmann2019, author = {Hanschmann, Raffael Tino}, title = {Stalling the engine? 
EU climate politics after the 'Great Recession'}, doi = {10.25932/publishup-44044}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-440441}, school = {Universit{\"a}t Potsdam}, pages = {XXVIII, 303}, year = {2019}, abstract = {This dissertation investigates the impact of the economic and fiscal crisis starting in 2008 on EU climate policy-making. While the overall number of adopted greenhouse gas emission reduction policies declined in the crisis aftermath, EU lawmakers decided to introduce new or tighten existing regulations in some important policy domains. Existing knowledge about the crisis impact on EU legislative decision-making cannot explain these inconsistencies. In response, this study develops an actor-centred conceptual framework based on rational choice institutionalism that provides a micro-level link to explain how economic crises translate into altered policy-making patterns. The core theoretical argument draws on redistributive conflicts, arguing that tensions between 'beneficiaries' and 'losers' of a regulatory initiative intensify during economic crises and spill over to the policy domain. To test this hypothesis and using social network analysis, this study analyses policy processes in three case studies: The introduction of carbon dioxide emission limits for passenger cars, the expansion of the EU Emissions Trading System to aviation, and the introduction of a regulatory framework for biofuels. The key finding is that an economic shock causes EU policy domains to polarise politically, resulting in intensified conflict and more difficult decision-making. The results also show that this process of political polarisation roots in the industry that is the subject of the regulation, and that intergovernmental bargaining among member states becomes more important, but also more difficult in times of crisis.}, language = {en} } @phdthesis{Brase2019, author = {Brase, Alexa Kristin}, title = {Spiele um Studium und Lehre? Zur mikropolitischen Nutzung von Qualit{\"a}tsmanagementsystemen an Hochschulen in Deutschland}, doi = {10.25932/publishup-43737}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437377}, school = {Universit{\"a}t Potsdam}, pages = {IX, 228}, year = {2019}, abstract = {Die Wissenschaftsfreiheit ist ein Grundrecht, dessen Sinn und Auslegung im Rahmen von Reformen des Hochschulsystems nicht nur der Justiz, sondern auch der Wissenschaft selbst immer wieder Anlass zur Diskussion geben, so auch im Zuge der Einf{\"u}hrung des so genannten Qualit{\"a}tsmanagements von Studium und Lehre an deutschen Hochschulen. Die vorliegende Dissertationsschrift stellt die Ergebnisse einer empirischen Studie vor, die mit einer soziologischen Betrachtung des Qualit{\"a}tsmanagements unterschiedlicher Hochschulen zu dieser Diskussion beitr{\"a}gt. Auf Grundlage der Pr{\"a}misse, dass Verlauf und Folgen einer organisationalen Innovation nur verstanden werden k{\"o}nnen, wenn der allt{\"a}gliche Umgang der Organisationsmitglieder mit den neuen Strukturen und Prozessen in die Analyse einbezogen wird, geht die Studie von der Frage aus, wie Akteurinnen und Akteure an deutschen Hochschulen die Qualit{\"a}tsmanagementsysteme ihrer Organisationen nutzen. 
Die qualitative inhaltsanalytische Auswertung von 26 Leitfaden-Interviews mit Prorektorinnen und -rektoren, Qualit{\"a}tsmanagement-Personal und Studiendekaninnen und -dekanen an neun Hochschulen ergibt, dass die Strategien der Akteursgruppen an den Hochschulen im Zusammenspiel mit strukturellen Aspekten unterschiedliche Dynamiken entstehen lassen, mit denen Implikationen f{\"u}r die Lehrfreiheit verbunden sind: W{\"a}hrend die Autonomie der Lehrenden durch das Qualit{\"a}tsmanagement an einigen Hochschulen unterst{\"u}tzt wird, sind sowohl Autonomie als auch Verantwortung f{\"u}r Studium und Lehre an anderen Hochschulen Gegenstand andauernder Konflikte, die auch das Qualit{\"a}tsmanagement einschließen.}, language = {de} } @phdthesis{Sablowski2019, author = {Sablowski, Daniel}, title = {Spectroscopic analysis of the benchmark system Alpha Aurigae}, doi = {10.25932/publishup-43239}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432396}, school = {Universit{\"a}t Potsdam}, pages = {169}, year = {2019}, abstract = {Binaries play an important role in observational and theoretical astrophysics. Since the mass and the chemical composition are key ingredients for stellar evolution, high-resolution spectroscopy is an important and necessary tool to derive those parameters with high confidence in binaries. This involves carefully measuring the orbital motion via the determination of radial velocity (RV) shifts and applying sophisticated techniques to derive the abundances of elements within the stellar atmosphere. A technique superior to conventional cross-correlation methods to determine RV shifts is known as spectral disentangling. Hence, a major task of this thesis was the design of a sophisticated software package for this approach. In order to investigate secondary effects, such as flux and line-profile variations, which imprint changes on the spectrum, the behavior of spectral disentangling under such variability is key to understanding the derived values, improving them, and obtaining information about the variability itself. Therefore, the spectral disentangling code presented in this thesis and available to the community combines multiple advantages: separation of the spectra for detailed chemical analysis, derivation of orbital elements, derivation of individual RVs in order to investigate distorted systems (either by third body interaction or relativistic effects), the suppression of telluric contaminations, the derivation of variability, and the possibility to apply the technique to eclipsing binaries (important for orbital inclination) or in general to systems that undergo flux-variations. This code in combination with the spectral synthesis codes MOOG and SME was used in order to derive the carbon 12C/13C isotope ratio (CIR) of the benchmark binary Capella. The observational result is set into the context of theoretical evolution by the use of MESA models and resolves the discrepancy between theory and observations that has existed since the first measurement of Capella's CIR in 1976. The spectral disentangling code has been made available to the community, and its applicability to systems with completely different behavior, Wolf-Rayet stars, has also been investigated and resulted in a published article. Additionally, since this technique relies strongly on data quality, continuous development of scientific instruments to achieve the best observational data is of great importance in observational astrophysics.
For this reason, part of the work for this thesis was also devoted to astronomical instrumentation.}, language = {en} } @phdthesis{Batoulis2019, author = {Batoulis, Kimon}, title = {Sound integration of process and decision models}, doi = {10.25932/publishup-43738}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437386}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 155}, year = {2019}, abstract = {Business process management is an established technique for business organizations to manage and support their processes. Those processes are typically represented by graphical models designed with modeling languages such as the Business Process Model and Notation (BPMN). Since process models do not only serve the purpose of documentation but are also a basis for the implementation and automation of the processes, they have to satisfy certain correctness requirements. In this regard, the notion of soundness of workflow nets was developed, which can be applied to BPMN process models in order to verify their correctness. Because the original soundness criteria are very restrictive regarding the behavior of the model, different variants of the soundness notion have been developed for situations in which certain violations are not actually harmful. However, all of those notions consider only the control-flow structure of a process model. This poses a problem given that, with the recent release and the ongoing development of the Decision Model and Notation (DMN) standard, an increasing number of process models are complemented by corresponding decision models. DMN is a dedicated modeling language for decision logic and separates the concerns of process and decision logic into two different models, process and decision models respectively. Hence, this thesis is concerned with the development of decision-aware soundness notions, i.e., notions of soundness that build upon the original soundness ideas for process models but additionally take into account complementary decision models. Similar to the various notions of workflow net soundness, this thesis investigates different notions of decision soundness that can be applied depending on the desired degree of restrictiveness. Since decision tables are DMN's standardized means of representing decision logic, this thesis also puts special focus on decision tables, discussing how they can be translated into an unambiguous format and how their possible output values can be efficiently determined. Moreover, a prototypical implementation is described that supports checking a basic version of decision soundness. The decision soundness notions were also empirically evaluated on models from participants of an online course on process and decision modeling as well as from a process management project of a large insurance company. The evaluation demonstrates that violations of decision soundness indeed occur and can be detected with our approach.}, language = {en} } @phdthesis{Reeg2019, author = {Reeg, Jette}, title = {Simulating the impact of herbicide drift exposure on non-target terrestrial plant communities}, doi = {10.25932/publishup-42907}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429073}, school = {Universit{\"a}t Potsdam}, pages = {178}, year = {2019}, abstract = {In Europe, almost half of the terrestrial landscape is used for agriculture. Semi-natural habitats such as field margins are therefore essential for maintaining diversity in intensively managed farmland. 
However, plants located at field margins are threatened by agricultural practices such as the application of pesticides within the fields. Pesticides are chemicals developed to control undesired species within agricultural fields in order to enhance yields. The use of pesticides, however, also has effects on non-target organisms within and outside of the agricultural fields. Non-target organisms are organisms that are not intended to be sprayed or controlled. For example, plants occurring in field margins are not intended to be sprayed but can nevertheless be impaired by herbicide drift exposure. The authorization of plant protection products such as herbicides requires risk assessments to ensure that the application of the product has no unacceptable effects on the environment. For non-target terrestrial plants (NTTPs), the risk assessment is based on standardized greenhouse studies at the level of individual plants. To account for the protection of plant populations and communities under realistic field conditions, i.e. to extrapolate from greenhouse studies to field conditions and from the individual level to the community level, assessment factors are applied. However, recent studies question whether the current risk assessment scheme meets the specific protection goals for non-target terrestrial plants as suggested by the European Food Safety Authority (EFSA). There is a need to clarify the gaps in the current risk assessment and to include suitable higher tier options in the upcoming guidance document for non-target terrestrial plants. In my thesis, I studied the impact of herbicide drift exposure on NTTP communities using a mechanistic modelling approach. I addressed the main gaps and uncertainties of the current risk assessment and finally suggest this modelling approach as a novel higher tier option for future risk assessments. Specifically, I extended the plant community model IBC-grass (Individual-based community model for grasslands) to reflect herbicide impacts on plant individuals. In the first study, I compared model predictions of short-term herbicide impacts on artificial plant communities with empirical data and demonstrated the capability of the model to realistically reflect herbicide impacts. In the second study, I addressed the research question of whether reproductive endpoints need to be included in future risk assessments to protect plant populations and communities. I compared the consequences of theoretical herbicide impacts on different plant attributes for long-term plant population dynamics in the community context and concluded that reproductive endpoints only need to be considered if the herbicide effect is assumed to be very high. The endpoints measured in the current vegetative vigour and seedling emergence studies had strong impacts on the dynamics of plant populations and communities already at lower effect intensities. Finally, the third study analysed the long-term impacts of herbicide application for three different plant communities. This study highlighted the suitability of the modelling approach for simulating different communities and thus for detecting sensitive environmental conditions. Overall, my thesis demonstrates the suitability of mechanistic modelling approaches as higher tier options for risk assessments. Specifically, IBC-grass can incorporate available individual-level effect data from standardized greenhouse experiments to extrapolate to the community level under various environmental conditions. 
Thus, future risk assessments can be improved by detecting sensitive scenarios and including worst-case impacts on non-target plant communities.}, language = {en} } @phdthesis{Sidarenka2019, author = {Sidarenka, Uladzimir}, title = {Sentiment analysis of German Twitter}, doi = {10.25932/publishup-43742}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437422}, school = {Universit{\"a}t Potsdam}, pages = {vii, 217}, year = {2019}, abstract = {The immense popularity of online communication services in the last decade has not only upended our lives (with news spreading like wildfire on the Web, presidents announcing their decisions on Twitter, and the outcome of political elections being determined on Facebook) but also dramatically increased the amount of data exchanged on these platforms. Therefore, if we wish to understand the needs of modern society better and want to protect it from new threats, we urgently need more robust, higher-quality natural language processing (NLP) applications that can recognize such necessities and menaces automatically, by analyzing uncensored texts. Unfortunately, most NLP programs today have been created for standard language, as we know it from newspapers, or, in the best case, adapted to the specifics of English social media. This thesis reduces the existing deficit by entering the new frontier of German online communication and addressing one of its most prolific forms: users' conversations on Twitter. In particular, it explores the ways and means by which people express their opinions on this service, examines current approaches to the automatic mining of these feelings, and proposes novel methods, which outperform state-of-the-art techniques. For this purpose, I introduce a new corpus of German tweets that have been manually annotated with sentiments, their targets and holders, as well as lexical polarity items and their contextual modifiers. Using these data, I explore four major areas of sentiment research: (i) generation of sentiment lexicons, (ii) fine-grained opinion mining, (iii) message-level polarity classification, and (iv) discourse-aware sentiment analysis. In the first task, I compare three popular groups of lexicon generation methods: dictionary-, corpus-, and word-embedding-based ones, finding that dictionary-based systems generally yield better polarity lists than the last two groups. Apart from this, I propose a linear projection algorithm, whose results surpass many existing automatically generated lexicons. Afterwards, in the second task, I examine two common approaches to the automatic prediction of sentiment spans, their sources, and targets: conditional random fields (CRFs) and recurrent neural networks, obtaining higher scores with the former model and improving these results even further by redefining the structure of the CRF graphs. When dealing with message-level polarity classification, I juxtapose three major sentiment paradigms: lexicon-, machine-learning-, and deep-learning-based systems, and try to unite the first and last of these method groups by introducing a bidirectional neural network with lexicon-based attention. 
Finally, in order to make the new classifier aware of microblogs' discourse structure, I let it separately analyze the elementary discourse units (EDUs) of each tweet and infer the overall polarity of a message from the scores of its EDUs with the help of two new approaches: latent-marginalized CRFs and the Recursive Dirichlet Process.}, language = {en} } @phdthesis{Nikaj2019, author = {Nikaj, Adriatik}, title = {Restful choreographies}, doi = {10.25932/publishup-43890}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-438903}, school = {Universit{\"a}t Potsdam}, pages = {xix, 146}, year = {2019}, abstract = {Business process management has become a key instrument to organize work, as many companies represent their operations in business process models. Recently, business process choreography diagrams have been introduced as part of the Business Process Model and Notation standard to represent interactions between business processes run by different partners. When it comes to interactions between services on the Web, Representational State Transfer (REST) is one of the primary architectural styles employed by web services today. Ideally, the RESTful interactions between participants should implement the interactions defined at the business choreography level. The problem, however, is the conceptual gap between business process choreography diagrams and RESTful interactions. Choreography diagrams, on the one hand, are modeled by business domain experts with the purpose of capturing, communicating and, ideally, driving the business interactions. RESTful interactions, on the other hand, depend on RESTful interfaces that are designed by web engineers with the purpose of facilitating the interaction between participants on the internet. In most cases, however, business domain experts are unaware of the technology behind web service interfaces, and web engineers tend to overlook the overall business goals of web services. While there is considerable work on using process models during process implementation, there is little work on using choreography models to implement interactions between business processes. This thesis addresses this research gap by raising the following research question: How can the conceptual gap between business process choreographies and RESTful interactions be closed? This thesis offers several research contributions that jointly answer the research question. The main research contribution is the design of a language that captures RESTful interactions between participants: the RESTful choreography modeling language. Formal completeness properties (with respect to REST) are introduced to validate its instances, called RESTful choreographies. A systematic, semi-automatic method for deriving RESTful choreographies from business process choreographies is proposed. The method employs natural language processing techniques to translate business interactions into RESTful interactions. The effectiveness of the approach is shown by developing a prototypical tool that evaluates the derivation method over a large number of choreography models. In addition, the thesis proposes solutions towards implementing RESTful choreographies. 
In particular, two RESTful service specifications are introduced to support, respectively, the execution of choreographies' exclusive gateways and the guidance of RESTful interactions.}, language = {en} } @phdthesis{Kochlik2019, author = {Kochlik, Bastian Max}, title = {Relevance of biomarkers for the diagnosis of the frailty syndrome}, doi = {10.25932/publishup-44118}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441186}, school = {Universit{\"a}t Potsdam}, pages = {IV, 99}, year = {2019}, abstract = {Frailty and sarcopenia share some underlying characteristics such as loss of muscle mass, low muscle strength, and low physical performance. Frailty and sarcopenia criteria are mainly assessed by imaging parameters and functional examinations; however, these measures can have limitations in clinical settings. Therefore, finding suitable biomarkers that reflect a catabolic muscle state, e.g. an elevated muscle protein turnover as suggested in frailty, is becoming more relevant for frailty diagnosis and risk assessment. 3-Methylhistidine (3-MH) and its ratios 3-MH-to-creatinine (3-MH/Crea) and 3-MH-to-estimated glomerular filtration rate (3-MH/eGFR) are under discussion as possible biomarkers of muscle protein turnover and might support the diagnosis of frailty. However, there is some skepticism about the reliability of 3-MH measures, since confounders such as meat and fish intake might influence 3-MH plasma concentrations. Therefore, the influence of dietary habits and of an intervention with white meat on plasma 3-MH was determined in young, healthy individuals. In another study, the cross-sectional associations of plasma 3-MH, 3-MH/Crea and 3-MH/eGFR with frailty status (robust, pre-frail and frail) were investigated. Oxidative stress (OS) is a possible contributor to frailty development, and high OS levels as well as low micronutrient levels are associated with the frailty syndrome. However, data on simultaneous measurements of OS biomarkers together with micronutrients are lacking in studies including frail, pre-frail and robust individuals. Therefore, the cross-sectional associations of protein carbonyls (PrCarb), 3-nitrotyrosine (3-NT) and several micronutrients with frailty status were determined. A validated UPLC-MS/MS (ultra-performance liquid chromatography tandem mass spectrometry) method for the simultaneous quantification of 3-MH and 1-MH (1-methylhistidine, a marker for meat and fish consumption) was presented and used for further analyses. Omnivores showed higher plasma 3-MH and 1-MH concentrations than vegetarians, and a white meat intervention resulted in an increase in plasma 3-MH, 3-MH/Crea, 1-MH and 1-MH/Crea in omnivores. Elevated 3-MH and 3-MH/Crea levels declined significantly within 24 hours after this white meat intervention. Thus, 3-MH and 3-MH/Crea might be used as biomarkers of muscle protein turnover when subjects have not consumed meat in the 24 hours prior to blood sampling. Plasma 3-MH, 3-MH/Crea and 3-MH/eGFR were higher in frail individuals than in robust individuals. Additionally, these biomarkers were positively associated with frailty in linear regression models, and higher odds of being frail were found for every increase in 3-MH and 3-MH/eGFR quintile in multivariable logistic regression models adjusted for several confounders. 
This was the first study to use 3-MH/eGFR, and it is concluded that plasma 3-MH, 3-MH/Crea and 3-MH/eGFR might be used to identify frail individuals or individuals at higher risk of becoming frail, and that there might be threshold concentrations or ratios to support these diagnoses. Higher vitamin D3, lutein/zeaxanthin, γ-tocopherol, α-carotene, β-carotene, lycopene and β-cryptoxanthin concentrations, and additionally lower PrCarb concentrations, were found in robust compared to frail individuals in multivariate linear models. In multivariate logistic regression models, frail subjects had higher odds than robust individuals of being in the lowest rather than the highest tertile for vitamin D3, α-tocopherol, α-carotene, β-carotene, lycopene, lutein/zeaxanthin, and β-cryptoxanthin, and higher odds of being in the highest rather than the lowest tertile for PrCarb. Thus, a low micronutrient status together with a high PrCarb status is associated with pre-frailty and frailty.}, language = {en} } @phdthesis{LeBot2019, author = {Le Bot, Nils}, title = {Quel avenir pour les gares m{\´e}tropolitaines fran{\c{c}}aises et allemandes ?}, doi = {10.25932/publishup-44220}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442201}, school = {Universit{\"a}t Potsdam}, pages = {589}, year = {2019}, abstract = {This urban planning thesis sets out to reflect on the future of French and German metropolitan railway stations by 2050. It questions the foundations of the station as a conceptual urban object (approached as a system) and hypothesises that this object is, in a sense, endowed with autonomous properties. Among these properties, it is the constantly renewed and conflictual process of expansion and dialogue between the station and its surrounding urban fabric that guides this research, notably in its relationship to the hypermobility of metropolises. To this end, the thesis draws on four case studies: the main stations of Cologne and Stuttgart in Germany and the stations of Paris-Montparnasse and Lyon-Part-Dieu in France. It begins with a detailed history of their morphological evolution in order to identify a series of architectural and urban variables. It then proceeds to a series of prospective analyses, assessing the possible influence of public transport and mobility policies on the conceptual future of stations. The thesis then proposes the concept of the station system (syst{\`e}me-gare) to describe the expansion and integration of metropolitan stations into their urban environment, a process of dialectical negotiation that is not resolved by the concept of the station as a place of life or as a city. It therefore invites us to think of the station as a heterotopia and proposes a depolarised and de-hierarchised reading of these spaces, introducing the concepts of the orchestra of stations and of the metastation (m{\´e}tagare). Finally, this research offers a critical reading of the 'digital city' and of the concept of 'mobility as a service'. To avoid a potentially harmful shift towards just-in-time operation, the application of these concepts to stations cannot dispense with a simultaneous increase in physical space.}, language = {fr} }