@phdthesis{Zuehlke2017, author = {Z{\"u}hlke, Martin}, title = {Elektrosprayionisation Ionenmobilit{\"a}tsspektrometrie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407452}, school = {Universit{\"a}t Potsdam}, pages = {viii, 113, XIV}, year = {2017}, abstract = {Die Elektrosprayionisation (ESI) ist eine der weitverbreitetsten Ionisationstechniken f{\"u}r fl{\"u}ssige Proben in der Massen- und Ionenmobilit{\"a}ts(IM)-Spektrometrie. Aufgrund ihrer schonenden Ionisierung wird ESI vorwiegend f{\"u}r empfindliche, komplexe Molek{\"u}le in der Biologie und Medizin eingesetzt. {\"U}berdies ist sie allerdings f{\"u}r ein sehr breites Spektrum an Substanzklassen anwendbar. Die IM-Spektrometrie wurde urspr{\"u}nglich zur Detektion gasf{\"o}rmiger Proben entwickelt, die haupts{\"a}chlich durch radioaktive Quellen ionisiert werden. Sie ist die einzige analytische Methode, bei der Isomere in Echtzeit getrennt und {\"u}ber ihre charakteristische IM direkt identifiziert werden k{\"o}nnen. ESI wurde in den 90er Jahren durch die Hill Gruppe in die IM-Spektrometrie eingef{\"u}hrt. Die Kombination wird bisher jedoch nur von wenigen Gruppen verwendet und hat deshalb noch ein hohes Entwicklungspotential. Ein vielversprechendes Anwendungsfeld ist der Einsatz in der Hochleistungsfl{\"u}ssigkeitschromatographie (HPLC) zur mehrdimensionalen Trennung. Heutzutage ist die HPLC die Standardmethode zur Trennung komplexer Proben in der Routineanalytik. HPLC-Trennungsg{\"a}nge sind jedoch h{\"a}ufig langwierig und der Einsatz verschiedener Laufmittel, hoher Flussraten, von Puffern, sowie Laufmittelgradienten stellt hohe Anforderungen an die Detektoren. Die ESI-IM-Spektrometrie wurde in einigen Studien bereits als HPLC-Detektor eingesetzt, war dort bisher jedoch auf Flussratensplitting oder geringe Flussraten des Laufmittels beschr{\"a}nkt. 
In dieser kumulativen Doktorarbeit konnte daher erstmals ein ESI IM-Spektrometer als HPLC-Detektor f{\"u}r den Flussratenbereich von 200-1500 {\textmu}l/min entwickelt werden. Anhand von f{\"u}nf Publikationen wurden (1) {\"u}ber eine umfassende Charakterisierung die Eignung des Spektrometers als HPLC-Detektor festgestellt, (2) ausgew{\"a}hlte komplexe Trenng{\"a}nge pr{\"a}sentiert und (3) die Anwendung zum Reaktionsmonitoring und (4, 5) m{\"o}gliche Weiterentwicklungen gezeigt. Erfolgreich konnten mit dem selbst-entwickelten ESI IM-Spektrometer typische HPLC-Bedingungen wie Wassergehalte im Laufmittel von bis zu 90\%, Pufferkonzentrationen von bis zu 10 mM, sowie Nachweisgrenzen von bis zu 50 nM erreicht werden. Weiterhin wurde anhand der komplexen Trennungsg{\"a}nge (24 Pestizide/18 Aminos{\"a}uren) gezeigt, dass die HPLC und die IM-Spektrometrie eine hohe Orthogonalit{\"a}t besitzen. Eine effektive Peakkapazit{\"a}t von 240 wurde so realisiert. Auf der HPLC-S{\"a}ule koeluierende Substanzen konnten {\"u}ber die Driftzeit getrennt und {\"u}ber ihre IM identifiziert werden, sodass die Gesamttrennzeiten erheblich minimiert werden konnten. Die Anwendbarkeit des ESI IM-Spektrometers zur {\"U}berwachung chemischer Synthesen wurde anhand einer dreistufigen Reaktion demonstriert. Es konnten die wichtigsten Edukte, Zwischenprodukte und Produkte aller Stufen identifiziert werden. Eine quantitative Auswertung war sowohl {\"u}ber eine kurze HPLC-Vortrennung als auch durch die Entwicklung eines eigenen Kalibrierverfahrens, welches die Ladungskonkurrenz bei ESI ber{\"u}cksichtigt, ohne HPLC m{\"o}glich. Im zweiten Teil der Arbeit werden zwei Weiterentwicklungen des Spektrometers pr{\"a}sentiert. Eine M{\"o}glichkeit ist die Reduzierung des Drucks in den intermedi{\"a}ren Bereich (300 - 1000 mbar) mit dem Ziel der Verringerung der ben{\"o}tigten Spannungen. 
Mithilfe von Streulichtbildern und Strom-Spannungs-Kurven wurde f{\"u}r geringe Dr{\"u}cke eine verminderte Freisetzung der Analyt-Ionen aus den Tropfen festgestellt. Die Verluste konnten jedoch {\"u}ber h{\"o}here elektrische Feldst{\"a}rken ausgeglichen werden, sodass gleiche Nachweisgrenzen bei 500 mbar und bei 1 bar erreicht wurden. Die zweite Weiterentwicklung ist ein neuartiges Ionentor mit Pulsschaltung, welches eine Verdopplung der Aufl{\"o}sung auf bis zu R > 100 bei gleicher Sensitivit{\"a}t erm{\"o}glichte. Eine denkbare Anwendung im Bereich der Peptidanalytik wurde mit beachtlichen Aufl{\"o}sungen der Peptide von R = 90 gezeigt.}, language = {de} } @phdthesis{Zuo2017, author = {Zuo, Zhe}, title = {From unstructured to structured: Context-based named entity mining from text}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412576}, school = {Universit{\"a}t Potsdam}, pages = {vii, 112}, year = {2017}, abstract = {With recent advances in the area of information extraction, automatically extracting structured information from a vast amount of unstructured textual data becomes an important task, which is infeasible for humans to capture all information manually. Named entities (e.g., persons, organizations, and locations), which are crucial components in texts, are usually the subjects of structured information from textual documents. Therefore, the task of named entity mining receives much attention. It consists of three major subtasks, which are named entity recognition, named entity linking, and relation extraction. These three tasks build up an entire pipeline of a named entity mining system, where each of them has its challenges and can be employed for further applications. As a fundamental task in the natural language processing domain, studies on named entity recognition have a long history, and many existing approaches produce reliable results. The task is aiming to extract mentions of named entities in text and identify their types. 
Named entity linking recently received much attention with the development of knowledge bases that contain rich information about entities. The goal is to disambiguate mentions of named entities and to link them to the corresponding entries in a knowledge base. Relation extraction, as the final step of named entity mining, is a highly challenging task, which is to extract semantic relations between named entities, e.g., the ownership relation between two companies. In this thesis, we review the state-of-the-art of named entity mining domain in detail, including valuable features, techniques, evaluation methodologies, and so on. Furthermore, we present two of our approaches that focus on the named entity linking and relation extraction tasks separately. To solve the named entity linking task, we propose the entity linking technique, BEL, which operates on a textual range of relevant terms and aggregates decisions from an ensemble of simple classifiers. Each of the classifiers operates on a randomly sampled subset of the above range. In extensive experiments on hand-labeled and benchmark datasets, our approach outperformed state-of-the-art entity linking techniques, both in terms of quality and efficiency. For the task of relation extraction, we focus on extracting a specific group of difficult relation types, business relations between companies. These relations can be used to gain valuable insight into the interactions between companies and perform complex analytics, such as predicting risk or valuating companies. Our semi-supervised strategy can extract business relations between companies based on only a few user-provided seed company pairs. By doing so, we also provide a solution for the problem of determining the direction of asymmetric relations, such as the ownership_of relation. We improve the reliability of the extraction process by using a holistic pattern identification method, which classifies the generated extraction patterns. 
Our experiments show that we can accurately and reliably extract new entity pairs occurring in the target relation by using as few as five labeled seed pairs.}, language = {en} } @phdthesis{Ziegler2017, author = {Ziegler, Moritz O.}, title = {The 3D in-situ stress field and its changes in geothermal reservoirs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403838}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 110, XV}, year = {2017}, abstract = {Information on the contemporary in-situ stress state of the earth's crust is essential for geotechnical applications and physics-based seismic hazard assessment. Yet, stress data records for a data point are incomplete and their availability is usually not dense enough to allow conclusive statements. This demands a thorough examination of the in-situ stress field which is achieved by 3D geomechanical-numerical models. However, the model's spatial resolution is limited and the resulting local stress state is subject to large uncertainties that confine the significance of the findings. In addition, temporal variations of the in-situ stress field are naturally or anthropogenically induced. In my thesis I address these challenges in three manuscripts that investigate (1) the current crustal stress field orientation, (2) the 3D geomechanical-numerical modelling of the in-situ stress state, and (3) the phenomenon of injection induced temporal stress tensor rotations. In the first manuscript I present the first comprehensive stress data compilation of Iceland with 495 data records. Therefore, I analysed image logs from 57 boreholes in Iceland for indicators of the orientation of the maximum horizontal stress component. The study is the first stress survey from different kinds of stress indicators in a geologically very young and tectonically active area of an onshore spreading ridge. It reveals a distinct stress field with a depth independent stress orientation even very close to the spreading centre. 
In the second manuscript I present a calibrated 3D geomechanical-numerical modelling approach of the in-situ stress state of the Bavarian Molasse Basin that investigates the regional (70x70x10 km{\textthreesuperior}) and local (10x10x10 km{\textthreesuperior}) stress state. To link these two models I develop a multi-stage modelling approach that provides a reliable and efficient method to derive from the larger scale model initial and boundary conditions for the smaller scale model. Furthermore, I quantify the uncertainties in the model's results which are inherent to geomechanical-numerical modelling in general and the multi-stage approach in particular. I show that the significance of the model's results is mainly reduced due to the uncertainties in the material properties and the low number of available stress magnitude data records for calibration. In the third manuscript I investigate the phenomenon of injection induced temporal stress tensor rotation and its controlling factors. I conduct a sensitivity study with a 3D generic thermo-hydro-mechanical model. I show that the key control factors for the stress tensor rotation are the permeability as the decisive factor, the injection rate, and the initial differential stress. In particular for enhanced geothermal systems with a low permeability large rotations of the stress tensor are indicated. According to these findings the estimation of the initial differential stress in a reservoir is possible provided the permeability is known and the angle of stress rotation is observed. 
I propose that the stress tensor rotations can be a key factor in terms of the potential for induced seismicity on pre-existing faults due to the reorientation of the stress field that changes the optimal orientation of faults.}, language = {en} } @phdthesis{Zieger2017, author = {Zieger, Tobias}, title = {Self-adaptive data quality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410573}, school = {Universit{\"a}t Potsdam}, pages = {vii, 125}, year = {2017}, abstract = {Carrying out business processes successfully is closely linked to the quality of the data inventory in an organization. Lacks in data quality lead to problems: Incorrect address data prevents (timely) shipments to customers. Erroneous orders lead to returns and thus to unnecessary effort. Wrong pricing forces companies to miss out on revenues or to impair customer satisfaction. If orders or customer records cannot be retrieved, complaint management takes longer. Due to erroneous inventories, too few or too many supplies might be reordered. A special problem with data quality and the reason for many of the issues mentioned above are duplicates in databases. Duplicates are different representations of the same real-world objects in a dataset. However, these representations differ from each other and are for that reason hard to match by a computer. Moreover, the number of required comparisons to find those duplicates grows with the square of the dataset size. To cleanse the data, these duplicates must be detected and removed. Duplicate detection is a very laborious process. To achieve satisfactory results, appropriate software must be created and configured (similarity measures, partitioning keys, thresholds, etc.). Both require much manual effort and experience. This thesis addresses automation of parameter selection for duplicate detection and presents several novel approaches that eliminate the need for human experience in parts of the duplicate detection process. 
A pre-processing step is introduced that analyzes the datasets in question and classifies their attributes semantically. Not only do these annotations help understanding the respective datasets, but they also facilitate subsequent steps, for example, by selecting appropriate similarity measures or normalizing the data upfront. This approach works without schema information. Following that, we show a partitioning technique that strongly reduces the number of pair comparisons for the duplicate detection process. The approach automatically finds particularly suitable partitioning keys that simultaneously allow for effective and efficient duplicate retrieval. By means of a user study, we demonstrate that this technique finds partitioning keys that outperform expert suggestions and additionally does not need manual configuration. Furthermore, this approach can be applied independently of the attribute types. To measure the success of a duplicate detection process and to execute the described partitioning approach, a gold standard is required that provides information about the actual duplicates in a training dataset. This thesis presents a technique that uses existing duplicate detection results and crowdsourcing to create a near gold standard that can be used for the purposes above. Another part of the thesis describes and evaluates strategies how to reduce these crowdsourcing costs and to achieve a consensus with less effort.}, language = {en} } @phdthesis{Zhou2017, author = {Zhou, Bin}, title = {On the assessment of surface urban heat island}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404383}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 119}, year = {2017}, abstract = {Inwiefern St{\"a}dte unter den Megatrends der Urbanisierung und des Klimawandels nachhaltig gestaltet werden k{\"o}nnen, bleibt umstritten. Dies ist zum Teil auf unzureichende Kenntnisse der Mensch-Umwelt-Interaktionen zur{\"u}ckzuf{\"u}hren. 
Als die am vollst{\"a}ndigsten dokumentierte anthropogene Klimamodifikation ruft der Urbane Hitzeinsel (UHI) Effekt weltweit Sorgen hinsichtlich der Gesundheit der Bev{\"o}lkerung hervor. Dazu kommt noch ein immer h{\"a}ufigeres und intensiveres Auftreten von Hitzewellen, wodurch das Wohlbefinden der Stadtbewohner weiter beeintr{\"a}chtigt wird. Trotz eines deutlichen Anstiegs der Zahl der UHI-bezogenen Ver{\"o}ffentlichungen in den letzten Jahrzehnten haben die unterschiedlichen Definitionen von st{\"a}dtischen und l{\"a}ndlichen Gebieten in bisherigen Studien die allgemeine Vergleichbarkeit der Resultate stark erschwert. Dar{\"u}ber hinaus haben nur wenige Studien den UHI-Effekt und seine Einflussfaktoren anhand einer Kombination der Landnutzungsdaten und der thermischen Fernerkundung systematisch untersucht. Diese Arbeit stellt einen allgemeinen Rahmen zur Quantifizierung von UHI-Intensit{\"a}ten mittels eines automatisierten Algorithmus vor, wobei St{\"a}dte als Agglomerationen maximal r{\"a}umlicher Kontinuit{\"a}t basierend auf Landnutzungsdaten identifiziert, sowie deren l{\"a}ndliche Umfelder analog definiert werden. Durch Verkn{\"u}pfung der Landnutzungsdaten mit Landoberfl{\"a}chentemperaturen von Satelliten kann die UHI-Intensit{\"a}t robust und konsistent berechnet werden. Anhand dieser Innovation wurde nicht nur der Zusammenhang zwischen Stadtgr{\"o}ße und UHI-Intensit{\"a}t erneut untersucht, sondern auch die Auswirkungen der Stadtform auf die UHI-Intensit{\"a}t quantifiziert. Diese Arbeit leistet vielf{\"a}ltige Beitr{\"a}ge zum tieferen Verst{\"a}ndnis des UHI-Ph{\"a}nomens. Erstens wurde eine log-lineare Beziehung zwischen UHI-Intensit{\"a}t und Stadtgr{\"o}ße unter Ber{\"u}cksichtigung der 5,000 europ{\"a}ischen St{\"a}dte best{\"a}tigt. Werden kleinere St{\"a}dte auch ber{\"u}cksichtigt, ergibt sich eine log-logistische Beziehung. 
Zweitens besteht ein komplexes Zusammenspiel zwischen der Stadtform und der UHI-Intensit{\"a}t: die Stadtgr{\"o}ße stellt den st{\"a}rksten Einfluss auf die UHI-Intensit{\"a}t dar, gefolgt von der fraktalen Dimension und der Anisometrie. Allerdings zeigen ihre relativen Beitr{\"a}ge zur UHI-Intensit{\"a}t eine regionale Heterogenit{\"a}t, welche die Bedeutung r{\"a}umlicher Muster w{\"a}hrend der Umsetzung von UHI-Anpassungsmaßnahmen hervorhebt. Des Weiteren ergibt sich eine neue Saisonalit{\"a}t der UHI-Intensit{\"a}t f{\"u}r individuelle St{\"a}dte in Form von Hysteresekurven, die eine Phasenverschiebung zwischen den Zeitreihen der UHI-Intensit{\"a}t und der Hintergrundtemperatur andeutet. Diese Saisonalit{\"a}t wurde anhand von Luft- und Landoberfl{\"a}chentemperaturen untersucht, indem die Satellitenbeobachtung und die Modellierung der urbanen Grenzschicht mittels des UrbClim-Modells kombiniert wurden. Am Beispiel von London ist die Diskrepanz der Saisonalit{\"a}ten zwischen den beiden Temperaturen vor allem auf die mit der einfallenden Sonnenstrahlung verbundene Besonderheit der Landoberfl{\"a}chentemperatur zur{\"u}ckzuf{\"u}hren. Dar{\"u}ber hinaus spielt das regionale Klima eine wichtige Rolle bei der Entwicklung der UHI. Diese Arbeit ist eine der ersten Studien dieser Art, die eine systematische und statistische Untersuchung des UHI-Effektes erm{\"o}glicht. 
Die Ergebnisse sind von besonderer Bedeutung f{\"u}r die allgemeine r{\"a}umliche Planung und Regulierung auf Meso- und Makroebenen, damit sich Vorteile der rapiden Urbanisierung nutzbar machen und zeitgleich die folgende Hitzebelastung proaktiv vermindern lassen.}, language = {en} } @phdthesis{Wolf2017, author = {Wolf, Julia}, title = {Schadenserkennung in Beton durch {\"U}berwachung mit eingebetteten Ultraschallpr{\"u}fk{\"o}pfen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397363}, school = {Universit{\"a}t Potsdam}, pages = {ix, 142}, year = {2017}, abstract = {Die zerst{\"o}rungsfreien Pr{\"u}fungen von Bauwerken mit Hilfe von Ultraschallmessverfahren haben in den letzten Jahren an Bedeutung gewonnen. Durch Ultraschallmessungen k{\"o}nnen die Geometrien von Bauteilen bestimmt sowie von außen nicht sichtbare Fehler wie Delaminationen und Kiesnester erkannt werden. Mit neuartigen, in das Betonbauteil eingebetteten Ultraschallpr{\"u}fk{\"o}pfen sollen nun Bauwerke dauerhaft auf Ver{\"a}nderungen {\"u}berpr{\"u}ft werden. Dazu werden Ultraschallsignale direkt im Inneren eines Bauteils erzeugt, was die M{\"o}glichkeiten der herk{\"o}mmlichen Methoden der Bauwerks{\"u}berwachung wesentlich erweitert. Ein Ultraschallverfahren k{\"o}nnte mit eingebetteten Pr{\"u}fk{\"o}pfen ein Betonbauteil kontinuierlich integral {\"u}berwachen und damit auch stetig fortschreitende Gef{\"u}ge{\"a}nderungen, wie beispielsweise Mikrorisse, registrieren. Sicherheitsrelevante Bauteile, die nach dem Einbau f{\"u}r Messungen unzug{\"a}nglich oder mittels Ultraschall, beispielsweise durch zus{\"a}tzliche Beschichtungen der Oberfl{\"a}che, nicht pr{\"u}fbar sind, lassen sich mit eingebetteten Pr{\"u}fk{\"o}pfen {\"u}berwachen. An bereits vorhandenen Bauwerken k{\"o}nnen die Ultraschallpr{\"u}fk{\"o}pfe mithilfe von Bohrl{\"o}chern und speziellem Verpressm{\"o}rtel auch nachtr{\"a}glich in das Bauteil integriert werden. 
F{\"u}r Fertigbauteile bieten sich eingebettete Pr{\"u}fk{\"o}pfe zur Herstellungskontrolle sowie zur {\"U}berwachung der Baudurchf{\"u}hrung als Werkzeug der Qualit{\"a}tssicherung an. Auch die schnelle Schadensanalyse eines Bauwerks nach Naturkatastrophen, wie beispielsweise einem Erdbeben oder einer Flut, ist denkbar. Durch die gute Ankopplung erm{\"o}glichen diese neuartigen Pr{\"u}fk{\"o}pfe den Einsatz von empfindlichen Auswertungsmethoden, wie die Kreuzkorrelation, die Coda-Wellen-Interferometrie oder die Amplitudenauswertung, f{\"u}r die Signalanalyse. Bei regelm{\"a}ßigen Messungen k{\"o}nnen somit sich anbahnende Sch{\"a}den eines Bauwerks fr{\"u}hzeitig erkannt werden. Da die Sch{\"a}digung eines Bauwerks keine direkt messbare Gr{\"o}ße darstellt, erfordert eine eindeutige Schadenserkennung in der Regel die Messung mehrerer physikalischer Gr{\"o}ßen die geeignet verkn{\"u}pft werden. Physikalische Gr{\"o}ßen k{\"o}nnen sein: Ultraschalllaufzeit, Amplitude des Ultraschallsignals und Umgebungstemperatur. Dazu m{\"u}ssen Korrelationen zwischen dem Zustand des Bauwerks, den Umgebungsbedingungen und den Parametern des gemessenen Ultraschallsignals untersucht werden. In dieser Arbeit werden die neuartigen Pr{\"u}fk{\"o}pfe vorgestellt. Es wird beschrieben, dass sie sich, sowohl in bereits errichtete Betonbauwerke als auch in der Konstruktion befindliche, einbauen lassen. Experimentell wird gezeigt, dass die Pr{\"u}fk{\"o}pfe in mehreren Ebenen eingebettet sein k{\"o}nnen da ihre Abstrahlcharakteristik im Beton nahezu ungerichtet ist. Die Mittenfrequenz von rund 62 kHz erm{\"o}glicht Abst{\"a}nde, je nach Betonart und SRV, von mindestens 3 m zwischen Pr{\"u}fk{\"o}pfen die als Sender und Empf{\"a}nger arbeiten. Die Empfindlichkeit der eingebetteten Pr{\"u}fk{\"o}pfe gegen{\"u}ber Ver{\"a}nderungen im Beton wird an Hand von zwei Laborexperimenten gezeigt, einem Drei-Punkt-Biegeversuch und einem Versuch zur Erzeugung von Frost-Tau-Wechsel Sch{\"a}den. 
Die Ergebnisse werden mit anderen zerst{\"o}rungsfreien Pr{\"u}fverfahren verglichen. Es zeigt sich, dass die Pr{\"u}fk{\"o}pfe durch die Anwendung empfindlicher Auswertemethoden, auftretende Risse im Beton detektieren, bevor diese eine Gefahr f{\"u}r das Bauwerk darstellen. Abschließend werden Beispiele von Installation der neuartigen Ultraschallpr{\"u}fk{\"o}pfe in realen Bauteilen, zwei Br{\"u}cken und einem Fundament, gezeigt und basierend auf dort gewonnenen ersten Erfahrungen ein Konzept f{\"u}r die Umsetzung einer Langzeit{\"u}berwachung aufgestellt.}, language = {de} } @phdthesis{Wittenbecher2017, author = {Wittenbecher, Clemens}, title = {Linking whole-grain bread, coffee, and red meat to the risk of type 2 diabetes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404592}, school = {Universit{\"a}t Potsdam}, pages = {XII, 194, ii}, year = {2017}, abstract = {Background: Consumption of whole-grain, coffee, and red meat were consistently related to the risk of developing type 2 diabetes in prospective cohort studies, but potentially underlying biological mechanisms are not well understood. Metabolomics profiles were shown to be sensitive to these dietary exposures, and at the same time to be informative with respect to the risk of type 2 diabetes. Moreover, graphical network-models were demonstrated to reflect the biological processes underlying high-dimensional metabolomics profiles. Aim: The aim of this study was to infer hypotheses on the biological mechanisms that link consumption of whole-grain bread, coffee, and red meat, respectively, to the risk of developing type 2 diabetes. More specifically, it was aimed to consider network models of amino acid and lipid profiles as potential mediators of these risk-relations. Study population: Analyses were conducted in the prospective EPIC-Potsdam cohort (n = 27,548), applying a nested case-cohort design (n = 2731, including 692 incident diabetes cases). 
Habitual diet was assessed with validated semiquantitative food-frequency questionnaires. Concentrations of 126 metabolites (acylcarnitines, phosphatidylcholines, sphingomyelins, amino acids) were determined in baseline-serum samples. Incident type 2 diabetes cases were assessed and validated in an active follow-up procedure. The median follow-up time was 6.6 years. Analytical design: The methodological approach was conceptually based on counterfactual causal inference theory. Observations on the network-encoded conditional independence structure restricted the space of possible causal explanations of observed metabolomics-data patterns. Given basic directionality assumptions (diet affects metabolism; metabolism affects future diabetes incidence), adjustment for a subset of direct neighbours was sufficient to consistently estimate network-independent direct effects. Further model-specification, however, was limited due to missing directionality information on the links between metabolites. Therefore, a multi-model approach was applied to infer the bounds of possible direct effects. All metabolite-exposure links and metabolite-outcome links, respectively, were classified into one of three categories: direct effect, ambiguous (some models indicated an effect, others not), and no-effect. Cross-sectional and longitudinal relations were evaluated in multivariable-adjusted linear regression and Cox proportional hazard regression models, respectively. Models were comprehensively adjusted for age, sex, body mass index, prevalence of hypertension, dietary and lifestyle factors, and medication. Results: Consumption of whole-grain bread was related to lower levels of several lipid metabolites with saturated and monounsaturated fatty acids. Coffee was related to lower aromatic and branched-chain amino acids, and had potential effects on the fatty acid profile within lipid classes. 
Red meat was linked to lower glycine levels and was related to higher circulating concentrations of branched-chain amino acids. In addition, potential marked effects of red meat consumption on the fatty acid composition within the investigated lipid classes were identified. Moreover, potential beneficial and adverse direct effects of metabolites on type 2 diabetes risk were detected. Aromatic amino acids and lipid metabolites with even-chain saturated (C14-C18) and with specific polyunsaturated fatty acids had adverse effects on type 2 diabetes risk. Glycine, glutamine, and lipid metabolites with monounsaturated fatty acids and with other species of polyunsaturated fatty acids were classified as having direct beneficial effects on type 2 diabetes risk. Potential mediators of the diet-diabetes links were identified by graphically overlaying this information in network models. Mediation analyses revealed that effects on lipid metabolites could potentially explain about one fourth of the whole-grain bread effect on type 2 diabetes risk; and that effects of coffee and red meat consumption on amino acid and lipid profiles could potentially explain about two thirds of the altered type 2 diabetes risk linked to these dietary exposures. Conclusion: An algorithm was developed that is capable to integrate single external variables (continuous exposures, survival time) and high-dimensional metabolomics-data in a joint graphical model. 
Application to the EPIC-Potsdam cohort study revealed that the observed conditional independence patterns were consistent with the a priori mediation hypothesis: Early effects on lipid and amino acid metabolism had the potential to explain large parts of the link between three of the most widely discussed diabetes-related dietary exposures and the risk of developing type 2 diabetes.}, language = {en} } @phdthesis{Willersinn2017, author = {Willersinn, Jochen}, title = {Self-Assembly of double hydrophilic block copolymers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408578}, school = {Universit{\"a}t Potsdam}, pages = {119, clxxiv}, year = {2017}, abstract = {The motivation of this work was to investigate the self-assembly of a block copolymer species that attracted little attention before, double hydrophilic block copolymers (DHBCs). DHBCs consist of two linear hydrophilic polymer blocks. The self-assembly of DHBCs towards suprastructures such as particles and vesicles is determined via a strong difference in hydrophilicity between the corresponding blocks leading to a microphase separation due to immiscibility. The benefits of DHBCs and the corresponding particles and vesicles, such as biocompatibility, high permeability towards water and hydrophilic compounds as well as the large amount of possible functionalizations that can be addressed to the block copolymers make the application of DHBC based structures a viable choice in biomedicine. In order to assess a route towards self-assembled structures from DHBCs that display the potential to act as cargos for future applications, several block copolymers containing two hydrophilic polymer blocks were synthesized. 
Poly(ethylene oxide)-b-poly(N-vinylpyrrolidone) (PEO-b-PVP) and Poly(ethylene oxide)-b-poly(N-vinylpyrrolidone-co-N-vinylimidazole) (PEO-b-P(VP-co-VIm) block copolymers were synthesized via reversible deactivation radical polymerization (RDRP) techniques starting from a PEO-macro chain transfer agent. The block copolymers displayed a concentration dependent self-assembly behavior in water which was determined via dynamic light scattering (DLS). It was possible to observe spherical particles via laser scanning confocal microscopy (LSCM) and cryogenic scanning electron microscopy (cryo SEM) at highly concentrated solutions of PEO-b-PVP. Furthermore, a crosslinking strategy with (PEO-b-P(VP-co-VIm) was developed applying a diiodo derived crosslinker diethylene glycol bis(2-iodoethyl) ether to form quaternary amines at the VIm units. The formed crosslinked structures proved stability upon dilution and transfer into organic solvents. Moreover, self-assembly and crosslinking in DMF proved to be more advantageous and the crosslinked structures could be successfully transferred to aqueous solution. The afforded spherical submicron particles could be visualized via LSCM, cryo SEM and Cryo TEM. Double hydrophilic pullulan-b-poly(acrylamide) block copolymers were synthesized via copper catalyzed alkyne azide cycloaddition (CuAAC) starting from suitable pullulan alkyne and azide functionalized poly(N,N-dimethylacrylamide) (PDMA) and poly(N-ethylacrylamide) (PEA) homopolymers. The conjugation reaction was confirmed via SEC and 1H-NMR measurements. The self-assembly of the block copolymers was monitored with DLS and static light scattering (SLS) measurements indicating the presence of hollow spherical structures. Cryo SEM measurements could confirm the presence of vesicular structures for Pull-b-PEA block copolymers. Solutions of Pull-b-PDMA displayed particles in cryo SEM. 
Moreover, an end group functionalization of Pull-b-PDMA with Rhodamine B allowed assessing the structure via LSCM and hollow spherical structures were observed indicating the presence of vesicles, too. An exemplified pathway towards a DHBC based drug delivery vehicle was demonstrated with the block copolymer Pull-b-PVP. The block copolymer was synthesized via RAFT/MADIX techniques starting from a pullulan chain transfer agent. Pull-b-PVP displayed a concentration dependent self-assembly in water with an efficiency superior to the PEO-b-PVP system, which could be observed via DLS. Cryo SEM and LSCM microscopy displayed the presence of spherical structures. In order to apply a reversible crosslinking strategy on the synthesized block copolymer, the pullulan block was selectively oxidized to dialdehydes with NaIO4. The oxidation of the block copolymer was confirmed via SEC and 1H-NMR measurements. The self-assembled and oxidized structures were subsequently crosslinked with cystamine dihydrochloride, a pH and redox responsive crosslinker resulting in crosslinked vesicles which were observed via cryo SEM. The vesicular structures of crosslinked Pull-b-PVP could be disassembled by acid treatment or the application of the redox agent tris(2-carboxyethyl)phosphine hydrochloride. The successful disassembly was monitored with DLS measurements. To conclude, self-assembled structures from DHBCs such as particles and vesicles display a strong potential to generate an impact on biomedicine and nanotechnologies. 
The variety of DHBC compositions and functionalities are very promising features for future applications.}, language = {en} } @phdthesis{Wierzba2017, author = {Wierzba, Marta}, title = {Revisiting prosodic reconstruction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403152}, school = {Universit{\"a}t Potsdam}, pages = {vi, 224}, year = {2017}, abstract = {In this thesis, I develop a theoretical implementation of prosodic reconstruction and apply it to the empirical domain of German sentences in which part of a focus or contrastive topic is fronted. Prosodic reconstruction refers to the idea that sentences involving syntactic movement show prosodic parallels with corresponding simpler structures without movement. I propose to model this recurrent observation by ordering syntax-prosody mapping before copy deletion. In order to account for the partial fronting data, the idea is extended to the mapping between prosody and information structure. This assumption helps to explain why object-initial sentences containing a broad focus or broad contrastive topic show similar prosodic and interpretative restrictions as sentences with canonical word order. The empirical adequacy of the model is tested against a set of gradient acceptability judgments.}, language = {en} } @phdthesis{Weissenberger2017, author = {Weißenberger, Martin}, title = {Start-up subsidies for the unemployed - New evaluation approaches and insights}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406362}, school = {Universit{\"a}t Potsdam}, pages = {viii, 239}, year = {2017}, abstract = {Start-up incentives targeted at unemployed individuals have become an important tool of the Active Labor Market Policy (ALMP) to fight unemployment in many countries in recent years. 
In contrast to traditional ALMP instruments like training measures, wage subsidies, or job creation schemes, which are aimed at reintegrating unemployed individuals into dependent employment, start-up incentives are a fundamentally different approach to ALMP, in that they intend to encourage and help unemployed individuals to exit unemployment by entering self-employment and, thus, by creating their own jobs. In this sense, start-up incentives for unemployed individuals serve not only as employment and social policy to activate job seekers and combat unemployment but also as business policy to promote entrepreneurship. The corresponding empirical literature on this topic so far has been mainly focused on the individual labor market perspective, however. The main part of the thesis at hand examines the new start-up subsidy ("Gr{\"u}ndungszuschuss") in Germany and consists of four empirical analyses that extend the existing evidence on start-up incentives for unemployed individuals from multiple perspectives and in the following directions: First, it provides the first impact evaluation of the new start-up subsidy in Germany. The results indicate that participation in the new start-up subsidy has significant positive and persistent effects on both reintegration into the labor market as well as the income profiles of participants, in line with previous evidence on comparable German and international programs, which emphasizes the general potential of start-up incentives as part of the broader ALMP toolset. Furthermore, a new innovative sensitivity analysis of the applied propensity score matching approach integrates findings from entrepreneurship and labor market research about the key role of an individual's personality on start-up decision, business performance, as well as general labor market outcomes, into the impact evaluation of start-up incentives. 
The sensitivity analysis with regard to the inclusion and exclusion of usually unobserved personality variables reveals that differences in the estimated treatment effects are small in magnitude and mostly insignificant. Consequently, concerns about potential overestimation of treatment effects in previous evaluation studies of similar start-up incentives due to usually unobservable personality variables are less justified, as long as the set of observed control variables is sufficiently informative (Chapter 2). Second, the thesis expands our knowledge about the longer-term business performance and potential of subsidized businesses arising from the start-up subsidy program. In absolute terms, the analysis shows that a relatively high share of subsidized founders successfully survives in the market with their original businesses in the medium to long run. The subsidy also yields a "double dividend" to a certain extent in terms of additional job creation. Compared to "regular", i.e., non-subsidized new businesses founded by non-unemployed individuals in the same quarter, however, the economic and growth-related impulses set by participants of the subsidy program are only limited with regard to employment growth, innovation activity, or investment. Further investigations of possible reasons for these differences show that differential business growth paths of subsidized founders in the longer run seem to be mainly limited by higher restrictions to access capital and by unobserved factors, such as less growth-oriented business strategies and intentions, as well as lower (subjective) entrepreneurial persistence. Taken together, the program has only limited potential as a business and entrepreneurship policy intended to induce innovation and economic growth (Chapters 3 and 4). 
And third, an empirical analysis on the level of German regional labor markets yields that there is a high regional variation in subsidized start-up activity relative to overall new business formation. The positive correlation between regular start-up intensity and the share among all unemployed individuals who participate in the start-up subsidy program suggests that (nascent) unemployed founders also profit from the beneficial effects of regional entrepreneurship capital. Moreover, the analysis of potential deadweight and displacement effects from an aggregated regional perspective emphasizes that the start-up subsidy for unemployed individuals represents a market intervention into existing markets, which affects incumbents and potentially produces inefficiencies and market distortions. This macro perspective deserves more attention and research in the future (Chapter 5).}, language = {en} } @phdthesis{Weise2017, author = {Weise, Katja}, title = {Gez{\"a}hmte Kleider, geb{\"a}ndigte K{\"o}rper?}, doi = {10.25932/publishup-43986}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439868}, school = {Universit{\"a}t Potsdam}, pages = {270}, year = {2017}, abstract = {Gegenstand der Dissertation ist die Pr{\"a}sentation von Kleidermode in ihr gewidmeten Sonderausstellungen, die in zunehmender Zahl seit den 1990er Jahren in musealen und musems{\"a}hnlichen Kontexten veranstaltet werden. Es geht darum, wie Modek{\"o}rper und vestiment{\"a}re Artefakte in diesen Ausstellungen gezeigt werden und welche {\"a}sthetischen Erfahrungen sich f{\"u}r die RezipientInnen aus der jeweiligen Konstellation von vestiment{\"a}rem Objekt und Inszenierungsmittel ergeben k{\"o}nnen. Das Augenmerk liegt auf der Spannung zwischen dem visuellen Imperativ musealer Zeigepraktiken und den multisensorischen Qualit{\"a}ten der Kleidermode, v. a. jener hautsinnlichen, die sich aus dem unmittelbaren Kontakt zwischen K{\"o}rper und Kleid ergeben. 
Die zentrale These ist, dass sich das Hautsinnliche der Kleidermode trotz des Ber{\"u}hrungsverbots in vielen Ausstellungsinszenierungen zeigen kann. D. h., dass - entgegen h{\"a}ufig wiederholter Behauptungen - ‚der K{\"o}rper', das Tragen und die Bewegung nicht per se oder komplett aus den Kleidern gewichen sind, werden diese musealisiert und ausgestellt. Es findet eine Verschiebung des K{\"o}rperlichen und Hautsinnlichen, wie das Anfassen, Tragen und Bewegen, in visuelle Darstellungsformen statt. Hautsinnliche Qualit{\"a}ten der vestiment{\"a}ren Exponate k{\"o}nnen, auch in Abh{\"a}ngigkeit von den jeweils verwendeten Pr{\"a}sentationsmitteln, von den BesucherInnen in unterschiedlichen Abstufungen sehend oder buchst{\"a}blich gesp{\"u}rt werden. An konkreten Beispielen wird zum einen das Verh{\"a}ltnis von ausgestelltem Kleid und Pr{\"a}sentationsmittel(n) in den Displays untersucht. Dabei stehen folgende Mittel im Fokus, mit deren Hilfe die vestiment{\"a}ren Exponate zur Schau gestellt werden: Vitrinen, Podeste, Ersatzk{\"o}rper wie Mannequins, optische Hilfsmittel wie Lupen, Bildmedien oder (bewegte) Installationen. Zum anderen wird analysiert, welche Wirkungen die Arrangements jeweils erzielen oder verhindern k{\"o}nnen, und zwar in Hinblick auf m{\"o}gliche {\"a}sthetische Erfahrungen, die taktilen, haptischen und kin{\"a}sthetischen Qualit{\"a}ten der Exponate als BesucherIn sehend oder buchst{\"a}blich zu f{\"u}hlen oder zu sp{\"u}ren. Ob als Identifikation, Projektion, Haptic Vision - es handelt sich um {\"a}sthetische Erfahrungen, die sich aus den modischen Kompetenzen der BetrachterInnen speisen und bei denen sich Visuelles und Hautsinnliches oft {\"u}berlagern. In der Untersuchung wird eine vernachl{\"a}ssigte, wenn nicht gar unerw{\"u}nschte Rezeptionsweise diskutiert, die von den AkteurInnen der spezifischen Debatte bspw. als konsumptives Sehen abgewertet wird. 
Die von mir vorgeschlagene, st{\"a}rker differenzierende Perspektive ist zugleich eine Kritik an dem bisherigen Diskurs und seinem eng gefassten, teilweise elit{\"a}ren Verst{\"a}ndnis von Museum, Bildung und Wissen, mit dem sich AkteurInnen und Institutionen abgrenzen. Der Spezialdiskurs {\"u}ber musealisierte und exponierte Kleidermode steht zudem exemplarisch f{\"u}r die Diskussion, was das Museum, verstanden als Institution, sein kann und soll(te) und ob (und wenn ja, wie) es sich {\"u}berhaupt noch von anderen Orten und R{\"a}umen klar abgrenzen l{\"a}sst.}, language = {de} } @phdthesis{Weege2017, author = {Weege, Stefanie}, title = {Climatic drivers of retrogressive thaw slump activity and resulting sediment and carbon release to the nearshore zone of Herschel Island, Yukon Territory, Canada}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397947}, school = {Universit{\"a}t Potsdam}, pages = {163}, year = {2017}, abstract = {The Yukon Coast in Canada is an ice-rich permafrost coast and highly sensitive to changing environmental conditions. Retrogressive thaw slumps are a common thermoerosion feature along this coast, and develop through the thawing of exposed ice-rich permafrost on slopes and removal of accumulating debris. They contribute large amounts of sediment, including organic carbon and nitrogen, to the nearshore zone. The objective of this study was to 1) identify the climatic and geomorphological drivers of sediment-meltwater release, 2) quantify the amount of released meltwater, sediment, organic carbon and nitrogen, and 3) project the evolution of sediment-meltwater release of retrogressive thaw slumps in a changing future climate. The analysis is based on data collected over 18 days in July 2013 and 18 days in August 2012. A cut-throat flume was set up in the main sediment-meltwater channel of the largest retrogressive thaw slump on Herschel Island. 
In addition, two weather stations, one on top of the undisturbed tundra and one on the slump floor, measured incoming solar radiation, air temperature, wind speed and precipitation. The discharge volume eroding from the ice-rich permafrost and retreating snowbanks was measured and compared to the meteorological data collected in real time with a resolution of one minute. The results show that the release of sediment-meltwater from thawing of the ice-rich permafrost headwall is strongly related to snowmelt, incoming solar radiation and air temperature. Snowmelt led to seasonal differences, especially due to the additional contribution of water to the eroding sediment-meltwater from headwall ablation, lead to dilution of the sediment-meltwater composition. Incoming solar radiation and air temperature were the main drivers for diurnal and inter-diurnal fluctuations. In July (2013), the retrogressive thaw slump released about 25 000 m³ of sediment-meltwater, containing 225 kg dissolved organic carbon and 2050 t of sediment, which in turn included 33 t organic carbon, and 4 t total nitrogen. In August (2012), just 15 600 m³ of sediment-meltwater was released, since there was no additional contribution from snowmelt. However, even without the additional dilution, 281 kg dissolved organic carbon was released. The sediment concentration was twice as high as in July, with sediment contents of up to 457 g l-1 and 3058 t of sediment, including 53 t organic carbon and 5 t nitrogen, being released. In addition, the data from the 36 days of observations from Slump D were upscaled to cover the main summer season of 1 July to 31 August (62 days) and to include all 229 active retrogressive thaw slumps along the Yukon Coast. In total, all retrogressive thaw slumps along the Yukon Coast contribute a minimum of 1.4 Mio. m³ sediment-meltwater each thawing season, containing a minimum of 172 000 t sediment with 3119 t organic carbon, 327 t nitrogen and 17 t dissolved organic carbon. 
Therefore, in addition to the coastal erosion input to the Beaufort Sea, retrogressive thaw slumps additionally release 3 \% of sediment and 8 \% of organic carbon into the ocean. Finally, the future evolution of retrogressive thaw slumps under a warming scenario with summer air temperatures increasing by 2-3 °C by 2081-2100, would lead to an increase of 109-114\% in release of sediment-meltwater. It can be concluded that retrogressive thaw slumps are sensitive to climatic conditions and under projected future Arctic warming will contribute larger amounts of thawed permafrost material (including organic carbon and nitrogen) into the environment.}, language = {en} } @phdthesis{Wagner2017, author = {Wagner, Mario}, title = {Industrie 4.0 und demografische Entwicklung aus strukturationstheoretischer Sicht}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412230}, school = {Universit{\"a}t Potsdam}, pages = {235}, year = {2017}, abstract = {Das Ziel der Arbeit ist die Entwicklung eines heuristischen Bezugsrahmens zur Erkl{\"a}rung der Komplexit{\"a}t im Kontext von Industrie 4.0 und der demografischen Entwicklung aus strukturationstheoretischer Sicht. Dabei sind in Bezug auf die zuk{\"u}nftig zu erwartenden kognitiven Anforderungen an die Besch{\"a}ftigten die Fragen essentiell, vor welchen Herausforderungen Unternehmen bez{\"u}glich der Einstellung und dem Verhalten sowie dem Erfahrungswissen der Besch{\"a}ftigten stehen und welche L{\"o}sungsans{\"a}tze sich im Umgang mit den Herausforderungen in der Praxis bisher als hilfreich erweisen. In Kapitel 1 erfolgt zun{\"a}chst die Beschreibung der Ausgangslage. Es werden die Begriffe Industrie 4.0 und demografische Entwicklung inhaltlich diskutiert und in einen theoretischen Zusammenhang gebracht. In Kapitel 2 erfolgt die theoretische Fundierung der Arbeit. Dabei wird eine strukturationstheoretische Sicht auf Unternehmen als soziotechnische Systeme eingenommen. 
Durch diese „nicht deterministische" Sichtweise wird ein prozessualer Blick auf den Wandlungsprozess in Unternehmen geschaffen, der es m{\"o}glich macht, die Besch{\"a}ftigten als aktiv handelnde Akteure im Sinne von „organisieren" zur Erkl{\"a}rung m{\"o}glicher Zusammenh{\"a}nge zwischen Industrie 4.0 und der demografischen Entwicklung mit einzubeziehen. Der soziotechnische Systemansatz und die Strukturationstheorie bilden in diesem Sinne den „Kern" des zu entwickelnden heuristischen Bezugsrahmens. Die inhaltliche Gestaltung des theoriebasierten heuristischen Bezugsrahmens erfolgt in Kapitel 3 und Kapitel 4. Kapitel 3 beschreibt ausgew{\"a}hlte Aspekte zuk{\"u}nftiger Anforderungen an die Arbeit, die durch eine systematische Aufbereitung des derzeitigen Forschungsstandes zu Industrie 4.0 ermittelt wurden. Sie bilden die „Gestaltungsgrenzen", innerhalb derer sich je nach betrieblicher Situation unterschiedliche neue oder ge{\"a}nderte Anforderungen an die Besch{\"a}ftigten bei der Gestaltung von Industrie 4.0 ableiten lassen. In Kapitel 4 werden ausgew{\"a}hlte Aspekte menschlichen Handelns am Beispiel {\"a}lterer Besch{\"a}ftigter in Form zweier Schwerpunkte beschrieben. Der erste Schwerpunkt betrifft m{\"o}gliche Einflussfaktoren auf die Einstellung und das Verhalten {\"a}lterer Besch{\"a}ftigter im Wandlungsprozess aufgrund eines vorherrschenden Altersbildes im Unternehmen. Grundlage hierzu bildete die Stigmatisierungstheorie als interaktionistischer Ansatz der Sozialtheorie. Mit dem zweiten Schwerpunkt, den ausgew{\"a}hlten handlungstheoretischen Aspekten der Alternsforschung aus der Entwicklungspsychologie, wird eine Lebensspannenperspektive eingenommen. Inhaltlich werden die komplexit{\"a}tsinduzierten Faktoren, die sich aus handlungstheoretischer Perspektive mit der Adaptation von {\"a}lteren Besch{\"a}ftigten an ver{\"a}nderte {\"a}ußere und pers{\"o}nliche Lebensbedingungen besch{\"a}ftigen, systematisiert. 
Anschließend wird auf Grundlage der bisherigen theoretischen Vor{\"u}berlegungen ein erster theoriebasierter Bezugsrahmen abgeleitet. Kapitel 5 und Kapitel 6 beschreiben den empirischen Teil, die Durchf{\"u}hrung teilstrukturierter Interviews, der Arbeit. Ziel der empirischen Untersuchung war es, neben der theoretischen Fundierung den theoriebasierten heuristischen Bezugsrahmen um Praxiserfahrungen zu konkretisieren und gegebenenfalls zu erg{\"a}nzen. Hierzu wurde auf Grundlage des theoriebasierten heuristischen Bezugsrahmens mittels teilstrukturierter Interviews das Erfahrungswissen von 23 Experten in pers{\"o}nlichen Gespr{\"a}chen abgefragt. Nachdem in Kapitel 5 die Vorgehensweise der empirischen Untersuchung beschrieben wird, erfolgt in Kapitel 6 die Beschreibung der Ergebnisse aus der qualitativen Befragung. Hierzu werden aus den pers{\"o}nlichen Gespr{\"a}chen zentrale Einflussfaktoren bei der Gestaltung und Umsetzung von Industrie 4.0 im Kontext mit der demografischen Entwicklung analysiert und in die {\"u}bergeordneten Kategorien Handlungskompetenzen, Einstellung/ Verhalten sowie Erfahrungswissen geclustert. Anschließend wird der theoriebasierte heuristische Bezugsrahmen durch die {\"u}bergeordneten Kategorien und Faktoren aus den Expertengespr{\"a}chen konkretisiert und erg{\"a}nzt. In Kapitel 7 werden auf Grundlage des heuristischen Bezugsrahmens sowie der Empfehlungen aus den Experteninterviews beispielhaft Implikationen f{\"u}r die Praxis abgeleitet. Es werden Interventionsm{\"o}glichkeiten zur Unterst{\"u}tzung einer positiven Ver{\"a}nderungsbereitschaft und einem positiven Ver{\"a}nderungsverhalten f{\"u}r den Strukturwandel aufgezeigt. 
Hierzu geh{\"o}ren die Anpassung des F{\"u}hrungsverhaltens im Wandlungsprozess, der Umgang mit der Paradoxie von Stabilit{\"a}t und Flexibilit{\"a}t, der Umgang mit Altersstereotypen in Unternehmen, die Unterst{\"u}tzung von Strategien zu Selektion, Optimierung und Kompensation sowie Maßnahmen zur Ausrichtung von Aktivit{\"a}ten an die Potenzialrisiken der Besch{\"a}ftigten. Eine Zusammenfassung, ein Res{\"u}mee und ein Ausblick erfolgen abschließend in Kapitel 8.}, language = {de} } @phdthesis{Tyree2017, author = {Tyree, Susan}, title = {Arc expression in the parabrachial nucleus following taste stimulation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396600}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 109}, year = {2017}, abstract = {Researchers have made many approaches to study the complexities of the mammalian taste system; however molecular mechanisms of taste processing in the early structures of the central taste pathway remain unclear. More recently the Arc catFISH (cellular compartment analysis of temporal activity by fluorescent in situ hybridisation) method has been used in our lab to study neural activation following taste stimulation in the first central structure in the taste pathway, the nucleus of the solitary tract. This method uses the immediate early gene Arc as a neural activity marker to identify taste-responsive neurons. Arc plays a critical role in memory formation and is necessary for conditioned taste aversion memory formation. In the nucleus of the solitary tract only bitter taste stimulation resulted in increased Arc expression, however this did not occur following stimulation with tastants of any other taste quality. The primary target for gustatory NTS neurons is the parabrachial nucleus (PbN) and, like Arc, the PbN plays an important role in conditioned taste aversion learning. 
The aim of this thesis is to investigate Arc expression in the PbN following taste stimulation to elucidate the molecular identity and function of Arc-expressing, taste-responsive neurons.
Investigation using the newly established double-FISH protocol revealed that, of the bitter-responsive Arc expressing neuron population: 16 \% co-expressed calcitonin RNA; 17 \% co-expressed glucagon-like peptide 1 receptor RNA; 17 \% co-expressed hypocretin receptor 1 RNA; 9 \% co-expressed gastrin-releasing peptide RNA; and 20 \% co-expressed neurotensin RNA. This co-expression with multiple different neuropeptides suggests that bitter-activated Arc expression mediates multiple neural responses to the taste event, such as taste aversion learning, suppression of food intake, increased heart rate, and involves multiple brain structures such as the lateral hypothalamus, amygdala, bed nucleus of the stria terminalis, and the thalamus. The increase in Arc-expression suggests that bitter taste stimulation, and umami taste stimulation in umami-averse animals, may result in an enhanced state of Arc- dependent synaptic plasticity in the PbN, allowing animals to form taste-relevant memories to these aversive compounds more readily. The results investigating neuropeptide RNA co- expression suggest the amygdala, bed nucleus of the stria terminalis, and thalamus as possible targets for bitter-responsive Arc-expressing PbN neurons.}, language = {en} } @phdthesis{Trendelenburg2017, author = {Trendelenburg, Val{\´e}rie}, title = {Therapie der Erdnussallergie durch orale Immuntherapie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403557}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 198}, year = {2017}, abstract = {Einleitung: Die Erdnussallergie z{\"a}hlt zu den h{\"a}ufigsten Nahrungsmittelallergien im Kindesalter. Bereits kleine Mengen Erdnuss (EN) k{\"o}nnen zu schweren allergischen Reaktionen f{\"u}hren. EN ist der h{\"a}ufigste Ausl{\"o}ser einer lebensbedrohlichen Anaphylaxie bei Kindern und Jugendlichen. Im Gegensatz zu anderen fr{\"u}hkindlichen Nahrungsmittelallergien entwickeln Patienten mit einer EN-Allergie nur selten eine nat{\"u}rliche Toleranz. 
Seit mehreren Jahren wird daher an kausalen Therapiem{\"o}glichkeiten f{\"u}r EN-Allergiker, insbesondere an der oralen Immuntherapie (OIT), geforscht. Erste kleinere Studien zur OIT bei EN-Allergie zeigten erfolgsversprechende Ergebnisse. Im Rahmen einer randomisierten, doppelblind, Placebo-kontrollierten Studie mit gr{\"o}ßerer Fallzahl werden in der vorliegenden Arbeit die klinische Wirksamkeit und Sicherheit dieser Therapieoption bei Kindern mit EN-Allergie genauer evaluiert. Des Weiteren werden immunologische Ver{\"a}nderungen sowie die Lebensqualit{\"a}t und Therapiebelastung unter OIT untersucht. Methoden: Kinder zwischen 3-18 Jahren mit einer IgE-vermittelten EN-Allergie wurden in die Studie eingeschlossen. Vor Beginn der OIT wurde eine orale Provokation mit EN durchgef{\"u}hrt. Die Patienten wurden 1:1 randomisiert und entsprechend der Verum- oder Placebogruppe zugeordnet. Begonnen wurde mit 2-120 mg EN bzw. Placebo pro Tag, abh{\"a}ngig von der Reaktionsdosis bei der oralen Provokation. Zun{\"a}chst wurde die t{\"a}gliche OIT-Dosis alle zwei Wochen {\"u}ber etwa 14 Monate langsam bis zu einer Erhaltungsdosis von mindestens 500 mg EN (= 125 mg EN-Protein, ~ 1 kleine EN) bzw. Placebo gesteigert. Die maximal erreichte Dosis wurde dann {\"u}ber zwei Monate t{\"a}glich zu Hause verabreicht. Im Anschluss erfolgte erneut eine orale Provokation mit EN. Der prim{\"a}re Endpunkt der Studie war die Anzahl an Patienten der Verum- und Placebogruppe, die unter oraler Provokation nach OIT ≥1200 mg EN vertrugen (=„partielle Desensibilisierung"). Sowohl vor als auch nach OIT wurde ein Hautpricktest mit EN durchgef{\"u}hrt und EN-spezifisches IgE und IgG4 im Serum bestimmt. Außerdem wurden die Basophilenaktivierung sowie die Aussch{\"u}ttung von T-Zell-spezifischen Zytokinen nach Stimulation mit EN in vitro gemessen. Anhand von Frageb{\"o}gen wurde die Lebensqualit{\"a}t vor und nach OIT sowie die Therapiebelastung w{\"a}hrend OIT erfasst. 
Ergebnisse: 62 Patienten wurden in die Studie eingeschlossen und randomisiert. Nach etwa 16 Monaten unter OIT zeigten 74,2\% (23/31) der Patienten der Verumgruppe und nur 16,1\% (5/31) der Placebogruppe eine „partielle Desensibilisierung" gegen{\"u}ber EN (p<0,001). Im Median vertrugen Patienten der Verumgruppe 4000 mg EN (~8 kleine EN) unter der Provokation nach OIT wohingegen Patienten der Placebogruppe nur 80 mg EN (~1/6 kleine EN) vertrugen (p<0,001). Fast die H{\"a}lfte der Patienten der Verumgruppe (41,9\%) tolerierten die H{\"o}chstdosis von 18 g EN unter Provokation („komplette Desensibilisierung"). Es zeigte sich ein vergleichbares Sicherheitsprofil unter Verum- und Placebo-OIT in Bezug auf objektive Nebenwirkungen. Unter Verum-OIT kam es jedoch signifikant h{\"a}ufiger zu subjektiven Nebenwirkungen wie oralem Juckreiz oder Bauchschmerzen im Vergleich zu Placebo (3,7\% der Verum-OIT-Gaben vs. 0,5\% der Placebo-OIT-Gaben, p<0,001). Drei Kinder der Verumgruppe (9,7\%) und sieben Kinder der Placebogruppe (22,6\%) beendeten die Studie vorzeitig, je zwei Patienten beider Gruppen aufgrund von Nebenwirkungen. Im Gegensatz zu Placebo, zeigten sich unter Verum-OIT signifikante immunologische Ver{\"a}nderungen. So kam es zu einer Abnahme des EN-spezifischen Quaddeldurchmessers im Hautpricktest, einem Anstieg der EN-spezifischen IgG4-Werte im Serum sowie zu einer verminderten EN-spezifischen Zytokinsekretion, insbesondere der Th2-spezifischen Zytokine IL-4 und IL-5. Hinsichtlich der EN-spezifischen IgE-Werte sowie der EN-spezifischen Basophilenaktivierung zeigten sich hingegen keine Ver{\"a}nderungen unter OIT. Die Lebensqualit{\"a}t von Kindern der Verumgruppe war nach OIT signifikant verbessert, jedoch nicht bei Kindern der Placebogruppe. W{\"a}hrend der OIT wurde die Therapie von fast allen Kindern (82\%) und M{\"u}ttern (82\%) als positiv bewertet (= niedrige Therapiebelastung). 
Diskussion: Die EN-OIT f{\"u}hrte bei einem Großteil der EN-allergischen Kinder zu einer Desensibilisierung und einer deutlich erh{\"o}hten Reaktionsschwelle auf EN. Somit sind die Kinder im Alltag vor akzidentellen Reaktionen auf EN gesch{\"u}tzt, was die Lebensqualit{\"a}t der Kinder deutlich verbessert. Unter den kontrollierten Studienbedingungen zeigte sich ein akzeptables Sicherheitsprofil, mit vorrangig milder Symptomatik. Die klinische Desensibilisierung ging mit Ver{\"a}nderungen auf immunologischer Ebene einher. Langzeitstudien zur EN-OIT m{\"u}ssen jedoch abgewartet werden, um die klinische und immunologische Wirksamkeit hinsichtlich einer m{\"o}glichen langfristigen oralen Toleranzinduktion sowie die Sicherheit unter langfristiger OIT zu untersuchen, bevor das Therapiekonzept in die Praxis {\"u}bertragen werden kann.}, language = {de} } @phdthesis{Titov2017, author = {Titov, Evgenii}, title = {Quantum chemistry and surface hopping dynamics of azobenzenes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394610}, school = {Universit{\"a}t Potsdam}, pages = {205}, year = {2017}, abstract = {This cumulative doctoral dissertation, based on three publications, is devoted to the investigation of several aspects of azobenzene molecular switches, with the aid of computational chemistry. In the first paper, the isomerization rates of a thermal cis → trans isomerization of azobenzenes for species formed upon an integer electron transfer, i.e., with added or removed electron, are calculated from Eyring's transition state theory and activation energy barriers, computed by means of density functional theory. The obtained results are discussed in connection with an experimental study of the thermal cis → trans isomerization of azobenzene derivatives in the presence of gold nanoparticles, which is demonstrated to be greatly accelerated in comparison to the same isomerization reaction in the absence of nanoparticles. 
The second paper is concerned with electronically excited states of (i) dimers, composed of two photoswitchable units placed closely side-by-side, as well as (ii) monomers and dimers adsorbed on a silicon cluster. A variety of quantum chemistry methods, capable of calculating molecular electronic absorption spectra, based on density functional and wave function theories, is employed to quantify changes in optical absorption upon dimerization and covalent grafting to a surface. Specifically, the exciton (Davydov) splitting between states of interest is determined from first-principles calculations with the help of natural transition orbital analysis, allowing for insight into the nature of excited states. In the third paper, nonadiabatic molecular dynamics with trajectory surface hopping is applied to model the photoisomerization of azobenzene dimers, (i) for the isolated case (exhibiting the exciton coupling between two molecules) as well as (ii) for the constrained case (providing the van der Waals interaction with environment in addition to the exciton coupling between two monomers). For the latter, the additional azobenzene molecules, surrounding the dimer, are introduced, mimicking a densely packed self-assembled monolayer. From obtained results it is concluded that the isolated dimer is capable of isomerization likewise the monomer, whereas the steric hindrance considerably suppresses trans → cis photoisomerization. Furthermore, the present dissertation comprises the general introduction describing the main features of the azobenzene photoswitch and objectives of this work, theoretical basis of the employed methods, and discussion of gained findings in the light of existing literature. 
Also, additional results on (i) activation parameters of the thermal cis → trans isomerization of azobenzenes, (ii) an approximate scheme to account for anharmonicity of molecular vibrations in calculation of the activation entropy, as well as (iii) absorption spectra of photoswitch-silicon composites obtained from time-demanding wave function-based methods are presented.}, language = {en} } @phdthesis{Thim2017, author = {Thim, Christof}, title = {Technologieakzeptanz in Organisationen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401070}, school = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {Technological change is influencing organisations in their operation. It is used as a means to enhance productivity or to gain momentum on the market. The success of introducing new technologies into the organisation relies heavily on user acceptance. Existing explanations like the Diffusion of Innovation Theory (Rogers, 2003) and the Technology Acceptance Model and its extensions (Davis, 1989; Venkatesh and Davis, 1996; Venkatesh and Davis, 2000; Venkatesh, Morris, et al., 2003) do not address the organisational context sufficiently. Their models concentrate on technology adoption in a non-mandatory environment. Furthermore they do not encompass resistance against a new technology. Hence these models cannot be used to analyse the acceptance and usage decision process within organisations. This thesis therefore aims at investigating the organisational dynamics evoked by the introduction of new technologies with regard to acceptance and usage. More precisely it answers the question, whether different organisation types exert varying influences on their members and produce different patterns of acceptance and usage. The groundwork to achieve this insight is the synthesis and extension of different models of technology acceptance and organisational governance. 
The resulting model describes the development dynamics within an organisation and combines two perspectives. On one hand the individual level encompasses socio-psychological aspects and individual decision making processes. This perspective is based on the aforementioned theories of individual acceptance, which are extended with different fit theories (Goodhue and Thompson, 1995; Floyd, 1986; Liu, Lee, and Chen, 2011; Parkes, 2013). Furthermore the resistance to new technology is introduced into the analysis as another possible course of action (Patsiotis, Hughes, and Webber, 2012). The organisational perspective on the other hand embeds the individual acceptance and usage decision into a social context. The interaction between organisation members based on the observation of others and the internalisation of social pressure are introduced as determinants of acceptance and usage. Furthermore organisational governance structures moderate this social influence and specify its impact. The relationship between governance and social influence is elaborated through the application of system theory to the organisational context: Actors like change agents or management use governance media (Luhmann, 1997; Fischer, 2009) to intervene in the individual decisions. The effect of these governance media varies with certain attributes of the organisation. Different coordination mechanisms of organisational configurations (Mintzberg, 1979) provide a link to governance media and their connectivity to individual decision processes. In order to demonstrate the feasibility of the model, a simulation experiment is conducted in AnyLogic. The validity of the model was tested in a sensitivity analysis. The results from the experiment show a specific acceptance and usage pattern. The acceptance is dropping at first due to the initial frustration. It then recovers and is growing in a bounded manner. Since usage is mandatory in an organisation, it is enforced by the management. 
This leads to a rapid increase of usage at first and stabilises on different levels during the course of the simulation. It was also found that different organisation configurations produce varying outcomes. The bureaucratic organisation enforces the usage better than any other configuration, leading to a higher usage level. However it fails to produce acceptance. The adhocracy on the other hand reaches a higher acceptance level through mutual adjustment. Its downside is the lack of usage. Furthermore the behaviour is not predictable, which can either lead to mostly positive outcomes or the complete break-down of the diffusion process. The simulation shows that organisations have to decide during the introduction of a new technology whether they want high usage rates fast with the risk of failing in the long term or establish a self-enforcing and sustainable diffusion processes which requires more time to be effective.}, language = {de} } @phdthesis{ThielemannKuehn2017, author = {Thielemann-K{\"u}hn, Nele}, title = {Optically induced ferro- and antiferromagnetic dynamics in the rare-earth metal dysprosium}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402994}, school = {Universit{\"a}t Potsdam}, pages = {iv, 121}, year = {2017}, abstract = {Approaching physical limits in speed and size of today's magnetic storage and processing technologies demands new concepts for controlling magnetization and moves researches on optically induced magnetic dynamics. Studies on photoinduced magnetization dynamics and their underlying mechanisms have been primarily performed on ferromagnetic metals. Ferromagnetic dynamics bases on transfer of the conserved angular momentum connected with atomic magnetic moments out of the parallel aligned magnetic system into other degrees of freedom. In this thesis the so far rarely studied response of antiferromagnetic order to ultra-short optical laser pulses in a metal is investigated. 
The experiments were performed at the FemtoSpex slicing facility at the storage ring BESSY II, a unique source for ultra-short elliptically polarized x-ray pulses. Laser-induced changes of the 4f-magnetic order parameter in ferro- and antiferromagnetic dysprosium (Dy) were studied by x-ray methods, which yield directly comparable quantities. The discovered fundamental differences in the temporal and spatial behavior of ferro- and antiferromagnetic dynamics are assigned to an additional channel for angular momentum transfer, which reduces the antiferromagnetic order by redistributing angular momentum within the non-parallel aligned magnetic system, and hence conserves the zero net magnetization. It is shown that antiferromagnetic dynamics proceeds considerably faster and more energy-efficient than demagnetization in ferromagnets. By probing antiferromagnetic order in time and space, it is found to be affected along the whole sample depth of an in situ grown 73 nm thick Dy film. Interatomic transfer of angular momentum via fast diffusion of laser-excited 5d electrons is held responsible for the out-most long-ranging effect. Ultrafast ferromagnetic dynamics can be expected to base on the same origin, which however leads to demagnetization only in regions close to interfaces caused by super-diffusive spin transport. Dynamics due to local scattering processes of excited but less mobile electrons, occur in both magnetic alignments only in directly excited regions of the sample and on slower picosecond timescales. 
The thesis provides fundamental insights into photoinduced magnetic dynamics by directly comparing ferro- and antiferromagnetic dynamics in the same material and by consideration of the laser-induced magnetic depth profile.}, language = {en} } @phdthesis{Theuring2017, author = {Theuring, Philipp Christian}, title = {Suspended sediments in the Kharaa River, sources and impacts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410550}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2017}, abstract = {Anthropogenically amplified erosion leads to increased fine-grained sediment input into the fluvial system in the 15.000 km2 Kharaa River catchment in northern Mongolia and constitutes a major stressing factor for the aquatic ecosystem. This study uniquely combines the application of intensive monitoring, source fingerprinting and catchment modelling techniques to allow for the comparison of the credibility and accuracy of each single method. High-resolution discharge data were used in combination with daily suspended solid measurements to calculate the suspended sediment budget and compare it with estimations of the sediment budget model SedNet. The comparison of both techniques showed that the development of an overall sediment budget with SedNet was possible, yielding results in the same order of magnitude (20.3 kt a- 1 and 16.2 kt a- 1). Radionuclide sediment tracing, using Be-7, Cs-137 and Pb-210 was applied to differentiate sediment sources for particles < 10μm from hillslope and riverbank erosion and showed that riverbank erosion generates 74.5\% of the suspended sediment load, whereas surface erosion contributes 21.7\% and gully erosion only 3.8\%. The contribution of the single subcatchments of the Kharaa to the suspended sediment load was assessed based on their variation in geochemical composition (e.g. in Ti, Sn, Mo, Mn, As, Sr, B, U, Ca and Sb). 
These variations were used for sediment source discrimination with geochemical composite fingerprints based on Genetic Algorithm driven Discriminant Function Analysis, the Kruskal-Wallis H-test and Principal Component Analysis. The contributions of the individual sub-catchments varied from 6.4\% to 36.2\%, generally showing higher contributions from the sub-catchments in the middle, rather than the upstream portions of the study area. The results indicate that river bank erosion generated by existing grazing practices of livestock is the main cause for elevated fine sediment input. Actions towards the protection of the headwaters and the stabilization of the river banks within the middle reaches were identified as the highest priority. Deforestation by logging and forest fires should be prevented to avoid increased hillslope erosion in the mountainous areas. Mining activities are of minor importance for the overall catchment sediment load but can constitute locally important point sources for particular heavy metals in the fluvial system.}, language = {en} } @phdthesis{Solms2017, author = {Solms, Alexander Maximilian}, title = {Integrating nonlinear mixed effects and physiologically-based modeling approaches for the analysis of repeated measurement studies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397070}, school = {Universit{\"a}t Potsdam}, pages = {x, 141}, year = {2017}, abstract = {During the drug discovery \& development process, several phases encompassing a number of preclinical and clinical studies have to be successfully passed to demonstrate safety and efficacy of a new drug candidate. As part of these studies, the characterization of the drug's pharmacokinetics (PK) is an important aspect, since the PK is assumed to strongly impact safety and efficacy. To this end, drug concentrations are measured repeatedly over time in a study population. 
The objectives of such studies are to describe the typical PK time-course and the associated variability between subjects. Furthermore, underlying sources significantly contributing to this variability, e.g. the use of comedication, should be identified. The most commonly used statistical framework to analyse repeated measurement data is the nonlinear mixed effect (NLME) approach. At the same time, ample knowledge about the drug's properties already exists and has been accumulating during the discovery \& development process: Before any drug is tested in humans, detailed knowledge about the PK in different animal species has to be collected. This drug-specific knowledge and general knowledge about the species' physiology is exploited in mechanistic physiological based PK (PBPK) modeling approaches -it is, however, ignored in the classical NLME modeling approach. Mechanistic physiological based models aim to incorporate relevant and known physiological processes which contribute to the overlying process of interest. In comparison to data--driven models they are usually more complex from a mathematical perspective. For example, in many situations, the number of model parameters outrange the number of measurements and thus reliable parameter estimation becomes more complex and partly impossible. As a consequence, the integration of powerful mathematical estimation approaches like the NLME modeling approach -which is widely used in data-driven modeling -and the mechanistic modeling approach is not well established; the observed data is rather used as a confirming instead of a model informing and building input. Another aggravating circumstance of an integrated approach is the inaccessibility to the details of the NLME methodology so that these approaches can be adapted to the specifics and needs of mechanistic modeling. 
Despite the fact that the NLME modeling approach has existed for several decades, details of the mathematical methodology are scattered around a wide range of literature and a comprehensive, rigorous derivation is lacking. Available literature usually only covers selected parts of the mathematical methodology. Sometimes, important steps are not described or are only heuristically motivated, e.g. the iterative algorithm to finally determine the parameter estimates. Thus, in the present thesis the mathematical methodology of NLME modeling is systematically described and complemented to a comprehensive description, comprising the common theme from ideas and motivation to the final parameter estimation. Therein, new insights for the interpretation of different approximation methods used in the context of the NLME modeling approach are given and illustrated; furthermore, similarities and differences between them are outlined. Based on these findings, an expectation-maximization (EM) algorithm to determine estimates of an NLME model is described. Using the EM algorithm and the lumping methodology by Pilari2010, a new approach on how PBPK and NLME modeling can be combined is presented and exemplified for the antibiotic levofloxacin. Therein, the lumping identifies which processes are informed by the available data and the respective model reduction improves the robustness in parameter estimation. Furthermore, it is shown how a priori known factors influencing the variability and a priori known unexplained variability is incorporated to further mechanistically drive the model development. In conclusion, correlation between parameters and between covariates is automatically accounted for due to the mechanistic derivation of the lumping and the covariate relationships. A useful feature of PBPK models compared to classical data-driven PK models is the possibility to predict drug concentration within all organs and tissue in the body. 
Thus, the resulting PBPK model for levofloxacin is used to predict drug concentrations and their variability within soft tissues which are the site of action for levofloxacin. These predictions are compared with data of muscle and adipose tissue obtained by microdialysis, which is an invasive technique to measure a proportion of drug in the tissue, allowing to approximate the concentrations in the interstitial fluid of tissues. Because, so far, comparing human in vivo tissue PK and PBPK predictions are not established, a new conceptual framework is derived. The comparison of PBPK model predictions and microdialysis measurements shows an adequate agreement and reveals further strengths of the presented new approach. We demonstrated how mechanistic PBPK models, which are usually developed in the early stage of drug development, can be used as basis for model building in the analysis of later stages, i.e. in clinical studies. As a consequence, the extensively collected and accumulated knowledge about species and drug are utilized and updated with specific volunteer or patient data. The NLME approach combined with mechanistic modeling reveals new insights for the mechanistic model, for example identification and quantification of variability in mechanistic processes. This represents a further contribution to the learn \& confirm paradigm across different stages of drug development. Finally, the applicability of mechanism--driven model development is demonstrated on an example from the field of Quantitative Psycholinguistics to analyse repeated eye movement data. 
Our approach gives new insight into the interpretation of these experiments and the processes behind.}, language = {en} } @phdthesis{Siddiqui2017, author = {Siddiqui, Tarique Adnan}, title = {Long-term investigation of the lunar tide in the equatorial electrojet during stratospheric sudden warmings}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406384}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 116}, year = {2017}, abstract = {The ionosphere, which is strongly influenced by the Sun, is known to be also affected by meteorological processes. These processes, despite having their origin in the troposphere and stratosphere, interact with the upper atmosphere. Such an interaction between atmospheric layers is known as vertical coupling. During geomagnetically quiet times, when near-Earth space is not under the influence of solar storms, these processes become important drivers for ionospheric variability. Studying the link between these processes in the lower atmosphere and the ionospheric variability is important for our understanding of fundamental mechanisms in ionospheric and meteorological research. A prominent example of vertical coupling between the stratosphere and the ionosphere are the so-called stratospheric sudden warming (SSW) events that occur usually during northern winters and result in an increase in the polar stratospheric temperature and a reversal of the circumpolar winds. While the phenomenon of SSW is confined to the northern polar stratosphere, its influence on the ionosphere can be observed even at equatorial latitudes. During SSW events, the connection between the polar stratosphere and the equatorial ionosphere is believed to be through the modulation of global atmospheric tides. These tides are fundamental for the ionospheric E-region wind dynamo that generates electric fields and currents in the ionosphere. 
Observations of ionospheric currents indicate a large enhancement of the semidiurnal lunar tide in response to SSW events. Thus, the semidiurnal lunar tide becomes an important driver of ionospheric variability during SSW events. In this thesis, the ionospheric effect of SSW events is investigated in the equatorial region, where a narrow but an intense E-region current known as the equatorial electrojet (EEJ) flows above the dip equator during the daytime. The day-to-day variability of the EEJ can be determined from magnetic field records at geomagnetic observatories close to the dip equator. Such magnetic data are available for several decades and allows to investigate the impact of SSW events on the EEJ and, even more importantly, helps in understanding the effects of SSW events on the equatorial ionosphere. An excellent long-term record of the geomagnetic field at the equator from 1922 onwards is available for the observatory Huancayo in Peru and is extensively utilized in this study. The central subject of this thesis is the investigation of lunar tides in the EEJ during SSW events by analyzing long time series. This is done by estimating the lunar tidal amplitude in the EEJ from the magnetic records at Huancayo and by comparing them to measurements of the polar stratospheric wind and temperature, which led to the identification of the known SSW events from 1952 onwards. One goal of this thesis is to identify SSW events that predate 1952. To this end, superposed epoch analysis (SEA) is employed to establish a relationship between the lunar tidal power and the wind and temperature conditions in the lower atmosphere. A threshold value for the lunar tidal power is identified that is discriminative for the known SSW events. This threshold is then used to identify lunar tidal enhancements, which are indicative for any historic SSW events prior to 1952. 
It can be shown, that the number of lunar tidal enhancements and thus the occurrence frequency of historic SSW events between 1926 and 1952 is similar to the occurrence frequency of the known SSW events from 1952 onwards. Next to the classic SSW definition, the concept of polar vortex weakening (PVW) is utilized in this thesis. PVW is defined for higher latitudes and altitudes (≈ 40km) than the classical SSW definition (≈ 32km). The correlation between the timing and magnitude of lunar tidal enhancements in the EEJ and the timing and magnitude of PVW is found to be better than for the classic SSW definition. This suggests that the lunar tidal enhancements in the EEJ are closely linked to the state of the middle atmosphere. Geomagnetic observatories located in different longitudes at the dip equator allow investigating the longitudinally dependent variability of the EEJ during SSW events. For this purpose, the lunar tidal enhancements in the EEJ are determined for the Peruvian and Indian sectors during the major SSW events of the years 2006 and 2009. It is found that the lunar tidal amplitude shows similar enhancements in the Peruvian sector during both SSW events, while the enhancements are notably different for the two events in the Indian sector. In summary, this thesis shows that lunar tidal enhancements in the EEJ are indeed correlated to the occurrence of SSW events and they should be considered a prominent driver of low latitude ionospheric variability. Secondly, lunar tidal enhancements are found to be longitudinally variable. 
This suggests that regional effects, such as ionospheric conductivity and the geometry and strength of the geomagnetic field, also play an important role and have to be considered when investigating the mechanisms behind vertical coupling.}, language = {en} } @phdthesis{Shenar2017, author = {Shenar, Tomer}, title = {Comprehensive analyses of massive binaries and implications on stellar evolution}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-104857}, school = {Universit{\"a}t Potsdam}, pages = {187}, year = {2017}, abstract = {Via their powerful radiation, stellar winds, and supernova explosions, massive stars ($M_{\rm ini} \gtrsim 8\,M_{\odot}$) bear a tremendous impact on galactic evolution. It became clear in recent decades that the majority of massive stars reside in binary systems. This thesis sets as a goal to quantify the impact of binarity (i.e., the presence of a companion star) on massive stars. For this purpose, massive binary systems in the Local Group, including OB-type binaries, high mass X-ray binaries (HMXBs), and Wolf-Rayet (WR) binaries, were investigated by means of spectral, orbital, and evolutionary analyses. The spectral analyses were performed with the non-local thermodynamic equilibrium (non-LTE) Potsdam Wolf-Rayet (PoWR) model atmosphere code. Thanks to critical updates in the calculation of the hydrostatic layers, the code became a state-of-the-art tool applicable for all types of hot massive stars (Chapter 2). The eclipsing OB-type triple system δ Ori served as an intriguing test-case for the new version of the PoWR code, and provided key insights regarding the formation of X-rays in massive stars (Chapter 3). We further analyzed two prototypical HMXBs, Vela X-1 and IGR J17544-2619, and obtained fundamental conclusions regarding the dichotomy of two basic classes of HMXBs (Chapter 4). We performed an exhaustive analysis of the binary R 145 in the Large Magellanic Cloud (LMC), which was claimed to host the most massive stars known. 
We were able to disentangle the spectrum of the system, and performed an orbital, polarimetric, and spectral analysis, as well as an analysis of the wind-wind collision region. The true masses of the binary components turned out to be significantly lower than suggested, impacting our understanding of the initial mass function and stellar evolution at low metallicity (Chapter 5). Finally, all known WR binaries in the Small Magellanic Cloud (SMC) were analyzed. Although it was theoretically predicted that virtually all WR stars in the SMC should be formed via mass-transfer in binaries, we find that binarity was not important for the formation of the known WR stars in the SMC, implying a strong discrepancy between theory and observations (Chapter 6).}, language = {en} } @phdthesis{Schuermann2017, author = {Sch{\"u}rmann, Robin Mathis}, title = {Interaction of the potential DNA-radiosensitizer 8-bromoadenine with free and plasmonically generated electrons}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407017}, school = {Universit{\"a}t Potsdam}, pages = {xi, 120}, year = {2017}, abstract = {In Germany more than 200,000 persons die of cancer every year, which makes it the second most common cause of death. Chemotherapy and radiation therapy are often combined to exploit a supra-additive effect, as some chemotherapeutic agents like halogenated nucleobases sensitize the cancerous tissue to radiation. The radiosensitizing action of certain therapeutic agents can be at least partly assigned to their interaction with secondary low energy electrons (LEEs) that are generated along the track of the ionizing radiation. In the therapy of cancer DNA is an important target, as severe DNA damage like double strand breaks induces cell death. 
As there is only a limited number of radiosensitizing agents in clinical practice, which are often strongly cytotoxic, it would be beneficial to get a deeper understanding of the interaction of less toxic potential radiosensitizers with secondary reactive species like LEEs. Beyond that LEEs can be generated by laser illuminated nanoparticles that are applied in photothermal therapy (PTT) of cancer, which is an attempt to treat cancer by an increase of temperature in the cells. However, the application of halogenated nucleobases in PTT has not been taken into account so far. In this thesis the interaction of the potential radiosensitizer 8-bromoadenine (8BrA) with LEEs was studied. In a first step the dissociative electron attachment (DEA) in the gas phase was studied in a crossed electron-molecular beam setup. The main fragmentation pathway was revealed as the cleavage of the C-Br bond. The formation of a stable parent anion was observed for electron energies around 0 eV. Furthermore, DNA origami nanostructures were used as platforms to determine electron induced strand break cross sections of 8BrA sensitized oligonucleotides and the corresponding nonsensitized sequence as a function of the electron energy. In this way the influence of the DEA resonances observed for the free molecules on the DNA strand breaks was examined. As the surrounding medium influences the DEA, pulsed laser illuminated gold nanoparticles (AuNPs) were used as a nanoscale electron source in an aqueous environment. The dissociation of brominated and native nucleobases was tracked with UV-Vis absorption spectroscopy and the generated fragments were identified with surface enhanced Raman scattering (SERS). Besides the electron induced damage, nucleobase analogues are decomposed in the vicinity of the laser illuminated nanoparticles due to the high temperatures. 
In order to get a deeper understanding of the different dissociation mechanisms, the thermal decomposition of the nucleobases in these systems was studied and the influence of the adsorption kinetics of the molecules was elucidated. In addition to the pulsed laser experiments, a dissociative electron transfer from plasmonically generated "hot electrons" to 8BrA was observed under low energy continuous wave laser illumination and tracked with SERS. The reaction was studied on AgNPs and AuNPs as a function of the laser intensity and wavelength. On dried samples the dissociation of the molecule was described by fractal like kinetics. In solution, the dissociative electron transfer was observed as well. It turned out that the timescale of the reaction rates were slightly below typical integration times of Raman spectra. In consequence such reactions need to be taken into account in the interpretation of SERS spectra of electrophilic molecules. The findings in this thesis help to understand the interaction of brominated nucleobases with plasmonically generated electrons and free electrons. This might help to evaluate the potential radiosensitizing action of such molecules in cancer radiation therapy and PTT.}, language = {en} } @phdthesis{Schuster2017, author = {Schuster, Isabell}, title = {Prevalence and Predictors of Sexual Aggression Victimization and Perpetration in Chile and Turkey}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-413897}, school = {Universit{\"a}t Potsdam}, pages = {285}, year = {2017}, abstract = {Background: Although sexual aggression is recognized as a serious issue worldwide, the current knowledge base is primarily built on evidence from Western countries, particularly the U.S. For the present doctoral research, Chile and Turkey were selected based on theoretical considerations to examine the prevalence as well as predictors of sexual aggression victimization and perpetration. 
The first aim of this research project was to systematically review the available evidence provided by past studies on this topic within each country. The second aim was to empirically study the prevalence of experiencing and engaging in sexual aggression since the age of consent among college students in Chile and Turkey. The third aim was to conduct cross-cultural analyses examining pathways to victimization and perpetration based on a two-wave longitudinal design. Methods: This research adopted a gender-inclusive approach by considering men and women in both victim and perpetrator roles. For the systematic reviews, multiple-stage literature searches were performed, and based on a predefined set of eligibility criteria, 28 studies in Chile and 56 studies in Turkey were identified for inclusion. A two-wave longitudinal study was conducted to examine the prevalence and predictors of sexual aggression among male and female college students in Chile and Turkey. Self-reports of victimization and perpetration were assessed with a Chilean Spanish or Turkish version of the Sexual Aggression and Victimization Scale. Two path models were conceptualized in which participants' risky sexual scripts for consensual sex, risky sexual behavior, sexual self-esteem, sexual assertiveness, and religiosity were assessed at T1 and used as predictors of sexual aggression victimization and perpetration at T2 in the following 12 months, mediated through past victimization or perpetration, respectively. The models differed in that sexual assertiveness was expected to serve different functions for victimization (refusal assertiveness negatively linked to victimization) and perpetration (initiation assertiveness positively linked to perpetration). Results: Both systematic reviews revealed that victimization was addressed by all included studies, but data on perpetration was severely limited. 
A great heterogeneity not only in victimization rates but also in predictors was found, which may be attributed to a lack of conceptual and methodological consistency across studies. The empirical analysis of the prevalence of sexual aggression in Chile revealed a victimization rate of 51.9\% for women and 48.0\% for men, and a perpetration rate of 26.8\% for men and 16.5\% for women. In the Turkish original data, victimization was reported by 77.6\% of women and 65.5\% of men, whereas, again, lower rates were found for perpetration, with 28.9\% of men and 14.2\% of women reporting at least one incident. The cross-cultural analyses showed, as expected, that risky sexual scripts informed risky sexual behavior, and thereby indirectly increased the likelihood of victimization and perpetration at T2 in both samples. More risky sexual scripts were also linked to lower levels of refusal assertiveness in both samples, indirectly increasing the vulnerability to victimization at T2. High sexual self-esteem decreased the probability of victimization at T2 through higher refusal assertiveness as well as through less risky sexual behavior also in both samples, whereas it increased the odds of perpetration at T2 via higher initiation assertiveness in the Turkish sample only. Furthermore, high religiosity decreased the odds of perpetration and victimization at T2 through less risky sexual scripts and less risky sexual behavior in both samples. It reduced the vulnerability to victimization through less risky sexual scripts and higher refusal assertiveness in the Chilean sample only. In the Turkish sample only, it increased the odds of perpetration and victimization through lower sexual self-esteem. Conclusions: The findings showed that sexual aggression is a widespread problem in both Chile and Turkey, contributing cross-cultural evidence to the international knowledge base and indicating the clear need for implementing policy measures and prevention strategies in each country. 
Based on the results of the prospective analyses, concrete implications for intervention efforts are discussed.}, language = {en} } @phdthesis{Schunack2017, author = {Schunack, Silke}, title = {Processing of non-canonical word orders in an L2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103750}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 443 Seiten}, year = {2017}, abstract = {This thesis investigates the processing of non-canonical word orders and whether non-canonical orders involving object topicalizations, midfield scrambling and particle verbs are treated the same by native (L1) and non-native (L2) speakers. The two languages investigated are Norwegian and German. 32 L1 Norwegian and 32 L1 German advanced learners of Norwegian were tested in two experiments on object topicalization in Norwegian. The results from the online self-paced reading task and the offline agent identification task show that both groups are able to identify the non-canonical word order and show a facilitatory effect of animate subjects in their reanalysis. Similarly high error rates in the agent identification task suggest that globally unambiguous object topicalizations are a challenging structure for L1 and L2 speakers alike. The same participants were also tested in two experiments on particle placement in Norwegian, again using a self-paced reading task, this time combined with an acceptability rating task. In the acceptability rating L1 and L2 speakers show the same preference for the verb-adjacent placement of the particle over the non-adjacent placement after the direct object. However, this preference for adjacency is only found in the L1 group during online processing, whereas the L2 group shows no preference for either order. Another set of experiments tested 33 L1 German and 39 L1 Slavic advanced learners of German on object scrambling in ditransitive clauses in German. 
Non-native speakers accept both object orders and show neither a preference for either order nor a processing advantage for the canonical order. The L1 group, in contrast, shows a small, but significant preference for the canonical dative-first order in the judgment and the reading task. The same participants were also tested in two experiments on the application of the split rule in German particle verbs. Advanced L2 speakers of German are able to identify particle verbs and can apply the split rule in V2 contexts in an acceptability judgment task in the same way as L1 speakers. However, unlike the L1 group, the L2 group is not sensitive to the grammaticality manipulation during online processing. They seem to be sensitive to the additional lexical information provided by the particle, but are unable to relate the split particle to the preceding verb and recognize the ungrammaticality in non-V2 contexts. Taken together, my findings suggest that non-canonical word orders are not per se more difficult to identify for L2 speakers than L1 speakers and can trigger the same reanalysis processes as in L1 speakers. I argue that L2 speakers' ability to identify a non-canonical word order depends on how the non-canonicity is signaled (case marking vs. surface word order), on the constituents involved (identical vs. different word types), and on the impact of the word order change on sentence meaning. Non-canonical word orders that are signaled by morphological case marking and cause no change to the sentence's content are hard to detect for L2 speakers.}, language = {en} } @phdthesis{Schumacher2017, author = {Schumacher, Kinga}, title = {Hybride semantische Suche - eine Kombination aus Fakten- und Dokumentretrieval}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405973}, school = {Universit{\"a}t Potsdam}, pages = {vii, 187}, year = {2017}, abstract = {Das Thema der vorliegenden Arbeit ist die semantische Suche im Kontext heutiger Informationsmanagementsysteme. 
Zu diesen Systemen z{\"a}hlen Intranets, Web 3.0-Anwendungen sowie viele Webportale, die Informationen in heterogenen Formaten und Strukturen beinhalten. Auf diesen befinden sich einerseits Daten in strukturierter Form und andererseits Dokumente, die inhaltlich mit diesen Daten in Beziehung stehen. Diese Dokumente sind jedoch in der Regel nur teilweise strukturiert oder vollst{\"a}ndig unstrukturiert. So beschreiben beispielsweise Reiseportale durch strukturierte Daten den Zeitraum, das Reiseziel, den Preis einer Reise und geben in unstrukturierter Form weitere Informationen, wie Beschreibungen zum Hotel, Zielort, Ausflugsziele an. Der Fokus heutiger semantischer Suchmaschinen liegt auf dem Finden von Wissen entweder in strukturierter Form, auch Faktensuche genannt, oder in semi- bzw. unstrukturierter Form, was {\"u}blicherweise als semantische Dokumentensuche bezeichnet wird. Einige wenige Suchmaschinen versuchen die L{\"u}cke zwischen diesen beiden Ans{\"a}tzen zu schließen. Diese durchsuchen zwar gleichzeitig strukturierte sowie unstrukturierte Daten, werten diese jedoch entweder weitgehend voneinander unabh{\"a}ngig aus oder schr{\"a}nken die Suchm{\"o}glichkeiten stark ein, indem sie beispielsweise nur bestimmte Fragemuster unterst{\"u}tzen. Hierdurch werden die im System verf{\"u}gbaren Informationen nicht ausgesch{\"o}pft und gleichzeitig unterbunden, dass Zusammenh{\"a}nge zwischen einzelnen Inhalten der jeweiligen Informationssysteme und sich erg{\"a}nzende Informationen den Benutzer erreichen.
 Um diese L{\"u}cke zu schließen, wurde in der vorliegenden Arbeit ein neuer hybrider semantischer Suchansatz entwickelt und untersucht, der strukturierte und semi- bzw. unstrukturierte Inhalte w{\"a}hrend des gesamten Suchprozesses kombiniert. Durch diesen Ansatz werden nicht nur sowohl Fakten als auch Dokumente gefunden, es werden auch Zusammenh{\"a}nge, die zwischen den unterschiedlich strukturierten Daten bestehen, in jeder Phase der Suche genutzt und fließen in die Suchergebnisse mit ein. Liegt die Antwort zu einer Suchanfrage nicht vollst{\"a}ndig strukturiert, in Form von Fakten, oder unstrukturiert, in Form von Dokumenten vor, so liefert dieser Ansatz eine Kombination der beiden. Die Ber{\"u}cksichtigung von unterschiedlich Inhalten w{\"a}hrend des gesamten Suchprozesses stellt jedoch besondere Herausforderungen an die Suchmaschine. Diese muss in der Lage sein, Fakten und Dokumente in Abh{\"a}ngigkeit voneinander zu durchsuchen, sie zu kombinieren sowie die unterschiedlich strukturierten Ergebnisse in eine geeignete Rangordnung zu bringen. Weiterhin darf die Komplexit{\"a}t der Daten nicht an die Endnutzer weitergereicht werden. Die Darstellung der Inhalte muss vielmehr sowohl bei der Anfragestellung als auch bei der Darbietung der Ergebnisse verst{\"a}ndlich und leicht interpretierbar sein. Die zentrale Fragestellung der Arbeit ist, ob ein hybrider Ansatz auf einer vorgegebenen Datenbasis die Suchanfragen besser beantworten kann als die semantische Dokumentensuche und die Faktensuche f{\"u}r sich genommen, bzw. als eine Suche die diese Ans{\"a}tze im Rahmen des Suchprozesses nicht kombiniert. Die durchgef{\"u}hrten Evaluierungen aus System- und aus Benutzersicht zeigen, dass die im Rahmen der Arbeit entwickelte hybride semantische Suchl{\"o}sung durch die Kombination von strukturierten und unstrukturierten Inhalten im Suchprozess bessere Antworten liefert als die oben genannten Verfahren und somit Vorteile gegen{\"u}ber bisherigen Ans{\"a}tzen bietet. 
Eine Befragung von Benutzern macht deutlich, dass die hybride semantische Suche als verst{\"a}ndlich empfunden und f{\"u}r heterogen strukturierte Datenmengen bevorzugt wird.}, language = {de} } @phdthesis{Schulze2017, author = {Schulze, Nicole}, title = {Neue Templatphasen zur anisotropen Goldnanopartikelherstellung durch den Einsatz strukturbildender Polymere}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409515}, school = {Universit{\"a}t Potsdam}, pages = {VI, 117, xv}, year = {2017}, abstract = {Ziel der vorliegenden Arbeit war die Synthese und Charakterisierung von anisotropen Goldnanopartikeln in einer geeigneten Polyelektrolyt-modifizierten Templatphase. Der Mittelpunkt bildet dabei die Auswahl einer geeigneten Templatphase, zur Synthese von einheitlichen und reproduzierbaren anisotropen Goldnanopartikeln mit den daraus resultierenden besonderen Eigenschaften. Bei der Synthese der anisotropen Goldnanopartikeln lag der Fokus in der Verwendung von Vesikeln als Templatphase, wobei hier der Einfluss unterschiedlicher strukturbildender Polymere (stark alternierende Maleamid-Copolymere PalH, PalPh, PalPhCarb und PalPhBisCarb mit verschiedener Konformation) und Tenside (SDS, AOT - anionische Tenside) bei verschiedenen Synthese- und Abtrennungsbedingungen untersucht werden sollte. Im ersten Teil der Arbeit konnte gezeigt werden, dass PalPhBisCarb bei einem pH-Wert von 9 die Bedingungen eines R{\"o}hrenbildners f{\"u}r eine morphologische Transformation von einer vesikul{\"a}ren Phase in eine r{\"o}hrenf{\"o}rmige Netzwerkstruktur erf{\"u}llt und somit als Templatphase zur formgesteuerten Bildung von Nanopartikeln genutzt werden kann. Im zweiten Teil der Arbeit wurde dargelegt, dass die Templatphase PalPhBisCarb (pH-Wert von 9, Konzentration von 0,01 wt.\%) mit AOT als Tensid und PL90G als Phospholipid (im Verh{\"a}ltnis 1:1) die effektivste Wahl einer Templatphase f{\"u}r die Bildung von anisotropen Strukturen in einem einstufigen Prozess darstellt. 
Bei einer konstanten Synthesetemperatur von 45 °C wurden die besten Ergebnisse bei einer Goldchloridkonzentration von 2 mM, einem Gold-Templat-Verh{\"a}ltnis von 3:1 und einer Synthesezeit von 30 Minuten erzielt. Die Ausbeute an anisotropen Strukturen lag bei 52 \% (Anteil an dreieckigen Nanopl{\"a}ttchen von 19 \%). Durch Erh{\"o}hung der Synthesetemperatur konnte die Ausbeute auf 56 \% (29 \%) erh{\"o}ht werden. Im dritten Teil konnte durch zeitabh{\"a}ngige Untersuchungen gezeigt werden, dass bei Vorhandensein von PalPhBisCarb die Bildung der energetisch nicht bevorzugten Pl{\"a}ttchen-Strukturen bei Raumtemperatur initiiert wird und bei 45 °C ein Optimum annimmt. Kinetische Untersuchungen haben gezeigt, dass die Bildung dreieckiger Nanopl{\"a}ttchen bei schrittweiser Zugabe der Goldchlorid-Pr{\"a}kursorl{\"o}sung zur PalPhBisCarb enthaltenden Templatphase durch die Dosierrate der vesikul{\"a}ren Templatphase gesteuert werden kann. In umgekehrter Weise findet bei Zugabe der Templatphase zur Goldchlorid-Pr{\"a}kursorl{\"o}sung bei 45 °C ein {\"a}hnlicher, kinetisch gesteuerter Prozess der Bildung von Nanodreiecken statt, mit einer maximalen Ausbeute an dreieckigen Nanopl{\"a}ttchen von 29 \%. Im letzten Kapitel erfolgten erste Versuche zur Abtrennung dreieckiger Nanopl{\"a}ttchen von den {\"u}brigen Geometrien der gemischten Nanopartikell{\"o}sung mittels tensidinduzierter Verarmungsf{\"a}llung. 
Bei Verwendung von AOT mit einer Konzentration von 0,015 M wurde eine Ausbeute an Nanopl{\"a}ttchen von 99 \%, wovon 72 \% eine dreieckige Geometrie hatten, erreicht.}, language = {de} } @phdthesis{Schmierer2017, author = {Schmierer, Christoph}, title = {Technologietransfer und Spillovereffekte ausl{\"a}ndischer Tochterunternehmen in Entwicklungs- und Schwellenl{\"a}ndern}, series = {Potsdam Economic Studies}, journal = {Potsdam Economic Studies}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-398-5}, issn = {2196-8691}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103988}, school = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {F{\"u}r den Industrialisierungsprozess von Entwicklungs- und Schwellenl{\"a}ndern haben ausl{\"a}ndische Direktinvestitionen (ADI) eine wichtige Funktion. Sie k{\"o}nnen zum einen zu einer Erh{\"o}hung des industriellen Outputs des Ziellandes f{\"u}hren und zum anderen als Tr{\"a}ger von technologischem Wissen fungieren. Neues Wissen kann den Empf{\"a}ngerl{\"a}ndern der ADI durch Spillovereffekte und Technologietransfers ausl{\"a}ndischer Tochterunternehmen zufließen. Diese Arbeit soll Antworten auf die Fragen geben, durch welche Mechanismen Spillovereffekte und Technologietransfers ausgel{\"o}st werden und wie Entwicklungs- und Schwellenl{\"a}nder diesen Wissenszufluss zur Beschleunigung ihres Industrialisierungsprozesses einsetzen k{\"o}nnen. Hierf{\"u}r wird ein Konzept zur F{\"o}rderung von Spillovereffekten entwickelt. Weiterhin wird ein theoretisches Modell entwickelt, in dem der Technologietransfer ausl{\"a}ndischer Exportplattformen erstmals in Abh{\"a}ngigkeit des Anteils der Vorprodukte, die im Gastland nachgefragt werden, untersucht wird. 
In den Fallstudien Irland und Malaysia werden die Ergebnisse des theoretischen Modells sowie des entwickelten Konzepts illustriert.}, language = {de} } @phdthesis{Schmidt2017, author = {Schmidt, Silke Regina}, title = {Analyzing lakes in the time frequency domain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406955}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 126}, year = {2017}, abstract = {The central aim of this thesis is to demonstrate the benefits of innovative frequency-based methods to better explain the variability observed in lake ecosystems. Freshwater ecosystems may be the most threatened part of the hydrosphere. Lake ecosystems are particularly sensitive to changes in climate and land use because they integrate disturbances across their entire catchment. This makes understanding the dynamics of lake ecosystems an intriguing and important research priority. This thesis adds new findings to the baseline knowledge regarding variability in lake ecosystems. It provides a literature-based, data-driven and methodological framework for the investigation of variability and patterns in environmental parameters in the time frequency domain. Observational data often show considerable variability in the environmental parameters of lake ecosystems. This variability is mostly driven by a plethora of periodic and stochastic processes inside and outside the ecosystems. These run in parallel and may operate at vastly different time scales, ranging from seconds to decades. In measured data, all of these signals are superimposed, and dominant processes may obscure the signals of other processes, particularly when analyzing mean values over long time scales. Dominant signals are often caused by phenomena at long time scales like seasonal cycles, and most of these are well understood in the limnological literature. The variability injected by biological, chemical and physical processes operating at smaller time scales is less well understood. 
However, variability affects the state and health of lake ecosystems at all time scales. Besides measuring time series at sufficiently high temporal resolution, the investigation of the full spectrum of variability requires innovative methods of analysis. Analyzing observational data in the time frequency domain allows to identify variability at different time scales and facilitates their attribution to specific processes. The merit of this approach is subsequently demonstrated in three case studies. The first study uses a conceptual analysis to demonstrate the importance of time scales for the detection of ecosystem responses to climate change. These responses often occur during critical time windows in the year, may exhibit a time lag and can be driven by the exceedance of thresholds in their drivers. This can only be detected if the temporal resolution of the data is high enough. The second study applies Fast Fourier Transform spectral analysis to two decades of daily water temperature measurements to show how temporal and spatial scales of water temperature variability can serve as an indicator for mixing in a shallow, polymictic lake. The final study uses wavelet coherence as a diagnostic tool for limnology on a multivariate high-frequency data set recorded between the onset of ice cover and a cyanobacteria summer bloom in the year 2009 in a polymictic lake. Synchronicities among limnological and meteorological time series in narrow frequency bands were used to identify and disentangle prevailing limnological processes. Beyond the novel empirical findings reported in the three case studies, this thesis aims to more generally be of interest to researchers dealing with now increasingly available time series data at high temporal resolution. 
A set of innovative methods to attribute patterns to processes, their drivers and constraints is provided to help make more efficient use of this kind of data.}, language = {en} } @phdthesis{Schmidt2017a, author = {Schmidt, Katja}, title = {Assessing, testing, and implementing socio-cultural valuation methods to operationalise ecosystem services in land use management}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411049}, school = {Universit{\"a}t Potsdam}, pages = {165}, year = {2017}, abstract = {Ecosystem services (ESs) are defined as the contributions that ecosystems make to human wellbeing and are increasingly being used as an approach to explore the importance of ecosystems for humans through their valuation. Although value plurality has been recognised long before the mainstreaming of ESs research, socio-cultural valuation is still underrepresented in ESs assessments. It is the central goal of this PhD dissertation to explore the ability of socio-cultural valuation methods for the operationalisation of ESs research in land management. To address this, I formulated three research objectives that are briefly outlined below and relate to the three studies conducted during this dissertation. The first objective relates to the assessment of the current role of socio-cultural valuation in ESs research. Human values are central to ESs research yet non-monetary socio-cultural valuation methods have been found underrepresented in the field of ESs science. In regard to the unbalanced consideration of value domains and conceptual uncertainties, I perform a systematic literature review aiming to answer the research question: To what extent have socio-cultural values been addressed in ESs assessments? The second objective aims to test socio-cultural valuation methods of ESs and their relevance for land use preferences by exploring their methodological opportunities and limitations. 
Socio-cultural valuation methods have only recently become a focus in ESs research and therefore bear various uncertainties in regard to their methodological implications. To overcome these uncertainties, I analysed responses to a visitor survey. The research questions related to the second objective were: What are the implications of different valuation methods for ESs values? To what extent are land use preferences explained by socio-cultural values of ESs? The third objective addressed in this dissertation is the implementation of ESs research into land management through socio-cultural valuation. Though it is emphasised that the ESs approach can assist decision making, there is little empirical evidence of the effect of ESs knowledge on land management. I proposed a way to implement transdisciplinary, spatially explicit research on ESs by answering the following research questions: Which landscape features underpinning ESs supply are considered in land management? How can participatory approaches accounting for ESs be operationalised in land management? The empirical research resulted in five main findings that provide answers to the research questions. First, this dissertation provides evidence that socio-cultural values are an integral part of ESs research. I found that they can be assessed for provisioning, regulating, and cultural services though they are linked to cultural services to a greater degree. Socio-cultural values have been assessed by monetary and non-monetary methods and their assessment is effectively facilitated by stakeholder participation. Second, I found that different methods of socio-cultural valuation revealed different information. Whereas rating revealed a general value of ESs, weighting was found more suitable to identify priorities across ESs. Value intentions likewise differed in the distribution of values, generally implying a higher value for others than for respondents themselves. 
Third, I showed that ESs values were distributed similarly across groups with differing land use preferences. Thus, I provided empirical evidence that ESs values and landscape values should not be used interchangeably. Fourth, I showed which landscape features important for ESs supply in a Scottish regional park are not sufficiently accounted for in the current management strategy. This knowledge is useful for the identification of priority sites for land management. Finally, I provide an approach to explore how ESs knowledge elicited by participatory mapping can be operationalised in land management. I demonstrate how stakeholder knowledge and values can be used for the identification of ESs hotspots and how these hotspots can be compared to current management priorities. This dissertation helps to bridge current gaps of ESs science by advancing the understanding of the current role of socio-cultural values in ESs research, testing different methods and their relevance for land use preferences, and implementing ESs knowledge into land management. If and to what extent ESs and their values are implemented into ecosystem management is mainly the choice of the management. An advanced understanding of socio-cultural valuation methods contributes to the normative basis of this management, while the proposal for the implementation of ESs in land management presents a practical approach of how to transfer this type of knowledge into practice. 
The proposed methods for socio-cultural valuation can support guiding land management towards a balanced consideration of ESs and conservation goals.}, language = {en} } @phdthesis{Schiprowski2017, author = {Schiprowski, Amelie}, title = {Four empirical essays on the economics of job search}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-413508}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 209}, year = {2017}, abstract = {Modern welfare states aim at designing unemployment insurance (UI) schemes which minimize the length of unemployment spells. A variety of institutions and incentives, which are embedded in UI schemes across OECD countries, reflect this attempt. For instance, job seekers entering UI are often provided with personal support through a caseworker. They also face the requirement to regularly submit a minimum number of job applications, which is typically enforced through benefit cuts in the case of non-compliance. Moreover, job seekers may systematically receive information on their re-employment prospects. As a consequence, UI design has become a complex task. Policy makers need to define not only the amount and duration of benefit payments, but also several other choice parameters. These include the intensity and quality of personal support through caseworkers, the level of job search requirements, the strictness of enforcement, and the information provided to unemployed individuals. Causal estimates on how these parameters affect re-employment outcomes are thus central inputs to the design of modern UI systems: how much do individual caseworkers influence the transition out of unemployment? Does the requirement of an additional job application translate into increased job finding? Do individuals behave differently when facing a strict versus mild enforcement system? And how does information on re-employment prospects influence the job search decision? This dissertation proposes four novel research designs to answer this question. 
Chapters one to three elaborate quasi-experimental identification strategies, which are applied to large-scale administrative data from Switzerland. They, respectively, measure how personal interactions with caseworkers (chapter one), the level of job search requirements (chapter two) and the strictness of enforcement (chapter three) affect re-employment outcomes. Chapter four proposes a structural estimation approach, based on linked survey and administrative data from Germany. It studies how over-optimism on future wage offers affects the decision to search for work, and how the provision of information changes this decision.}, language = {en} } @phdthesis{Schibalski2017, author = {Schibalski, Anett}, title = {Statistical and process-based models for understanding species distributions in changing environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401482}, school = {Universit{\"a}t Potsdam}, pages = {ix, 129}, year = {2017}, abstract = {Understanding the distribution of species is fundamental for biodiversity conservation, ecosystem management, and increasingly also for climate impact assessment. The presence of a species in a given site depends on physiological limitations (abiotic factors), interactions with other species (biotic factors), migratory or dispersal processes (site accessibility) as well as the continuing effects of past events, e.g. disturbances (site legacy). Existing approaches to predict species distributions either (i) correlate observed species occurrences with environmental variables describing abiotic limitations, thus ignoring biotic interactions, dispersal and legacy effects (statistical species distribution model, SDM); or (ii) mechanistically model the variety of processes determining species distributions (process-based model, PBM). SDMs are widely used due to their easy applicability and ability to handle varied data qualities. 
But they fail to reproduce the dynamic response of species distributions to changing conditions. PBMs are expected to be superior in this respect, but they need very specific data unavailable for many species, and are often more complex and require more computational effort. More recently, hybrid models link the two approaches to combine their respective strengths. In this thesis, I apply and compare statistical and process-based approaches to predict species distributions, and I discuss their respective limitations, specifically for applications in changing environments. Detailed analyses of SDMs for boreal tree species in Finland reveal that nonclimatic predictors - edaphic properties and biotic interactions - are important limitations at the treeline, contesting the assumption of unrestricted, climatically induced range expansion. While the estimated SDMs are successful within their training data range, spatial and temporal model transfer fails. Mapping and comparing sampled predictor space among data subsets identifies spurious extrapolation as the plausible explanation for limited model transferability. Using these findings, I analyze the limited success of an established PBM (LPJ-GUESS) applied to the same problem. Examination of process representation and parameterization in the PBM identifies implemented processes to adjust (competition between species, disturbance) and missing processes that are crucial in boreal forests (nutrient limitation, forest management). Based on climatic correlations shifting over time, I stress the restricted temporal transferability of bioclimatic limits used in LPJ-GUESS and similar PBMs. By critically assessing the performance of SDM and PBM in this application, I demonstrate the importance of understanding the limitations of the applied methods. As a potential solution, I add a novel approach to the repertoire of existing hybrid models. 
By simulation experiments with an individual-based PBM which reproduces community dynamics resulting from biotic factors, dispersal and legacy effects, I assess the resilience of coastal vegetation to abrupt hydrological changes. According to the results of the resilience analysis, I then modify temporal SDM predictions, thereby transferring relevant process detail from PBM to SDM. The direction of knowledge transfer from PBM to SDM avoids disadvantages of current hybrid models and increases the applicability of the resulting model in long-term, large-scale applications. A further advantage of the proposed framework is its flexibility, as it is readily extended to other model types, disturbance definitions and response characteristics. Concluding, I argue that we already have a diverse range of promising modelling tools at hand, which can be refined further. But most importantly, they need to be applied more thoughtfully. Bearing their limitations in mind, combining their strengths and openly reporting underlying assumptions and uncertainties is the way forward.}, language = {en} } @phdthesis{Santilli2017, author = {Santilli, Mario}, title = {Higher order rectifiability in Euclidean space}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403632}, school = {Universit{\"a}t Potsdam}, pages = {45}, year = {2017}, abstract = {The first main goal of this thesis is to develop a concept of approximate differentiability of higher order for subsets of the Euclidean space that allows to characterize higher order rectifiable sets, extending somehow well known facts for functions. We emphasize that for every subset A of the Euclidean space and for every integer k ≥ 2 we introduce the approximate differential of order k of A and we prove it is a Borel map whose domain is a (possibly empty) Borel set. This concept could be helpful to deal with higher order rectifiable sets in applications. 
The other goal is to extend to general closed sets a well known theorem of Alberti on the second order rectifiability properties of the boundary of convex bodies. The Alberti theorem provides a stratification of second order rectifiable subsets of the boundary of a convex body based on the dimension of the (convex) normal cone. Considering a suitable generalization of this normal cone for general closed subsets of the Euclidean space and employing some results from the first part we can prove that the same stratification exists for every closed set.}, language = {en} } @phdthesis{Rossnagl2017, author = {Roßnagl, Susanne}, title = {Zum Zusammenhang von Nutzung von Lerngelegenheiten, sozialer Kooperation, individuellen Determinanten und Kompetenzselbsteinsch{\"a}tzungen von Lehrkr{\"a}ften im Berufseinstieg im Allgemeinen Pflichtschulbereich in Nieder{\"o}sterreich}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398950}, school = {Universit{\"a}t Potsdam}, pages = {302}, year = {2017}, abstract = {Die vorliegende Studie besch{\"a}ftigt sich mit dem Berufseinstieg von Lehrpersonen und untersucht Zusammenh{\"a}nge zwischen der Nutzung von Lerngelegenheiten, sozialer Kooperation, individuellen Determinanten und Kompetenzselbsteinsch{\"a}tzungen, die erstmalig in dieser Zusammenstellung {\"u}berpr{\"u}ft worden sind. Die Literaturrecherche machte deutlich, dass der Berufseinstieg eine besonders wichtige Phase f{\"u}r die berufliche Sozialisation darstellt. Die Nutzung von Lerngelegenheiten wurde empirisch noch nicht sehr oft untersucht vor allem nicht im Zusammenhang mit individuellen Determinanten und die Kooperation meist nur unter dem Fokus der Bedeutung f{\"u}r die Schule. An der empirischen Untersuchung nahmen 223 berufseinsteigende Lehrkr{\"a}fte aus dem Allgemeinen Pflichtschulbereich in Nieder{\"o}sterreich teil, die verpflichtend in den ersten beiden Dienstjahren Fortbildungen besuchen m{\"u}ssen. 
Diese Situation ist einzigartig in {\"O}sterreich, da es nur in diesem Bundesland seit 2011 eine verpflichtend zu besuchende Berufseinstiegsphase gibt. Ab 2019 wird es in {\"O}sterreich eine Induktionsphase f{\"u}r alle Lehrkr{\"a}fte geben, die in den Beruf einsteigen. Mit Hilfe von Strukturgleichungsmodellen wurden die Zusammenh{\"a}nge untersucht und es zeigte sich, dass es diese in allen Bereichen gibt. Am Ende konnte ein neues Theoriemodell zur Nutzung von Lerngelegenheiten abgeleitet werden, das sich in Theorien zu professionellen Kompetenzentwicklung und zu Lernen in Aus-, Fortund Weiterbildung einordnen l{\"a}sst.}, language = {de} } @phdthesis{Roland2017, author = {Roland, Steffen}, title = {Charge carrier recombination and open circuit voltage in organic solar cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397721}, school = {Universit{\"a}t Potsdam}, pages = {VI, 145}, year = {2017}, abstract = {Tremendous progress in the development of thin film solar cell techniques has been made over the last decade. The field of organic solar cells is constantly developing, new material classes like Perowskite solar cells are emerging and different types of hybrid organic/inorganic material combinations are being investigated for their physical properties and their applicability in thin film electronics. Besides typical single-junction architectures for solar cells, multi-junction concepts are also being investigated as they enable the overcoming of theoretical limitations of a single-junction. In multi-junction devices each sub-cell operates in different wavelength regimes and should exhibit optimized band-gap energies. It is exactly this tunability of the band-gap energy that renders organic solar cell materials interesting candidates for multi-junction applications. Nevertheless, only few attempts have been made to combine inorganic and organic solar cells in series connected multi-junction architectures. 
Even though a great diversity of organic solar cells exists nowadays, their open circuit voltage is usually low compared to the band-gap of the active layer. Hence, organic low band-gap solar cells in particular show low open circuit voltages and the key factors that determine the voltage losses are not yet fully understood. Besides open circuit voltage losses the recombination of charges in organic solar cells is also a prevailing research topic, especially with respect to the influence of trap states. The exploratory focus of this work is therefore set, on the one hand, on the development of hybrid organic/inorganic multi-junctions and, on the other hand, on gaining a deeper understanding of the open circuit voltage and the recombination processes of organic solar cells. In the first part of this thesis, the development of a hybrid organic/inorganic triple-junction will be discussed which showed at that time (Jan. 2015) a record power conversion efficiency of 11.7\%. The inorganic sub-cells of these devices consist of hydrogenated amorphous silicon and were delivered by the Competence Center Thin-Film and Nanotechnology for Photovoltaics in Berlin. Different recombination contacts and organic sub-cells were tested in conjunction with these inorganic sub-cells on the basis of optical modeling predictions for the optimal layer thicknesses to finally reach record efficiencies for this type of solar cells. In the second part, organic model systems will be investigated to gain a better understanding of the fundamental loss mechanisms that limit the open circuit voltage of organic solar cells. First, bilayer systems with different orientation of the donor and acceptor molecules were investigated to study the influence of the donor/acceptor orientation on non-radiative voltage loss. 
Secondly, three different bulk heterojunction solar cells all comprising the same amount of fluorination and the same polymer backbone in the donor component were examined to study the influence of long range electrostatics on the open circuit voltage. Thirdly, the device performance of two bulk heterojunction solar cells was compared which consisted of the same donor polymer but used different fullerene acceptor molecules. By this means, the influence of changing the energetics of the acceptor component on the open circuit voltage was investigated and a full analysis of the charge carrier dynamics was presented to unravel the reasons for the worse performance of the solar cell with the higher open circuit voltage. In the third part, a new recombination model for organic solar cells will be introduced and its applicability shown for a typical low band-gap cell. This model sheds new light on the recombination process in organic solar cells in a broader context as it re-evaluates the recombination pathway of charge carriers in devices which show the presence of trap states. Thereby it addresses a current research topic and helps to resolve alleged discrepancies which can arise from the interpretation of data derived by different measurement techniques.}, language = {en} } @phdthesis{RobainaEstevez2017, author = {Robaina Estevez, Semidan}, title = {Context-specific metabolic predictions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401365}, school = {Universit{\"a}t Potsdam}, pages = {vi, 158}, year = {2017}, abstract = {All life-sustaining processes are ultimately driven by thousands of biochemical reactions occurring in the cells: the metabolism. These reactions form an intricate network which produces all required chemical compounds, i.e., metabolites, from a set of input molecules. 
Cells regulate the activity through metabolic reactions in a context-specific way; only reactions that are required in a cellular context, e.g., cell type, developmental stage or environmental condition, are usually active, while the rest remain inactive. The context-specificity of metabolism can be captured by several kinds of experimental data, such as by gene and protein expression or metabolite profiles. In addition, these context-specific data can be assimilated into computational models of metabolism, which then provide context-specific metabolic predictions. This thesis is composed of three individual studies focussing on context-specific experimental data integration into computational models of metabolism. The first study presents an optimization-based method to obtain context-specific metabolic predictions, and offers the advantage of being fully automated, i.e., free of user defined parameters. The second study explores the effects of alternative optimal solutions arising during the generation of context-specific metabolic predictions. These alternative optimal solutions are metabolic model predictions that represent equally well the integrated data, but that can markedly differ. This study proposes algorithms to analyze the space of alternative solutions, as well as some ways to cope with their impact in the predictions. Finally, the third study investigates the metabolic specialization of the guard cells of the plant Arabidopsis thaliana, and compares it with that of a different cell type, the mesophyll cells. To this end, the computational methods developed in this thesis are applied to obtain metabolic predictions specific to guard cell and mesophyll cells. These cell-specific predictions are then compared to explore the differences in metabolic activity between the two cell types. In addition, the effects of alternative optima are taken into consideration when comparing the two cell types. 
The computational results indicate a major reorganization of the primary metabolism in guard cells. These results are supported by an independent 13C labelling experiment.}, language = {en} } @phdthesis{RibeiroMartins2017, author = {Ribeiro Martins, Renata Filipa}, title = {Deciphering evolutionary histories of Southeast Asian Ungulates}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404669}, school = {Universit{\"a}t Potsdam}, pages = {vii, 115}, year = {2017}, abstract = {Im Verlauf von Jahrmillionen gestalteten evolution{\"a}re Kr{\"a}fte die Verbreitung und genetische Variabilit{\"a}t von Arten, indem sie die Anpassungsf{\"a}higkeit und {\"U}berlebenswahrscheinlichkeit dieser Arten beeinflussten. Da S{\"u}dostasien eine außerordentlich artenreiche Region darstellt, eignet sie sich besonders, um den Einfluss dieser Kr{\"a}fte zu untersuchen. Historische Klimaver{\"a}nderungen hatten dramatische Auswirkungen auf die Verf{\"u}gbarkeit sowie die Verbreitung von Habitaten in S{\"u}dostasien, weil hierdurch wiederholt das Festland mit sonst isolierten Inseln verbunden wurde. Dies beeinflusste nicht nur, wie Arten in dieser Region verbreitet sind, sondern erm{\"o}glichte auch eine zunehmende genetische Variabilit{\"a}t. Zwar ist es bekannt, dass Arten mit {\"a}hnlicher Evolutionsgeschichte unterschiedliche phylogeographische Muster aufweisen k{\"o}nnen. Die zugrundeliegenden Mechanismen sind jedoch nur gering verstanden. Diese Dissertation behandelt die Phylogeographie von drei Gruppen von Huftieren, welche im S{\"u}den und S{\"u}dosten Asiens vorkommen. Dabei war das vornehmliche Ziel, zu verstehen, wie es zur Ausbildung verschiedener Arten sowie zu einer regionalen Verteilung von genetischer Variabilit{\"a}t kam. Hierf{\"u}r untersuchte ich die mitochondrialen Genome alter Proben. Dadurch war es m{\"o}glich, Populationen des gesamten Verbreitungsgebietes der jeweiligen Arten zu untersuchen - auch solche Populationen, die heutzutage nicht mehr existieren. 
Entsprechend der einzelnen Huftiergruppen ist diese Arbeit in drei Kapitel unterteilt: Muntjaks (Muntiacus sp.), Hirsche der Gattung Rusa und asiatische Nash{\"o}rner. Alle drei Gruppen weisen eine Aufteilung in unterschiedliche Linien auf, was jeweils direkt auf Ereignisse des Pleistoz{\"a}ns zur{\"u}ckgef{\"u}hrt werden kann. Muntjaks sind eine weit verbreitete Art, die in verschiedensten Habitaten vorkommen kann. Ich wies nach, dass es in der Vergangenheit zu genetischem Austausch zwischen Populationen von verschiedenen Inseln des Sundalandes kam. Dies deutet auf die F{\"a}higkeit von Muntjaks hin, sich an die ehemaligen Landbr{\"u}cken anzupassen. Jedoch zeige ich auch, dass mindestens zwei Hindernisse bei ihrer Verbreitung existierten, wodurch es zu einer Differenzierung von Populationen kam: eine Barriere trennte Populationen des asiatischen Festlands von denen der Sundainseln, die andere isolierte sri-lankische von restlichen Muntjaks. Die zwei untersuchten Rusa-Arten weisen ein anderes Muster auf, was wiederum eine weitere Folge der pleistoz{\"a}nen Landbr{\"u}cken darstellt. Beide Arten sind ausschließlich monophyletisch. Allerdings gibt es Anzeichen f{\"u}r die Hybridisierung dieser Arten auf Java, was durch eine fr{\"u}here Ausbreitung des sambar (R. unicolor) gef{\"o}rdert wurde. Aufgrund dessen fand ich zudem, dass all jene Individuen der anderen Art, R. timorensis, die durch den Menschen auf die {\"o}stlichen Sundainseln gebracht wurden, in Wahrheit Hybride sind. F{\"u}r den dritten Teil war es mir m{\"o}glich, Proben von Vertretern ausgestorbener Populationen vom asiatischen Festland des Sumatra- und des Java-Nashorns (Dicerorhinus sumatrensis und Rhinoceros sondaicus) zu analysieren. Die Ergebnisse meiner Arbeit belegen, dass die genetische Vielfalt dieser historischen Populationen bedeutend gr{\"o}ßer war als die der heutigen Nachkommen. Ihre jeweilige Evolutionsgeschichte korreliert stark mit pleistoz{\"a}nen Prozessen. 
Außerdem betonen meine Ergebnisse das enorme Ausmaß von verlorener genetischer Diversit{\"a}t dieser stark bedrohten Arten. Jede Art besitzt eine individuelle phylogeographische Geschichte. Ebenso fand ich aber auch allgemeing{\"u}ltige Muster von genetischer Differenzierung in allen Gruppen, welche direkt mit Ereignissen des Pleistoz{\"a}ns assoziiert werden k{\"o}nnen. Vergleicht man jedoch die einzelnen Ergebnisse der Arten, wird deutlich, dass die gleichen geologischen Prozesse nicht zwangsl{\"a}ufig in gleiche evolutive Ergebnisse resultieren. Einer der Gr{\"u}nde hierf{\"u}r k{\"o}nnte zum Beispiel die unterschiedliche Durchl{\"a}ssigkeit der entstandenen Landkorridore des Sundaschelfs sein. Die M{\"o}glichkeit diese neuen Habitate zu nutzen und somit auch zu passieren steht im direkten Bezug zu den spezifischen {\"o}kologischen Bed{\"u}rfnissen der Arten. Zusammenfassend leisten meine Erkenntnisse einen wichtigen Beitrag, die Evolution und geographische Aufteilung der genetischen Vielfalt in diesem Hotspot an Biodiversit{\"a}t zu verstehen. Obendrein k{\"o}nnen sie aber auch Auswirkungen auf die Erhaltung und systematische Klassifikation der untersuchten Arten haben.}, language = {en} } @phdthesis{ReynaGonzalez2017, author = {Reyna Gonz{\'a}lez, Emmanuel}, title = {Engineering of the microviridin post-translational modification enzymes for the production of synthetic protease inhibitors}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406979}, school = {Universit{\"a}t Potsdam}, pages = {XI, 91, CI}, year = {2017}, abstract = {Natural products and their derivatives have always been a source of drug leads. In particular, bacterial compounds have played an important role in drug development, for example in the field of antibiotics. 
A decrease in the discovery of novel leads from natural sources and the hope of finding new leads through the generation of large libraries of drug-like compounds by combinatorial chemistry aimed at specific molecular targets drove the pharmaceutical companies away from research on natural products. However, recent technological advances in genetics, bioinformatics and analytical chemistry have revived the interest in natural products. The ribosomally synthesized and post-translationally modified peptides (RiPPs) are a group of natural products generated by the action of post-translationally modifying enzymes on precursor peptides translated from mRNA by ribosomes. The great substrate promiscuity exhibited by many of the enzymes from RiPP biosynthetic pathways have led to the generation of hundreds of novel synthetic and semisynthetic variants, including variants carrying non-canonical amino acids (ncAAs). The microviridins are a family of RiPPs characterized by their atypical tricyclic structure composed of lactone and lactam rings, and their activity as serine protease inhibitors. The generalities of their biosynthetic pathway have already been described, however, the lack of information on details such as the protease responsible for cleaving off the leader peptide from the cyclic core peptide has impeded the fast and cheap production of novel microviridin variants. In the present work, knowledge on leader peptide activation of enzymes from other RiPP families has been extrapolated to the microviridin family, making it possible to bypass the need of a leader peptide. This feature allowed for the exploitation of the microviridin biosynthetic machinery for the production of novel variants through the establishment of an efficient one-pot in vitro platform. 
The relevance of this chemoenzymatic approach has been exemplified by the synthesis of novel potent serine protease inhibitors from both rationally-designed peptide libraries and bioinformatically predicted microviridins. Additionally, new structure-activity relationships (SARs) could be inferred by screening microviridin intermediates. The significance of this technique was further demonstrated by the simple incorporation of ncAAs into the microviridin scaffold.}, language = {en} } @phdthesis{Reike2017, author = {Reike, Dennis}, title = {A look behind perceptual performance in numerical cognition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407821}, school = {Universit{\"a}t Potsdam}, pages = {vi, 136}, year = {2017}, abstract = {Recognizing, understanding, and responding to quantities are considerable skills for human beings. We can easily communicate quantities, and we are extremely efficient in adapting our behavior to numerical related tasks. One usual task is to compare quantities. We also use symbols like digits in numerical-related tasks. To solve tasks including digits, we must rely on our previously learned internal number representations. This thesis elaborates on the process of number comparison with the use of noisy mental representations of numbers, the interaction of number and size representations and how we use mental number representations strategically. For this, three studies were carried out. In the first study, participants had to decide which of two presented digits was numerically larger. They had to respond with a saccade in the direction of the anticipated answer. Using only a small set of meaningfully interpretable parameters, a variant of random walk models is described that accounts for response time, error rate, and variance of response time for the full matrix of 72 digit pairs. 
In addition, the used random walk model predicts a numerical distance effect even for error response times and this effect clearly occurs in the observed data. In relation to corresponding correct answers error responses were systematically faster. However, different from standard assumptions often made in random walk models, this account required that the distributions of step sizes of the induced random walks be asymmetric to account for this asymmetry between correct and incorrect responses. Furthermore, the presented model provides a well-defined framework to investigate the nature and scale (e.g., linear vs. logarithmic) of the mapping of numerical magnitude onto its internal representation. In comparison of the fits of proposed models with linear and logarithmic mapping, the logarithmic mapping is suggested to be prioritized. Finally, we discuss how our findings can help interpret complex findings (e.g., conflicting speed vs. accuracy trends) in applied studies that use number comparison as a well-established diagnostic tool. Furthermore, a novel oculomotoric effect is reported, namely the saccadic overschoot effect. The participants responded by saccadic eye movements and the amplitude of these saccadic responses decreases with numerical distance. For the second study, an experimental design was developed that allows us to apply the signal detection theory to a task where participants had to decide whether a presented digit was physically smaller or larger. A remaining question is, whether the benefit in (numerical magnitude - physical size) congruent conditions is related to a better perception than in incongruent conditions. Alternatively, the number-size congruency effect is mediated by response biases due to numbers magnitude. The signal detection theory is a perfect tool to distinguish between these two alternatives. It describes two parameters, namely sensitivity and response bias. 
Changes in the sensitivity are related to the actual task performance due to real differences in perception processes whereas changes in the response bias simply reflect strategic implications as a stronger preparation (activation) of an anticipated answer. Our results clearly demonstrate that the number-size congruency effect cannot be reduced to mere response bias effects, and that genuine sensitivity gains for congruent number-size pairings contribute to the number-size congruency effect. Third, participants had to perform a SNARC task - deciding whether a presented digit was odd or even. Local transition probability of irrelevant attributes (magnitude) was varied while local transition probability of relevant attributes (parity) and global probability occurrence of each stimulus were kept constant. Participants were quite sensitive in recognizing the underlying local transition probability of irrelevant attributes. A gain in performance was observed for actual repetitions of the irrelevant attribute in relation to changes of the irrelevant attribute in high repetition conditions compared to low repetition conditions. One interpretation of these findings is that information about the irrelevant attribute (magnitude) in the previous trial is used as an informative precue, so that participants can prepare early processing stages in the current trial, with the corresponding benefits and costs typical of standard cueing studies. Finally, the results reported in this thesis are discussed in relation to recent studies in numerical cognition.}, language = {en} } @phdthesis{Perillon2017, author = {P{\'e}rillon, C{\'e}cile}, title = {The effect of groundwater on benthic primary producers and their interaction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406883}, school = {Universit{\"a}t Potsdam}, pages = {XVII, 180}, year = {2017}, abstract = {In littoral zones of lakes, multiple processes determine lake ecology and water quality. 
Lacustrine groundwater discharge (LGD), most frequently taking place in littoral zones, can transport or mobilize nutrients from the sediments and thus contribute significantly to lake eutrophication. Furthermore, lake littoral zones are the habitat of benthic primary producers, namely submerged macrophytes and periphyton, which play a key role in lake food webs and influence lake water quality. Groundwater-mediated nutrient-influx can potentially affect the asymmetric competition between submerged macrophytes and periphyton for light and nutrients. While rooted macrophytes have superior access to sediment nutrients, periphyton can negatively affect macrophytes by shading. LGD may thus facilitate periphyton production at the expense of macrophyte production, although studies on this hypothesized effect are missing. The research presented in this thesis is aimed at determining how LGD influences periphyton, macrophytes, and the interactions between these benthic producers. Laboratory experiments were combined with field experiments and measurements in an oligo-mesotrophic hard water lake. In the first study, a general concept was developed based on a literature review of the existing knowledge regarding the potential effects of LGD on nutrients and inorganic and organic carbon loads to lakes, and the effect of these loads on periphyton and macrophytes. The second study includes a field survey and experiment examining the effects of LGD on periphyton in an oligotrophic, stratified hard water lake (Lake Stechlin). This study shows that LGD, by mobilizing phosphorus from the sediments, significantly promotes epiphyton growth, especially at the end of the summer season when epilimnetic phosphorus concentrations are low. The third study focuses on the potential effects of LGD on submerged macrophytes in Lake Stechlin. 
This study revealed that LGD may have contributed to an observed change in macrophyte community composition and abundance in the shallow littoral areas of the lake. Finally, a laboratory experiment was conducted which mimicked the conditions of a seepage lake. Groundwater circulation was shown to mobilize nutrients from the sediments, which significantly promoted periphyton growth. Macrophyte growth was negatively affected at high periphyton biomasses, confirming the initial hypothesis. More generally, this thesis shows that groundwater flowing into nutrient-limited lakes may import or mobilize nutrients. These nutrients first promote periphyton, and subsequently provoke radical changes in macrophyte populations before finally having a possible influence on the lake's trophic state. Hence, the eutrophying effect of groundwater is delayed and, at moderate nutrient loading rates, partly dampened by benthic primary producers. The present research emphasizes the importance and complexity of littoral processes, and the need to further investigate and monitor the benthic environment. As present and future global changes can significantly affect LGD, the understanding of these complex interactions is required for the sustainable management of lake water quality.}, language = {en} } @phdthesis{Pedeches2017, author = {P{\'e}d{\`e}ches, Laure}, title = {Stochastic models for collective motions of populations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405491}, school = {Universit{\"a}t Potsdam}, pages = {187}, year = {2017}, abstract = {Stochastisches Modell f{\"u}r kollektive Bewegung von Populationen In dieser Doktorarbeit befassen wir uns mit stochastischen Systemen, die eines der mysteri{\"o}sesten biologischen Ph{\"a}nomene als Modell darstellen: die kollektive Bewegung von Gemeinschaften. Diese werden bei V{\"o}gel- und Fischschw{\"a}rmen, aber auch bei manchen Bakterien, Viehherden oder gar bei Menschen beobachtet. 
Dieser Verhaltenstyp spielt ebenfalls in anderen Bereichen wie Finanzwesen, Linguistik oder auch Robotik eine Rolle. Wir nehmen uns der Dynamik einer Gruppe von N Individuen, insbesondere zweier asymptotischen Verhaltenstypen an. Einerseits befassen wir uns mit den Eigenschaften der Ergodizit{\"a}t in Langzeit: Existenz einer invarianten Wahrscheinlichkeitsverteilung durch Ljapunow-Funktionen, und Konvergenzrate der {\"U}bergangshalbgruppe gegen diese Wahrscheinlichkeit. Eine ebenfalls zentrale Thematik unserer Forschung ist der Begriff Flocking: es wird damit definiert, dass eine Gruppe von Individuen einen dynamischen Konsens ohne hierarchische Struktur erreichen kann; mathematisch gesehen entspricht dies der Aneinanderreihung der Geschwindigkeiten und dem Zusammenkommen des Schwarmes. Andererseits gehen wir das Ph{\"a}nomen der "Propagation of Chaos" an, wenn die Anzahl N der Teilchen ins Unendliche tendiert: die Bewegungen der jeweiligen Individuen werden asymptotisch unabh{\"a}ngig. Unser Ausgangspunkt ist das Cucker-Smale-Modell, ein deterministisches kinetisches Molekular-Modell f{\"u}r eine Gruppe ohne hierarchische Struktur. Die Wechselwirkung zwischen zwei Teilchen variiert gem{\"a}ß deren "Kommunikationsrate", die wiederum von deren relativen Entfernung abh{\"a}ngt und polynomisch abnimmt. Im ersten Kapitel adressieren wir das asymptotische Verhalten eines Cucker-Smale-Modells mit Rauschst{\"o}rung und dessen Varianten. Kapitel 2 stellt mehrere Definitionen des Flockings in einem Zufallsrahmen dar: diverse stochastische Systeme, die verschiedenen Rauschformen entsprechen (die eine gest{\"o}rte Umgebung, den "freien Willen" des jeweiligen Individuums oder eine unterbrochene {\"U}bertragung suggerieren) werden im Zusammenhang mit diesen Begriffen unter die Lupe genommen. Das dritte Kapitel basiert auf der "Cluster Expansion"-Methode aus der statistischen Mechanik. 
Wir beweisen die exponentielle Ergodizit{\"a}t von gewissen nicht-Markow-Prozessen mit nicht-glattem Drift und wenden diese Ergebnisse auf St{\"o}rungen des Ornstein-Uhlenbeck-Prozesses an. Im letzten Teil, nehmen wir uns der zweidimensionalen parabolisch-elliptischen Gleichung von Keller-Segel an. Wir beweisen die Existenz einer L{\"o}sung, welche in gewisser Hinsicht einzig ist, indem wir, mittels Vergleich mit Bessel-Prozessen und der Dirichlet Formtheorie, m{\"o}gliche Stoßtypen zwischen den Teilchen ermitteln.}, language = {en} } @phdthesis{Peldszus2017, author = {Peldszus, Andreas}, title = {Automatic recognition of argumentation structure in short monological texts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421441}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 252}, year = {2017}, abstract = {The aim of this thesis is to develop approaches to automatically recognise the structure of argumentation in short monological texts. This amounts to identifying the central claim of the text, supporting premises, possible objections, and counter-objections to these objections, and connecting them correspondingly to a structure that adequately describes the argumentation presented in the text. The first step towards such an automatic analysis of the structure of argumentation is to know how to represent it. We systematically review the literature on theories of discourse, as well as on theories of the structure of argumentation against a set of requirements and desiderata, and identify the theory of J. B. Freeman (1991, 2011) as a suitable candidate to represent argumentation structure. Based on this, a scheme is derived that is able to represent complex argumentative structures and can cope with various segmentation issues typically occurring in authentic text. 
In order to empirically test our scheme for reliability of annotation, we conduct several annotation experiments, the most important of which assesses the agreement in reconstructing argumentation structure. The results show that expert annotators produce very reliable annotations, while the results of non-expert annotators highly depend on their training in and commitment to the task. We then introduce the 'microtext' corpus, a collection of short argumentative texts. We report on the creation, translation, and annotation of it and provide a variety of statistics. It is the first parallel corpus (with a German and English version) annotated with argumentation structure, and -- thanks to the work of our colleagues -- also the first annotated according to multiple theories of (global) discourse structure. The corpus is then used to develop and evaluate approaches to automatically predict argumentation structures in a series of six studies: The first two of them focus on learning local models for different aspects of argumentation structure. In the third study, we develop the main approach proposed in this thesis for predicting globally optimal argumentation structures: the 'evidence graph' model. This model is then systematically compared to other approaches in the fourth study, and achieves state-of-the-art results on the microtext corpus. The remaining two studies aim to demonstrate the versatility and elegance of the proposed approach by predicting argumentation structures of different granularity from text, and finally by using it to translate rhetorical structure representations into argumentation structures.}, language = {en} } @phdthesis{Pavashe2017, author = {Pavashe, Prashant}, title = {Synthesis and transformations of 2-thiocarbohydrates}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397739}, school = {Universit{\"a}t Potsdam}, pages = {xi, 132}, year = {2017}, abstract = {I. 
Ceric ammonium nitrate (CAN) mediated thiocyanate radical additions to glycals In this dissertation, a facile entry was developed for the synthesis of 2-thiocarbohydrates and their transformations. Initially, CAN mediated thiocyanation of carbohydrates was carried out to obtain the basic building blocks (2-thiocyanates) for the entire studies. Subsequently, 2-thiocyanates were reduced to the corresponding thiols using appropriate reagents and reaction conditions. The screening of substrates, stereochemical outcome and the reaction mechanism are discussed briefly (Scheme I). Scheme I. Synthesis of the 2-thiocyanates II and reductions to 2-thiols III \& IV. An interesting mechanism was proposed for the reduction of 2-thiocyanates II to 2-thiols III via formation of a disulfide intermediate. The water soluble free thiols IV were obtained by cleaving the thiocyanate and benzyl groups in a single step. In the subsequent part of studies, the synthetic potential of the 2-thiols was successfully expanded by simple synthetic transformations. II. Transformations of the 2-thiocarbohydrates The 2-thiols were utilized for convenient transformations including sulfa-Michael additions, nucleophilic substitutions, oxidation to disulfides and functionalization at the anomeric position. The diverse functionalizations of the carbohydrates at the C-2 position by means of the sulfur linkage are the highlighting feature of these studies. Thus, it creates an opportunity to expand the utility of 2-thiocarbohydrates for biological studies. Reagents and conditions: a) I2, pyridine, THF, rt, 15 min; b) K2CO3, MeCN, rt, 1 h; c) MeI, K2CO3, DMF, 0 °C, 5 min; d) Ac2O, H2SO4 (1 drop), rt, 10 min; e) CAN, MeCN/H2O, NH4SCN, rt, 1 h; f) NaN3, ZnBr2, iPrOH/H2O, reflux, 15 h; g) NaOH (1 M), TBAI, benzene, rt, 2 h; h) ZnCl2, CHCl3, reflux, 3 h. Scheme II. Functionalization of 2-thiocarbohydrates. These transformations have enhanced the synthetic value of 2-thiocarbohydrates for the preparative scale. 
Worth to mention is the Lewis acid catalyzed replacement of the methoxy group by other nucleophiles and the synthesis of the (2→1) thiodisaccharides, which were obtained with complete β-selectivity. Additionally, for the first time, the carbohydrate linked thiotetrazole was synthesized by a (3 + 2) cycloaddition approach at the C-2 position. III. Synthesis of thiodisaccharides by thiol-ene coupling. In the final part of studies, the synthesis of thiodisaccharides by a classical photoinduced thiol-ene coupling was successfully achieved. Reagents and conditions: 2,2-Dimethoxy-2-phenylacetophenone (DPAP), CH2Cl2/EtOH, hv, rt. Scheme III. Thiol-ene coupling between 2-thiols and exo-glycals. During the course of investigations, it was found that the steric hindrance plays an important role in the addition of bulky thiols to endo-glycals. Thus, we successfully screened the suitable substrates for addition of various thiols to sterically less hindered alkenes (Scheme III). The photochemical addition of 2-thiols to three different exo-glycals delivered excellent regio- and diastereoselectivities as well as yields, which underlines the synthetic potential of this convenient methodology.}, language = {en} } @phdthesis{Paul2017, author = {Paul, Fabian}, title = {Markov state modeling of binding and conformational changes of proteins}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404273}, school = {Universit{\"a}t Potsdam}, pages = {X, 112}, year = {2017}, abstract = {Proteins are molecules that are essential for life and carry out an enormous number of functions in organisms. To this end, they change their conformation and bind to other molecules. However, the interplay between conformational change and binding is not fully understood. In this work, this interplay is investigated with molecular dynamics (MD) simulations of the protein-peptide system Mdm2-PMI and by analysis of data from relaxation experiments. 
The central task is to uncover the binding mechanism, which is described by the sequence of (partial) binding events and conformational change events including their probabilities. In the simplest case, the binding mechanism is described by a two-step model: binding followed by conformational change or conformational change followed by binding. In the general case, longer sequences with multiple conformational changes and partial binding events are possible as well as parallel pathways that differ in their sequences of events. The theory of Markov state models (MSMs) provides the theoretical framework in which all these cases can be modeled. For this purpose, MSMs are estimated in this work from MD data, and rate equation models, which are related to MSMs, are inferred from experimental relaxation data. The MD simulation and Markov modeling of the PMI-Mdm2 system shows that PMI and Mdm2 can bind via multiple pathways. A main result of this work is a dissociation rate on the order of one event per second, which was calculated using Markov modeling and is in agreement with experiment. So far, dissociation rates and transition rates of this magnitude have only been calculated with methods that speed up transitions by acting with time-dependent, external forces on the binding partners. The simulation technique developed in this work, in contrast, allows the estimation of dissociation rates from the combination of free energy calculation and direct MD simulation of the fast binding process. Two new statistical estimators TRAM and TRAMMBAR are developed to estimate a MSM from the joint data of both simulation types. In addition, a new analysis technique for time-series data from chemical relaxation experiments is developed in this work. It allows to identify one of the above-mentioned two-step mechanisms as the mechanism that underlies the data. 
The new method is valid for a broader range of concentrations than previous methods and therefore allows to choose the concentrations such that the mechanism can be uniquely identified. It is successfully tested with data for the binding of recoverin to a rhodopsin kinase peptide.}, language = {en} } @phdthesis{Papenbrock2017, author = {Papenbrock, Thorsten}, title = {Data profiling - efficient discovery of dependencies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406705}, school = {Universit{\"a}t Potsdam}, pages = {viii, ii, 141}, year = {2017}, abstract = {Data profiling is the computer science discipline of analyzing a given dataset for its metadata. The types of metadata range from basic statistics, such as tuple counts, column aggregations, and value distributions, to much more complex structures, in particular inclusion dependencies (INDs), unique column combinations (UCCs), and functional dependencies (FDs). If present, these statistics and structures serve to efficiently store, query, change, and understand the data. Most datasets, however, do not provide their metadata explicitly so that data scientists need to profile them. While basic statistics are relatively easy to calculate, more complex structures present difficult, mostly NP-complete discovery tasks; even with good domain knowledge, it is hardly possible to detect them manually. Therefore, various profiling algorithms have been developed to automate the discovery. None of them, however, can process datasets of typical real-world size, because their resource consumptions and/or execution times exceed effective limits. In this thesis, we propose novel profiling algorithms that automatically discover the three most popular types of complex metadata, namely INDs, UCCs, and FDs, which all describe different kinds of key dependencies. The task is to extract all valid occurrences from a given relational instance. 
The three algorithms build upon known techniques from related work and complement them with algorithmic paradigms, such as divide \& conquer, hybrid search, progressivity, memory sensitivity, parallelization, and additional pruning to greatly improve upon current limitations. Our experiments show that the proposed algorithms are orders of magnitude faster than related work. They are, in particular, now able to process datasets of real-world, i.e., multi-gigabyte size with reasonable memory and time consumption. Due to the importance of data profiling in practice, industry has built various profiling tools to support data scientists in their quest for metadata. These tools provide good support for basic statistics and they are also able to validate individual dependencies, but they lack real discovery features even though some fundamental discovery techniques are known for more than 15 years. To close this gap, we developed Metanome, an extensible profiling platform that incorporates not only our own algorithms but also many further algorithms from other researchers. With Metanome, we make our research accessible to all data scientists and IT-professionals that are tasked with data profiling. Besides the actual metadata discovery, the platform also offers support for the ranking and visualization of metadata result sets. Being able to discover the entire set of syntactically valid metadata naturally introduces the subsequent task of extracting only the semantically meaningful parts. This is a challenge, because the complete metadata results are surprisingly large (sometimes larger than the datasets themselves) and judging their use case dependent semantic relevance is difficult. 
To show that the completeness of these metadata sets is extremely valuable for their usage, we finally exemplify the efficient processing and effective assessment of functional dependencies for the use case of schema normalization.}, language = {en} } @phdthesis{Paape2017, author = {Paape, Dario L. J. F.}, title = {Antecedent complexity effects on ellipsis processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411689}, school = {Universit{\"a}t Potsdam}, pages = {ix, 134}, year = {2017}, abstract = {This dissertation explores whether the processing of ellipsis is affected by changes in the complexity of the antecedent, either due to added linguistic material or to the presence of a temporary ambiguity. Murphy (1985) hypothesized that ellipsis is resolved via a string copying procedure when the antecedent is within the same sentence, and that copying longer strings takes more time. Such an account also implies that the antecedent is copied without its structure, which in turn implies that recomputing its syntax and semantics may be necessary at the ellipsis gap. Alternatively, several accounts predict null effects of antecedent complexity, as well as no reparsing. These either involve a structure copying mechanism that is cost-free and whose finishing time is thus independent of the form of the antecedent (Frazier \& Clifton, 2001), treat ellipsis as a pointer into content-addressable memory with direct access (Martin \& McElree, 2008, 2009), or assume that one structure is 'shared' between antecedent and gap (Frazier \& Clifton, 2005). In a self-paced reading study on German sluicing, temporarily ambiguous garden-path clauses were used as antecedents, but no evidence of reparsing in the form of a slowdown at the ellipsis site was found. Instead, results suggest that antecedents which had been reanalyzed from an initially incorrect structure were easier to retrieve at the gap. 
This finding can be explained within the framework of cue-based retrieval parsing (Lewis \& Vasishth, 2005), where additional syntactic operations on a structure yield memory reactivation effects. Two further self-paced reading studies on German bare argument ellipsis and English verb phrase ellipsis investigated whether adding linguistic content to the antecedent would increase processing times for the ellipsis, and whether insufficiently demanding comprehension tasks may have been responsible for earlier null results (Frazier \& Clifton, 2000; Martin \& McElree, 2008). It has also been suggested that increased antecedent complexity should shorten rather than lengthen retrieval times by providing more unique memory features (Hofmeister, 2011). Both experiments failed to yield reliable evidence that antecedent complexity affects ellipsis processing times in either direction, irrespective of task demands. Finally, two eye-tracking studies probed more deeply into the proposed reactivation-induced speedup found in the first experiment. The first study used three different kinds of French garden-path sentences as antecedents, with two of them failing to yield evidence for reactivation. Moreover, the third sentence type showed evidence suggesting that having failed to assign a structure to the antecedent leads to a slowdown at the ellipsis site, as well as regressions towards the ambiguous part of the sentence. The second eye-tracking study used the same materials as the initial self-paced reading study on German, with results showing a pattern similar to the one originally observed, with some notable differences. Overall, the experimental results are compatible with the view that adding linguistic material to the antecedent has no or very little effect on the ease with which ellipsis is resolved, which is consistent with the predictions of cost-free copying, pointer-based approaches and structure sharing. 
Additionally, effects of the antecedent's parsing history on ellipsis processing may be due to reactivation, the availability of multiple representations in memory, or complete failure to retrieve a matching target.}, language = {en} } @phdthesis{Otto2017, author = {Otto, Christopher}, title = {Numerical analysis of thermal, hydraulic and mechanical processes in the near- and far-field of underground coal gasification reactors}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404625}, school = {Universit{\"a}t Potsdam}, pages = {XII, 115}, year = {2017}, abstract = {Die Untertagevergasung von Kohle (UTV) erm{\"o}glicht die Erschließung konventionell nicht f{\"o}rderbarer Kohleressourcen und bietet dadurch Potenzial zur Erh{\"o}hung der weltweiten Kohlereserven. Bei der in-situ Kohleumwandlung entsteht ein hochkalorisches Synthesegas, das elektrifiziert oder zur Gewinnung chemischer Rohstoffe und synthetischer Kraftstoffe eingesetzt werden kann. Neben den wirtschaftlichen M{\"o}glichkeiten, bestehen jedoch auch standort-spezifische Umweltgef{\"a}hrdungspotentiale durch Subsidenz und Schadstoffmigration von UTV-R{\"u}ckst{\"a}nden in nutzbare Grundwasserleiter. Eine nachhaltige und effiziente UTV erfordert ein umfangreiches Verst{\"a}ndnis der thermisch, hydraulisch und mechanisch gekoppelten Prozesse im UTV-Reaktornahbereich. Aufgrund der hohen Investitionskosten von UTV-Pilotanlagen, sind numerische Simulationen gekoppelter Prozesse von entscheidender Bedeutung f{\"u}r die Bewertung m{\"o}glicher UTV-Umweltauswirkungen. Im Rahmen dieser Arbeit wird die UTV-induzierte Permeabilit{\"a}tsver{\"a}nderung, Erzeugung m{\"o}glicher hydraulischer Kurzschl{\"u}sse benachbarter Reaktoren und Dynamik nicht-isothermer Multiphasenfl{\"u}sse mit gekoppelten Simulationen analysiert. 
Die Simulationsergebnisse zeigen, dass eine Implementierung temperaturabh{\"a}ngiger thermo-mechanischer Gesteinsparameter nur f{\"u}r Untersuchungen von Permeabilit{\"a}ts-{\"a}nderungen im Reaktornachbereich notwendig ist. Die Ergebnisse erlauben somit eine recheneffiziente Realisierung von komplexen thermo-mechanisch gekoppelten Simulations-studien regionalskaliger Modelle mit konstanten Gesteinsparametern, bei nahezu gleichbleibender Ergebnisgenauigkeit, die zur Bewertung von UTV-Umweltgef{\"a}hrdungs-potenzialen beitragen. Simulationen zur Ausbildung hydraulischer Kurzschl{\"u}sse zwischen einzelnen UTV-Reaktoren auf regionaler Skala, verdeutlichen die Relevanz von geologischen St{\"o}rungen an einem UTV-Standort, da diese durch Reaktivierung hydraulische Verbindungen induzieren und somit einen effizienten und nachhaltigen UTV-Betrieb negativ beeintr{\"a}chtigen k{\"o}nnen. In diesem Zusammenhang kommt der Ausbildung einer Wasserdampfphase, der sogenannte „steam jacket", im Hochtemperaturnahbereich von UTV-Reaktoren, als potenzielle Barriere zur Vermeidung von UTV-Schadstoffaustritten und zur potenziellen Minimierung von Energieverlusten eine entscheidende Bedeutung zu. Diese steam jackets entstehen durch evaporiertes Formationswasser und sind komplexe nicht-isotherme Multiphasenfluss-Ph{\"a}nomene. F{\"u}r ein verbessertes Prozessverst{\"a}ndnis dieser Multiphasenfl{\"u}sse, wurde ein neuartiges Modellkonzept entwickelt, welches, validiert gegen Feldversuchsdaten, erstmals sowohl eine Quantifizierung als auch Prognose von Wasserflussraten in und aus einem UTV-Reaktor erlaubt. Die Ergebnisse der vorgelegten Doktorarbeit bilden eine wichtige Grundlage f{\"u}r eine erfolgreiche Integration gekoppelter thermo-hydro-mechanischer Simulationen in weiterf{\"u}hrende Studien. 
Vor dem Hintergrund hoher UTV-Umweltgef{\"a}hrdungspotentiale, k{\"o}nnen diese zur verbesserten Bewertung und Minderung von UTV-Umweltauswirkungen beitragen, sowie die UTV-Effizienz nachhaltig optimieren.}, language = {en} } @phdthesis{OseiTutu2017, author = {Osei Tutu, Anthony}, title = {Linking global mantle dynamics with lithosphere dynamics using the geoid, plate velocities and lithosphere stress state as constraints}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407057}, school = {Universit{\"a}t Potsdam}, pages = {108}, year = {2017}, abstract = {Lithospheric plates move over the low viscosity asthenosphere balancing several forces. The driving forces include basal shear stress exerted by mantle convection and plate boundary forces such as slab pull and ridge push, whereas the resisting forces include inter-plate friction, trench resistance, and cratonic root resistance. These generate plate motions, the lithospheric stress field and dynamic topography which are observed with different geophysical methods. The orientation and tectonic regime of the observed crustal/lithospheric stress field further contribute to our knowledge of different deformation processes occurring within the Earth's crust and lithosphere. Using numerical models previous studies were able to identify major forces generating stresses in the crust and lithosphere which also contribute to the formation of topography as well as driving lithospheric plates. They showed that the first-order stress pattern explaining about 80\,\\% of the stress field originates from a balance of forces acting at the base of the moving lithospheric plates due to convective flow in the underlying mantle. The remaining second-order stress pattern is due to lateral density variations in the crust and lithosphere in regions of pronounced topography and high gravitational potential, such as the Himalayas and mid-ocean ridges. 
By linking global lithosphere dynamics to deep mantle flow this study seeks to evaluate the influence of shallow and deep density heterogeneities on plate motions, lithospheric stress field and dynamic topography using the geoid as a major constraint for mantle rheology. We use the global 3D lithosphere-asthenosphere model SLIM3D with visco-elasto-plastic rheology coupled at 300 km depth to a spectral model of mantle flow. The complexity of the lithosphere-asthenosphere component allows for the simulation of power-law rheology with creep parameters accounting for both diffusion and dislocation creep within the uppermost 300 km. First we investigate the influence of intra-plate friction and asthenospheric viscosity on present-day plate motions. Previous modelling studies have suggested that small friction coefficients (µ < 0.1, yield stress ~ 100 MPa) can lead to plate tectonics in models of mantle convection. Here we show that, in order to match present-day plate motions and net rotation, the frictional parameter must be less than 0.05. We are able to obtain a good fit with the magnitude and orientation of observed plate velocities (NUVEL-1A) in a no-net-rotation (NNR) reference frame with µ < 0.04 and minimum asthenosphere viscosity ~ 5*10e19 Pas to 10e20 Pas. Our estimates of net rotation (NR) of the lithosphere suggest that amplitudes ~ 0.1-0.2 °/Ma, similar to most observation-based estimates, can be obtained with asthenosphere viscosity cutoff values of ~ 10e19 Pas to 5*10e19 Pas and friction coefficient µ < 0.05. The second part of the study investigates further constraints on shallow and deep mantle heterogeneities causing plate motion by predicting lithosphere stress field and topography and validating with observations. Lithosphere stresses and dynamic topography are computed using the modelling setup and rheological parameters for prescribed plate motions. 
We validate our results with the World Stress Map 2016 (WSM2016) and the observed residual topography. Here we tested a number of upper mantle thermal-density structures. The one used to calculate plate motions is considered the reference thermal-density structure. This model is derived from a heat flow model combined with a sea floor age model. In addition we used three different thermal-density structures derived from global S-wave velocity models to show the influence of lateral density heterogeneities in the upper 300 km on model predictions. A large portion of the total dynamic force generating stresses in the crust/lithosphere has its origin in the deep mantle, while topography is largely influenced by shallow heterogeneities. For example, there is hardly any difference between the stress orientation patterns predicted with and without consideration of the heterogeneities in the upper mantle density structure across North America, Australia, and North Africa. However, the crust is dominant in areas of high altitude for the stress orientation compared to the all deep mantle contribution. This study explores the sensitivity of all the considered surface observables with regards to model parameters providing insights into the influence of the asthenosphere and plate boundary rheology on plate motion as we test various thermal-density structures to predict stresses and topography.}, language = {en} } @phdthesis{Olejko2017, author = {Olejko, Lydia}, title = {F{\"o}rster resonance energy transfer (FRET)-based nanophotonics using DNA origami structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396747}, school = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {The field of nanophotonics focuses on the interaction between electromagnetic radiation and matter on the nanometer scale. 
The elements of nanoscale photonic devices can transfer excitation energy non-radiatively from an excited donor molecule to an acceptor molecule by F{\"o}rster resonance energy transfer (FRET). The efficiency of this energy transfer is highly dependent on the donor-acceptor distance. Hence, in these nanoscale photonic devices it is of high importance to have a good control over the spatial assembly of used fluorophores. Based on molecular self-assembly processes, various nanostructures can be produced. Here, DNA nanotechnology and especially the DNA origami technique are auspicious self-assembling methods. By using DNA origami nanostructures different fluorophores can be introduced with a high local control to create a variety of nanoscale photonic objects. The applications of such nanostructures range from photonic wires and logic gates for molecular computing to artificial light harvesting systems for artificial photosynthesis. In the present cumulative doctoral thesis, different FRET systems on DNA origami structures have been designed and thoroughly analyzed. Firstly, the formation of guanine (G) quadruplex structures from G rich DNA sequences has been studied based on a two-color FRET system (Fluorescein (FAM)/Cyanine3 (Cy3)). Here, the influences of different cations (Na+ and K+), of the DNA origami structure and of the DNA sequence on the G-quadruplex formation have been analyzed. In this study, an ion-selective K+ sensing scheme based on the G-quadruplex formation on DNA origami structures has been developed. Subsequently, the reversibility of the G-quadruplex formation on DNA origami structures has been evaluated. This has been done for the simple two-color FRET system which has then been advanced to a switchable photonic wire by introducing additional fluorophores (FAM/Cy3/Cyanine5 (Cy5)/IRDye®700). 
In the last part, the emission intensity of the acceptor molecule (Cy5) in a three-color FRET cascade has been tuned by arranging multiple donor (FAM) and transmitter (Cy3) molecules around the central acceptor molecule. In such artificial light harvesting systems, the excitation energy is absorbed by several donor and transmitter molecules followed by an energy transfer to the acceptor leading to a brighter Cy5 emission. Furthermore, the range of possible excitation wavelengths is extended by using several different fluorophores (FAM/Cy3/Cy5). In this part of the thesis, the light harvesting efficiency (antenna effect) and the FRET efficiency of different donor/transmitter/acceptor assemblies have been analyzed and the artificial light harvesting complex has been optimized in this respect.}, language = {en} } @phdthesis{Nitezki2017, author = {Nitezki, Tina}, title = {Charakterisierung von Stereotypien bei der FVB/NJ-Maus hinsichtlich metabolischer und immunologischer Aspekte auf die Stoffwechselleistung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402265}, school = {Universit{\"a}t Potsdam}, pages = {149}, year = {2017}, abstract = {Im Sinne des Refinements von Tierversuchen sollen alle Bedingungen w{\"a}hrend der Zucht, der Haltung und des Transports von zu Versuchszwecken gehaltenen Tieren und alle Methoden w{\"a}hrend des Versuchs so verbessert werden, dass die verwendeten Tiere ein minimales Maß an potentiellem Distress, Schmerzen oder Leiden erfahren. Zudem soll ihr Wohlbefinden durch die M{\"o}glichkeit des Auslebens speziesspezifischer Verhaltensweisen und die Anwendung tierschonender Verfahren maximal gef{\"o}rdert werden. Zur Etablierung von Grunds{\"a}tzen des Refinements sind grundlegende Kenntnisse {\"u}ber die physiologischen Bed{\"u}rfnisse und Verhaltensanspr{\"u}che der jeweiligen Spezies unabdingbar. 
Die Experimentatoren sollten das Normalverhalten der Tiere kennen, um potentielle Verhaltensabweichungen, wie Stereotypien, zu verstehen und interpretieren zu k{\"o}nnen. Standardisierte Haltungsbedingungen von zu Versuchszwecken gehaltenen M{\"a}usen weichen in diversen Aspekten von der nat{\"u}rlichen Umgebung ab und erfordern eine gewisse Adaptation. Ist ein Tier {\"u}ber einen l{\"a}ngeren Zeitraum unf{\"a}hig, sich an die gegebenen Umst{\"a}nde anzupassen, k{\"o}nnen abnormale Verhaltensweisen, wie Stereotypien auftreten. Stereotypien werden definiert als Abweichungen vom Normalverhalten, die repetitiv und ohne Abweichungen im Ablauf ausgef{\"u}hrt werden, scheinbar keiner Funktion dienen und der konkreten Umweltsituation nicht immer entsprechen. Bisher war unklar, in welchem Ausmaß stereotypes Verhalten den metabolischen Ph{\"a}notyp eines Individuums beeinflusst. Ziel dieser Arbeit war es daher, das stereotype Verhalten der FVB/NJ-Maus erstmals detailliert zu charakterisieren, systematisch zusammenzutragen, welche metabolischen Konsequenzen dieses Verhalten bedingt und wie sich diese auf das Wohlbefinden der Tiere und die Verwendung stereotyper Tiere in Studien mit tierexperimentellem Schwerpunkt auswirken. Der Versuch begann mit der Charakterisierung der m{\"u}tterlichen F{\"u}rsorge in der Parentalgeneration. Insgesamt wurden 35 Jungtiere der F1-Generation vom Absatz an, {\"u}ber einen Zeitraum von 11 Wochen einzeln gehalten, kontinuierlich beobachtet, bis zum Versuchsende w{\"o}chentlich Kotproben gesammelt und das K{\"o}rpergewicht bestimmt. Zus{\"a}tzlich erfolgten begleitende Untersuchungen wie Verhaltenstests und die Erfassung der physischen Aktivit{\"a}t und metabolischer Parameter. Anschließend wurden u.a. die zerebralen Serotonin- und Dopamingehalte, f{\"a}kale Glucocorticoidlevels, hepatisches Glykogen und muskul{\"a}re Glykogen- und Triglyceridlevels bestimmt. 
Nahezu unabh{\"a}ngig von der m{\"u}tterlichen Herkunft entwickelte sich bei mehr als der H{\"a}lfte der 35 Jungtiere in der F1-Generation stereotypes Verhalten. Diese Daten deuten darauf hin, dass es keine Anzeichen f{\"u}r das Erlernen oder eine direkte genetische Transmission stereotypen Verhaltens bei der FVB/NJ-Maus gibt. {\"U}ber den gesamten Beobachtungszeitraum zeichneten sich die stereotypen FVB/NJ-M{\"a}use durch ein eingeschr{\"a}nktes Verhaltensrepertoire aus. Zu Gunsten der erh{\"o}hten Aktivit{\"a}t und des Aus{\"u}bens stereotypen Verhaltens lebten sie insgesamt weniger andere Verhaltensweisen (Klettern, Graben, Nagen) aus. Dar{\"u}ber hinaus waren Stereotypien sowohl im 24-Stunden Open Field Test als auch in der Messeinrichtung der indirekten Tierkalorimetrie mit einer erh{\"o}hten Aktivit{\"a}t und Motilit{\"a}t assoziiert, w{\"a}hrend die circadiane Rhythmik nicht divergierte. Diese erh{\"o}hte k{\"o}rperliche Bet{\"a}tigung spiegelte sich in den niedrigeren K{\"o}rpergewichtsentwicklungen der stereotypen Tiere wieder. Außerdem unterschieden sich die K{\"o}rperfett- und K{\"o}rpermuskelanteile. Zusammenfassend l{\"a}sst sich sagen, dass das Aus{\"u}ben stereotypen Verhaltens zu Differenzen im metabolischen Ph{\"a}notyp nicht-stereotyper und stereotyper FVB/NJ-M{\"a}use f{\"u}hrt. Im Sinne der „Guten Wissenschaftlichen Praxis" sollte das zentrale Ziel jedes Wissenschaftlers sein, aussagekr{\"a}ftige und reproduzierbare Daten hervorzubringen. Jedoch k{\"o}nnen keine validen Resultate von Tieren erzeugt werden, die in Aspekten variieren, die f{\"u}r den vorgesehenen Zweck der Studie nicht ber{\"u}cksichtigt wurden. Deshalb sollten nicht-stereotype und stereotype Individuen nicht innerhalb einer Versuchsgruppe randomisiert werden. Stereotype Tiere demzufolge von geplanten Studien auszuschließen, w{\"u}rde allerdings dem Gebot des zweiten R's - der Reduction - widersprechen. 
Um Refinement zu garantieren, sollte der Fokus auf der maximal erreichbaren Pr{\"a}vention stereotypen Verhaltens liegen. Diverse Studien haben bereits gezeigt, dass die Anreicherung der Haltungsumwelt (environmental enrichment) zu einer Senkung der Pr{\"a}valenz von Stereotypien bei M{\"a}usen f{\"u}hrt, dennoch kommen sie weiterhin vor. Daher sollte environmental enrichment zuk{\"u}nftig weniger ein „Kann", sondern ein „Muss" sein - oder vielmehr: der Goldstandard. Zudem w{\"u}rde eine profunde ph{\"a}notypische Charakterisierung dazu beitragen, Mausst{\"a}mme zu erkennen, die zu Stereotypien neigen und den f{\"u}r den spezifischen Zweck am besten geeigneten Mausstamm zu identifizieren, bevor ein Experiment geplant wird.}, language = {de} } @phdthesis{Mueller2017, author = {M{\"u}ller, Juliane}, title = {Trunk loading and back pain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102428}, school = {Universit{\"a}t Potsdam}, pages = {ix, 123}, year = {2017}, abstract = {An essential function of the trunk is the compensation of external forces and loads in order to guarantee stability. Stabilising the trunk during sudden, repetitive loading in everyday tasks, as well as during performance is important in order to protect against injury. Hence, reduced trunk stability is accepted as a risk factor for the development of back pain (BP). An altered activity pattern including extended response and activation times as well as increased co-contraction of the trunk muscles as well as a reduced range of motion and increased movement variability of the trunk are evident in back pain patients (BPP). These differences to healthy controls (H) have been evaluated primarily in quasi-static test situations involving isolated loading directly to the trunk. Nevertheless, transferability to everyday, dynamic situations is under debate. 
Therefore, the aim of this project is to analyse 3-dimensional motion and neuromuscular reflex activity of the trunk as response to dynamic trunk loading in healthy (H) and back pain patients (BPP). A measurement tool was developed to assess trunk stability, consisting of dynamic test situations. During these tests, loading of the trunk is generated by the upper and lower limbs with and without additional perturbation. Therefore, lifting of objects and stumbling while walking are adequate representatives. With the help of a 12-lead EMG, neuromuscular activity of the muscles encompassing the trunk was assessed. In addition, three-dimensional trunk motion was analysed using a newly developed multi-segmental trunk model. The set-up was checked for reproducibility as well as validity. Afterwards, the defined measurement set-up was applied to assess trunk stability in comparisons of healthy and back pain patients. Clinically acceptable to excellent reliability could be shown for the methods (EMG/kinematics) used in the test situations. No changes in trunk motion pattern could be observed in healthy adults during continuous loading (lifting of objects) of different weights. In contrast, sudden loading of the trunk through perturbations to the lower limbs during walking led to an increased neuromuscular activity and ROM of the trunk. Moreover, BPP showed a delayed muscle response time and extended duration until maximum neuromuscular activity in response to sudden walking perturbations compared to healthy controls. In addition, a reduced lateral flexion of the trunk during perturbation could be shown in BPP. It is concluded that perturbed gait seems suitable to provoke higher demands on trunk stability in adults. The altered neuromuscular and kinematic compensation pattern in back pain patients (BPP) can be interpreted as increased spine loading and reduced trunk stability in patients. Therefore, this novel assessment of trunk stability is suitable to identify deficits in BPP. 
Assignment of affected BPP to therapy interventions with focus on stabilisation of the trunk aiming to improve neuromuscular control in dynamic situations is implied. Hence, sensorimotor training (SMT) to enhance trunk stability and compensation of unexpected sudden loading should be preferred.}, language = {en} } @phdthesis{Mueller2017a, author = {M{\"u}ller, Eduard Rudolf}, title = {Architektur und Kunst im lyrischen Werk Johannes Bobrowskis}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-461-6}, doi = {10.25932/publishup-42711}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427113}, school = {Universit{\"a}t Potsdam}, pages = {538}, year = {2017}, abstract = {Bobrowski had expressed the intention to study art history after graduation, but war and captivity thwarted his plans: As a member of the German Armed Forces, he was only released from military service for a semester in winter 1941/1942. Bobrowski was particularly impressed by the lectures on German Art in the Age of Goethe by the departmental chair Wilhelm Pinder. However, despite this fundamental influence Pinder's ideological background never became manifest in Bobrowski's poems. After returning from Soviet captivity during Christmas 1949, university studies were out of the question for the thirty-two-year-old. However, his lifelong intermedial engagement with fine art in his work can be interpreted as an expression of his diverse cultural and historical interests and inclinations. The poet's life phases correlate with the thematic development of his poems on visual art: The inviolable aesthetics of significant works of art helped him to overcome the horror of the last years of the war and the privations of Soviet captivity. 
Didactic moral aims initially shaped the poems Bobrowski created in the years after his return home before he was able to distance himself in terms of content and form from this type of poetry and began to write poems that take up cultural-historical aspects and juxtapose historical, mythological, biblical and religious-philosophical themes spanning epochs. His poems about the artists Jawlensky and Calder also touch simultaneously on aspects of the cultural landscape. In the last decade of his life, Bobrowski became increasingly interested in twentieth-century art, while modern architecture was absent from his work. Bobrowski devoted himself in an entire series of poems to Classicist and Romanticist painting and thus to works that were written during the Age of Goethe and about which Wilhelm Pinder may have given lectures during his "German Art in the Age of Goethe" course attended by Bobrowski. Architecture is a leitmotif in Bobrowski's lyrical works. The significance conveyed of the particular sacred and profane buildings referred to in the poems as well as the urban and village ensembles and individual parts of buildings changes several times over the years. Starting from traditional, juxtaposed juvenile poems in iambic versification, in which architectural elements form part of an awareness that fades out everything outside of the aesthetic, the significance of the sacred and secular buildings in Bobrowski's lyrical works changes for the first time during the years he spent in Russia during the war as part of the German military. In the odes Bobrowski wrote at the time, the architectural relics testify to suffering, death and destruction. What is still absent, however, is the central idea of guilt, which later becomes the focus of poems he writes after his return from captivity until his early death. 
Towards the end of the war and during his years of captivity, Bobrowski reflects on the theme of his homeland again, and the architecture in his poems becomes an aesthetically charged projection for his yearning for East Prussia and the Memel area. The aspect of the sublime first appears in his poems, both in relation to painting and architecture, during his captivity. This idea is developed on the one hand after his return to Berlin in his poems on the architecture of Gothic cathedrals and the architectural heritage of Classicism, but the cultural heritage of Europe also represents historical injustice and a heavy, far-reaching guilt in the poems written during this period. Bobrowski turns away from his criticism of the entire continent of Europe in later years and in his "Sarmatic Divan" concentrates on the guilt Germans have towards the peoples of Eastern Europe. This also lends the architecture in his poems a new meaning. The relics of the castles of the Teutonic Order testify to the rule of medieval conquerors and merge with nature: The symbolism of the architecture becomes part of the landscape. In the last decade of his life, he increasingly writes poems related to parks and urban green spaces. The city, "filled with meaning", moves to the centre of his poetry. However he does not deal with the technical achievements and social phenomena of urban life in these poems but with urban structures and especially the green and open spaces as symbols of history. The poet relies not only on personal experiences, but sometimes also on image sources without ever having seen the original. The poems about Chagall and Gauguin are hardly accessible without the knowledge that they refer to image reproductions in narrow, popular books that Bobrowski acquired shortly before writing the respective poems. The situation is different with the Russian churches that find their way into his lyrical works. 
Bobrowski had seen them all during the war, and most of them still appear to exist today and can be identified with some certainty, in part with the help of the poet's letters from that period.}, language = {de} } @phdthesis{Muehlenhoff2017, author = {M{\"u}hlenhoff, Judith}, title = {Culture-driven innovation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-104626}, school = {Universit{\"a}t Potsdam}, pages = {143}, year = {2017}, abstract = {This cumulative dissertation deals with the potential of underexplored cultural sources for innovation. Nowadays, firms recognize an increasing demand for innovation to keep pace with an ever-growing dynamic worldwide competition. Knowledge is one of the most crucial sources and resources, while until now innovation has been foremost driven by technology. But since the last years, we have been witnessing a change from technology's role as a driver of innovation to an enabler of innovation. Innovative products and services increasingly differentiate through emotional qualities and user experience. These experiences are hard to grasp and require alignment in innovation management theory and practice. This work cares about culture in a broader sense as a source for innovation. It investigates the requirements and fundamentals for "culture-driven innovation" by studying where and how to unlock cultural sources. The research questions are the following: What are cultural sources for knowledge and innovation? Where can one find cultural sources and how to tap into them? The dissertation starts with an overview of its central terms and introduces cultural theories as an overarching frame to study cultural sources for innovation systematically. Here, knowledge is not understood as something an organization owns like a material resource, but it is seen as something created and taking place in practices. 
Such a practice-theoretical lens entails the rejection of the traditional economic depiction of the rational Homo Oeconomicus. Nevertheless, it also rejects the idea of the Homo Sociologicus about the strong impact of society and its values on individual actions. Practice theory approaches take account of both concepts by underscoring the dualism of individual (agency, micro-level) and structure (society, macro-level). Following this, organizations are not enclosed entities but embedded within their socio-cultural environment, which shapes them and is also shaped by them. Then, the first article of this dissertation acknowledges a methodological stance of this dualism by discussing how mixed methods support an integrated approach to study the micro- and macro-level. The article focuses on networks (thus communities) as a central research unit within studies of entrepreneurship and innovation. The second article contains a network analysis and depicts communities as central loci for cultural sources and knowledge. With data from the platform Meetup.com about events etc., the study explores which overarching communities and themes have evolved in Berlin's start-up and tech scene. While the latter study was about where to find new cultural sources, the last article addresses how to unlock such knowledge sources. It develops the concept of a cultural absorptive capacity, that is the capability of organizations to open up towards cultural sources. Furthermore, the article points to the role of knowledge intermediaries in the early phases of knowledge acquisition. Two case studies on companies working with artists illustrate the roles of such intermediaries and how they support firms to gain knowledge from cultural sources. Overall, this dissertation contributes to a better understanding of culture as a source for innovation from a theoretical, methodological, and practitioners' point of view. 
It provides basic research to unlock the potential of such new knowledge sources for companies - sources that so far have been neglected in innovation management.}, language = {en} } @phdthesis{Muecke2017, author = {M{\"u}cke, Nicole}, title = {Direct and inverse problems in machine learning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403479}, school = {Universit{\"a}t Potsdam}, pages = {159}, year = {2017}, abstract = {We analyze an inverse noisy regression model under random design with the aim of estimating the unknown target function based on a given set of data, drawn according to some unknown probability distribution. Our estimators are all constructed by kernel methods, which depend on a Reproducing Kernel Hilbert Space structure using spectral regularization methods. A first main result establishes upper and lower bounds for the rate of convergence under a given source condition assumption, restricting the class of admissible distributions. But since kernel methods scale poorly when massive datasets are involved, we study one example for saving computation time and memory requirements in more detail. We show that parallelizing spectral algorithms also leads to minimax optimal rates of convergence provided the number of machines is chosen appropriately. We emphasize that so far all estimators depend on the assumed a-priori smoothness of the target function and on the eigenvalue decay of the kernel covariance operator, which are in general unknown. 
To obtain good purely data driven estimators constitutes the problem of adaptivity which we handle for the single machine problem via a version of the Lepskii principle.}, language = {en} } @phdthesis{Muzdalo2017, author = {Muzdalo, Anja}, title = {Thermal cis-trans isomerization of azobenzene studied by path sampling and QM/MM stochastic dynamics}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405814}, school = {Universit{\"a}t Potsdam}, pages = {144}, year = {2017}, abstract = {Azobenzene-based molecular photoswitches have extensively been applied to biological systems, involving photo-control of peptides, lipids and nucleic acids. The isomerization between the stable trans and the metastable cis state of the azo moieties leads to pronounced changes in shape and other physico-chemical properties of the molecules into which they are incorporated. Fast switching can be induced via transitions to excited electronic states and fine-tuned by a large number of different substituents at the phenyl rings. But a rational design of tailor-made azo groups also requires control of their stability in the dark, the half-lifetime of the cis isomer. In computational chemistry, thermally activated barrier crossing on the ground state Born-Oppenheimer surface can efficiently be estimated with Eyring's transition state theory (TST) approach; the growing complexity of the azo moiety and a rather heterogeneous environment, however, may render some of the underlying simplifying assumptions problematic. In this dissertation, a computational approach is established to remove two restrictions at once: the environment is modeled explicitly by employing a quantum mechanical/molecular mechanics (QM/MM) description; and the isomerization process is tracked by analyzing complete dynamical pathways between stable states. 
The suitability of this description is validated by using two test systems, pure azobenzene and a derivative with electron-donating and electron-withdrawing substituents ("push-pull" azobenzene). Each system is studied in the gas phase, in toluene and in polar DMSO solvent. The azo molecules are treated at the QM level using a very recent, semi-empirical approximation to density functional theory (density functional tight binding approximation). Reactive pathways are sampled by implementing a version of the so-called transition path sampling method (TPS), without introducing any bias into the system dynamics. By analyzing ensembles of reactive trajectories, the change in isomerization pathway from linear inversion to rotation in going from apolar to polar solvent, predicted by the TST approach, could be verified for the push-pull derivative. At the same time, the mere presence of explicit solvation is seen to broaden the distribution of isomerization pathways, an effect TST cannot account for. Using likelihood maximization based on the TPS shooting history, an improved reaction coordinate was identified as a sine-cosine combination of the central bend angles and the rotation dihedral, r (ω,α,α′). The computational van't Hoff analysis for the activation entropies was performed to gain further insight into the differential role of solvent for the case of the unsubstituted and the push-pull azobenzene. In agreement with the experiment, it yielded positive activation entropies for azobenzene in the DMSO solvent while negative for the push-pull derivative, reflecting the induced ordering of solvent around the more dipolar transition state associated with the latter compound. 
Also, the dynamically corrected rate constants were evaluated using the reactive flux approach where an increase comparable to the experimental one was observed for a high polarity medium for both azobenzene derivatives.}, language = {en} } @phdthesis{Muriu2017, author = {Muriu, Abraham Rugo}, title = {Performance management in Kenya's public service}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403281}, school = {Universit{\"a}t Potsdam}, pages = {x, 150}, year = {2017}, abstract = {This study was inspired by the desire to contribute to literature on performance management from the context of a developing country. The guiding research questions were: How do managers use performance information in decision making? Why do managers use performance information the way they do? The study was based on theoretical strands of neo-patrimonialism and new institutionalism. The nature of the inquiry informed the choice of a qualitative case study research design. Data was assembled through face-to-face interviews, some observations, and collection of documents from managers at the levels of the directorate, division, and section/units. The managers who were the focus of this study are current or former staff members of the state departments in Kenya's national Ministry of Agriculture, Livestock, and Fisheries as well as from departments responsible for coordination of performance related reforms. The findings of this study show that performance information is regularly produced but its use by managers varies. Examples of use include preparing reports to external bodies, making decisions for resource re-allocation, making recommendations for rewards and sanctions, and policy advisory. On categorizing the forms of use as passive, purposeful, political or perverse, evidence shows that they overlap and that some of the forms are so closely related that it is difficult to separate them empirically. 
On what can explain the forms of use established, four factors namely; political will and leadership; organizational capacity; administrative culture; and managers' interests and attitudes, were investigated. While acknowledging the interrelatedness and even overlapping of the factors, the study demonstrates that there is explanatory power to each though with varying depth and scope. The study thus concludes that: Inconsistent political will and leadership for performance management reforms explain forms of use that are passive, political and perverse. Low organizational capacity could best explain passive and some limited aspects of purposeful use. Informal, personal and competitive administrative culture is associated with purposeful use and mostly with political and perverse use. Limited interest and apprehensive attitude are best associated with passive use. The study contributes to the literature particularly in how institutions in a context of neo-patrimonialism shape performance information use. It recommends that further research is necessary to establish how neo-patrimonialism positively affects performance oriented reforms. This is interesting in particular given the emerging thinking on pockets of effectiveness and developmental patrimonialism. This is important since it is expected that performance related reforms will continue to be advocated in developing countries in the foreseeable future.}, language = {en} } @phdthesis{Murawski2017, author = {Murawski, Aline}, title = {Trends in precipitation over Germany and the Rhine basin related to changes in weather patterns}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412725}, school = {Universit{\"a}t Potsdam}, pages = {112}, year = {2017}, abstract = {Niederschlag als eine der wichtigsten meteorologischen Gr{\"o}ßen f{\"u}r Landwirtschaft, Wasserversorgung und menschliches Wohlbefinden hat schon immer erh{\"o}hte Aufmerksamkeit erfahren. 
Niederschlagsmangel kann verheerende Auswirkungen haben, wie z.B. Missernten und Wasserknappheit. {\"U}berm{\"a}ßige Niederschl{\"a}ge andererseits bergen jedoch ebenfalls Gefahren in Form von Hochwasser oder Sturzfluten und wiederum Missernten. Daher wurde viel Arbeit in die Detektion von Niederschlags{\"a}nderungen und deren zugrundeliegende Prozesse gesteckt. Insbesondere angesichts von Klimawandel und unter Ber{\"u}cksichtigung des Zusammenhangs zwischen Temperatur und atmosph{\"a}rischer Wasserhaltekapazit{\"a}t, ist großer Bedarf an Forschung zum Verst{\"a}ndnis der Auswirkungen von Klimawandel auf Niederschlags{\"a}nderungen gegeben. Die vorliegende Arbeit hat das Ziel, vergangene Ver{\"a}nderungen in Niederschlag und anderen meteorologischen Variablen zu verstehen. F{\"u}r verschiedene Zeitr{\"a}ume wurden Tendenzen gefunden und mit entsprechenden Ver{\"a}nderungen in der großskaligen atmosph{\"a}rischen Zirkulation in Zusammenhang gebracht. Die Ergebnisse dieser Arbeit k{\"o}nnen als Grundlage f{\"u}r die Attributierung von Hochwasserver{\"a}nderungen zu Klimawandel genutzt werden. Die Annahmen f{\"u}r die Maßstabsverkleinerung („Downscaling") der Daten von großskaligen Zirkulationsmodellen auf die lokale Skala wurden hier getestet und verifiziert. In einem ersten Schritt wurden Niederschlagsver{\"a}nderungen in Deutschland analysiert. Dabei lag der Fokus nicht nur auf Niederschlagssummen, sondern auch auf Eigenschaften der statistischen Verteilung, {\"U}bergangswahrscheinlichkeiten als Maß f{\"u}r Trocken- und Niederschlagsperioden und Extremniederschlagsereignissen. Den r{\"a}umlichen Fokus auf das Rheineinzugsgebiet, das gr{\"o}ßte Flusseinzugsgebiet Deutschlands und einer der Hauptwasserwege Europas, verlagernd, wurden nachgewiesene Ver{\"a}nderungen in Niederschlag und anderen meteorologischen Gr{\"o}ßen in Bezug zu einer „optimierten" Wetterlagenklassifikation analysiert. 
Die Wetterlagenklassifikation wurde unter der Maßgabe entwickelt, die Varianz des lokalen Klimas bestm{\"o}glich zu erkl{\"a}ren. Die letzte hier behandelte Frage dreht sich darum, ob die beobachteten Ver{\"a}nderungen im lokalen Klima eher H{\"a}ufigkeits{\"a}nderungen der Wetterlagen zuzuordnen sind oder einer Ver{\"a}nderung der Wetterlagen selbst. Eine gebr{\"a}uchliche Annahme f{\"u}r einen Downscaling-Ansatz mit Hilfe von Wetterlagen und einem stochastischen Wettergenerator ist, dass Klimawandel sich allein durch eine Ver{\"a}nderung der H{\"a}ufigkeit von Wetterlagen ausdr{\"u}ckt, die Eigenschaften der Wetterlagen dabei jedoch konstant bleiben. Diese Annahme wurde {\"u}berpr{\"u}ft und die F{\"a}higkeit der neuesten Generation von Zirkulationsmodellen, diese Wetterlagen zu reproduzieren, getestet. Niederschlagsver{\"a}nderungen in Deutschland im Zeitraum 1951-2006 lassen sich zusammenfassen als negativ im Sommer und positiv in allen anderen Jahreszeiten. Verschiedene Niederschlagscharakteristika best{\"a}tigen die Tendenz in den Niederschlagssummen: w{\"a}hrend mittlere und extreme Niederschlagstageswerte im Winter zugenommen haben, sind auch zusammenh{\"a}ngende Niederschlagsperioden l{\"a}nger geworden (ausgedr{\"u}ckt als eine gestiegene Wahrscheinlichkeit f{\"u}r einen Tag mit Niederschlag gefolgt von einem weiteren nassen Tag). Im Sommer wurde das Gegenteil beobachtet: gesunkene Niederschlagssummen, untermauert von verringerten Mittel- und Extremwerten und l{\"a}ngeren Trockenperioden. Abseits dieser allgemeinen Zusammenfassung f{\"u}r das gesamte Gebiet Deutschlands, ist die r{\"a}umliche Verteilung von Niederschlagsver{\"a}nderungen deutlich heterogener. Vermehrter Niederschlag im Winter wurde haupts{\"a}chlich im Nordwesten und S{\"u}dosten Deutschlands beobachtet, w{\"a}hrend im Fr{\"u}hling die st{\"a}rksten Ver{\"a}nderungen im Westen und im Herbst im S{\"u}den aufgetreten sind. 
Das saisonale Bild wiederum l{\"o}st sich f{\"u}r die zugeh{\"o}rigen Monate auf, z.B. setzt sich der Anstieg im Herbstniederschlag aus deutlich vermehrtem Niederschlag im S{\"u}dwesten im Oktober und im S{\"u}dosten im November zusammen. Diese Ergebnisse betonen die starken r{\"a}umlichen Zusammenh{\"a}nge der Niederschlags{\"a}nderungen. Der n{\"a}chste Schritt hinsichtlich einer Zuordnung von Niederschlagsver{\"a}nderungen zu {\"A}nderungen in großskaligen Zirkulationsmustern, war die Ableitung einer Wetterlagenklassifikation, die die betrachteten lokalen Klimavariablen hinreichend stratifizieren kann. Fokussierend auf Temperatur, Globalstrahlung und Luftfeuchte zus{\"a}tzlich zu Niederschlag, wurde eine Klassifikation basierend auf Luftdruck, Temperatur und spezifischer Luftfeuchtigkeit als am besten geeignet erachtet, die Varianz der lokalen Variablen zu erkl{\"a}ren. Eine vergleichsweise hohe Anzahl von 40 Wetterlagen wurde ausgew{\"a}hlt, die es erlaubt, typische Druckmuster durch die zus{\"a}tzlich verwendete Temperaturinformation einzelnen Jahreszeiten zuzuordnen. W{\"a}hrend die F{\"a}higkeit, Varianz im Niederschlag zu erkl{\"a}ren, relativ gering ist, ist diese deutlich besser f{\"u}r Globalstrahlung und nat{\"u}rlich Temperatur. Die meisten der aktuellen Zirkulationsmodelle des CMIP5-Ensembles sind in der Lage, die Wetterlagen hinsichtlich H{\"a}ufigkeit, Saisonalit{\"a}t und Persistenz hinreichend gut zu reproduzieren. Schließlich wurden die Wetterlagen bez{\"u}glich Ver{\"a}nderungen in ihrer H{\"a}ufigkeit, Saisonalit{\"a}t und Persistenz, sowie der Wetterlagen-spezifischen Niederschl{\"a}ge und Temperatur, untersucht. Um Unsicherheiten durch die Wahl eines bestimmten Analysezeitraums auszuschließen, wurden alle m{\"o}glichen Zeitr{\"a}ume mit mindestens 31 Jahren im Zeitraum 1901-2010 untersucht. Dadurch konnte die Annahme eines konstanten Zusammenhangs zwischen Wetterlagen und lokalem Wetter gr{\"u}ndlich {\"u}berpr{\"u}ft werden. 
Es wurde herausgefunden, dass diese Annahme nur zum Teil haltbar ist. W{\"a}hrend Ver{\"a}nderungen in der Temperatur haupts{\"a}chlich auf Ver{\"a}nderungen in der Wetterlagenh{\"a}ufigkeit zur{\"u}ckzuf{\"u}hren sind, wurde f{\"u}r Niederschlag ein erheblicher Teil von Ver{\"a}nderungen innerhalb einzelner Wetterlagen gefunden. Das Ausmaß und sogar das Vorzeichen der Ver{\"a}nderungen h{\"a}ngt hochgradig vom untersuchten Zeitraum ab. Die H{\"a}ufigkeit einiger Wetterlagen steht in direkter Beziehung zur langfristigen Variabilit{\"a}t großskaliger Zirkulationsmuster. Niederschlagsver{\"a}nderungen variieren nicht nur r{\"a}umlich, sondern auch zeitlich - Aussagen {\"u}ber Tendenzen sind nur in Bezug zum jeweils untersuchten Zeitraum g{\"u}ltig. W{\"a}hrend ein Teil der Ver{\"a}nderungen auf {\"A}nderungen der großskaligen Zirkulation zur{\"u}ckzuf{\"u}hren ist, gibt es auch deutliche Ver{\"a}nderungen innerhalb einzelner Wetterlagen. Die Ergebnisse betonen die Notwendigkeit f{\"u}r einen sorgf{\"a}ltigen Nachweis von Ver{\"a}nderungen m{\"o}glichst verschiedene Zeitr{\"a}ume zu untersuchen und mahnen zur Vorsicht bei der Anwendung von Downscaling-Ans{\"a}tzen mit Hilfe von Wetterlagen, da diese die Auswirkungen von Klimaver{\"a}nderungen durch das Vernachl{\"a}ssigen von Wetterlagen-internen Ver{\"a}nderungen falsch einsch{\"a}tzen k{\"o}nnten.}, language = {en} } @phdthesis{Munz2017, author = {Munz, Matthias}, title = {Water flow and heat transport modelling at the interface between river and aquifer}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404319}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 123}, year = {2017}, abstract = {The functioning of the surface water-groundwater interface as buffer, filter and reactive zone is important for water quality, ecological health and resilience of streams and riparian ecosystems. Solute and heat exchange across this interface is driven by the advection of water. 
Characterizing the flow conditions in the streambed is challenging as flow patterns are often complex and multidimensional, driven by surface hydraulic gradients and groundwater discharge. This thesis presents the results of an integrated approach of studies, ranging from the acquisition of field data, the development of analytical and numerical approaches to analyse vertical temperature profiles to the detailed, fully-integrated 3D numerical modelling of water and heat flux at the reach scale. All techniques were applied in order to characterize exchange flux between stream and groundwater, hyporheic flow paths and temperature patterns. The study was conducted at a reach-scale section of the lowland Selke River, characterized by distinctive pool riffle sequences and fluvial islands and gravel bars. Continuous time series of hydraulic heads and temperatures were measured at different depths in the river bank, the hyporheic zone and within the river. The analyses of the measured diurnal temperature variation in riverbed sediments provided detailed information about the exchange flux between river and groundwater. Beyond the one-dimensional vertical water flow in the riverbed sediment, hyporheic and parafluvial flow patterns were identified. Subsurface flow direction and magnitude around fluvial islands and gravel bars at the study site strongly depended on the position around the geomorphological structures and on the river stage. Horizontal water flux in the streambed substantially impacted temperature patterns in the streambed. At locations with substantial horizontal fluxes the penetration depths of daily temperature fluctuations was reduced in comparison to purely vertical exchange conditions. The calibrated and validated 3D fully-integrated model of reach-scale water and heat fluxes across the river-groundwater interface was able to accurately represent the real system. 
The magnitude and variations of the simulated temperatures matched the observed ones, with an average mean absolute error of 0.7 °C and an average Nash Sutcliffe Efficiency of 0.87. The simulation results showed that the water and heat exchange at the surface water-groundwater interface is highly variable in space and time with zones of daily temperature oscillations penetrating deep into the sediment and spots of daily constant temperature following the average groundwater temperature. The average hyporheic flow path temperature was found to strongly correlate with the flow path residence time (flow path length) and the temperature gradient between river and groundwater. Despite the complexity of these processes, the simulation results allowed the derivation of a general empirical relationship between the hyporheic residence times and temperature patterns. The presented results improve our understanding of the complex spatial and temporal dynamics of water flux and thermal processes within the shallow streambed. Understanding these links provides a general basis from which to assess hyporheic temperature conditions in river reaches.}, language = {en} } @phdthesis{Muldashev2017, author = {Muldashev, Iskander}, title = {Modeling of the great earthquake seismic cycles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398926}, school = {Universit{\"a}t Potsdam}, pages = {xii, 117}, year = {2017}, abstract = {The timing and location of the two largest earthquakes of the 21st century (Sumatra, 2004 and Tohoku 2011, events) greatly surprised the scientific community, indicating that the deformation processes that precede and follow great megathrust earthquakes remain enigmatic. 
During these phases before and after the earthquake a combination of multi-scale complex processes are acting simultaneously: Stresses built up by long-term tectonic motions are modified by sudden jerky deformations during earthquakes, before being restored by multiple ensuing relaxation processes. This thesis details a cross-scale thermomechanical model developed with the aim of simulating the entire subduction process from earthquake (1 minute) to million years' time scale, excluding only rupture propagation. The model employs elasticity, non-linear transient viscous rheology, and rate-and-state friction. It generates spontaneous earthquake sequences, and, by using an adaptive time-step algorithm, recreates the deformation process as observed naturally over single and multiple seismic cycles. The model is thoroughly tested by comparing results to those from known high-resolution solutions of generic modeling setups widely used in modeling of rupture propagation. It is demonstrated that, while not modeling rupture propagation explicitly, the modeling procedure correctly recognizes the appearance of instability (earthquake) and correctly simulates the cumulative slip at a fault during a great earthquake by means of a quasi-dynamic approximation. A set of 2D models is used to study the effects of non-linear transient rheology on the postseismic processes following great earthquakes. Our models predict that the viscosity in the mantle wedge drops by 3 to 4 orders of magnitude during a great earthquake with magnitude above 9. This drop in viscosity results in spatial scales and timings of the relaxation processes following the earthquakes that are significantly different to previous estimates. These models replicate centuries-long seismic cycles exhibited by the greatest earthquakes (like the Great Chile 1960 Earthquake) and are consistent with the major features of postseismic surface displacements recorded after the Great Tohoku Earthquake. 
The 2D models are also applied to study key factors controlling maximum magnitudes of earthquakes in subduction zones. Even though methods of instrumentally observing earthquakes at subduction zones have rapidly improved in recent decades, the characteristic recurrence interval of giant earthquakes (Mw>8.5) is much larger than the currently available observational record and therefore the necessary conditions for giant earthquakes are not clear. Statistical studies have recognized the importance of the slab shape and its surface roughness, state of the strain of the upper plate and thickness of sediments filling the trenches. In this thesis we attempt to explain these observations and to identify key controlling parameters. We test a set of 2D models representing great earthquake seismic cycles at known subduction zones with various known geometries, megathrust friction coefficients, and convergence rates implemented. We found that low-angle subduction (large effect) and thick sediments in the subduction channel (smaller effect) are the fundamental necessary conditions for generating giant earthquakes, while the change of subduction velocity from 10 to 3.5 cm/yr has a lower effect. Modeling results also suggest that having thick sediments in the subduction channel causes low static friction, resulting in neutral or slightly compressive deformation in the overriding plate for low-angle subduction zones. These modeling results agree well with observations for the largest earthquakes. The model predicts the largest possible earthquakes for subduction zones of given dipping angles. The predicted maximum magnitudes exactly match the threshold magnitudes of all known giant earthquakes of the 20th and 21st centuries. The clear limitation of most of the models developed in the thesis is their 2D nature. Development of 3D models with comparable resolution and complexity will require significant advances in numerical techniques. 
Nevertheless, we conducted a series of low-resolution 3D models to study the interaction between two large asperities at a subduction interface separated by an aseismic gap of varying width. The novelty of the model is that it considers behavior of the asperities during multiple seismic cycles. As expected, models show that an aseismic gap with a narrow width could not prevent rupture propagation from one asperity to another, and that rupture always crosses the entire model. When the gap becomes too wide, asperities do not interact anymore and rupture independently. However, an interesting mode of interaction was observed in the model with an intermediate width of the aseismic gap: In this model the asperities began to stably rupture in anti-phase following multiple seismic cycles. These 3D modeling results, while insightful, must be considered preliminary because of the limitations in resolution. The technique developed in this thesis for cross-scale modeling of seismic cycles can be used to study the effects of multiple seismic cycles on the long-term deformation of the upper plate. The technique can be also extended to the case of continental transform faults and for the advanced 3D modeling of specific subduction zones. This will require further development of numerical techniques and adaptation of the existing advanced highly scalable parallel codes like LAMEM and ASPECT.}, language = {en} } @phdthesis{Mosca2017, author = {Mosca, Michela}, title = {Multilinguals' language control}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398912}, school = {Universit{\"a}t Potsdam}, pages = {V, 189}, year = {2017}, abstract = {For several decades, researchers have tried to explain how speakers of more than one language (multilinguals) manage to keep their languages separate and to switch from one language to the other depending on the context. 
This ability of multilingual speakers to use the intended language, while avoiding interference from the other language(s) has recently been termed "language control". A multitude of studies showed that when bilinguals process one language, the other language is also activated and might compete for selection. According to the most influential model of language control developed over the last two decades, competition from the non-intended language is solved via inhibition. In particular, the Inhibitory Control (IC) model proposed by Green (1998) puts forward that the amount of inhibition applied to the non-relevant language depends on its dominance, in that the stronger the language the greater the strength of inhibition applied to it. Within this account, the cost required to reactivate a previously inhibited language depends on the amount of inhibition previously exerted on it, that is, reactivation costs are greater for a stronger compared to a weaker language. In a nutshell, according to the IC model, language control is determined by language dominance. The goal of the present dissertation is to investigate the extent to which language control in multilinguals is affected by language dominance and whether and how other factors might influence this process. Three main factors are considered in this work: (i) the time speakers have to prepare for a certain language or PREPARATION TIME, (ii) the type of languages involved in the interactional context or LANGUAGE TYPOLOGY, and (iii) the PROCESSING MODALITY, that is, whether the way languages are controlled differs between reception and production. The results obtained in the four manuscripts, either published or in revision, indicate that language dominance alone does not suffice to explain language switching patterns. In particular, the present thesis shows that language control is profoundly affected by each of the three variables described above. 
More generally, the findings obtained in the present dissertation indicate that language control in multilingual speakers is a much more dynamic system than previously believed and is not exclusively determined by language dominance, as predicted by the IC model (Green, 1998).}, language = {en} } @phdthesis{Morling2017, author = {Morling, Karoline}, title = {Import and decomposition of dissolved organic carbon in pre-dams of drinking water reservoirs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-399110}, school = {Universit{\"a}t Potsdam}, pages = {xii, 151}, year = {2017}, abstract = {Dissolved organic carbon (DOC) depicts a key component in the aquatic carbon cycle as well as for drinking water production from surface waters. DOC concentrations increased in water bodies of the northern hemisphere in the last decades, posing ecological consequences and water quality problems. Within the pelagic zone of lakes and reservoirs, the DOC pool is greatly affected by biological activity as DOC is simultaneously produced and decomposed. This thesis aimed for a conceptual understanding of organic carbon cycling and DOC quality changes under differing hydrological and trophic conditions. Further, the occurrence of aquatic priming was investigated, which has been proposed as a potential process facilitating the microbial decomposition of stable allochthonous DOC within the pelagic zone. To study organic carbon cycling under different hydrological conditions, quantitative and qualitative investigations were carried out in three pre-dams of drinking water reservoirs exhibiting a gradient in DOC concentrations and trophic states. All pre-dams were mainly autotrophic in their epilimnia. Discharge and temperature were identified as the key factors regulating net production and respiration in the upper water layers of the pre-dams. Considerable high autochthonous production was observed during the summer season under higher trophic status and base flow conditions. 
Up to 30\% of the total gained organic carbon was produced within the epilimnia. Consequently, this affected the DOC quality within the pre-dams over the year and enhanced characteristics of algae-derived DOC were observed during base flow in summer. Allochthonously derived DOC dominated at high discharges and oligotrophic conditions when production and respiration were low. These results underline that even small impoundments with typically low water residence times are hotspots of carbon cycling, significantly altering water quality in dependence on discharge conditions, temperature and trophic status. Further, it highlights that these factors need to be considered in future water management as increasing temperatures and altered precipitation patterns are predicted in the context of climate change. Under base flow conditions, heterotrophic bacteria preferentially utilized older DOC components with a conventional radiocarbon age of 195-395 years before present (i.e. before 1950). In contrast, younger carbon components (modern, i.e. produced after 1950) were mineralized following a storm flow event. This highlights that age and recalcitrance of DOC are independent of each other. To assess the ages of the microbially consumed DOC, a simplified method was developed to recover the respired CO2 from heterotrophic bacterioplankton for carbon isotope analyses (13C, 14C). The advantages of the method comprise the operation of replicate incubations at in-situ temperatures using standard laboratory equipment and thus enabling an application in a broad range of conditions. Aquatic priming was investigated in laboratory experiments during the microbial decomposition of two terrestrial DOC substrates (peat water and soil leachate). Thereby, natural phytoplankton served as a source of labile organic matter and the total DOC pool increased throughout the experiments due to exudation and cell lysis of the growing phytoplankton. 
A priming effect for both terrestrial DOC substrates was revealed via carbon isotope analysis and mixing models. Thereby, priming was more pronounced for the peat water than for the soil leachate. This indicates that the DOC source and the amount of the added labile organic matter might influence the magnitude of a priming effect. Additional analysis via high-resolution mass spectrometry revealed that oxidized, unsaturated compounds were more strongly decomposed under priming (i.e. in phytoplankton presence). Given the observed increase in DOC concentrations during the experiments, it can be concluded that aquatic priming is not easily detectable via net concentration changes alone and could be considered as a qualitative effect. The knowledge gained from this thesis contributes to the understanding of aquatic carbon cycling and demonstrated how DOC dynamics in freshwaters vary with hydrological, seasonal and trophic conditions. It further demonstrated that aquatic priming contributes to the microbial transformation of organic carbon and the observed decay of allochthonous DOC during transport in inland waters.}, language = {en} } @phdthesis{MondolLopez2017, author = {Mondol L{\´o}pez, Mijail}, title = {Historiograf{\´i}a literaria y Sociedad}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406409}, school = {Universit{\"a}t Potsdam}, pages = {344}, year = {2017}, abstract = {Throughout all different socio-historical tensions undergone by the Latin American modernit(ies), the literary-historical production as well as the reflection on the topic - regional, national, supranational and/or continental - have been part of the critical and intellectual itinerary of very significant political and cultural projects, whose particular development allows the analysis of the socio-discursive dynamics fulfilled by the literary historiography in the search of a historical conscience and representation of the esthetic-literary processes. 
In present literary and cultural Central American literary studies, the academic thought on the development of the literary historiography has given rise to some works whose main objects of study involve a significant corpus of national literature histories published mainly in the 20th century, between the forties and the eighties. Although these studies differ greatly from the vast academic production undertaken by the literary critics in the last two decades, the field of research of the literary historiography in Central America has made a theoretical-methodological effort, as of the eighties and until now, to analyze the local literary-historical productions. However, this effort was carried out more systematically in the last five years of the 20th century, within the Central American democratic transition and post-war context, when a national, supra national and transnational model of literary history was boosted. This gave rise to the creation and launching of the project Hacia una Historia de las Literaturas Centroamericanas (HILCAS) at the beginning of the new millennium. Given the ideological relevance which the literary historiography has played in the process of the historical formation of the Hispano-American States, and whose philological tradition has also had an impact in the various Central American nation states, the emergence of this historiographic project marks an important rupture in relation with the national paradigms, and it is also manifested in a movement of transition and tension with regard to the new cultural, comparative and transareal dynamics, which seek to understand the geographical, transnational, medial and transdisciplinary movements within which the esthetic narrative processes and the idea and formation of a critical Central American subject gain shape. 
Taking this aspect into account, our study puts forward as its main hypothesis that the historiographic thought developed as a consequence of the project Hacia una Historia de las Literaturas Centroamericanas (HILCAS) constitutes a socio-discursive practice, which reflects the formation of a historic-literary conscience and of a critical intellectual subject, an emergence that takes place between the mid-nineties and the first decade of the 21st century. In this respect, and taking as a basis the general purpose of this investigation indicated before, the main justification for our object of study consists of making the Central American historiographic reflection visible as a part of the epistemological and cultural changes shown by the Latin American historiographic thought, and from which a new way of conceptualization of the space, the coexistence and the historic conscience emerge with regard to the esthetic-literary practices and processes. Based on the field and hypothesis stated before, the general purpose of this research is framed by the socio-discursive dimension fulfilled by the Latin American literary historiography, and it aims to analyze the Central American historical-literary thought developed between the second half of the nineties and the beginning of the first decade of the 21st century.}, language = {es} } @phdthesis{Mitzkus2017, author = {Mitzkus, Martin}, title = {Spectroscopic surface brightness fluctuations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406327}, school = {Universit{\"a}t Potsdam}, pages = {ix, 89}, year = {2017}, abstract = {Galaxies evolve on cosmological timescales and to study this evolution we can either study the stellar populations, tracing the star formation and chemical enrichment, or the dynamics, tracing interactions and mergers of galaxies as well as accretion. 
In the last decades this field has become one of the most active research areas in modern astrophysics and especially the use of integral field spectrographs furthered our understanding. This work is based on data of NGC 5102 obtained with the panoramic integral field spectrograph MUSE. The data are analysed with two separate and complementary approaches: In the first part, standard methods are used to measure the kinematics and then model the gravitational potential using these exceptionally high-quality data. In the second part I develop the new method of surface brightness fluctuation spectroscopy and quantitatively explore its potential to investigate the bright evolved stellar population. Measuring the kinematics of NGC 5102 I discover that this low-luminosity S0 galaxy hosts two counter rotating discs. The more central stellar component co-rotates with the large amount of HI gas. Investigating the populations I find strong central age and metallicity gradients with a younger and more metal rich central population. The spectral resolution of MUSE does not allow connecting these population gradients with the two counter rotating discs. The kinematic measurements are modelled with Jeans anisotropic models to infer the gravitational potential of NGC 5102. Under the self-consistent mass-follows-light assumption none of the Jeans models is able to reproduce the observed kinematics. To my knowledge this is the strongest evidence for a dark matter dominated system obtained with this approach so far. Including a Navarro, Frenk \& White dark matter halo immediately solves the discrepancies. A very robust result is the logarithmic slope of the total matter density. For this low-mass galaxy I find a value of -1.75 +- 0.04, shallower than an isothermal halo and even shallower than published values for more massive galaxies. This confirms a tentative relation between total mass slope and stellar mass of galaxies. 
The Surface Brightness Fluctuation (SBF) method is a well established distance measure, but due to its sensitivity to bright stars it is also used to study evolved stars in unresolved stellar populations. The wide-field spectrograph MUSE offers the possibility to apply this technique for the first time to spectroscopic data. In this thesis I develop the spectroscopic SBF technique and measure the first SBF spectrum of any galaxy. I discuss the challenges for measuring SBF spectra that arise due to the complexity of integral field spectrographs compared to imaging instruments. For decades, stellar population models have indicated that SBFs in intermediate-to-old stellar systems are dominated by red giant branch and asymptotic giant branch stars. Especially the latter carry significant model uncertainties, making these stars a scientifically interesting target. Comparing the NGC 5102 SBF spectrum with stellar spectra I show for the first time that M-type giants cause the fluctuations. Stellar evolution models suggest that also carbon rich thermally pulsating asymptotic giant branch stars should leave a detectable signal in the SBF spectrum. I cannot detect a significant contribution from these stars in the NGC 5102 SBF spectrum. I have written a stellar population synthesis tool that predicts for the first time SBF spectra. I compute two sets of population models: based on observed and on theoretical stellar spectra. In comparing the two models I find that the models based on observed spectra predict weaker molecular features. 
The comparison with the NGC 5102 spectrum reveals that these models are in better agreement with the data.}, language = {en} } @phdthesis{Mera2017, author = {Mera, Azal Jaafar Musa}, title = {The Navier-Stokes equations for elliptic quasicomplexes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398495}, school = {Universit{\"a}t Potsdam}, pages = {101}, year = {2017}, abstract = {The classical Navier-Stokes equations of hydrodynamics are usually written in terms of vector analysis. More promising is the formulation of these equations in the language of differential forms of degree one. In this way the study of Navier-Stokes equations includes the analysis of the de Rham complex. In particular, the Hodge theory for the de Rham complex enables one to eliminate the pressure from the equations. The Navier-Stokes equations constitute a parabolic system with a nonlinear term which makes sense only for one-forms. A simpler model of dynamics of incompressible viscous fluid is given by Burgers' equation. This work is aimed at the study of invariant structure of the Navier-Stokes equations which is closely related to the algebraic structure of the de Rham complex at step 1. To this end we introduce Navier-Stokes equations related to any elliptic quasicomplex of first order differential operators. These equations are quite similar to the classical Navier-Stokes equations including generalised velocity and pressure vectors. Elimination of the pressure from the generalised Navier-Stokes equations gives a good motivation for the study of the Neumann problem after Spencer for elliptic quasicomplexes. Such a study is also included in the work. We start this work by discussion of Lam{\´e} equations within the context of elliptic quasicomplexes on compact manifolds with boundary. The non-stationary Lam{\´e} equations form a hyperbolic system. However, the study of the first mixed problem for them gives a good experience to attack the linearised Navier-Stokes equations. 
On this base we describe a class of non-linear perturbations of the Navier-Stokes equations, for which the solvability results still hold.}, language = {en} } @phdthesis{Meiser2017, author = {Meiser, Susanne}, title = {Wie dysfunktional sind Dysfunktionale Einstellungen?}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412483}, school = {Universit{\"a}t Potsdam}, pages = {186}, year = {2017}, abstract = {Im kognitiven Vulnerabilit{\"a}ts-Stress-Modell der Depression von A.T. Beck (1967, 1976) spielen dysfunktionale Einstellungen bei der Entstehung von Depression in Folge von erlebtem Stress eine zentrale Rolle. Diese Theorie pr{\"a}gt seit Jahrzehnten die {\"a}tiologische Erforschung der Depression, jedoch ist die Bedeutung dysfunktionaler Einstellungen im Prozess der Entstehung einer Depression insbesondere im Kindes- und Jugendalter nach wie vor unklar. Die vorliegende Arbeit widmet sich einigen in der bisherigen Forschung wenig behandelten Fragen. Diese betreffen u. a. die M{\"o}glichkeit nichtlinearer Effekte dysfunktionaler Einstellungen, Auswirkungen einer Stichprobenselektion, Entwicklungseffekte sowie die Spezifit{\"a}t etwaiger Zusammenh{\"a}nge f{\"u}r eine depressive Symptomatik. Zur Beantwortung dieser Fragen wurden Daten von zwei Messzeitpunkten der PIER-Studie, eines großangelegten L{\"a}ngsschnittprojekts {\"u}ber Entwicklungsrisiken im Kindes- und Jugendalter, genutzt. Kinder und Jugendliche im Alter von 9 bis 18 Jahren berichteten zweimal im Abstand von ca. 20 Monaten im Selbstberichtsverfahren {\"u}ber ihre dysfunktionalen Einstellungen, Symptome aus verschiedenen St{\"o}rungsbereichen sowie {\"u}ber eingetretene Lebensereignisse. 
Die Ergebnisse liefern Evidenz f{\"u}r ein Schwellenmodell, in dem dysfunktionale Einstellungen unabh{\"a}ngig von Alter und Geschlecht nur im h{\"o}heren Auspr{\"a}gungsbereich eine Wirkung als Vulnerabilit{\"a}tsfaktor zeigen, w{\"a}hrend im niedrigen Auspr{\"a}gungsbereich keine Zusammenh{\"a}nge zur sp{\"a}teren Depressivit{\"a}t bestehen. Eine Wirkung als Vulnerabilit{\"a}tsfaktor war zudem nur in der Subgruppe der anf{\"a}nglich weitgehend symptomfreien Kinder und Jugendlichen zu beobachten. Das Schwellenmodell erwies sich als spezifisch f{\"u}r eine depressive Symptomatik, es zeigten sich jedoch auch (teilweise ebenfalls nichtlineare) Effekte dysfunktionaler Einstellungen auf die Entwicklung von Essst{\"o}rungssymptomen und aggressivem Verhalten. Bei 9- bis 13-j{\"a}hrigen Jungen standen dysfunktionale Einstellungen zudem in Zusammenhang mit einer Tendenz, Stress in Leistungskontexten herbeizuf{\"u}hren. Zusammen mit den von Sahyazici-Knaak (2015) berichteten Ergebnissen aus der PIER-Studie weisen die Befunde darauf hin, dass dysfunktionale Einstellungen im Kindes- und Jugendalter - je nach betrachteter Subgruppe - Ursache, Symptom und Konsequenz der Depression darstellen k{\"o}nnen. Die in der vorliegenden Arbeit gezeigten nichtlinearen Effekte dysfunktionaler Einstellungen und die Effekte der Stichprobenselektion bieten eine zumindest teilweise Erkl{\"a}rung f{\"u}r die Heterogenit{\"a}t fr{\"u}herer Forschungsergebnisse. Insgesamt lassen sie auf komplexe - und nicht ausschließlich negative - Auswirkungen dysfunktionaler Einstellungen schließen. F{\"u}r eine ad{\"a}quate Beurteilung der „Dysfunktionalit{\"a}t" der von A.T. 
Beck so betitelten Einstellungen erscheint daher eine Ber{\"u}cksichtigung der betrachteten Personengruppe, der absoluten Auspr{\"a}gungen und der fraglichen Symptomgruppen geboten.}, language = {de} } @phdthesis{Meiling2017, author = {Meiling, Till Thomas}, title = {Development of a reliable and environmentally friendly synthesis for fluorescence carbon nanodots}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410160}, school = {Universit{\"a}t Potsdam}, pages = {198}, year = {2017}, abstract = {Carbon nanodots (CNDs) have generated considerable attention due to their promising properties, e.g. high water solubility, chemical inertness, resistance to photobleaching, high biocompatibility and ease of functionalization. These properties render them ideal for a wide range of functions, e.g. electrochemical applications, waste water treatment, (photo)catalysis, bio-imaging and bio-technology, as well as chemical sensing, and optoelectronic devices like LEDs. In particular, the ability to prepare CNDs from a wide range of accessible organic materials makes them a potential alternative for conventional organic dyes and semiconductor quantum dots (QDs) in various applications. However, current synthesis methods are typically expensive and depend on complex and time-consuming processes or severe synthesis conditions and toxic chemicals. One way to reduce overall preparation costs is the use of biological waste as starting material. Hence, natural carbon sources such as pomelo peel, egg white and egg yolk, orange juice, and even eggshells, to name a few, have been used for the preparation of CNDs. While the use of waste is desirable, especially to avoid competition with essential food production, most starting-materials lack the essential purity and structural homogeneity to obtain homogeneous carbon dots. 
Furthermore, most synthesis approaches reported to date require extensive purification steps and have resulted in carbon dots with heterogeneous photoluminescent properties and indefinite composition. For this reason, among others, the relationship between CND structure (e.g. size, edge shape, functional groups and overall composition) and photophysical properties is yet not fully understood. This is particularly true for carbon dots displaying selective luminescence (one of their most intriguing properties), i.e. their PL emission wavelength can be tuned by varying the excitation wavelength. In this work, a new reliable, economic, and environmentally-friendly one-step synthesis is established to obtain CNDs with well-defined and reproducible photoluminescence (PL) properties via the microwave-assisted hydrothermal treatment of starch, carboxylic acids and Tris-EDTA (TE) buffer as carbon- and nitrogen source, respectively. The presented microwave-assisted hydrothermal precursor carbonization (MW-hPC) is characterized by its cost-efficiency, simplicity, short reaction times, low environmental footprint, and high yields of approx. 80\% (w/w). Furthermore, only a single synthesis step is necessary to obtain homogeneous water-soluble CNDs with no need for further purification. Depending on starting materials and reaction conditions different types of CNDs have been prepared. The as-prepared CNDs exhibit reproducible, highly homogeneous and favourable PL properties with narrow emission bands (approx. 70nm FWHM), are non-blinking, and are ready to use without need for further purification, modification or surface passivation agents. Furthermore, the CNDs are comparatively small (approx. 2.0nm to 2.4nm) with narrow size distributions; are stable over a long period of time (at least one year), either in solution or as a dried solid; and maintain their PL properties when re-dispersed in solution. 
Depending on CND type, the PL quantum yield (PLQY) can be adjusted from as low as 1\% to as high as 90\%; one of the highest reported PLQY values (for CNDs) so far. An essential part of this work was the utilization of a microwave synthesis reactor, allowing various batch sizes and precise control over reaction temperature and -time, pressure, and heating- and cooling rate, while also being safe to operate at elevated reaction conditions (e.g. 230 °C and 30 bar). The hereby-achieved high sample throughput allowed, for the first time, the thorough investigation of a wide range of synthesis parameters, providing valuable insight into the CND formation. The influence of carbon- and nitrogen source, precursor concentration and -combination, reaction time and -temperature, batch size, and post-synthesis purification steps were carefully investigated regarding their influence on the optical properties of as-synthesized CNDs. In addition, the change in photophysical properties resulting from the conversion of CND solution into solid and back into the solution was investigated. Remarkably, upon freeze-drying the initial brown CND-solution turns into a non-fluorescent white/slightly yellow to brown solid which recovers PL in aqueous solution. Selected CND samples were also subject to EDX, FTIR, NMR, PL lifetime (TCSPC), particle size (TEM), TGA and XRD analysis. Besides structural characterization, the pH- and excitation dependent PL characteristics (i.e. selective luminescence) were examined; giving insight into the origin of photophysical properties and excitation dependent behaviour of CNDs. 
The obtained results support the notion that for CNDs the nature of the surface states determines the PL properties and that excitation dependent behaviour is caused by the "Giant Red-Edge Excitation Shift" (GREES).}, language = {en} } @phdthesis{Meier2017, author = {Meier, Tobias}, title = {Borehole Breakouts in Transversely Isotropic Posidonia Shale}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-400019}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 133}, year = {2017}, abstract = {Borehole instabilities are frequently encountered when drilling through finely laminated, organic rich shales ({\O}kland and Cook, 1998; Ottesen, 2010; etc.); such instabilities should be avoided to assure a successful exploitation and safe production of the contained unconventional hydrocarbons. Borehole instabilities, such as borehole breakouts or drilling induced tensile fractures, may lead to poor cementing of the borehole annulus, difficulties with recording and interpretation of geophysical logs, low directional control and in the worst case the loss of the well. If these problems are not recognized and expertly remedied, pollution of the groundwater or the emission of gases into the atmosphere can occur since the migration paths of the hydrocarbons in the subsurface are not yet fully understood (e.g., Davies et al., 2014; Zoback et al., 2010). In addition, it is often mentioned that the drilling problems encountered and the resulting downtimes of the wellbore system in finely laminated shales significantly increase drilling costs (Fjaer et al., 2008; Aadnoy and Ong, 2003). In order to understand and reduce the borehole instabilities during drilling in unconventional shales, we investigate stress-induced irregular extensions of the borehole diameter, which are also referred to as borehole breakouts. 
For this purpose, experiments with different borehole diameters, bedding plane angles and stress boundary conditions were performed on finely laminated Posidonia shales. The Lower Jurassic Posidonia shale is one of the most productive source rocks for conventional reservoirs in Europe and has the greatest potential for unconventional oil and gas in Europe (Littke et al., 2011). In this work, Posidonia shale specimens from the North (PN) and South (PS) German basins were selected and characterized petrophysically and mechanically. The composition of the two shales is dominated by calcite (47-56\%) followed by clays (23-28\%) and quartz (16-17\%). The remaining components are mainly pyrite and organic matter. The porosity of the shales varies considerably and is up to 10\% for PS and 1\% for PN, which is due to a larger deposition depth of PN. Both shales show marked elasticity and strength anisotropy, which can be attributed to a macroscopic distribution and orientation of soft and hard minerals. Under load the hard minerals form a load-bearing, supporting structure, while the soft minerals compensate the deformation. Therefore, if loaded parallel to the bedding, the Posidonia shale is more brittle than loaded normal to the bedding. The resulting elastic anisotropy, which can be defined by the ratio of the modulus of elasticity parallel and normal to the bedding, is about 50\%, while the strength anisotropy (i.e., the ratio of uniaxial compressive strength normal and parallel to the bedding) is up to 66\%. Based on the petrophysical characterization of the two rocks, a transverse isotropy (TVI) was derived. In general, PS is softer and weaker than PN, which is due to the stronger compaction of the material due to the higher burial depth. 
Conventional triaxial borehole breakout experiments on specimens with different borehole diameters showed that, when the diameter of the borehole is increased, the stress required to initiate borehole breakout decreases to a constant value. This value can be expressed as the ratio of the tangential stress and the uniaxial compressive strength of the rock. The ratio increases exponentially with decreasing borehole diameter from about 2.5 for a 10 mm diameter hole to ~ 7 for a 1 mm borehole (increase of initiation stress by 280\%) and can be described by a fracture mechanic based criterion. The reduction in borehole diameter is therefore a considerable aspect in reducing the risk of breakouts. New drilling techniques with significantly reduced borehole diameters, such as "fish-bone" holes, are already underway and are currently being tested (e.g., Xing et al., 2012). The observed strength anisotropy and the TVI material behavior are also reflected in the observed breakout processes at the borehole wall. Drill holes normal to the bedding develop breakouts in a plane of isotropy and are not affected by the strength or elasticity anisotropy. The observed breakouts are point-symmetric and form compressive shear failure planes, which can be predicted by a Mohr-Coulomb failure approach. If the shear failure planes intersect, conjugate breakouts can be described as "dog-eared" breakouts. While the initiation of breakouts for wells oriented normal to the stratification has been triggered by random local defects, reduced strengths parallel to bedding planes are the starting point for breakouts for wells parallel to the bedding. In the case of a deflected borehole trajectory, therefore, the observed failure type changes from shear-induced failure surfaces to buckling failure of individual layer packages. 
In addition, the breakout depths and widths increased, resulting in a stress-induced enlargement of the borehole cross-section and an increased output of rock material into the borehole. With the transition from shear to buckling failure and changing bedding plane angle with respect to the borehole axis, the stress required for inducing wellbore breakouts drops by 65\%. These observations under conventional triaxial stress boundary conditions could also be confirmed under true triaxial stress conditions. Here breakouts grew into the rock as a result of buckling failure, too. In this process, the broken layer packs rotate into the pressure-free drill hole and detach themselves from the surrounding rock by tensile cracking. The final breakout shape in Posidonia shale can be described as trapezoidal when the bedding planes are parallel to the greatest horizontal stress and to the borehole axis. In the event that the largest horizontal stress is normal to the stratification, breakouts were formed entirely by shear fractures between the stratification and required higher stresses to initiate, similar to breakouts in conventional triaxial experiments with boreholes oriented normal to the bedding. In the context of this work, a fracture mechanics-based failure criterion for conventional triaxial loading conditions in isotropic rocks (Dresen et al., 2010) has been successfully extended to true triaxial loading conditions in the transverse isotropic rock to predict the initiation of borehole breakouts. The criterion was successfully verified on the experiments carried out. 
The extended failure criterion and the conclusions from the laboratory and numerical work may help to reduce the risk of borehole breakouts in unconventional shales.}, language = {en} } @phdthesis{Meier2017, author = {Meier, Sebastian}, title = {Personal Big Data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406696}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 133}, year = {2017}, abstract = {Many users of cloud-based services are concerned about questions of data privacy. At the same time, they want to benefit from smart data-driven services, which require insight into a person's individual behaviour. The modus operandi of user modelling is that data is sent to a remote server where the model is constructed and merged with other users' data. This thesis proposes selective cloud computing, an alternative approach, in which the user model is constructed on the client-side and only an abstracted generalised version of the model is shared with the remote services. In order to demonstrate the applicability of this approach, the thesis builds an exemplary client-side user modelling technique. As this thesis is carried out in the area of Geoinformatics and spatio-temporal data is particularly sensitive, the application domain for this experiment is the analysis and prediction of a user's spatio-temporal behaviour. The user modelling technique is grounded in an innovative conceptual model, which builds upon spatial network theory combined with time-geography. The spatio-temporal constraints of time-geography are applied to the network structure in order to create individual spatio-temporal action spaces. This concept is translated into a novel algorithmic user modelling approach which is solely driven by the user's own spatio-temporal trajectory data that is generated by the user's smartphone. 
While modern smartphones offer a rich variety of sensory data, this thesis only makes use of spatio-temporal trajectory data, enriched by activity classification, as the input and foundation for the algorithmic model. The algorithmic model consists of three basal components: locations (vertices), trips (edges), and clusters (neighbourhoods). After preprocessing the incoming trajectory data in order to identify locations, user feedback is used to train an artificial neural network to learn temporal patterns for certain location types (e.g. work, home, bus stop, etc.). This Artificial Neural Network (ANN) is used to automatically detect future location types by their spatio-temporal patterns. The same is done in order to predict the duration of stay at a certain location. Experiments revealed that neural nets were the most successful statistical and machine learning tool to detect those patterns. The location type identification algorithm reached an accuracy of 87.69\%, the duration prediction on binned data was less successful and deviated by an average of 0.69 bins. A challenge for the location type classification, as well as for the subsequent components, was the imbalance of trips and connections as well as the low accuracy of the trajectory data. The imbalance is grounded in the fact that most users exhibit strong habitual patterns (e.g. home > work), while other patterns are rather rare by comparison. The accuracy problem derives from the energy-saving location sampling mode, which creates less accurate results. Those locations are then used to build a network that represents the user's spatio-temporal behaviour. An initial untrained ANN to predict movement on the network only reached 46\% average accuracy. Only lowering the number of included edges, focusing on more common trips, increased the performance. In order to further improve the algorithm, the spatial trajectories were introduced into the predictions. 
To overcome the accuracy problem, trips between locations were clustered into so-called spatial corridors, which were intersected with the user's current trajectory. The resulting intersected trips were ranked through a k-nearest-neighbour algorithm. This increased the performance to 56\%. In a final step, a combination of a network and spatial clustering algorithm was built in order to create clusters, therein reducing the variety of possible trips. By only predicting the destination cluster instead of the exact location, it is possible to increase the performance to 75\% including all classes. A final set of components shows in two exemplary ways how to deduce additional inferences from the underlying spatio-temporal data. The first example presents a novel concept for predicting the 'potential memorisation index' for a certain location. The index is based on a cognitive model which derives the index from the user's activity data in that area. The second example embeds each location in its urban fabric and thereby enriches its cluster's metadata by further describing the temporal-semantic activity in an area (e.g. going to restaurants at noon). The success of the client-side classification and prediction approach, despite the challenges of inaccurate and imbalanced data, supports the claimed benefits of the client-side modelling concept. Since modern data-driven services at some point do need to receive user data, the thesis' computational model concludes with a concept for applying generalisation to semantic, temporal, and spatial data before sharing it with the remote service in order to comply with the overall goal to improve data privacy. In this context, the potentials of ensemble training (in regards to ANNs) are discussed in order to highlight the potential of only sharing the trained ANN instead of the raw input data. 
While the results of our evaluation support the assets of the proposed framework, there are two important downsides of our approach compared to server-side modelling. First, both of these server-side advantages are rooted in the server's access to multiple users' data. This allows a remote service to predict spatio-temporal behaviour that is not yet present in the user-specific data, which represents the second downside. While minor classes will likely be minor classes in a bigger dataset as well, for each class, there will still be more variety than in the user-specific dataset. The author emphasises that the approach presented in this work holds the potential to change the privacy paradigm in modern data-driven services. Finding combinations of client- and server-side modelling could prove a promising new path for data-driven innovation. Beyond the technological perspective, throughout the thesis the author also offers a critical view on the data- and technology-driven development of this work. By introducing the client-side modelling with user-specific artificial neural networks, users generate their own algorithm. Those user-specific algorithms are influenced less by generalised biases or developers' prejudices. Therefore, the user develops a more diverse and individual perspective through his or her user model. 
This concept picks up the idea of critical cartography, which questions the status quo of how space is perceived and represented.}, language = {en} } @phdthesis{MbayaMani2017, author = {Mbaya Mani, Christian}, title = {Functional nanoporous carbon-based materials derived from oxocarbon-metal coordination complexes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407866}, school = {Universit{\"a}t Potsdam}, pages = {IV, 135}, year = {2017}, abstract = {Nanoporous carbon based materials are of particular interest for both science and industry due to their exceptional properties such as a large surface area, high pore volume, high electroconductivity as well as high chemical and thermal stability. Benefiting from these advantageous properties, nanoporous carbons proved to be useful in various energy and environment related applications including energy storage and conversion, catalysis, gas sorption and separation technologies. The synthesis of nanoporous carbons classically involves thermal carbonization of the carbon precursors (e.g. phenolic resins, polyacrylonitrile, poly(vinyl alcohol) etc.) followed by an activation step and/or it makes use of classical hard or soft templates to obtain well-defined porous structures. However, these synthesis strategies are complicated and costly; and make use of hazardous chemicals, hindering their application for large-scale production. Furthermore, control over the carbon materials properties is challenging owing to the relatively unpredictable processes at the high carbonization temperatures. In the present thesis, nanoporous carbon based materials are prepared by the direct heat treatment of crystalline precursor materials with pre-defined properties. This synthesis strategy does not require any additional carbon sources or classical hard- or soft templates. 
The highly stable and porous crystalline precursors are based on coordination compounds of the squarate and croconate ions with various divalent metal ions including Zn2+, Cu2+, Ni2+, and Co2+, respectively. Here, the structural properties of the crystals can be controlled by the choice of appropriate synthesis conditions such as the crystal aging temperature, the ligand/metal molar ratio, the metal ion, and the organic ligand system. In this context, the coordination of the squarate ions to Zn2+ yields porous 3D cube crystalline particles. The morphology of the cubes can be tuned from densely packed cubes with a smooth surface to cubes with intriguing micrometer-sized openings and voids which evolve on the centers of the low index faces as the crystal aging temperature is raised. By varying the molar ratio, the particle shape can be changed from truncated cubes to perfect cubes with right-angled edges. These crystalline precursors can be easily transformed into the respective carbon based materials by heat treatment at elevated temperatures in a nitrogen atmosphere followed by a facile washing step. The resulting carbons are obtained in good yields and possess a hierarchical pore structure with well-organized and interconnected micro-, meso- and macropores. Moreover, high surface areas and large pore volumes of up to 1957 m2 g-1 and 2.31 cm3 g-1 are achieved, respectively, whereby the macroscopic structure of the precursors is preserved throughout the whole synthesis procedure. Owing to these advantageous properties, the resulting carbon based materials represent promising supercapacitor electrode materials for energy storage applications. 
This is exemplarily demonstrated by employing the 3D hierarchical porous carbon cubes derived from squarate-zinc coordination compounds as electrode material showing a specific capacitance of 133 F g-1 in H2SO4 at a scan rate of 5 mV s-1 and retaining 67\% of this specific capacitance when the scan rate is increased to 200 mV s-1. In a further application, the porous carbon cubes derived from squarate-zinc coordination compounds are used as high surface area support material and decorated with nickel nanoparticles via an incipient wetness impregnation. The resulting composite material combines a high surface area, a hierarchical pore structure with high functionality and well-accessible pores. Moreover, owing to their regular micro-cube shape, they allow for a good packing of a fixed-bed flow reactor along with high column efficiency and a minimized pressure drop throughout the packed reactor. Therefore, the composite is employed as heterogeneous catalyst in the selective hydrogenation of 5-hydroxymethylfurfural to 2,5-dimethylfuran showing good catalytic performance and overcoming the conventional problem of column blocking. Thinking about the rational design of 3D carbon geometries, the functions and properties of the resulting carbon-based materials can be further expanded by the rational introduction of heteroatoms (e.g. N, B, S, P, etc.) into the carbon structures in order to alter properties such as wettability, surface polarity as well as the electrochemical landscape. 
In this context, the use of crystalline materials based on oxocarbon-metal ion complexes can open a platform of highly functional materials for all processes that involve surface processes.}, language = {en} } @phdthesis{Martin2017, author = {Martin, Thorsten}, title = {Advances in spatial econometrics and the political economy of local housing supply}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406836}, school = {Universit{\"a}t Potsdam}, pages = {207}, year = {2017}, abstract = {This cumulative dissertation consists of five chapters. In terms of research content, my thesis can be divided into two parts. Part one examines local interactions and spillover effects between small regional governments using spatial econometric methods. The second part focuses on patterns within municipalities and inspects which institutions of citizen participation, elections and local petitions, influence local housing policies.}, language = {en} } @phdthesis{Mahlstedt2017, author = {Mahlstedt, Robert}, title = {Essays on job search behavior and labor market policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397081}, school = {Universit{\"a}t Potsdam}, pages = {252}, year = {2017}, abstract = {Persistently high unemployment rates are a major threat to the social cohesion in many societies. To moderate the consequences of unemployment industrialized countries spend substantial shares of their GDP on labor market policies, while in recent years there has been a shift from passive measures, such as transfer payments, towards more activating elements which aim to promote the reintegration into the labor market. Although, there exists a wide range of evidence about the effects of traditional active labor market policies (ALMP) on participants' subsequent labor market outcomes, a deeper understanding of the impact of these programs on the job search behavior and the interplay with long-term labor market outcomes is necessary. 
This allows policy makers to improve the design of labor market policies and the allocation of unemployed workers into specific programs. Moreover, previous studies have shown that many traditional ALMP programs, like public employment or training schemes, do not achieve the desired results. This underlines the importance of understanding the effect mechanisms, but also the need to develop innovative programs that are more effective. This thesis extends the existing literature with respect to several dimensions. First, it analyzes the impact of job seekers' beliefs about upcoming ALMPs programs on the effectiveness of realized treatments later during the unemployment spell. This provides important insights with respect to the job search process and relates potential anticipation effects (on the job seekers behavior before entering a program) to the vast literature evaluating the impact of participating in an ALMP program on subsequent outcomes. The empirical results show that training programs are more effective if the participants expect participation ex ante, while expected treatment effects are unrelated to the actual labor market outcomes of participants. A subsequent analysis of the effect mechanisms shows that job seekers who expect to participate also receive more information by their caseworker and show a higher willingness to adjust their search behavior in association with an upcoming ALMP program. The findings suggest that the effectiveness of training programs can be improved by providing more detailed information about the possibility of a future treatment early during the unemployment spell. Second, the thesis investigates the effects of a relatively new class of programs that aim to improve the geographical mobility of unemployed workers with respect to the job search behavior, the subsequent job finding prospects and the returns to labor market mobility. 
To estimate the causal impact of these programs, it is exploited that local employment agencies have a degree of autonomy when deciding about the regional-specific policy mix. The findings show that the policy style of the employment agency indeed affects the job search behavior of unemployed workers. Job seekers who are assigned to agencies with higher preferences for mobility programs increase their search radius without affecting the total number of job applications. This shift of the search effort to distant regions leads to a higher probability to find a regular job and higher wages. Moreover, it is shown that participants in one of the subsidy programs who move to a geographically distant region earn significantly higher wages, end up in more stable jobs and face a higher long-run employment probability compared to non-participants. Third, the thesis offers an empirical assessment of the unconfoundedness assumption with respect to the relevance of variables that are usually unobserved in studies evaluating ALMP programs. A unique dataset that combines administrative records and survey data allows us to observe detailed information on typical covariates, as well as usually unobserved variables including personality traits, attitudes, expectations, intergenerational information, as well as indicators about social networks and labor market flexibility. The findings show that, although our set of usually unobserved variables indeed has a significant effect on the selection into ALMP programs, the overall impact when estimating treatment effects is rather small. Finally, the thesis also examines the importance of gender differences in reservation wages that allows assessing the importance of special ALMP programs targeting women. In particular, when including reservation wages in a wage decomposition exercise, the gender gap in realized wages becomes small and statistically insignificant. 
The strong connection between gender differences in reservation wages and realized wages raises the question how these differences in reservation wages are set in the first place. Since traditional covariates cannot sufficiently explain the gender gap in reservation wages, we perform subgroup analysis to better understand what the driving forces behind this gender gap are.}, language = {en} } @phdthesis{Lysyakova2017, author = {Lysyakova, Liudmila}, title = {Interaction of azobenzene containing surfactants with plasmonic nanoparticles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403359}, school = {Universit{\"a}t Potsdam}, pages = {viii, 155}, year = {2017}, abstract = {The goal of this thesis is related to the question how to introduce and combine simultaneously plasmonic and photoswitching properties to different nano-objects. In this thesis I investigate the complexes between noble metal nanoparticles and cationic surfactants containing azobenzene units in their hydrophobic tail, employing absorption spectroscopy, surface zeta-potential, and electron microscopy. In the first part of the thesis, the formation of complexes between negatively charged laser ablated spherical gold nanoparticles and cationic azobenzene surfactants in trans- conformation is explored. It is shown that the constitution of the complexes strongly depends on a surfactant-to-gold molar ratio. At certain molar ratios, particle self-assembly into nanochains and their aggregation have been registered. At higher surfactant concentrations, the surface charge of nanoparticles turned positive, attributed to the formation of the stabilizing double layer of azobenzene surfactants on gold nanoparticle surfaces. These gold-surfactant complexes remained colloidally stable. UV light induced trans-cis isomerization of azobenzene surfactant molecules and thus perturbed the stabilizing surfactant shell, causing nanoparticle aggregation. 
The results obtained with silver and silicon nanoparticles mimic those for the comprehensively studied gold nanoparticles, corroborating the proposed model of complex formation. In the second part, the interaction between plasmonic metal nanoparticles (Au, Ag, Pd, alloy Au-Ag, Au-Pd), as well as silicon nanoparticles, and cis-isomers of azobenzene containing compounds is addressed. Cis-trans thermal isomerization of azobenzenes was enhanced in the presence of gold, palladium, and alloy gold-palladium nanoparticles. The influence of the surfactant structure and nanoparticle material on the azobenzene isomerization rate is expounded. Gold nanoparticles showed superior catalytic activity for thermal cis-trans isomerization of azobenzenes. In a joint project with theoretical chemists, we demonstrated that the possible physical origin of this phenomenon is the electron transfer between azobenzene moieties and nanoparticle surfaces. In the third part, complexes between gold nanorods and azobenzene surfactants with different tail length were exposed to UV and blue light, inducing trans-cis and cis-trans isomerization of surfactant, respectively. At the same time, the position of longitudinal plasmonic absorption maximum of gold nanorods experienced reversible shift responding to the changes in local dielectric environment. 
Surface plasmon resonance condition allowed the estimation of the refractive index of azobenzene containing surfactants in solution.}, language = {en} } @phdthesis{Lubitz2017, author = {Lubitz, Christin}, title = {Investigating local surface displacements associated with anthropogenic activities by satellite radar interferometry}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-416001}, school = {Universit{\"a}t Potsdam}, pages = {III, vii, 96, xii}, year = {2017}, abstract = {Direct anthropogenic influences on the Earth's subsurface during drilling, extraction or injection activities, can affect land stability by causing subsidence, uplifts or lateral displacements. They can occur in localized as well as in uninhabited and inhabited regions. Thus the associated risks for humans, infrastructure, and environment must be minimized. To achieve this, appropriate surveillance methods must be found that can be used for simultaneous monitoring during such activities. Multi-temporal synthetic aperture radar interferometry (MT-InSAR) methods like the Persistent Scatterer Interferometry (PSI) and the Small BAseline Subsets (SBAS) have been developed as standard approaches for satellite-based surface displacement monitoring. With increasing spatial resolution and availability of SAR sensors in recent years, MT-InSAR can be valuable for the detection and mapping of even the smallest man-made displacements. This doctoral thesis aims at investigating the capacities of the mentioned standard methods for this purpose, and comprises three main objectives against the backdrop of a user-friendly surveillance service: (1) the spatial and temporal significance assessment against leveling, (2) the suitability evaluation of PSI and SBAS under different conditions, and (3) the analysis of the link between surface motion and subsurface processes. Two prominent case studies on anthropogenic induced subsurface processes in Germany serve as the basis for this goal. 
The first is the distinct urban uplift with severe damages at Staufen im Breisgau that has been associated since 2007 with a failure to implement a shallow geothermal energy supply for an individual building. The second case study considers the pilot project of geological carbon dioxide (CO2) storage at Ketzin, and comprises borehole drilling and fluid injection of more than 67 kt CO2 between 2008 and 2013. Leveling surveys at Staufen and comprehensive background knowledge of the underground processes gained from different kinds of in-situ measurements at both locations deliver a suitable basis for this comparative study and the above stated objectives. The differences in location setting, i.e. urban versus rural site character, were intended to investigate the limitations in the applicability of PSI and SBAS. For the MT-InSAR analysis, X-band images from the German TerraSAR-X and TanDEM-X satellites were acquired in the standard Stripmap mode with about 3 m spatial resolution in azimuth and range direction. Data acquisition lasted over a period of five years for Staufen (2008-2013), and four years for Ketzin (2009-2013). For the first approximation of the subsurface source, an inversion of the InSAR outcome in Staufen was applied. The modeled uplift based on complex hydromechanical simulations and a correlation analysis with bottomhole pressure data were used for comparison with MT-InSAR measurements at Ketzin. In response to the defined objectives of this thesis, a higher level of detail can be achieved in mapping surface displacements without in-situ effort by using MT-InSAR in comparison to leveling (1). A clear delineation of the elliptical shaped uplift border and its magnitudes at different parts was possible at Staufen, with the exception of a vegetated area in the northwest. Vegetation coverage and the associated temporal signal decorrelation are the main limitations of MT-InSAR as clearly demonstrated at the Ketzin test site. 
They result in insufficient measurement point density and unwrapping issues. Therefore, spatial resolutions of one meter or better are recommended to achieve an adequate point density for local displacement analysis and to apply signal noise reduction. Leveling measurements can provide a complementary data source here, but require much effort pertaining to personnel even at the local scale. Horizontal motions could be identified at Staufen by only comparing the temporal evolution of the 1D line of sight (LOS) InSAR measurements with the available leveling data. An exception was the independent LOS decomposition using ascending and descending data sets for the period 2012-2013. The full 3D displacement field representation failed due to insufficient orbit-related, north-south sensitivity of the satellite-based measurements. By using the dense temporal mapping capabilities of the TerraSAR-X/TanDEM-X satellites after every 11 days, the temporal displacement evolution could be captured as good as that with leveling. With respect to the tested methods and in the view of generality, SBAS should be preferred over PSI (2). SBAS delivered a higher point density, and was therefore less affected by phase unwrapping issues in both case studies. Linking surface motions with subsurface processes is possible when considering simplified geophysical models (3), but it still requires intensive research to gain a deep understanding.}, language = {en} } @phdthesis{LorenteSanchez2017, author = {Lorente S{\´a}nchez, Alejandro Jose}, title = {Synthesis of side-chain polystyrenes for all organic solution processed OLEDs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398006}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 131}, year = {2017}, abstract = {In the present work side-chain polystyrenes were synthesized and characterized, in order to be applied in multilayer OLEDs fabricated by solution process techniques. 
Manufacture of optoelectronic devices by solution process techniques is meant to decrease significantly fabrication cost and allow large scale production of such devices. This dissertation focuses on three series, enveloped in two material classes. The two classes differ from each other in the type of charge transport exhibited, either ambipolar transport or electron transport. All materials were applied in all-organic solution processed green Ir-based devices. In the first part, a series of ambipolar host materials were developed to transport both charge types, holes and electrons, and be applied especially as matrix for green Ir-based emitters. It was possible to increase devices efficacy by modulating the predominant charge transport type. This was achieved by modification of molecules electron transport part with more electron-deficient heterocycles or by extending the delocalization of the LUMO. Efficiencies up to 28.9 cd/A were observed for all-organic solution-process three layer devices. In the second part, suitability of triarylboranes and tetraphenylsilanes as electron transport materials was studied. High triplet energies were obtained, up to 2.95 eV, by rational combination of both molecular structures. Although the combination of both elements had a low effect in materials electron transport properties, high efficiencies around 24 cd/A were obtained for the series in all-organic solution-processed two layer devices. In the last part, benzene and pyridine were chosen as the series electron-transport motif. By controlling the relative pyridine content (RPC) solubility into methanol was induced for polystyrenes with bulky side-chains. Materials with RPC ≥ 0.5 could be deposited orthogonally from solution without harming underlying layers. To the best of our knowledge, this is the first time such materials are applied in this architecture showing moderate efficiencies around 10 cd/A in all-organic solution processed OLEDs. 
Overall, the outcome of these studies will actively contribute to the current research on materials for all-solution processed OLEDs.}, language = {en} } @phdthesis{LindbladPetersen2017, author = {Lindblad Petersen, Oliver}, title = {The Cauchy problem for the linearised Einstein equation and the Goursat problem for wave equations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410216}, school = {Universit{\"a}t Potsdam}, pages = {108}, year = {2017}, abstract = {In this thesis, we study two initial value problems arising in general relativity. The first is the Cauchy problem for the linearised Einstein equation on general globally hyperbolic spacetimes, with smooth and distributional initial data. We extend well-known results by showing that given a solution to the linearised constraint equations of arbitrary real Sobolev regularity, there is a globally defined solution, which is unique up to addition of gauge solutions. Two solutions are considered equivalent if they differ by a gauge solution. Our main result is that the equivalence class of solutions depends continuously on the corre- sponding equivalence class of initial data. We also solve the linearised constraint equations in certain cases and show that there exist arbitrarily irregular (non-gauge) solutions to the linearised Einstein equation on Minkowski spacetime and Kasner spacetime. In the second part, we study the Goursat problem (the characteristic Cauchy problem) for wave equations. We specify initial data on a smooth compact Cauchy horizon, which is a lightlike hypersurface. This problem has not been studied much, since it is an initial value problem on a non-globally hyperbolic spacetime. 
Our main result is that given a smooth function on a non-empty, smooth, compact, totally geodesic and non-degenerate Cauchy horizon and a so called admissible linear wave equation, there exists a unique solution that is defined on the globally hyperbolic region and restricts to the given function on the Cauchy horizon. Moreover, the solution depends continuously on the initial data. A linear wave equation is called admissible if the first order part satisfies a certain condition on the Cauchy horizon, for example if it vanishes. Interestingly, both existence of solution and uniqueness are false for general wave equations, as examples show. If we drop the non-degeneracy assumption, examples show that existence of solution fails even for the simplest wave equation. The proof requires precise energy estimates for the wave equation close to the Cauchy horizon. In case the Ricci curvature vanishes on the Cauchy horizon, we show that the energy estimates are strong enough to prove local existence and uniqueness for a class of non-linear wave equations. Our results apply in particular to the Taub-NUT spacetime and the Misner spacetime. It has recently been shown that compact Cauchy horizons in spacetimes satisfying the null energy condition are necessarily smooth and totally geodesic. Our results therefore apply if the spacetime satisfies the null energy condition and the Cauchy horizon is compact and non-degenerate.}, language = {en} } @phdthesis{Leupelt2017, author = {Leupelt, Anke Verena}, title = {Hormonelle K{\"o}rpergewichtsregulation nach Gewichtsreduktion im Rahmen der multimodalen randomisierten Interventionsstudie MAINTAIN}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-413181}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 104}, year = {2017}, abstract = {Adipositas wird mit einer Vielzahl schwerwiegender Folgeerkrankungen in Verbindung gebracht. Eine Gewichtsreduktion f{\"u}hrt zu einer Verbesserung der metabolischen Folgen der Adipositas. 
Es ist bekannt, dass die Mehrzahl der adip{\"o}sen Personen in den Monaten nach der Gewichtsreduktion einen Großteil des abgenommenen Gewichts wieder zunimmt. Nichtsdestotrotz existiert eine hohe Variabilit{\"a}t hinsichtlich des Langzeiterfolges einer Gewichtsreduktion. Der erfolgreiche Erhalt des reduzierten K{\"o}rpergewichts einiger Personen f{\"u}hrt zu der Frage nach den Faktoren, die einen Gewichtserhalt beeinflussen, mit dem Ziel einen Ansatzpunkt f{\"u}r m{\"o}gliche Therapiestrategien zu identifizieren. In der vorliegenden Arbeit wurde im Rahmen einer kontrollierten, randomisierten Studie mit 143 {\"u}bergewichtigen Probanden untersucht, ob nach einer dreimonatigen Gewichtsreduktion eine zw{\"o}lfmonatige gewichtsstabilisierende Lebensstilintervention einen Einfluss auf die Ver{\"a}nderungen der neuroendokrinen Regelkreisl{\"a}ufe und damit auf den langfristigen Gewichtserhalt {\"u}ber einen Zeitraum von achtzehn Monaten hat. Hierbei wurde im Vergleich der beiden Behandlungsgruppen prim{\"a}r festgestellt, dass die multimodale Lebensstilintervention zu einer Gewichtstabilisierung {\"u}ber die Dauer dieser zw{\"o}lfmonatigen Behandlungsphase f{\"u}hrte. In der Kontrollgruppe kam es zu einer moderaten Gewichtszunahme. Dadurch war nach Beendigung der Interventionsphase der BMI der Teilnehmer in der Kontrollgruppe h{\"o}her als der in der Interventionsgruppe (34,1±6,0 kg*m-2 vs. 32,4±5,7 kg*m-2; p<0,01). W{\"a}hrend der Nachbeobachtungszeit war die Interventionsgruppe durch eine signifikant st{\"a}rkere Gewichtswiederzunahme im Vergleich zur Kontrollgruppe gekennzeichnet, so dass der BMI zwischen beiden Behandlungsgruppen bereits sechs Monate nach der Intervention keinen Unterschied mehr aufwies. Bez{\"u}glich der hormonellen Ver{\"a}nderung durch die Gewichtsreduktion wurde, wie erwartet, eine Auslenkung des endokrinen Systems beobachtet. 
Jedoch konnte kein Unterschied der untersuchten Hormone im Vergleich der beiden Behandlungsgruppen ausfindig gemacht werden. Im Verlauf der Gewichtsabnahme und der anschließenden Studienphasen zeigten sich tendenziell drei verschiedene Verlaufsmuster in den hormonellen Ver{\"a}nderungen. Nach einer zus{\"a}tzlichen Adjustierung auf den jeweiligen BMI des Untersuchungszeitpunktes konnte f{\"u}r die TSH-Spiegel (p<0,05), die Schilddr{\"u}senhormone (p<0,001) und f{\"u}r die IGF 1-Spiegel (p<0,001) eine {\"u}ber die Studienzeit anhaltende Ver{\"a}nderung festgestellt werden. Abschließend wurde behandlungsgruppenunabh{\"a}ngig untersucht, ob die Hormonspiegel nach Gewichtsreduktion oder ob die relative hormonelle Ver{\"a}nderung w{\"a}hrend der Gewichtsreduktion pr{\"a}diktiv f{\"u}r den Erfolg der Gewichterhaltungsphase ist. Hier fand sich f{\"u}r die Mehrzahl der hormonellen Parameter kein Effekt auf die Langzeitentwicklung der Gewichtszunahme. Jedoch konnte gezeigt werden, dass eine geringere Abnahme der 24h Urin-Metanephrin-Ausscheidung w{\"a}hrend der Gewichtsabnahmephase mit einem besseren Erfolg bez{\"u}glich des Gewichtserhalts {\"u}ber die achtzehnmonatige Studienzeit assoziiert war (standardisiertes Beta= -0,365; r2=0,133 p<0,01). Die anderen hormonellen Achsen zeigten keinen nachweislichen Effekt.}, language = {de} } @phdthesis{Leonhardt2017, author = {Leonhardt, Helmar}, title = {Chemotaxis, shape and adhesion dynamics of amoeboid cells studied by impedance fluctuations in open and confined spaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405016}, school = {Universit{\"a}t Potsdam}, pages = {98}, year = {2017}, abstract = {Die vorliegende Arbeit befasst sich mit elektrischen Impedanzmessungen von ameoboiden Zellen auf Mikroelektroden. 
Der Modellorganismus Dictyostelium discoideum zeigt unter der Bedingung des Nahrungsentzugs einen {\"U}bergang zum kollektiven Verhalten, bei dem sich chemotaktische Zellen zu einem multizellul{\"a}ren Aggregat zusammenschliessen. Wir zeigen, wie Impedanzaufnahmen {\"u}ber die Dynamik der Zell-substrat Adh{\"a}sion ein pr{\"a}zises Bild der Phasen der Aggregation liefern. Dar{\"u}ber hinaus zeigen wir zum ersten Mal systematische Einzelzellmessungen von Wildtyp-Zellen und vier Mutanten, die sich in der St{\"a}rke der Substratadh{\"a}sion unterscheiden. Wir zeichneten die projizierte Zellfl{\"a}che durch Zeitverlaufsmikroskopie auf und fanden eine Korrelation zwischen den quasi-periodischen Oszillationen in der Kinetik der projizierten Fl{\"a}che - der Zellform-Oszillation - und dem Langzeittrend des Impedanzsignals. Amoeboidale Motilit{\"a}t offenbart sich typischerweise durch einen Zyklus von Membranausst{\"u}lpung, Substratadh{\"a}sion, Vorw{\"a}rtsziehen des Zellk{\"o}rpers und Einziehen des hinteren Teils der Zelle. Dieser Motilit{\"a}tszyklus resultiert in quasi-periodischen Oszillationen der projizierten Zellfl{\"a}che und der Impedanz. In allen gemessenen Zelllinien wurden f{\"u}r diesen Zyklus {\"a}hnliche Periodendauern beobachtet trotz der Unterschiede in der Anhaftungsst{\"a}rke. Wir beobachteten, dass die St{\"a}rke der Zell-substrat Anhaftung die Impedanz stark beeinflusst, indem die Abweichungen vom Mittelwert (die Gr{\"o}sse der Fluktuationen) vergr{\"o}ssert sind bei Zellen, die die vom Zytoskelett generierten Kr{\"a}fte effektiv auf das Substrat {\"u}bertragen. Zum Beispiel sind bei talA- Zellen, in welchen das Actin verankernde Protein Talin fehlt, die Fluktuationen stark reduziert. 
Einzelzellkraft-Spektroskopie und Ergebnisse eines Abl{\"o}sungsassays, bei dem Adh{\"a}sionskraft gemessen wird indem Zellen einer Scherspannung ausgesetzt werden, best{\"a}tigen, dass die Gr{\"o}sse der Impedanz-fluktuationen ein korrektes Mass f{\"u}r die St{\"a}rke der Substratadh{\"a}sion ist. Schliesslich haben wir uns auch mit dem Einbau von Zell-substrat-Impedanz-Sensoren in mikro-fluidische Apparaturen befasst. Ein chip-basierter elektrischer Chemotaxis Assay wurde entwickelt, der die Geschwindigkeit chemotaktischer Zellen misst, welche entlang eines chemischen Konzentrationsgradienten {\"u}ber Mikroelektroden wandern.}, language = {en} } @phdthesis{Lenz2017, author = {Lenz, Susanne}, title = {Der konsularische Schutz}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406069}, school = {Universit{\"a}t Potsdam}, pages = {288}, year = {2017}, abstract = {Anl{\"a}sslich der Zunahme von Entf{\"u}hrungen deutscher Staatsangeh{\"o}riger im Ausland und des im Jahr 2009 ergangenen Urteils des Bundesverwaltungsgerichts zu diesem Thema legt die vorliegende Arbeit eine detaillierte und umfassende Analyse der Rechtsgrundlagen f{\"u}r die Gew{\"a}hrung konsularischen Schutzes durch die Auslandsvertretungen der Bundesrepublik Deutschland vor. Das erste Kapitel beinhaltet eine detaillierte Darstellung der sich aus dem V{\"o}lker-, Europa- und Verfassungsrecht sowie aus dem Konsulargesetz ergebenen staatlichen Handlungspflichten sowie m{\"o}glicher damit einhergehender Individualanspr{\"u}che auf die Aus{\"u}bung konsularischen Schutzes im Einzelfall. Im zweiten Kapitel werden die Voraussetzungen der Gew{\"a}hrung konsularischen Schutzes nach dem Konsulargesetz dargestellt. Den Schwerpunkt bildet hierbei die Bestimmung des Anwendungsbereiches des \S 5 Abs. 1 Satz 1 Konsulargesetz unter Ber{\"u}cksichtigung des Urteils des Bundesverwaltungsgerichtes vom 28.05.2009, welches nach Auffassung der Verfasserin den Anwendungsbereich dieser Norm verkennt. Bei \S 5 Abs. 
1 Satz 1 Konsulargesetz handelt es sich um eine besondere sozialhilferechtliche Norm außerhalb des SGB XII, welche die konsularische Hilfe allein in wirtschaftlichen Notlagen regelt. Das dritte Kapitel analysiert die bestehenden Regelungen {\"u}ber die Erstattung der im Rahmen der Gew{\"a}hrung konsularischen Schutzes entstandenen Kosten und erkl{\"a}rt deren Systematik. Ferner erfolgt ein Ausblick auf die k{\"u}nftigen Regelungen der Kostenerstattung nach dem Bundesgeb{\"u}hrengesetz sowie der damit einhergehenden Rechtsverordnung. Zum Abschluss werden die Ergebnisse anhand eines historischen Falles zusammengefasst sowie ein Gesetzesvorschlag vorgestellt, welcher die gefundenen Unklarheiten und Unstimmigkeiten im Konsulargesetz beheben kann.}, language = {de} } @phdthesis{Lacroix2017, author = {Lacroix, Andr{\'e}}, title = {Factors influencing the effectiveness of balance and resistance training in older adults}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411826}, school = {Universit{\"a}t Potsdam}, pages = {viii, 235}, year = {2017}, abstract = {Hintergrund und Ziele: Altersbedingte Kraft- und Gleichgewichtsverluste sind mit Funktionseinschr{\"a}nkungen und einem erh{\"o}hten Sturzrisiko assoziiert. Kraft- und Gleichgewichtstraining haben das Potenzial, das Gleichgewicht und die Maximalkraft/Schnellkraft von gesunden {\"a}lteren Menschen zu verbessern. Es ist jedoch noch nicht hinreichend untersucht, wie die Effektivit{\"a}t solcher {\"U}bungsprogramme von verschiedenen Faktoren beeinflusst wird. Hierzu geh{\"o}ren die Rolle der Rumpfmuskulatur, die Effekte von kombiniertem Kraft- und Gleichgewichtstraining sowie die Effekte der Trainingsanleitung. Die prim{\"a}ren Ziele dieser Dissertation bestehen daher in der {\"U}berpr{\"u}fung der Zusammenh{\"a}nge von Rumpfkraft und Gleichgewichtsvariablen und der Effekte von kombiniertem Kraft- und Gleichgewichtstraining auf ein breites Spektrum an intrinsischen Sturzrisikofaktoren bei {\"a}lteren Menschen. 
Ein wesentliches Ziel dieser Dissertation ist zudem die {\"U}berpr{\"u}fung der Auswirkungen von angeleitetem gegen{\"u}ber unangeleitetem Kraft- und/oder Gleichgewichtstraining auf Variablen des Gleichgewichts und der Maximal-/Schnellkraft bei {\"a}lteren Menschen. Methoden: Gesunde {\"a}ltere Erwachsene im Alter zwischen 63 und 80 Jahren wurden in einer Querschnittsstudie, einer L{\"a}ngsschnittstudie und einer Metaanalyse untersucht (Gruppenmittelwerte Meta-Analyse: 65.3-81.1 Jahre). Messungen des Gleichgewichts (statisches/dynamisches, proaktives, reaktives Gleichgewicht) wurden mittels klinischer (z. B. Romberg Test) und instrumentierter Tests (z. B. 10 Meter Gangtest inklusive elektrischer Erfassung von Gangparametern) durchgef{\"u}hrt. Die isometrische Maximalkraft der Rumpfmuskulatur wurde mit speziellen Rumpfkraft-Maschinen gemessen. F{\"u}r die {\"U}berpr{\"u}fung der dynamischen Maximal-/Schnellkraft der unteren Extremit{\"a}t wurden klinische Tests (z. B. Chair Stand Test) verwendet. Weiterhin wurde ein kombiniertes Kraft- und Gleichgewichtstraining durchgef{\"u}hrt, um trainingsbedingte Effekte auf Gleichgewicht und Maximal-/Schnellkraft sowie die Effekte der Trainingsanleitung bei {\"a}lteren Erwachsenen zu untersuchen. Ergebnisse: Die Ergebnisse zeigten signifikante Korrelationen zwischen Rumpfkraft und statischem sowie ausgew{\"a}hlten Parametern des dynamischen Gleichgewichts (0.42 ≤ r ≤ 0.57). Kombiniertes Kraft- und Gleichgewichtstraining verbesserte das statische/dynamische (z. B. Romberg Test, Ganggeschwindigkeit), proaktive (z. B. Timed Up und Go Test) und reaktive Gleichgewicht (z. B. Push and Release Test) sowie die Maximal-/Schnellkraft (z. B. Chair Stand Test) von gesunden {\"a}lteren Menschen (0.62 ≤ Cohen's d ≤ 2.86; alle p < 0.05). 
Angeleitetes Training f{\"u}hrte verglichen mit unangeleitetem Training zu gr{\"o}ßeren Effekten bei Gleichgewicht und Maximal-/Schnellkraft [L{\"a}ngsschnittstudie: Effekte in der angeleiteten Gruppe 0.26 ≤ d ≤ 2.86, Effekte in der unangeleiteten Gruppe 0.06 ≤ d ≤ 2.30; Metaanalyse: alle Standardisierte Mittelwertdifferenzen (SMDbs) zugunsten der angeleiteten Programme 0.24-0.53]. Die Metaanalyse zeigte zudem gr{\"o}ßere Effekte zugunsten der angeleiteten Programme, wenn diese mit komplett unbeaufsichtigten Programmen verglichen wurden (0.28 ≤ SMDbs ≤ 1.24). Diese Effekte zugunsten der angeleiteten Interventionen wurden jedoch abgeschw{\"a}cht, wenn sie mit unangeleiteten Interventionen verglichen wurden, die wenige zus{\"a}tzliche angeleitete Einheiten integrierten (-0.06 ≤ SMDbs ≤ 0.41). Schlussfolgerungen: Eine Aufnahme von Rumpfkraft{\"u}bungen in sturzpr{\"a}ventive Trainingsprogramme f{\"u}r {\"a}ltere Menschen k{\"o}nnte die Verbesserung von Gleichgewichtsparametern positiv beeinflussen. Die positiven Effekte auf eine Vielzahl wichtiger intrinsischer Sturzrisikofaktoren (z. B. Gleichgewichts-, Kraftdefizite) implizieren, dass besonders die Kombination aus Kraft- und Gleichgewichtstraining eine durchf{\"u}hrbare und effektive sturzpr{\"a}ventive Intervention ist. Aufgrund gr{\"o}ßerer Effekte von angeleitetem im Vergleich zu unangeleitetem Training sollten angeleitete Einheiten in sturzpr{\"a}ventive {\"U}bungsprogramme f{\"u}r {\"a}ltere Erwachsene integriert werden.}, language = {en} } @phdthesis{Kunstmann2017, author = {Kunstmann, Ruth Sonja}, title = {Design of a high-affinity carbohydrate binding protein}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403458}, school = {Universit{\"a}t Potsdam}, pages = {XI, 169}, year = {2017}, abstract = {Kohlenhydrat-Protein Interaktionen sind in der Natur weitverbreitet. 
Sie stellen die Grundlage f{\"u}r viele biologische Prozesse dar, wie zum Beispiel Immunantworten, Wundheilung und Infektionsprozesse von pathogenen Viren oder Bakterien mit einem Wirt wie dem Menschen. Neben der Infektion von Menschen k{\"o}nnen aber auch Bakterien selbst durch so genannte Bakteriophagen infiziert werden, welche f{\"u}r den Menschen ungef{\"a}hrlich sind. Diese Infektion involviert die spezifische Erkennung der pathogenen Bakterien, die Vermehrung der Bakteriophagen und schließlich die Abt{\"o}tung der Bakterien. Dabei k{\"o}nnen die Mechanismen der spezifischen Erkennung genutzt werden, pathogene Bakterien auf Lebensmitteln zu detektieren oder die Diagnose von Infektionen zu vereinfachen. Die spezifische Erkennung von Enteritis-erzeugenden Bakterien wie Escherichia coli, Salmonella spp. oder Shigella flexneri durch Bakteriophagen der Familie der Podoviridae erfolgt {\"u}ber die Bindung eines sogenannten tailspike proteins des Bakteriophagen an das aus Kohlenhydraten-bestehende O-Antigen des Lipopolysaccharids von Gram-negativen Bakterien. Das tailspike protein spaltet das O-Antigen um den Bakteriophage an die Oberfl{\"a}che des Bakteriums zu f{\"u}hren, damit eine Infektion stattfinden kann. Die Affinit{\"a}t des tailspike proteins zum O-Antigen ist dabei sehr niedrig, um nach Spaltung des O-Antigens das Spaltungsprodukt zu l{\"o}sen und wiederum neues Substrat zu binden. In dieser Arbeit wurde ein tailspike protein des Bakteriophagen Sf6 verwendet (Sf6 TSP), das spezifisch an das O-Antigen von Shigella flexneri Y bindet. Eine inaktive Variante des Sf6 TSP wurde verwendet um einen hoch-affin bindenden Sensor f{\"u}r pathogene Shigella zu entwickeln. Der Shigella-Sensor wurde durch Kopplung von unterschiedlichen Proteinmutanten mit einem fluoreszierendem Molek{\"u}l erhalten. Dabei zeigte eine dieser Mutanten bei Bindung von Shigella O-Antigen ein Fluoreszenz-Signal im Bereich des sichtbaren Lichts. 
Molekulardynamische Simulationen wurde anhand der erzeugten Proteinmutanten als Methode zum rationalen Design von hoch-affin Kohlenhydrat-bindenden Proteinen getestet und die resultierenden Affinit{\"a}tsvorhersagen wurden {\"u}ber Oberfl{\"a}chenplasmonresonanz-Experimente {\"u}berpr{\"u}ft. Aus weiteren experimentellen und simulierten Daten konnten schließlich Schlussfolgerungen {\"u}ber die Urspr{\"u}nge von Kohlenhydrat-Protein Interaktionen gezogen werden, die eine Einsicht {\"u}ber den Einfluss von Wasser in diesem Bindungsprozess lieferten.}, language = {en} } @phdthesis{Krueger2017, author = {Kr{\"u}ger, Stefanie}, title = {Seidenbasierte anorganische Funktionsmaterialien}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404635}, school = {Universit{\"a}t Potsdam}, pages = {143}, year = {2017}, abstract = {In der vorliegenden Arbeit konnten erfolgreich zwei unterschiedliche Hybridmaterialien (HM) {\"u}ber die Sol-Gel-Methode synthetisiert werden. Bei den HM handelt es sich um Monolithe mit einem Durchmesser von bis zu 4,5 cm. Das erste HM besteht aus Titandioxid und Bombyx mori Seide und wird als TS bezeichnet, w{\"a}hrend das zweite weniger Seide und zus{\"a}tzlich Polyethylenoxid (PEO) enth{\"a}lt und daher als TPS abgek{\"u}rzt wird. Einige der HM wurden nach der Synthese in eine w{\"a}ssrige Tetrachloridogolds{\"a}ure-L{\"o}sung getaucht, wodurch sich auf der Oberfl{\"a}che Goldnanopartikel gebildet haben. Die Materialien wurden mittels Elektronenmikroskopie, energiedispersiver R{\"o}ntgenspektroskopie, Ramanspektroskopie sowie R{\"o}ntgenpulverdiffraktometrie charakterisiert. Die Ergebnisse zeigen, dass beide HM aus etwa 5 nm großen, sph{\"a}rischen Titandioxidnanopartikeln aufgebaut sind, die prim{\"a}r aus Anatas und zu einem geringen Anteil aus Brookit bestehen. Die Goldnanopartikel bei TPS_Au waren gr{\"o}ßer und polydisperser als die Goldnanopartikel auf dem TS_Au HM. 
Dar{\"u}ber hinaus sind die Goldnanopartikel im TS HM tiefer in das Material eingedrungen als beim TPS HM. Die weiterf{\"u}hrende Analyse der HM mittels Elementaranalyse und thermogravimetrischer Analyse ergab f{\"u}r TPS einen geringeren Anteil an organischen Bestandteilen im HM als f{\"u}r TS, obwohl f{\"u}r beide Synthesen die gleiche Masse an organischen Materialien eingesetzt wurde. Es wird vermutet, dass das PEO w{\"a}hrend der Synthese teilweise wieder aus dem Material herausgewaschen wird. Diese Theorie korreliert mit den Ergebnissen aus der Stickstoffsorption und der Quecksilberporosimetrie, die f{\"u}r das TPS HM eine h{\"o}here Oberfl{\"a}che als f{\"u}r das TS HM anzeigten. Die Variation einiger Syntheseparameter wie die Menge an Seide und PEO oder die Zusammensetzung der Titandioxidvorl{\"a}uferl{\"o}sung hatte einen großen Einfluss auf die synthetisierten HM. W{\"a}hrend unterschiedliche Mengen an PEO die Gr{\"o}ße des HM beeinflussten, konnte ohne Seide kein HM in einer {\"a}hnlichen Gr{\"o}ße hergestellt werden. Die Bildung der HM wird stark von der Zusammensetzung der Titandioxidvorl{\"a}uferl{\"o}sung beeinflusst. Eine Ver{\"a}nderung f{\"u}hrte daher nur selten zur Bildung eines homogenen HM. Die in dieser Arbeit synthetisierten HM wurden als Photokatalysatoren f{\"u}r die Wasserspaltung und den Abbau von Methylenblau eingesetzt. Bei der photokatalytischen Wasserspaltung wurde zun{\"a}chst der Einfluss unterschiedlicher Goldkonzentrationen beim TPS HM auf die Wasserstoffausbeute untersucht. Die besten Ergebnisse wurden bei einer Menge von 2,5 mg Tetrachloridogolds{\"a}ure erhalten. Dar{\"u}ber hinaus wurde gezeigt, dass mit dem TPS HM eine deutlich h{\"o}here Menge an Wasserstoff gewonnen werden konnte als mit dem TS HM. 
Die Ursachen f{\"u}r die schlechtere Aktivit{\"a}t werden in der geringeren spezifischen Oberfl{\"a}che, der unterschiedlichen Porenstruktur, dem h{\"o}heren Anteil an Seide und besonders in der geringeren Gr{\"o}ße und h{\"o}heren Eindringtiefe der Goldnanopartikel vermutet. Dar{\"u}ber hinaus konnte mit einem h{\"o}heren UV-Anteil in der Lichtquelle sowie durch die Zugabe von Ethanol als Opferreagenz eine Zunahme der Wasserstoffausbeute erzielt werden. Bei dem Methylenblauabbau wurde f{\"u}r beide HM zun{\"a}chst nur eine Adsorption des Methylenblaus beobachtet. Nach der Zugabe von Wasserstoffperoxid konnte nach 8 h bereits eine fast vollst{\"a}ndige Oxidation des Methylenblaus unter sichtbarem Licht beobachtet werden. Die Ursache f{\"u}r die etwas h{\"o}here Aktivit{\"a}t von TPS gegen{\"u}ber TS wird in der unterschiedlichen Porenstruktur und dem h{\"o}heren Anteil an Seide im TS HM vermutet. Insgesamt zeigen beide HM eine gute photokatalytische Aktivit{\"a}t f{\"u}r den Abbau von Methylenblau im Vergleich zu den erhaltenen Werten aus der Literatur.}, language = {de} } @phdthesis{Korzeniowska2017, author = {Korzeniowska, Karolina}, title = {Object-based image analysis for detecting landforms diagnostic of natural hazards}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402240}, school = {Universit{\"a}t Potsdam}, pages = {XV, 139}, year = {2017}, abstract = {Natural and potentially hazardous events occur on the Earth's surface every day. The most destructive of these processes must be monitored, because they may cause loss of lives, infrastructure, and natural resources, or have a negative effect on the environment. A variety of remote sensing technologies allow the recording of data to detect these processes in the first place, partly based on the diagnostic landforms that they form. To perform this effectively, automatic methods are desirable. 
Universal detection of natural hazards is challenging due to their differences in spatial impacts, timing and longevity of consequences, and the spatial resolution of remote-sensing data. Previous studies have reported that topographic metrics such as roughness, which can be captured from digital elevation data, can reveal landforms diagnostic of natural hazards, such as gullies, dunes, lava fields, landslides and snow avalanches, as these landforms tend to be more heterogeneous than the surrounding landscape. A single roughness metric is often limited in such detections; however, a more complex approach that exploits the spatial relation and the location of objects, such as object-based image analysis (OBIA), is desirable. In this thesis, I propose a topographic roughness measure derived from an airborne laser scanning (ALS) digital terrain model (DTM) and discuss its performance in detecting landforms principally diagnostic of natural hazards. I further develop OBIA-based algorithms for the detection of snow avalanches using near-infrared (NIR) aerial images, and the size (changes) of mountain lakes using LANDSAT satellite images. I quantitatively test and document how the level of difficulty in detecting these very challenging landforms depends on the input data resolution, the derivatives that could be evaluated from images and DTMs, the size, shape and complexity of landforms, and the capabilities of obtaining the information in the data. I demonstrate that surface roughness is a promising metric for detecting different landforms in diverse environments, and that OBIA assists significantly in detecting parts of lakes and snow avalanches that may not be correctly assigned by applying only the thresholding of spectral properties of data and their derivatives. The curvature-based surface roughness parameter allows the detection of gullies, dunes, lava fields and landslides with a user's accuracy of 0.63, 0.21, 0.53, and 0.45, respectively. 
The OBIA algorithms for detecting lakes and snow avalanches obtained user's accuracy of 0.98, and 0.78, respectively. Most of the analysed landforms constituted only a small part of the entire dataset, and therefore the user's accuracy is the most appropriate performance measure that should be given in such a classification, because it tells how many automatically-extracted pixels in fact represent the object that one wants to classify, and its calculation does not take the second (background) class into account. One advantage of the proposed roughness parameter is that it allows the extraction of the heterogeneity of the surface without the need for data detrending. The OBIA approach is novel in that it allows the classification of lakes regardless of the physical state of their water, and also allows the separation of frozen lakes from glaciers that have very similar water indices used in purely optical remote sensing applications. The algorithm proposed for snow avalanches allows the detection of release zones, tracks, and deposition zones by verifying the snow heterogeneity based on a roughness metric evaluated from a water index, and by analysing the local relation of segments with their neighbouring objects. This algorithm contains few steps, which allows for the simultaneous classification of avalanches that occur on diverse mountain slopes and differ in size and shape. This thesis contributes to natural hazard research as it provides automatic solutions to tracking six different landforms that are diagnostic of natural hazards over large regions. 
This is a step toward delineating areas susceptible to the processes producing these landforms and the improvement of hazard maps.}, language = {en} } @phdthesis{Kirsch2017, author = {Kirsch, Fabian}, title = {Intrapersonal risk factors of aggressive behavior in childhood}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407369}, school = {Universit{\"a}t Potsdam}, pages = {X, 182}, year = {2017}, abstract = {BACKGROUND: Aggressive behavior at an early age is linked to a broad range of psychosocial problems in later life. That is why risk factors of the occurrence and the development of aggression have been examined for a long time in psychological science. The present doctoral dissertation aims to expand this research by investigating risk factors in three intrapersonal domains using the prominent social-information processing approach by Crick and Dodge (1994) as a framework model. Anger regulation was examined as an affective, theory of mind as a cognitive, and physical attractiveness as an appearance-related developmental factor of aggression in middle childhood. An additional goal of this work was to develop and validate a behavioral observation assessment of anger regulation as past research lacked in ecologically valid measures of anger regulation that are applicable for longitudinal studies. METHODS: Three empirical studies address the aforementioned intrapersonal risk factors. In each study, data from the PIER-project were used, a three-wave-longitudinal study covering three years with a total sample size of 1,657 children in the age between 6 and 11 years (at the first measurement point). The central constructs were assessed via teacher-reports (aggression), behavioral observation (anger regulation), computer tests (theory of mind), and independent ratings (physical attractiveness). The predictive value of each proposed risk factor for the development of aggressive behavior was examined via structural equation modeling. 
RESULTS AND CONCLUSION: The newly developed behavioral observation measure was found to be a reliable and valid tool to assess anger regulation in middle childhood, but limited in capturing a full range of relevant regulation strategies. That might be the reason, why maladaptive anger regulation was not found to function as a risk factor of subsequent aggressive behavior. However, children's deficits in theory of mind and a low level in physical attractiveness significantly predicted later aggression. Problematic peer relationships were identified as underlying the link between low attractiveness and aggression. Thus, fostering children's skills in theory of mind and their ability to call existing beliefs about the nature of more versus less attractive individuals into question may be important starting points for the prevention of aggressive behavior in middle childhood.}, language = {en} } @phdthesis{KianiAlibagheri2017, author = {Kiani Alibagheri, Bahareh}, title = {On structural properties of magnetosome chains}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398849}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 117}, year = {2017}, abstract = {Magnetotaktische Bakterien besitzen eine intrazellul{\"a}re Struktur, die Magnetosomenkette genannt wird. Magnetosomenketten enthalten Nanopartikel von Eisenkristallen, die von einer Membran umschlossen und entlang eines Zytoskelettfilaments ausgerichtet sind. Dank der Magnetosomenkette ist es magnetotaktischen Bakterien m{\"o}glich sich in Magnetfeldern auszurichten und entlang magnetischer Feldlinien zu schwimmen. Die ausf{\"u}hrliche Untersuchung der strukturellen Eigenschaften der Magnetosomenkette in magnetotaktischen Bakterien sind von grundlegendem wissenschaftlichen Interesse, weil sie Einblicke in die Anordnung des Zytoskeletts von Bakterien erlauben. 
In dieser Studie haben wir ein neues theoretisches Modell entwickelt, das sich dazu eignet, die strukturellen Eigenschaften der Magnetosomenketten in magnetotaktischen Bakterien zu erforschen. Zuerst wenden wir uns der Biegesteifigkeit von Magnetosomenketten zu, die von zwei Faktoren beeinflusst wird: Die magnetische Wechselwirkung der Magnetosomenpartikel und der Biegesteifigkeit des Zytoskelettfilaments auf welchem die Magnetosome verankert sind. Unsere Analyse zeigt, dass sich die lineare Konfiguration von Magnetosomenpartikeln ohne die Stabilisierung durch das Zytoskelett zu einer ringf{\"o}rmigen Struktur biegen w{\"u}rde, die kein magnetisches Moment aufweist und daher nicht die Funktion eines Kompass in der zellul{\"a}ren Navigation einnehmen k{\"o}nnte. Wir schlussfolgern, dass das Zytoskelettfilament eine stabilisierende Wirkung auf die lineare Konfiguration hat und eine ringf{\"o}rmige Anordnung verhindert. Wir untersuchen weiter die Gleichgewichtskonfiguration der Magnetosomenpartikel in einer linearen Kette und in einer geschlossenen ringf{\"o}rmigen Struktur. Dabei beobachteten wir ebenfalls, dass f{\"u}r eine stabile lineare Anordnung eine Bindung an ein Zytoskelettfilament notwendig ist. In einem externen magnetischen Feld wird die Stabilit{\"a}t der Magnetosomenketten durch die Dipol-Dipol-Wechselwirkung, {\"u}ber die Steifheit und die Bindungsenergie der Proteinstruktur, die die Partikel des Magnetosomen mit dem Filament verbinden, erreicht. Durch Beobachtungen w{\"a}hrend und nach der Behandlung einer Magnetosomenkette mit einem externen magnetischen Feld, l{\"a}sst sich begr{\"u}nden, dass die Stabilisierung von Magnetosomenketten durch Zytoskelettfilamente {\"u}ber proteinhaltige Bindeglieder und die dynamischen Eigenschaften dieser Strukturen realisiert wird. Abschließend wenden wir unser Modell bei der Untersuchung von ferromagnetischen Resonanz-Spektren von Magnetosomenketten in einzelnen Zellen von magnetotaktischen Bakterien an. 
Wir erforschen den Effekt der magnetokristallinen Anisotropie in ihrer dreifach-Symmetrie, die in ferromagnetischen Resonanz-Spektren beobachtet wurden und die Besonderheit von verschiedenen Spektren, die bei Mutanten dieser Bakterien auftreten.}, language = {en} } @phdthesis{Kellermann2017, author = {Kellermann, Patric}, title = {Assessing natural risks for railway infrastructure and transportation in Austria}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103877}, school = {Universit{\"a}t Potsdam}, pages = {x, 113}, year = {2017}, abstract = {Natural hazards can have serious societal and economic impacts. Worldwide, around one third of economic losses due to natural hazards are attributable to floods. The majority of natural hazards are triggered by weather-related extremes such as heavy precipitation, rapid snow melt, or extreme temperatures. Some of them, and in particular floods, are expected to further increase in terms of frequency and/or intensity in the coming decades due to the impacts of climate change. In this context, the European Alps areas are constantly disclosed as being particularly sensitive. In order to enhance the resilience of societies to natural hazards, risk assessments are substantial as they can deliver comprehensive risk information to be used as a basis for effective and sustainable decision-making in natural hazards management. So far, current assessment approaches mostly focus on single societal or economic sectors - e.g. flood damage models largely concentrate on private-sector housing - and other important sectors, such as the transport infrastructure sector, are widely neglected. However, transport infrastructure considerably contributes to economic and societal welfare, e.g. by ensuring mobility of people and goods. In Austria, for example, the national railway network is essential for the European transit of passengers and freights as well as for the development of the complex Alpine topography. 
Moreover, a number of recent experiences show that railway infrastructure and transportation is highly vulnerable to natural hazards. As a consequence, the Austrian Federal Railways had to cope with economic losses on the scale of several million euros as a result of flooding and other alpine hazards. The motivation of this thesis is to contribute to filling the gap of knowledge about damage to railway infrastructure caused by natural hazards by providing new risk information for actors and stakeholders involved in the risk management of railway transportation. Hence, in order to support the decision-making towards a more effective and sustainable risk management, the following two shortcomings in natural risks research are approached: i) the lack of dedicated models to estimate flood damage to railway infrastructure, and ii) the scarcity of insights into possible climate change impacts on the frequency of extreme weather events with focus on future implications for railway transportation in Austria. With regard to flood impacts to railway infrastructure, the empirically derived damage model Railway Infrastructure Loss (RAIL) proved expedient to reliably estimate both structural flood damage at exposed track sections of the Northern Railway and resulting repair cost. The results show that the RAIL model is capable of identifying flood risk hot spots along the railway network and, thus, facilitates the targeted planning and implementation of (technical) risk reduction measures. However, the findings of this study also show that the development and validation of flood damage models for railway infrastructure is generally constrained by the continuing lack of detailed event and damage data. 
In order to provide flood risk information on the large scale to support strategic flood risk management, the RAIL model was applied for the Austrian Mur River catchment using three different hydraulic scenarios as input as well as considering an increased risk aversion of the railway operator. Results indicate that the model is able to deliver comprehensive risk information also on the catchment level. It is furthermore demonstrated that the aspect of risk aversion can have marked influence on flood damage estimates for the study area and, hence, should be considered with regard to the development of risk management strategies. Looking at the results of the investigation on future frequencies of extreme weather events jeopardizing railway infrastructure and transportation in Austria, it appears that an increase in intense rainfall events and heat waves has to be expected, whereas heavy snowfall and cold days are likely to decrease. Furthermore, results indicate that frequencies of extremes are rather sensitive to changes of the underlying thresholds. It thus emphasizes the importance to carefully define, validate, and — if needed — to adapt the thresholds that are used to detect and forecast meteorological extremes. For this, continuous and standardized documentation of damaging events and near-misses is a prerequisite. 
Overall, the findings of the research presented in this thesis agree on the necessity to improve event and damage documentation procedures in order to enable the acquisition of comprehensive and reliable risk information via risk assessments and, thus, support strategic natural hazards management of railway infrastructure and transportation.}, language = {en} } @phdthesis{Kayser2017, author = {Kayser, Markus}, title = {Wechselwirkung der atmosph{\"a}rischen Grenzschicht mit synoptisch-skaligen Prozessen w{\"a}hrend der N-ICE2015 Kampagne}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411124}, school = {Universit{\"a}t Potsdam}, pages = {147}, year = {2017}, abstract = {Die Arktis erw{\"a}rmt sich schneller als der Rest der Erde. Die Auswirkungen manifestieren sich unter Anderem in einer verst{\"a}rkten Erw{\"a}rmung der arktischen Grenzschicht. Diese Arbeit befasst sich mit Wechselwirkungen zwischen synoptischen Zyklonen und der arktischen Atmosph{\"a}re auf lokalen bis {\"u}berregionalen Skalen. Ausgangspunkt daf{\"u}r sind Messdaten und Modellsimulationen f{\"u}r den Zeitraum der N-ICE2015 Expedition, die von Anfang Januar bis Ende Juni 2015 im arktischen Nordatlantiksektor stattgefunden hat. Anhand von Radiosondenmessungen lassen sich Auswirkungen von synoptischen Zyklonen am deutlichsten im Winter erkennen, da sie durch die Advektion warmer und feuchter Luftmassen in die Arktis den Zustand der Atmosph{\"a}re von einem strahlungs-klaren in einen strahlungs-opaken {\"a}ndern. Obwohl dieser scharfe Kontrast nur im Winter existiert, zeigt die Analyse, dass der integrierte Wasserdampf als Indikator f{\"u}r die Advektion von Luftmassen aus niedrigen Breiten in die Arktis auch im Fr{\"u}hjahr geeignet ist. Neben der Advektion von Luftmassen wird der Einfluss der Zyklonen auf die statische Stabilit{\"a}t charakterisiert. 
Beim Vergleich der N-ICE2015 Beobachtungen mit der SHEBA Kampagne (1997/1998), die {\"u}ber dickerem Eis stattfand, finden sich trotz der unterschiedlichen Meereisregime {\"A}hnlichkeiten in der statischen Stabilit{\"a}t der Atmosph{\"a}re. Die beobachteten Differenzen in der Stabilit{\"a}t lassen sich auf Unterschiede in der synoptischen Aktivit{\"a}t zur{\"u}ckf{\"u}hren. Dies l{\"a}sst vermuten, dass die d{\"u}nnere Eisdecke auf saisonalen Zeitskalen nur einen geringen Einfluss auf die thermodynamische Struktur der arktischen Troposph{\"a}re besitzt, solange eine dicke Schneeschicht sie bedeckt. Ein weiterer Vergleich mit den parallel zur N-ICE2015 Kampagne gestarteten Radiosonden der AWIPEV Station in Ny-{\AA}lesund, Spitzbergen, macht deutlich, dass die synoptischen Zyklonen oberhalb der Orographie auf saisonalen Zeitskalen das Wettergeschehen bestimmen. Des Weiteren werden f{\"u}r Februar 2015 die Auswirkungen von in der Vertikalen variiertem Nudging auf die Entwicklung der Zyklonen am Beispiel des hydrostatischen regionalen Klimamodells HIRHAM5 untersucht. Es zeigt sich, dass die Unterschiede zwischen den acht Modellsimulationen mit abnehmender Anzahl der genudgten Level zunehmen. Die gr{\"o}ßten Differenzen resultieren vornehmlich aus dem zeitlichen Versatz der Entwicklung synoptischer Zyklonen. Zur Korrektur des Zeitversatzes der Zykloneninitiierung gen{\"u}gt es bereits, Nudging in den untersten 250 m der Troposph{\"a}re anzuwenden. Daneben findet sich zwischen den genudgten HIRHAM5-Simulationen und den in situ Messungen der gleiche positive Temperaturbias, den auch ERA-Interim besitzt. Das freie HIRHAM hingegen reproduziert das positive Ende der N-ICE2015 Temperaturverteilung gut, besitzt aber einen starken negativen Bias, der sehr wahrscheinlich aus einer Untersch{\"a}tzung des Feuchtegehalts resultiert. 
Am Beispiel einer Zyklone wird gezeigt, dass Nudging Einfluss auf die Lage der H{\"o}hentiefs besitzt, die ihrerseits die Zyklonenentwicklung am Boden beeinflussen. Im Weiteren wird mittels eines f{\"u}r kleine Ensemblegr{\"o}ßen geeigneten Varianzmaßes eine statistische Einsch{\"a}tzung der Wirkung des Nudgings auf die Vertikale getroffen. Es wird festgestellt, dass die {\"A}hnlichkeit der Modellsimulationen in der unteren Troposph{\"a}re generell h{\"o}her ist als dar{\"u}ber und in 500 hPa ein lokales Minimum besitzt. Im letzten Teil der Analyse wird die Wechselwirkung der oberen und unteren Stratosph{\"a}re anhand zuvor betrachteter Zyklonen mit Daten der ERA-Interim Reanalyse untersucht. Lage und Ausrichtung des Polarwirbels erzeugten ab Anfang Februar 2015 eine ungew{\"o}hnlich große Meridionalkomponente des Tropopausenjets, die Zugbahnen in die zentrale Arktis beg{\"u}nstigte. Am Beispiel einer Zyklone wird die {\"U}bereinstimmung der synoptischen Entwicklung mit den theoretischen Annahmen {\"u}ber den abw{\"a}rts gerichteten Einfluss der Stratosph{\"a}re auf die Troposph{\"a}re hervorgehoben. Dabei spielt die nicht-lineare Wechselwirkung zwischen der Orographie Gr{\"o}nlands, einer Intrusion stratosph{\"a}rischer Luft in die Troposph{\"a}re sowie einer in Richtung Arktis propagierenden Rossby-Welle eine tragende Rolle. Als Indikator dieser Wechselwirkung werden horizontale Signaturen aus abwechselnd aufsteigender und absinkender Luft innerhalb der Troposph{\"a}re identifiziert.}, language = {de} } @phdthesis{Kasch2017, author = {Kasch, Juliane}, title = {Impact of maternal high-fat consumption on offspring exercise performance, skeletal muscle energy metabolism, and obesity susceptibility}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409703}, school = {Universit{\"a}t Potsdam}, pages = {XII, 95, XXV}, year = {2017}, abstract = {Background: Obesity is thought to be the consequence of an unhealthy nutrition and a lack of physical activity. 
Although the resulting metabolic alterations such as impaired glucose homeostasis and insulin sensitivity can usually be improved by physical activity, some obese patients fail to enhance skeletal muscle metabolic health with exercise training. Since this might be largely heritable, maternal nutrition during pregnancy and lactation is hypothesized to impair offspring skeletal muscle physiology. Objectives: This PhD thesis aims to investigate the consequences of maternal high-fat diet (mHFD) consumption on offspring skeletal muscle physiology and exercise performance. We could show that maternal high-fat diet during gestation and lactation decreases the offspring's training efficiency and endurance performance by influencing the epigenetic profile of their skeletal muscle and altering the adaptation to an acute exercise bout, which in long-term, increases offspring obesity susceptibility. Experimental setup: To investigate this issue in detail, we conducted several studies with a similar maternal feeding regime. Dams (C57BL/6J) were either fed a low-fat diet (LFD; 10 energy\% from fat) or high-fat diet (HFD; 40 energy\% from fat) during pregnancy and lactation. After weaning, male offspring of both maternal groups were switched to a LFD, on which they remained until sacrifice in week 6, 15 or 25. In one study, LFD feeding was followed by HFD provision from week 15 until week 25 to elucidate the effects on offspring obesity susceptibility. In week 7, all mice were randomly allocated to a sedentary group (without running wheel) or an exercised group (with running wheel for voluntary exercise training). Additionally, treadmill endurance tests were conducted to investigate training performance and efficiency. In order to uncover regulatory mechanisms, each study was combined with a specific analytical setup, such as whole genome microarray analysis, gene and protein expression analysis, DNA methylation analyses, and enzyme activity assays. 
Results: mHFD offspring displayed a reduced training efficiency and endurance capacity. This was not due to an altered skeletal muscle phenotype with changes in fiber size, number, and type. DNA methylation measurements in 6 week old offspring showed a hypomethylation of the Nr4a1 gene in mHFD offspring leading to an increased gene expression. Since Nr4a1 plays an important role in the regulation of skeletal muscle energy metabolism and early exercise adaptation, this could affect offspring training efficiency and exercise performance in later life. Investigation of the acute response to exercise showed that mHFD offspring displayed a reduced gene expression of vascularization markers (Hif1a, Vegfb, etc.) pointing towards a reduced angiogenesis which could possibly contribute to their reduced endurance capacity. Furthermore, an impaired glucose utilization of skeletal muscle during the acute exercise bout by an impaired skeletal muscle glucose handling was evidenced by higher blood glucose levels, lower GLUT4 translocation and diminished Lactate dehydrogenase activity in mHFD offspring immediately after the endurance test. This points towards a disturbed use of glucose as a substrate during endurance exercise. Prolonged HFD feeding during adulthood increases offspring fat mass gain in mHFD offspring compared to offspring from low-fat fed mothers and also reduces their insulin sensitivity pointing towards a higher obesity and diabetes susceptibility despite exercise training. Consequently, mHFD reduces offspring responsiveness to the beneficial effects of voluntary exercise training. Conclusion: The results of this PhD thesis demonstrate that mHFD consumption impairs the offspring's training efficiency and endurance capacity, and reduces the beneficial effects of exercise on the development of diet-induced obesity and insulin resistance in the offspring. 
This might be due to changes in skeletal muscle epigenetic profile and/or an impaired skeletal muscle angiogenesis and glucose utilization during an acute exercise bout, which could contribute to a disturbed adaptive response to exercise training.}, language = {en} } @phdthesis{Kaliga2017, author = {Kaliga, Sarah Ninette}, title = {Eine Frage der Zeit. Wie Einfl{\"u}sse individueller Merkmale auf Einkommen bei Frauen {\"u}ber ihre famili{\"a}ren Verpflichtungen vermittelt werden}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407353}, school = {Universit{\"a}t Potsdam}, pages = {259}, year = {2017}, abstract = {Die vorliegende Arbeit mit dem Titel „Eine Frage der Zeit. Wie Einfl{\"u}sse individueller Merkmale auf Einkommen bei Frauen {\"u}ber ihre famili{\"a}ren Verpflichtungen vermittelt werden" geht der Frage der Heterogenit{\"a}t bei weiblichen Einkommensergebnissen nach. Dabei steht die Thematik der individuellen Investitionen in die famili{\"a}re Arbeit als erkl{\"a}render Faktor im Vordergrund und es wird der Frage nachgegangen, warum die einen Frauen viele und andere weniger h{\"a}usliche Verpflichtungen {\"u}bernehmen. Hierf{\"u}r werden das individuelle Humankapital der Frauen, ihre Werteorientierungen und individuelle berufliche Motivationen aus der Jugendzeit und im Erwachsenenalter herangezogen. Die analysierten Daten (Daten der LifE-Studie) repr{\"a}sentieren eine Langzeitperspektive vom 16. bis zum 45. Lebensjahr der befragten Frauen. Zusammenfassend kann im Ergebnis gezeigt werden, dass ein Effekt famili{\"a}rer Verpflichtungen auf Einkommensergebnisse bei Frauen im fr{\"u}hen und mittleren Erwachsenenalter als Zeiteffekt {\"u}ber die investierte Erwerbsarbeitszeit vermittelt wird. Die Relevanz privater Routinearbeiten f{\"u}r Berufserfolge von Frauen und insbesondere M{\"u}ttern stellt somit eine Frage der Zeit dar. 
Weiterhin kann f{\"u}r individuelle Einfl{\"u}sse auf Einkommen bei Frauen gezeigt werden, dass h{\"o}here zeitliche Investitionen in den Beruf von Frauen mit hohem Bildungsniveau als indirect-only-Mediation nur {\"u}ber die Umverteilung h{\"a}uslicher Arbeiten erkl{\"a}rbar werden. Frauen sind demnach zwar Gewinnerinnen der Bildungsexpansion. Die Bildungsexpansion stellt jedoch auch die Geschichte der Entstehung eines Vereinbarkeitskonflikts f{\"u}r eben diese Frauen dar, weil die bis heute virulenten Beharrungskr{\"a}fte hinsichtlich der Frauen zugeschriebenen famili{\"a}ren Verpflichtungen mit ihren gestiegenen beruflichen Erwartungen und Chancen kollidieren. Die Arbeit leistet in ihren Analyseresultaten einen wichtigen Beitrag zur Erkl{\"a}rung heterogener Investitionen von Frauen in den Beruf und ihrer Einkommensergebnisse aus dem Privaten heraus.}, language = {de} } @phdthesis{Jordan2017, author = {Jordan, Thomas}, title = {CxNy-materials from supramolecular precursors for "All-Carbon" composite materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398855}, school = {Universit{\"a}t Potsdam}, pages = {157}, year = {2017}, abstract = {Among modern functional materials, the class of nitrogen-containing carbons combines non-toxicity and sustainability with outstanding properties. The versatility of this materials class is based on the opportunity to tune electronic and catalytic properties via the nitrogen content and -motifs: This ranges from the electronically conducting N-doped carbon, where few carbon atoms in the graphitic lattice are substituted by nitrogen, to the organic semiconductor graphitic carbon nitride (g-C₃N₄), with a structure based on tri-s-triazine units. In general, composites can reveal outstanding catalytic properties due to synergistic behavior, e.g. the formation of electronic heterojunctions. In this thesis, the formation of an "all-carbon" heterojunction was targeted, i.e. 
differences in the electronic properties of the single components were achieved by the introduction of different nitrogen motifs into the carbon lattice. Such composites are promising as metal-free catalysts for the photocatalytic water splitting. Here, hydrogen can be generated from water by light irradiation with the use of a photocatalyst. As first part of the heterojunction, the organic semiconductor g-C₃N₄ was employed, because of its suitable band structure for photocatalytic water splitting, high stability and non-toxicity. The second part was chosen as C₂N, a recently discovered semiconductor. Compared to g-C₃N₄, the less nitrogen containing C₂N has a smaller band gap and a higher absorption coefficient in the visible light range, which is expected to increase the optical absorption in the composite eventually leading to an enhanced charge carrier separation due to the formation of an electronic heterojunction. The aim of preparing an "all-carbon" composite included the research on appropriate precursors for the respective components g-C₃N₄ and C₂N, as well as strategies for appropriate structuring. This was targeted by applying precursors which can form supramolecular pre-organized structures. This allows for more control over morphology and atom patterns during the carbonization process. In the first part of this thesis, it was demonstrated how the photocatalytic activity of g-C₃N₄ can be increased by the targeted introduction of defects or surface terminations. This was achieved by using caffeine as a "growth stopping" additive during the formation of the hydrogen-bonded supramolecular precursor complexes. The increased photocatalytic activity of the obtained materials was demonstrated with dye degradation experiments. The second part of this thesis was focused on the synthesis of the second component C₂N. Here, a deep eutectic mixture from hexaketocyclohexane and urea was structured using the biopolymer chitosan. 
This scaffolding resulted in mesoporous nitrogen-doped carbon monoliths and beads. CO₂- and dye-adsorption experiments with the obtained monolith material revealed a high isosteric heat of CO₂-adsorption and showed the accessibility of the monolithic pore system to larger dye molecules. Furthermore, a novel precursor system for C₂N was explored, based on organic crystals from squaric acid and urea. The respective C₂N carbon with an unusual sheet-like morphology could be synthesized by carbonization of the crystals at 550 °C. With this precursor system, also microporous C₂N carbon with a BET surface area of 865 m²/g was obtained by "salt-templating" with ZnCl₂. Finally, the preparation of a g-C₃N₄/C₂N "all carbon" composite heterojunction was attempted by the self-assembly of g-C₃N₄ and C₂N nanosheets and tested for photocatalytic water splitting. Indeed, the composites revealed high rates of hydrogen evolution when compared to bulk g-C₃N₄. However, the increased catalytic activity was mainly attributed to the high surface area of the nanocomposites rather than to the composition. With regard to alternative composite synthesis ways, first experiments indicated N-Methyl-2-pyrrolidon to be suitable for higher concentrated dispersion of C₂N nanosheets. 
Eventually, the results obtained in this thesis provide precious synthetic contributions towards the preparation and processing of carbon/nitrogen compounds for energy applications.}, language = {en} } @phdthesis{John2017, author = {John, Daniela}, title = {Herstellung anisotroper Kolloide mittels templatgesteuerter Assemblierung und Kontaktdruckverfahren}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398270}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 122, XVII}, year = {2017}, abstract = {Diese Arbeit befasste sich mit neuen Konzepten zur Darstellung anisotroper Partikelsysteme durch Anordnung von funktionalisierten Partikeln unter Zuhilfenahme etablierter Methoden wie der templatgest{\"u}tzten Assemblierung von Partikeln und dem Mikrokontaktdruck. Das erste Teilprojekt besch{\"a}ftigte sich mit der kontrollierten Herstellung von Faltenstrukturen im Mikro- bis Nanometerbereich. Die Faltenstrukturen entstehen durch die Relaxation eines Systems bestehend aus zwei {\"u}bereinander liegender Schichten unterschiedlicher Elastizit{\"a}t. In diesem Fall wurden Falten auf einem elastischen PDMS-Substrat durch Generierung einer Oxidschicht auf der Substratoberfl{\"a}che mittels Plasmabehandlung erzeugt. Die Dicke der Oxidschicht, die {\"u}ber verschiedene Parameter wie Behandlungszeit, Prozessleistung, Partialdruck des plasmaaktiven Gases, Vernetzungsgrad, Deformation sowie Substratdicke einstellbar war, bestimmte Wellenl{\"a}nge und Amplitude der Falten. Das zweite Teilprojekt hatte die Darstellung komplexer, kolloidaler Strukturen auf Basis supramolekularer Wechselwirkungen zum Ziel. Dazu sollte vor allem die templatgest{\"u}tzte Assemblierung von Partikeln sowohl an fest-fl{\"u}ssig als auch fl{\"u}ssig-fl{\"u}ssig Grenzfl{\"a}chen genutzt werden. F{\"u}r Erstere sollten die in Teilprojekt 1 hergestellten Faltenstrukturen als Templat, f{\"u}r Letztere Pickering-Emulsionen zur Anwendung kommen. 
Im ersten Fall wurden verschiedene, modifizierte Silicapartikel und Magnetitnanopartikel, deren Gr{\"o}ße und Oberfl{\"a}chenfunktionalit{\"a}t (Cyclodextrin-, Azobenzol- und Arylazopyrazolgruppen) variierte, in Faltenstrukturen angeordnet. Die Anordnung hing dabei nicht nur vom gew{\"a}hlten Verfahren, sondern auch von Faktoren wie der Partikelkonzentration, der Oberfl{\"a}chenladung oder dem Gr{\"o}ßenverh{\"a}ltnis der Partikel zur Faltengeometrie ab. Die Kombination von Cyclodextrin (CD)- und Arylazopyrazol-modifizierten Partikeln erm{\"o}glichte, auf Basis einer Wirt-Gast-Wechselwirkung zwischen den Partikeltypen und einer templatgesteuerten Anordnung, die Bildung komplexer und strukturierter Formen in der Gr{\"o}ßenordnung mehrerer Mikrometer. Dieses System kann einerseits als Grundlage f{\"u}r die Herstellung verschiedener Janus-Partikel herangezogen werden, andererseits stellt die gerichtete Vernetzung zweier Partikelsysteme zu gr{\"o}ßeren Aggregaten den Grundstein f{\"u}r neuartige, funktionale Materialien dar. Neben der Anordnung an fest-fl{\"u}ssig Grenzfl{\"a}chen konnte außerdem nachgewiesen werden, dass Azobenzol-funktionalisierte Silicapartikel in der Lage sind, Pickering-Emulsionen {\"u}ber mehrere Monate zu stabilisieren. Die Stabilit{\"a}t und Gr{\"o}ße der Emulsionsphase kann {\"u}ber Parameter, wie das Volumenverh{\"a}ltnis und die Konzentration, gesteuert werden. CD-funktionalisierte Silicapartikel besaßen dagegen keine Grenzfl{\"a}chenaktivit{\"a}t, w{\"a}hrend es CD-basierten Polymeren wiederum m{\"o}glich war, durch die Ausbildung von Einschlusskomplexen mit den hydrophoben Molek{\"u}len der {\"O}lphase stabile Emulsionen zu bilden. Dagegen zeigte die Kombination zwei verschiedener Partikelsysteme keinen oder einen destabilisierenden Effekt bez{\"u}glich der Ausbildung von Emulsionen. Im letzten Teilprojekt wurde die Herstellung multivalenter Silicapartikel mittels Mikrokontaktdruck untersucht. 
Die Faltenstrukturen wurden dabei als Stempel verwendet, wodurch es m{\"o}glich war, die Patch-Geometrie {\"u}ber die Wellenl{\"a}nge der Faltenstrukturen zu steuern. Als Tinte diente das positiv geladene Polyelektrolyt Polyethylenimin (PEI), welches {\"u}ber elektrostatische Wechselwirkungen auf unmodifizierten Silicapartikeln haftet. Im Gegensatz zum Drucken mit flachen Stempeln fiel dabei zun{\"a}chst auf, dass sich die Tinte bei den Faltenstrukturen nicht gleichm{\"a}ßig {\"u}ber die gesamte Substratfl{\"a}che verteilt, sondern haupts{\"a}chlich in den Faltent{\"a}lern vorlag. Dadurch handelte es sich bei dem Druckprozess letztlich nicht mehr um ein klassisches Mikrokontaktdruckverfahren, sondern um ein Tiefdruckverfahren. {\"U}ber das Tiefdruckverfahren war es dann aber m{\"o}glich, sowohl eine als auch beide Partikelhemisph{\"a}ren gleichzeitig und mit verschiedenen Funktionalit{\"a}ten zu modifizieren und somit multivalente Silicapartikel zu generieren. In Abh{\"a}ngigkeit der Wellenl{\"a}nge der Falten konnten auf einer Partikelhemisph{\"a}re zwei bis acht Patches abgebildet werden. F{\"u}r die Patch-Geometrie, sprich Gr{\"o}ße und Form der Patches, spielten zudem die Konzentration der Tinte auf dem Stempel, das L{\"o}sungsmittel zum Abl{\"o}sen der Partikel nach dem Drucken sowie die Stempelh{\"a}rte eine wichtige Rolle. Da die Stempelh{\"a}rte aufgrund der variierenden Dicke der Oxidschicht bei verschiedenen Wellenl{\"a}ngen nicht konstant ist, wurden f{\"u}r den Druckprozess meist Abg{\"u}sse der Faltensubstrate verwendet. Auf diese Weise war auch die Vergleichbarkeit bei variierender Wellenl{\"a}nge gew{\"a}hrleistet. 
Neben dem erfolgreichen Nachweis der Modifikation mittels Tiefdruckverfahren konnte auch gezeigt werden, dass {\"u}ber die Komplexierung mit PEI negativ geladene Nanopartikel auf die Partikeloberfl{\"a}che aufgebracht werden k{\"o}nnen.}, language = {de} } @phdthesis{Jennerich2017, author = {Jennerich, Christian}, title = {Lebensweltliche Erfahrungen und kollektive Zuschreibungen bei Bildungsausl{\"a}ndern}, publisher = {Universit{\"a}tsverlag Potsdam}, isbn = {978-3-86956-409-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398991}, school = {Universit{\"a}t Potsdam}, pages = {214}, year = {2017}, abstract = {In der vorliegenden Dissertation werden lebensweltliche Erfahrungszusammenh{\"a}nge in 128 Aufs{\"a}tzen ausl{\"a}ndischer Studierender, die oft auch als Bildungsausl{\"a}nder bezeichnet werden, und grunds{\"a}tzliche Prozesse bei der Herausbildung sowie Ver{\"a}nderung kollektiver Zuschreibungen im Rahmen der sozialen Identit{\"a}tsbildung hermeneutisch untersucht. Mit Hilfe der nach der empirischen Methode der Grounded Theory kodierten Interpretationsergebnisse werden in der qualitativen L{\"a}ngsschnittstudie Vergleiche angestellt, sowohl fallintern als auch fall{\"u}bergreifend, um Muster zu entdecken, Typologien zu konstruieren und die jeweiligen (kulturellen) Horizonte in den Aufs{\"a}tzen zu verallgemeinern. Neben der Grounded Theory bildet die Theorie der „allt{\"a}glichen Lebenswelt" (Alfred Sch{\"u}tz) eine Basis der Herangehensweise an die Untersuchung der Aufs{\"a}tze. In diesem Kontext wird ausgehend von der graduellen Unterteilung der Fremdheit in allt{\"a}gliche, strukturelle und radikale Fremdheit sowie ausgehend von Goffmans Identit{\"a}tskonzept der Frage nachgegangen, inwieweit sich in den untersuchten Texten die Bildung und Ver{\"a}nderung sozialer Identit{\"a}ten feststellen lassen. 
Dabei werden Akkulturationsprozesse und Prozesse der Selbstidentifikation, bez{\"u}glich einer angenommenen Gemeinschaft, analysiert, die von kollektiven (kulturellen) Schemata bestimmt sind. In diesem Zusammenhang kann die vorliegende Dissertation zeigen, dass sich bestimmte kulturelle Schemata in der Auseinandersetzung mit dem vormals neuen Leben in Deutschland herausgebildet haben und bestimmte {\"a}ltere Erfahrungen immer wieder zur Best{\"a}tigung dieser Bilder bzw. Erfahrungsschemata herangezogen und wie der Abdruck in Gestein fossiliert werden.}, language = {de} } @phdthesis{Hakansson2017, author = {H{\aa}kansson, Nils}, title = {A Dark Matter line search using 3D-modeling of Cherenkov showers below 10 TeV with VERITAS}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397670}, school = {Universit{\"a}t Potsdam}, pages = {107, xxxvi}, year = {2017}, abstract = {Dark matter, DM, has not yet been directly observed, but it has a very solid theoretical basis. There are observations that provide indirect evidence, like galactic rotation curves that show that the galaxies are rotating too fast to keep their constituent parts, and galaxy clusters that bend the light coming from behind-lying galaxies more than expected with respect to the mass that can be calculated from what can be visibly seen. These observations, among many others, can be explained with theories that include DM. The missing piece is to detect something that can exclusively be explained by DM. Direct observation in a particle accelerator is one way and indirect detection using telescopes is another. This thesis is focused on the latter method. The Very Energetic Radiation Imaging Telescope Array System, VERITAS, is a telescope array that detects Cherenkov radiation. Theory predicts that DM particles annihilate into, e.g., a γγ pair and create a distinctive energy spectrum when detected by such telescopes, i.e., a monoenergetic line at the same energy as the particle mass. 
This so called "smoking-gun" signature is sought with a sliding window line search within the sub-range ∼ 0.3 - 10 TeV of the VERITAS energy range, ∼ 0.01 - 30 TeV. Standard analysis within the VERITAS collaboration uses Hillas analysis and look-up tables, acquired by analysing particle simulations, to calculate the energy of the particle causing the Cherenkov shower. In this thesis, an improved analysis method has been used. Modelling each shower as a 3D Gaussian should increase the energy reconstruction quality. Five dwarf spheroidal galaxies were chosen as targets with a total of ∼ 224 hours of data. The targets were analysed individually and stacked. Particle simulations were based on two simulation packages, CARE and GrISU. Improvements have been made to the energy resolution and bias correction, up to a few percent each, in comparison to standard analysis. Nevertheless, no line with a relevant significance has been detected. The most promising line is at an energy of ∼ 422 GeV with an upper limit cross section of 8.10 · 10^-24 cm^3 s^-1 and a significance of ∼ 2.73 σ, before trials correction and ∼ 1.56 σ after. Upper limit cross sections have also been calculated for the γγ annihilation process and four other outcomes. The limits are in line with current limits using other methods, from ∼ 8.56 · 10^-26 - 6.61 · 10^-23 cm^3s^-1. Future larger telescope arrays, like the upcoming Cherenkov Telescope Array, CTA, will provide better results with the help of this analysis method.}, language = {en} } @phdthesis{HolzgrefeLang2017, author = {Holzgrefe-Lang, Julia}, title = {Prosodic phrase boundary perception in adults and infants}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405943}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 141}, year = {2017}, abstract = {Prosody is a rich source of information that heavily supports spoken language comprehension. 
In particular, prosodic phrase boundaries divide the continuous speech stream into chunks reflecting the semantic and syntactic structure of an utterance. This chunking or prosodic phrasing plays a critical role in both spoken language processing and language acquisition. Aiming at a better understanding of the underlying processing mechanisms and their acquisition, the present work investigates factors that influence prosodic phrase boundary perception in adults and infants. Using the event-related potential (ERP) technique, three experimental studies examined the role of prosodic context (i.e., phrase length) in German phrase boundary perception and of the main prosodic boundary cues, namely pitch change, final lengthening, and pause. With regard to the boundary cues, the dissertation focused on the questions which cues or cue combination are essential for the perception of a prosodic boundary and on whether and how this cue weighting develops during infancy. Using ERPs is advantageous because the technique captures the immediate impact of (linguistic) information during on-line processing. Moreover, as it can be applied independently of specific task demands or an overt response performance, it can be used with both infants and adults. ERPs are particularly suitable to study the time course and underlying mechanisms of boundary perception, because a specific ERP component, the Closure Positive Shift (CPS) is well established as neuro-physiological indicator of prosodic boundary perception in adults. The results of the three experimental studies first underpin that the prosodic context plays an immediate role in the processing of prosodic boundary information. Moreover, the second study reveals that adult listeners perceive a prosodic boundary also on the basis of a sub-set of the boundary cues available in the speech signal. 
Both ERP and simultaneously collected behavioral data (i.e., prosodic judgements) suggest that the combination of pitch change and final lengthening triggers boundary perception; however, when presented as single cues, neither pitch change nor final lengthening were sufficient. Finally, testing six- and eight-month-old infants shows that the early sensitivity for prosodic information is reflected in a brain response resembling the adult CPS. For both age groups, brain responses to prosodic boundaries cued by pitch change and final lengthening revealed a positivity that can be interpreted as a CPS-like infant ERP component. In contrast, but comparable to the adults' response pattern, pitch change as a single cue does not provoke an infant CPS. These results show that infant phrase boundary perception is not exclusively based on pause detection and hint at an early ability to exploit subtle, relational prosodic cues in speech perception.}, language = {en} } @phdthesis{Hoffmann2017, author = {Hoffmann, Lars}, title = {Lehrkr{\"a}fte als Diagnostikerinnen und Diagnostiker}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404584}, school = {Universit{\"a}t Potsdam}, pages = {246}, year = {2017}, abstract = {Im Fokus der kumulativen Dissertationsschrift stehen Grundschullehrerinnen und Grundschullehrer in ihrer Rolle als Diagnostikerinnen und Diagnostiker. Exemplarisch werden mit der Einsch{\"a}tzung von Aufgabenschwierigkeiten und der Feststellung von Sprachf{\"o}rderbedarf zwei diagnostische Herausforderungen untersucht, die Lehrkr{\"a}fte an Grundschulen bew{\"a}ltigen m{\"u}ssen. Die vorliegende Arbeit umfasst drei empirische Teilstudien, die in einem Rahmentext integriert sind, in dem zun{\"a}chst die theoretischen Hintergr{\"u}nde und die empirische Befundlage erl{\"a}utert werden. Hierbei wird auf die diagnostische Kompetenz von Lehrkr{\"a}ften bzw. die Genauigkeit von Lehrerurteilen eingegangen. 
Ferner wird der Einsatz standardisierter Testinstrumente als wichtiger Bestandteil des diagnostischen Aufgabenfeldes von Lehrkr{\"a}ften charakterisiert. Außerdem werden zentrale Aspekte der Sprachdiagnostik in der Grundschule dargestellt. In Teilstudie 1 (Hoffmann \& B{\"o}hme, 2014b) wird der Frage nachgegangen, wie akkurat Grundschullehrerinnen und -lehrer die Schwierigkeit von Deutsch- und Mathematikaufgaben einsch{\"a}tzen. Dar{\"u}ber hinaus wird untersucht, welche Faktoren mit der {\"U}ber- oder Untersch{\"a}tzung der Schwierigkeit von Aufgaben zusammenh{\"a}ngen. In Teilstudie 2 (Hoffmann \& B{\"o}hme, 2017) wird gepr{\"u}ft, inwiefern die Klassifikationsg{\"u}te von Entscheidungen zum sprachlichen F{\"o}rderbedarf mit den hierf{\"u}r genutzten diagnostischen Informationsquellen kovariiert. Der Fokus liegt hierbei vor allem auf der Untersuchung von Effekten des Einsatzes von sprachdiagnostischen Verfahren. Teilstudie 3 (Hoffmann, B{\"o}hme \& Stanat, 2017) untersucht schließlich die Frage, welche diagnostischen Verfahren gegenw{\"a}rtig bundesweit an den Grundschulen zur Feststellung sprachlichen F{\"o}rderbedarfs genutzt werden und ob diese Verfahren etwa testtheoretischen G{\"u}tekriterien gen{\"u}gen. Die zentralen Ergebnisse der Teilstudien werden im Rahmentext zusammengefasst und studien{\"u}bergreifend diskutiert. 
Hierbei wird auch auf methodische St{\"a}rken und Schw{\"a}chen der drei Beitr{\"a}ge sowie auf Implikationen f{\"u}r die zuk{\"u}nftige Forschung und die schulische Praxis hingewiesen.}, language = {de} } @phdthesis{Hochrein2017, author = {Hochrein, Lena}, title = {Development of a new DNA-assembly method and its application for the establishment of a red light-sensing regulation system}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404441}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2017}, abstract = {In der hier vorgelegten Doktorarbeit wurde eine Strategie zur schnellen, einfachen und zuverl{\"a}ssigen Assemblierung von DNS-Fragmenten, genannt AssemblX, entwickelt. Diese kann genutzt werden, um komplexe DNS-Konstrukte, wie beispielsweise komplette Biosynthesewege, aufzubauen. Dies dient der Produktion von technisch oder medizinisch relevanten Produkten in biotechnologisch nutzbaren Organismen. Die Vorteile der Klonierungsstrategie liegen in der Schnelligkeit der Klonierung, der Flexibilit{\"a}t bez{\"u}glich des Wirtsorganismus, sowie der hohen Effektivit{\"a}t, die durch gezielte Optimierung erreicht wurde. Die entwickelte Technik erlaubt die nahtlose Assemblierung von Genfragmenten und bietet eine Komplettl{\"o}sung von der Software-gest{\"u}tzten Planung bis zur Fertigstellung von DNS-Konstrukten, welche die Gr{\"o}ße von Mini-Chromosomen erreichen k{\"o}nnen. Mit Hilfe der oben beschriebenen AssemblX Strategie wurde eine optogenetische Plattform f{\"u}r die B{\"a}ckerhefe Saccharomyces cerevisiae etabliert. Diese besteht aus einem Rotlicht-sensitiven Photorezeptor und seinem interagierenden Partner aus Arabidopsis thaliana, welche in lichtabh{\"a}ngiger Weise miteinander agieren. 
Diese Interaktion wurde genutzt, um zwei Rotlicht-aktivierbare Proteine zu erstellen: Einen Transkriptionsfaktor, der nach Applikation eines Lichtpulses die Produktion eines frei w{\"a}hlbaren Proteins stimuliert, sowie eine Cre Rekombinase, die ebenfalls nach Bestrahlung mit einer bestimmten Wellenl{\"a}nge die zufallsbasierte Reorganisation bestimmter DNS-Konstrukte erm{\"o}glicht. Zusammenfassend wurden damit drei Werkzeuge f{\"u}r die synthetische Biologie etabliert. Diese erm{\"o}glichen den Aufbau von komplexen Biosynthesewegen, deren Licht-abh{\"a}ngige Regulation, sowie die zufallsbasierte Rekombination zu Optimierungszwecken.}, language = {en} } @phdthesis{Hethey2017, author = {Hethey, Christoph Philipp}, title = {Cell physiology based pharmacodynamic modeling of antimicrobial drug combinations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401056}, school = {Universit{\"a}t Potsdam}, pages = {102}, year = {2017}, abstract = {Mathematical models of bacterial growth have been successfully applied to study the relationship between antibiotic drug exposure and the antibacterial effect. Since these models typically lack a representation of cellular processes and cell physiology, the mechanistic integration of drug action is not possible on the cellular level. The cellular mechanisms of drug action, however, are particularly relevant for the prediction, analysis and understanding of interactions between antibiotics. Interactions are also studied experimentally, however, a lacking consent on the experimental protocol hinders direct comparison of results. As a consequence, contradictory classifications as additive, synergistic or antagonistic are reported in literature. In the present thesis we developed a novel mathematical model for bacterial growth that integrates cell-level processes into the population growth level. The scope of the model is to predict bacterial growth under antimicrobial perturbation by multiple antibiotics in vitro. 
To this end, we combined cell-level data from literature with population growth data for Bacillus subtilis, Escherichia coli and Staphylococcus aureus. The cell-level data described growth-determining characteristics of a reference cell, including the ribosomal concentration and efficiency. The population growth data comprised extensive time-kill curves for clinically relevant antibiotics (tetracycline, chloramphenicol, vancomycin, meropenem, linezolid, including dual combinations). The new cell-level approach allowed for the first time to simultaneously describe single and combined effects of the aforementioned antibiotics for different experimental protocols, in particular different growth phases (lag and exponential phase). Consideration of ribosomal dynamics and persisting sub-populations explained the decreased potency of linezolid on cultures in the lag phase compared to exponential phase cultures. The model captured growth rate dependent killing and auto-inhibition of meropenem and - also for vancomycin exposure - regrowth of the bacterial cultures due to adaptive resistance development. Stochastic interaction surface analysis demonstrated the pronounced antagonism between meropenem and linezolid to be robust against variation in the growth phase and pharmacodynamic endpoint definition, but sensitive to a change in the experimental duration. Furthermore, the developed approach included a detailed representation of the bacterial cell-cycle. We used this representation to describe septation dynamics during the transition of a bacterial culture from the exponential to stationary growth phase. Resulting from a new mechanistic understanding of transition processes, we explained the lag time between the increase in cell number and bacterial biomass during the transition from the lag to exponential growth phase. Furthermore, our model reproduces the increased intracellular RNA mass fraction during long term exposure of bacteria to chloramphenicol. 
In summary, we contribute a new approach to disentangle the impact of drug effects, assay readout and experimental protocol on antibiotic interactions. In the absence of a consensus on the corresponding experimental protocols, this disentanglement is key to translate information between heterogeneous experiments and also ultimately to the clinical setting.}, language = {en} } @phdthesis{Herbrich2017, author = {Herbrich, Marcus}, title = {Einfluss der erosionsbedingten Pedogenese auf den Wasser- und Stoffhaushalt ackerbaulich genutzter B{\"o}den der Grundmor{\"a}nenbodenlandschaft NO-Deutschlands - hydropedologische Untersuchungen mittels w{\"a}gbarer Pr{\"a}zisionslysimeter}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408561}, school = {Universit{\"a}t Potsdam}, pages = {186}, year = {2017}, abstract = {In the arable soil landscape of hummocky ground moraines, an erosion-affected spatial differentiation of soils can be observed. Man-made erosion leads to soil profile modifications along slopes with changed solum thickness and modified properties of soil horizons due to water erosion in combination with tillage operations. Soil erosion creates, thereby, spatial patterns of soil properties (e.g., texture and organic matter content) and differences in crop development. However, little is known about the manner in which water fluxes are affected by soil-crop interactions depending on contrasting properties of differently-developed soil horizons and how water fluxes influence the carbon transport in an eroded landscape. To identify such feedbacks between erosion-induced soil profile modifications and the 1D-water and solute balance, high-precision weighing lysimeters equipped with a wide range of sensor technique were filled with undisturbed soil monoliths that differed in the degree of past soil erosion. Furthermore, lysimeter effluent concentrations were analyzed for dissolved carbon fractions in bi-weekly intervals. 
The water balance components measured by high precision lysimeters varied from the most eroded to the less eroded monolith up to 83 \% (deep drainage) primarily caused due to varying amounts of precipitation and evapotranspiration for a 3-years period. Here, interactions between crop development and contrasting rainfall interception by above ground biomass could explain differences in water balance components. Concentrations of dissolved carbon in soil water samples were relatively constant in time, suggesting carbon leaching was mainly affected by water fluxes in this observation period. For the lysimeter-based water balance analysis, a filtering scheme was developed considering temporal autocorrelation. The minute-based autocorrelation analysis of mass changes from lysimeter time series revealed characteristic autocorrelation lengths ranging from 23 to 76 minutes. Thereby, temporal autocorrelation provided an optimal approximation of precipitation quantities. However, the high temporal resolution in lysimeter time series is restricted by the lengths of autocorrelation. Erosion-induced but also gradual changes in soil properties were reflected by dynamics of soil water retention properties in the lysimeter soils. Short-term and long-term hysteretic water retention data suggested seasonal wettability problems of soils increasingly limited rewetting of previously dried pore regions. Differences in water retention were assigned to soil tillage operations and the erosion history at different slope positions. The three-dimensional spatial pattern of soil types that result from erosional soil profile modifications were also reflected in differences of crop root development at different landscape positions. Contrasting root densities revealed positive relations of root and aboveground plant characteristics. 
Differences in the spatially-distributed root growth between different eroded soil types provided indications that root development was affected by the erosion-induced soil evolution processes. Overall, the current thesis corroborated the hypothesis that erosion-induced soil profile modifications affect the soil water balance, carbon leaching and soil hydraulic properties, but also the crop root system is influenced by erosion-induced spatial patterns of soil properties in the arable hummocky post glacial soil landscape. The results will help to improve model predictions of water and solute movement in arable soils and to understand interactions between soil erosion and carbon pathways regarding sink-or-source terms in landscapes.}, language = {en} } @phdthesis{Hentrich2017, author = {Hentrich, Doreen}, title = {Grenzfl{\"a}chen-kontrollierte Mineralisation von Calciumphosphat}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398236}, school = {Universit{\"a}t Potsdam}, pages = {163}, year = {2017}, abstract = {In der vorliegenden Arbeit konnte gezeigt werden, dass die beiden verwendeten Amphiphile mit Cholesterol als hydrophoben Block, gute Template f{\"u}r die Mineralisation von Calciumphosphat an der Wasser/Luft-Grenzfl{\"a}che sind. Mittels Infrarot-Reflexions-Absorptions-Spektroskopie (IRRAS), R{\"o}ntgenphotoelektronenspektroskopie (XPS), Energie dispersiver R{\"o}ntgenspektroskopie (EDXS), Elektronenbeugung (SAED) und hochaufl{\"o}sende Transmissionselektronenmikroskopie (HRTEM) konnte die erfolgreiche Mineralisation von Calciumphosphat f{\"u}r beide Amphiphile an der Wasser/Luft-Grenzfl{\"a}che nachgewiesen werden. Es konnte auch gezeigt werden, dass das Phasenverhalten der beiden Amphiphile und die bei der Mineralisation von Calciumphosphat gebildeten Kristallphasen nicht identisch sind. Beide Amphiphile {\"u}ben demnach einen unterschiedlichen Einfluss auf den Mineralisationsverlauf aus. 
Beim CHOL-HEM konnte sowohl nach 3 h als auch nach 5 h Octacalciumphosphat (OCP) als einzige Kristallphase mittels XPS, SAED, HRTEM und EDXS nachgewiesen werden. Das A-CHOL hingegen zeigte bei der Mineralisation von Calciumphosphat nach 1 h zun{\"a}chst eine nicht eindeutig identifizierbare Vorl{\"a}uferphase aus amorphen Calciumphosphat, Brushit (DCPD) oder OCP. Diese wandelte sich dann nach 3 h und 5 h in ein Gemisch, bestehend aus OCP und ein wenig Hydroxylapatit (HAP) um. Die Schlussfolgerung daraus ist, dass das CHOL-HEM in der Lage ist, dass w{\"a}hrend der Mineralisation entstandene OCP zu stabilisieren. Dies geschieht vermutlich durch die Adsorption des Amphiphils bevorzugt an der OCP Oberfl{\"a}che in [100] Orientierung. Dadurch wird die Spaltung entlang der c-Achse unterdr{\"u}ckt und die Hydrolyse zum HAP verhindert. Das A-CHOL ist hingegen sterisch anspruchsvoller und kann wahrscheinlich aufgrund seiner Gr{\"o}ße nicht so gut an der OCP Kristalloberfl{\"a}che adsorbieren verglichen zum CHOL-HEM. Das CHOL-HEM kann also die Hydrolyse von OCP zu HAP besser unterdr{\"u}cken als das A-CHOL. Da jedoch auch beim A-CHOL nach einer Mineralisationszeit von 5 h nur wenig HAP zu finden ist, w{\"a}re auch hier ein Stabilisierungseffekt der OCP Kristalle m{\"o}glich. Um eine genaue Aussage dar{\"u}ber treffen zu k{\"o}nnen, sind jedoch zus{\"a}tzliche Kontrollexperimente notwendig. Es w{\"a}re zum einen denkbar, die Mineralisationsexperimente {\"u}ber einen l{\"a}ngeren Zeitraum durchzuf{\"u}hren. Diese k{\"o}nnten zeigen, ob das CHOL-HEM die Hydrolyse vom OCP zum HAP komplett unterdr{\"u}ckt. Außerdem k{\"o}nnte nachgewiesen werden, ob beim A-CHOL das OCP weiter zum HAP umgesetzt wird oder ob ein Gemisch beider Kristallphasen erhalten bleibt. Um die Mineralisation an der Wasser/Luft-Grenzfl{\"a}che mit der Mineralisation in Bulkl{\"o}sung zu vergleichen, wurden zus{\"a}tzlich Mineralisationsexperimente in Bulkl{\"o}sung durchgef{\"u}hrt. 
Dazu wurden Nitrilotriessigs{\"a}ure (NTA) und Ethylendiamintetraessigs{\"a}ure (EDTA) als Mineralisationsadditive verwendet, da NTA unter anderem der Struktur der hydrophilen Kopfgruppe des A-CHOLs {\"a}hnelt. Es konnte gezeigt werden, dass ein Vergleich der Mineralisation an der Grenzfl{\"a}che mit der Mineralisation in Bulkl{\"o}sung nicht ohne weiteres m{\"o}glich ist. Bei der Mineralisation in Bulkl{\"o}sung wird bei tiefen pH-Werten DCPD und bei h{\"o}heren pH-Werten HAP gebildet. Diese wurde mittels R{\"o}ntgenpulverdiffraktometrie Messungen nachgewiesen und durch Infrarotspektroskopie bekr{\"a}ftigt. Die Bildung von OCP wie an der Wasser/Luft-Grenzfl{\"a}che konnte nicht beobachtet werden. Es konnte auch gezeigt werden, dass beide Additive NTA und EDTA einen unterschiedlichen Einfluss auf den Verlauf der Mineralisation nehmen. So unterscheiden sich zum einen die Morphologien des gebildeten DCPDs und zum anderen wurde beispielsweise in Anwesenheit von 10 und 15 mM NTA neben DCPD auch HAP bei einem Ausgangs-pH-Wert von 7 nachgewiesen. Da unser Augenmerk speziell auf der Mineralisation von Calciumphosphat an der Wasser/Luft-Grenzfl{\"a}che liegt, k{\"o}nnten Folgeexperimente wie beispielsweise GIXD Messungen durchgef{\"u}hrt werden. Dadurch w{\"a}re es m{\"o}glich, einen {\"U}berblick {\"u}ber die gebildeten Kristallphasen nach unterschiedlichen Reaktionszeiten direkt auf dem Trog zu erhalten. Es konnte weiterhin gezeigt werden, dass auch einfache Amphiphile in der Lage sind, die Mineralisation von Calciumphosphat zu steuern. Amphiphile mit Cholesterol als hydrophoben Block bilden offensichtlich besonders stabile Monolagen an der Wasser/Luft-Grenzfl{\"a}che. 
Eine Untersuchung des Einflusses {\"a}hnlicher Amphiphile mit unterschiedlichen hydrophilen Kopfgruppen auf das Mineralisationsverhalten von Calciumphosphat w{\"a}re durchaus interessant.}, language = {de} } @phdthesis{Hendriyana2017, author = {Hendriyana, Andri}, title = {Detection and Kirchhoff-type migration of seismic events by use of a new characteristic function}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398879}, school = {Universit{\"a}t Potsdam}, pages = {v, 139}, year = {2017}, abstract = {The classical method of seismic event localization is based on the picking of body wave arrivals, ray tracing and inversion of travel time data. Travel time picks with small uncertainties are required to produce reliable and accurate results with this kind of source localization. Hence recordings, with a low Signal-to-Noise Ratio (SNR) cannot be used in a travel time based inversion. Low SNR can be related with weak signals from distant and/or low magnitude sources as well as with a high level of ambient noise. Diffraction stacking is considered as an alternative seismic event localization method that enables also the processing of low SNR recordings by mean of stacking the amplitudes of seismograms along a travel time function. The location of seismic event and its origin time are determined based on the highest stacked amplitudes (coherency) of the image function. The method promotes an automatic processing since it does not need travel time picks as input data. However, applying diffraction stacking may require longer computation times if only limited computer resources are used. Furthermore, a simple diffraction stacking of recorded amplitudes could possibly fail to locate the seismic sources if the focal mechanism leads to complex radiation patterns which typically holds for both natural and induced seismicity. In my PhD project, I have developed a new work flow for the localization of seismic events which is based on a diffraction stacking approach. 
A parallelized code was implemented for the calculation of travel time tables and for the determination of an image function to reduce computation time. In order to address the effects from complex source radiation patterns, I also suggest to compute diffraction stacking from a characteristic function (CF) instead of stacking the original wave form data. A new CF, which is called in the following mAIC (modified from Akaike Information Criterion) is proposed. I demonstrate that the performance of the mAIC does not depend on the chosen length of the analyzed time window and that both P- and S-wave onsets can be detected accurately. To avoid cross-talk between P- and S-waves due to inaccurate velocity models, I separate the P- and S-waves from the mAIC function by making use of polarization attributes. Then, eventually the final image function is represented by the largest eigenvalue as a result of the covariance analysis between P- and S-image functions. Before applying diffraction stacking, I also apply seismogram denoising by using Otsu thresholding in the time-frequency domain. Results from synthetic experiments show that the proposed diffraction stacking provides reliable results even from seismograms with low SNR=1. Tests with different presentations of the synthetic seismograms (displacement, velocity, and acceleration) showed that acceleration seismograms deliver better results in case of high SNR, whereas displacement seismograms provide more accurate results in case of low SNR recordings. In another test, different measures (maximum amplitude, other statistical parameters) were used to determine the source location in the final image function. I found that the statistical approach is the preferred method particularly for low SNR. The work flow of my diffraction stacking method was finally applied to local earthquake data from Sumatra, Indonesia. 
Recordings from a temporary network of 42 stations deployed for 9 months around the Tarutung pull-apart Basin were analyzed. The seismic event locations resulting from the diffraction stacking method align along a segment of the Sumatran Fault. A more complex distribution of seismicity is imaged within and around the Tarutung Basin. Two lineaments striking N-S were found in the middle of the Tarutung Basin which support independent results from structural geology. These features are interpreted as opening fractures due to local extension. A cluster of seismic events repeatedly occurred in short time which might be related to fluid drainage since two hot springs are observed at the surface near to this cluster.}, language = {en} } @phdthesis{Heise2017, author = {Heise, Janine}, title = {Phylogenetic and physiological characterization of deep-biosphere microorganisms in El'gygytgyn Crater Lake sediments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403436}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2017}, abstract = {The existence of diverse and active microbial ecosystems in the deep subsurface - a biosphere that was originally considered devoid of life - was discovered in multiple microbiological studies. However, most of the studies are restricted to marine ecosystems, while our knowledge about the microbial communities in the deep subsurface of lake systems and their potentials to adapt to changing environmental conditions is still fragmentary. This doctoral thesis aims to build up a unique data basis for providing the first detailed high-throughput characterization of the deep biosphere of lacustrine sediments and to emphasize how important it is to differentiate between the living and the dead microbial community in deep biosphere studies. In this thesis, up to 3.6 Ma old sediments (up to 317 m deep) of the El'gygytgyn Crater Lake were examined, which represents the oldest terrestrial climate record of the Arctic. 
Combining next generation sequencing with detailed geochemical characteristics and other environmental parameters, the microbial community composition was analyzed in regard to changing climatic conditions within the last 3.6 Ma to 1.0 Ma (Pliocene and Pleistocene). DNA from all investigated sediments was successfully extracted and a surprisingly diverse (6,910 OTUs) and abundant microbial community in the El'gygytgyn deep sediments were revealed. The bacterial abundance (10³-10⁶ 16S rRNA copies g⁻¹ sediment) was up to two orders of magnitudes higher than the archaeal abundance (10¹-10⁵) and fluctuates with the Pleistocene glacial/interglacial cyclicality. Interestingly, a strong increase in the microbial diversity with depth was observed (approximately 2.5 times higher diversity in Pliocene sediments compared to Pleistocene sediments). The increase in diversity with depth in the Lake El'gygytgyn is most probably caused by higher sedimentary temperatures towards the deep sediment layers as well as an enhanced temperature-induced intra-lake bioproductivity and higher input of allochthonous organic-rich material during Pliocene climatic conditions. Moreover, the microbial richness parameters follow the general trends of the paleoclimatic parameters, such as the paleo-temperature and paleo-precipitation. The most abundant bacterial representatives in the El'gygytgyn deep biosphere are affiliated with the phyla Proteobacteria, Actinobacteria, Bacteroidetes, and Acidobacteria, which are also commonly distributed in the surrounding permafrost habitats. The predominated taxon was the halotolerant genus Halomonas (in average 60\% of the total reads per sample). Additionally, this doctoral thesis focuses on the live/dead differentiation of microbes in cultures and environmental samples. 
While established methods (e.g., fluorescence in situ hybridization, RNA analyses) are not applicable to the challenging El'gygytgyn sediments, two newer methods were adapted to distinguish between DNA from live cells and free (extracellular, dead) DNA: the propidium monoazide (PMA) treatment and the cell separation adapted for low amounts of DNA. The applicability of the DNA-intercalating dye PMA was successfully evaluated to mask free DNA of different cultures of methanogenic archaea, which play a major role in the global carbon cycle. Moreover, an optimal procedure to simultaneously treat bacteria and archaea was developed using 130 µM PMA and 5 min of photo-activation with blue LED light, which is also applicable on sandy environmental samples with a particle load of ≤ 200 mg mL⁻¹. It was demonstrated that the soil texture has a strong influence on the PMA treatment in particle-rich samples and that in particular silt and clay-rich samples (e.g., El'gygytgyn sediments) lead to an insufficient shielding of free DNA by PMA. Therefore, a cell separation protocol was used to distinguish between DNA from live cells (intracellular DNA) and extracellular DNA in the El'gygytgyn sediments. While comparing these two DNA pools with a total DNA pool extracted with a commercial kit, significant differences in the microbial composition of all three pools (mean distance of relative abundance: 24.1\%, mean distance of OTUs: 84.0\%) was discovered. In particular, the total DNA pool covers significantly fewer taxa than the cell-separated DNA pools and only inadequately represents the living community. Moreover, individual redundancy analyses revealed that the microbial community of the intra- and extracellular DNA pool are driven by different environmental factors. The living community is mainly influenced by life-dependent parameters (e.g., sedimentary matrix, water availability), while the extracellular DNA is dependent on the biogenic silica content. 
The different community-shaping parameters and the fact, that a redundancy analysis of the total DNA pool explains significantly less variance of the microbial community, indicate that the total DNA represents a mixture of signals of the live and dead microbial community. This work provides the first fundamental data basis of the diversity and distribution of microbial deep biosphere communities of a lake system over several million years. Moreover, it demonstrates the substantial importance of extracellular DNA in old sediments. These findings may strongly influence future environmental community analyses, where applications of live/dead differentiation avoid incorrect interpretations due to a failed extraction of the living microbial community or an overestimation of the past community diversity in the course of total DNA extraction approaches.}, language = {en} }