@phdthesis{Halecker2016, author = {Halecker, Bastian}, title = {New perspective and insights on business model innovation using systems thinking and action case studies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90404}, school = {Universit{\"a}t Potsdam}, pages = {IX, 239}, year = {2016}, abstract = {In recent years, entire industries and their participants have been affected by disruptive technologies, resulting in dramatic market changes and challenges to firm's business logic and thus their business models (BMs). Firms from mature industries are increasingly realizing that BMs that worked successfully for years have become insufficient to stay on track in today's "move fast and break things" economy. Firms must scrutinize the core logic that informs how they do business, which means exploring novel ways to engage customers and get them to pay. This can lead to a complete renewal of existing BMs or innovating completely new BMs. BMs have emerged as a popular object of research within the last decade. Despite the popularity of the BM, the theoretical and empirical foundation underlying the concept is still weak. In particular, the innovation process for BMs has been developed and implemented in firms, but understanding of the mechanisms behind it is still lacking. Business model innovation (BMI) is a complex and challenging management task that requires more than just novel ideas. Systematic studies to generate a better understanding of BMI and support incumbents with appropriate concepts to improve BMI development are in short supply. Further, there is a lack of knowledge about appropriate research practices for studying BMI and generating valid data sets in order to meet expectations in both practice and academia. This paper-based dissertation aims to contribute to research practice in the field of BM and BMI and foster better understanding of the BM concept and BMI processes in incumbent firms from mature industries. The overall dissertation presents three main results. The first result is a new perspective, or the systems thinking view, on the BM and BMI. With the systems thinking view, the fuzzy BM concept is clearly structured and a BMI framework is proposed. The second result is a new research strategy for studying BMI. After analyzing current research practice in the areas of BMs and BMI, it is obvious that there is a need for better research on BMs and BMI in terms of accuracy, transparency, and practical orientation. Thus, the action case study approach combined with abductive methodology is proposed and proven in the research setting of this thesis. The third result stems from three action case studies in incumbent firms from mature industries employed to study how BMI occurs in practice. The new insights and knowledge gained from the action case studies help to explain BMI in such industries and increase understanding of the core of these processes. By studying these issues, the articles complied in this thesis contribute conceptually and empirically to the recently consolidated but still increasing literature on the BM and BMI. The conclusions and implications made are intended to foster further research and improve managerial practices for achieving BMI in a dramatically changing business environment.}, language = {en} } @phdthesis{Herenz2016, author = {Herenz, Edmund Christian}, title = {Detecting and understanding extragalactic Lyman α emission using 3D spectroscopy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102341}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2016}, abstract = {In this thesis we use integral-field spectroscopy to detect and understand of Lyman α (Lyα) emission from high-redshift galaxies. Intrinsically the Lyα emission at λ = 1216 {\AA} is the strongest recombination line from galaxies. It arises from the 2p → 1s transition in hydrogen. In star-forming galaxies the line is powered by ionisation of the interstellar gas by hot O- and B- stars. Galaxies with star-formation rates of 1 - 10 Msol/year are expected to have Lyα luminosities of 42 dex - 43 dex (erg/s), corresponding to fluxes ~ -17 dex - -18 dex (erg/s/cm²) at redshifts z~3, where Lyα is easily accessible with ground-based telescopes. However, star-forming galaxies do not show these expected Lyα fluxes. Primarily this is a consequence of the high-absorption cross-section of neutral hydrogen for Lyα photons σ ~ -14 dex (cm²). Therefore, in typical interstellar environments Lyα photons have to undergo a complex radiative transfer. The exact conditions under which Lyα photons can escape a galaxy are poorly understood. Here we present results from three observational projects. In Chapter 2, we show integral field spectroscopic observations of 14 nearby star-forming galaxies in Balmer α radiation (Hα, λ = 6562.8 {\AA}). These observations were obtained with the Potsdam Multi Aperture Spectrophotometer at the Calar-Alto 3.5m Telescope}. Hα directly traces the intrinsic Lyα radiation field. We present Hα velocity fields and velocity dispersion maps spatially registered onto Hubble Space Telescope Lyα and Hα images. From our observations, we conjecture a causal connection between spatially resolved Hα kinematics and Lyα photometry for individual galaxies. Statistically, we find that dispersion-dominated galaxies are more likely to emit Lyα photons than galaxies where ordered gas-motions dominate. This result indicates that turbulence in actively star-forming systems favours an escape of Lyα radiation. Not only massive stars can power Lyα radiation, but also non-thermal emission from an accreting super-massive black hole in the galaxy centre. If a galaxy harbours such an active galactic nucleus, the rate of hydrogen-ionising photons can be more than 1000 times higher than that of a typical star-forming galaxy. This radiation can potentially ionise large regions well outside the main stellar body of galaxies. Therefore, it is expected that the neutral hydrogen from these circum-galactic regions shines fluorescently in Lyα. Circum-galactic gas plays a crucial role in galaxy formation. It may act as a reservoir for fuelling star formation, and it is also subject to feedback processes that expel galactic material. If Lyα emission from this circum-galactic medium (CGM) was detected, these important processes could be studied in-situ around high-z galaxies. In Chapter 3, we show observations of five radio-quiet quasars with PMAS to search for possible extended CGM emission in the Lyα line. However, in four of the five objects, we find no significant traces of this emission. In the fifth object, there is evidence for a weak and spatially quite compact Lyα excess at several kpc outside the nucleus. The faintness of these structures is consistent with the idea that radio-quiet quasars typically reside in dark matter haloes of modest masses. While we were not able to detect Lyα CGM emission, our upper limits provide constraints for the new generation of IFS instruments at 8--10m class telescopes. The Multi Unit Spectroscopic Explorer (MUSE) at ESOs Very Large Telescopeis such an unique instrument. One of the main motivating drivers in its construction was the use as a survey instrument for Lyα emitting galaxies at high-z. Currently, we are conducting such a survey that will cover a total area of ~100 square arcminutes with 1 hour exposures for each 1 square arcminute MUSE pointing. As a first result from this survey we present in Chapter 5 a catalogue of 831 emission-line selected galaxies from a 22.2 square arcminute region in the Chandra Deep Field South. In order to construct the catalogue, we developed and implemented a novel source detection algorithm -- LSDCat -- based on matched filtering for line emission in 3D spectroscopic datasets (Chapter 4). Our catalogue contains 237 Lyα emitting galaxies in the redshift range 3 ≲ z ≲ 6. Only four of those previously had spectroscopic redshifts in the literature. We conclude this thesis with an outlook on the construction of a Lyα luminosity function based on this unique sample (Chapter 6).}, language = {en} } @phdthesis{Sauermann2016, author = {Sauermann, Antje}, title = {Impact of the type of referring expression on the acquisition of word order variation}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-330-5}, issn = {1869-3822}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89409}, school = {Universit{\"a}t Potsdam}, year = {2016}, abstract = {This dissertation examines the impact of the type of referring expression on the acquisition of word order variation in German-speaking preschoolers. A puzzle in the area of language acquisition concerns the production-comprehension asymmetry for non-canonical sentences like "Den Affen f{\"a}ngt die Kuh." ("The monkey, the cow chases."), that is, preschoolers usually have difficulties in accurately understanding non-canonical sentences approximately until age six (e.g., Dittmar et al., 2008) although they produce non-canonical sentences already around age three (e.g., Poeppel \& Wexler, 1993; Weissenborn, 1990). This dissertation investigated the production and comprehension of non-canonical sentences to address this issue. Three corpus analyses were conducted to investigate the impact of givenness, topic status and the type of referring expression on word order in the spontaneous speech of two- to four-year-olds and the child-directed speech produced by their mothers. The positioning of the direct object in ditransitive sentences was examined; in particular, sentences in which the direct object occurred before or after the indirect object in the sentence-medial positions and sentences in which it occurred in the sentence-initial position. The results reveal similar ordering patterns for children and adults. Word order variation was to a large extent predictable from the type of referring expression, especially with respect to the word order involving the sentence-medial positions. Information structure (e.g., topic status) had an additional impact only on word order variation that involved the sentence-initial position. Two comprehension experiments were conducted to investigate whether the type of referring expression and topic status influences the comprehension of non-canonical transitive sentences in four- and five-year-olds. In the first experiment, the topic status of the one of the sentential arguments was established via a preceding context sentence, and in the second experiment, the type of referring expression for the sentential arguments was additionally manipulated by using either a full lexical noun phrase (NP) or a personal pronoun. The results demonstrate that children's comprehension of non-canonical sentences improved when the topic argument was realized as a personal pronoun and this improvement was independent of the grammatical role of the arguments. However, children's comprehension was not improved when the topic argument was realized as a lexical NP. In sum, the results of both production and comprehension studies support the view that referring expressions may be seen as a sentence-level cue to word order and to the information status of the sentential arguments. The results highlight the important role of the type of referring expression on the acquisition of word order variation and indicate that the production-comprehension asymmetry is reduced when the type of referring expression is considered.}, language = {en} } @phdthesis{Breuer2016, author = {Breuer, David}, title = {The plant cytoskeleton as a transportation network}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93583}, school = {Universit{\"a}t Potsdam}, pages = {164}, year = {2016}, abstract = {The cytoskeleton is an essential component of living cells. It is composed of different types of protein filaments that form complex, dynamically rearranging, and interconnected networks. The cytoskeleton serves a multitude of cellular functions which further depend on the cell context. In animal cells, the cytoskeleton prominently shapes the cell's mechanical properties and movement. In plant cells, in contrast, the presence of a rigid cell wall as well as their larger sizes highlight the role of the cytoskeleton in long-distance intracellular transport. As it provides the basis for cell growth and biomass production, cytoskeletal transport in plant cells is of direct environmental and economical relevance. However, while knowledge about the molecular details of the cytoskeletal transport is growing rapidly, the organizational principles that shape these processes on a whole-cell level remain elusive. This thesis is devoted to the following question: How does the complex architecture of the plant cytoskeleton relate to its transport functionality? The answer requires a systems level perspective of plant cytoskeletal structure and transport. To this end, I combined state-of-the-art confocal microscopy, quantitative digital image analysis, and mathematically powerful, intuitively accessible graph-theoretical approaches. This thesis summarizes five of my publications that shed light on the plant cytoskeleton as a transportation network: (1) I developed network-based frameworks for accurate, automated quantification of cytoskeletal structures, applicable in, e.g., genetic or chemical screens; (2) I showed that the actin cytoskeleton displays properties of efficient transport networks, hinting at its biological design principles; (3) Using multi-objective optimization, I demonstrated that different plant cell types sustain cytoskeletal networks with cell-type specific and near-optimal organization; (4) By investigating actual transport of organelles through the cell, I showed that properties of the actin cytoskeleton are predictive of organelle flow and provided quantitative evidence for a coordination of transport at a cellular level; (5) I devised a robust, optimization-based method to identify individual cytoskeletal filaments from a given network representation, allowing the investigation of single filament properties in the network context. The developed methods were made publicly available as open-source software tools. Altogether, my findings and proposed frameworks provide quantitative, system-level insights into intracellular transport in living cells. Despite my focus on the plant cytoskeleton, the established combination of experimental and theoretical approaches is readily applicable to different organisms. Despite the necessity of detailed molecular studies, only a complementary, systemic perspective, as presented here, enables both understanding of cytoskeletal function in its evolutionary context as well as its future technological control and utilization.}, language = {en} } @phdthesis{Ludewig2016, author = {Ludewig, Matthias}, title = {Path integrals on manifolds with boundary and their asymptotic expansions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94387}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2016}, abstract = {It is "scientific folklore" coming from physical heuristics that solutions to the heat equation on a Riemannian manifold can be represented by a path integral. However, the problem with such path integrals is that they are notoriously ill-defined. One way to make them rigorous (which is often applied in physics) is finite-dimensional approximation, or time-slicing approximation: Given a fine partition of the time interval into small subintervals, one restricts the integration domain to paths that are geodesic on each subinterval of the partition. These finite-dimensional integrals are well-defined, and the (infinite-dimensional) path integral then is defined as the limit of these (suitably normalized) integrals, as the mesh of the partition tends to zero. In this thesis, we show that indeed, solutions to the heat equation on a general compact Riemannian manifold with boundary are given by such time-slicing path integrals. Here we consider the heat equation for general Laplace type operators, acting on sections of a vector bundle. We also obtain similar results for the heat kernel, although in this case, one has to restrict to metrics satisfying a certain smoothness condition at the boundary. One of the most important manipulations one would like to do with path integrals is taking their asymptotic expansions; in the case of the heat kernel, this is the short time asymptotic expansion. In order to use time-slicing approximation here, one needs the approximation to be uniform in the time parameter. We show that this is possible by giving strong error estimates. Finally, we apply these results to obtain short time asymptotic expansions of the heat kernel also in degenerate cases (i.e. at the cut locus). Furthermore, our results allow to relate the asymptotic expansion of the heat kernel to a formal asymptotic expansion of the infinite-dimensional path integral, which gives relations between geometric quantities on the manifold and on the loop space. In particular, we show that the lowest order term in the asymptotic expansion of the heat kernel is essentially given by the Fredholm determinant of the Hessian of the energy functional. We also investigate how this relates to the zeta-regularized determinant of the Jacobi operator along minimizing geodesics.}, language = {en} } @phdthesis{Schroeder2016, author = {Schr{\"o}der, Henning}, title = {Ultrafast electron dynamics in Fe(CO)5 and Cr(CO)6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94589}, school = {Universit{\"a}t Potsdam}, pages = {v, 87}, year = {2016}, abstract = {In this thesis, the two prototype catalysts Fe(CO)₅ and Cr(CO)₆ are investigated with time-resolved photoelectron spectroscopy at a high harmonic setup. In both of these metal carbonyls, a UV photon can induce the dissociation of one or more ligands of the complex. The mechanism of the dissociation has been debated over the last decades. The electronic dynamics of the first dissociation occur on the femtosecond timescale. For the experiment, an existing high harmonic setup was moved to a new location, was extended, and characterized. The modified setup can induce dynamics in gas phase samples with photon energies of 1.55eV, 3.10eV, and 4.65eV. The valence electronic structure of the samples can be probed with photon energies between 20eV and 40eV. The temporal resolution is 111fs to 262fs, depending on the combination of the two photon energies. The electronically excited intermediates of the two complexes, as well as of the reaction product Fe(CO)₄, could be observed with photoelectron spectroscopy in the gas phase for the first time. However, photoelectron spectroscopy gives access only to the final ionic states. Corresponding calculations to simulate these spectra are still in development. The peak energies and their evolution in time with respect to the initiation pump pulse have been determined, these peaks have been assigned based on literature data. The spectra of the two complexes show clear differences. The dynamics have been interpreted with the assumption that the motion of peaks in the spectra relates to the movement of the wave packet in the multidimensional energy landscape. The results largely confirm existing models for the reaction pathways. In both metal carbonyls, this pathway involves a direct excitation of the wave packet to a metal-to-ligand charge transfer state and the subsequent crossing to a dissociative ligand field state. The coupling of the electronic dynamics to the nuclear dynamics could explain the slower dissociation in Fe(CO)₅ as compared to Cr(CO)₆.}, language = {en} } @phdthesis{Won2016, author = {Won, Jooyoung}, title = {Dynamic and equilibrium adsorption behaviour of ß-lactoglobulin at the solution/tetradecane interface: Effect of solution concentration, pH and ionic strength}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99167}, school = {Universit{\"a}t Potsdam}, pages = {ix, 106}, year = {2016}, abstract = {Proteins are amphiphilic and adsorb at liquid interfaces. Therefore, they can be efficient stabilizers of foams and emulsions. β-lactoglobulin (BLG) is one of the most widely studied proteins due to its major industrial applications, in particular in food technology. In the present work, the influence of different bulk concentration, solution pH and ionic strength on the dynamic and equilibrium pressures of BLG adsorbed layers at the solution/tetradecane (W/TD) interface has been investigated. Dynamic interfacial pressure (Π) and interfacial dilational elastic modulus (E') of BLG solutions for various concentrations at three different pH values of 3, 5 and 7 at a fixed ionic strength of 10 mM and for a selected fixed concentration at three different ionic strengths of 1 mM, 10 mM and 100 mM are measured by Profile Analysis Tensiometer PAT-1 (SINTERFACE Technologies, Germany). A quantitative data analysis requires additional consideration of depletion due to BLG adsorption at the interface at low protein bulk concentrations. This fact makes experiments more efficient when oil drops are studied in the aqueous protein solutions rather than solution drops formed in oil. On the basis of obtained experimental data, concentration dependencies and the effect of solution pH on the protein surface activity was qualitatively analysed. In the presence of 10 mM buffer, we observed that generally the adsorbed amount is increasing with increasing BLG bulk concentration for all three pH values. The adsorption kinetics at pH 5 result in the highest Π values at any time of adsorption while it exhibits a less active behaviour at pH 3. Since the experimental data have not been in a good agreement with the classical diffusion controlled model due to the conformational changes which occur when the protein molecules get in contact with the hydrophobic oil phase in order to adapt to the interfacial environment, a new theoretical model is proposed here. The adsorption kinetics data were analysed with the newly proposed model, which is the classical diffusion model but modified by assuming an additional change in the surface activity of BLG molecules when adsorbing at the interface. This effect can be expressed through the adsorption activity constant in the corresponding equation of state. The dilational visco-elasticity of the BLG adsorbed interfacial layers is determined from measured dynamic interfacial tensions during sinusoidal drop area variations. The interfacial tension responses to these harmonic drop oscillations are interpreted with the same thermodynamic model which is used for the corresponding adsorption isotherm. At a selected BLG concentration of 2×10-6 mol/l, the influence of the ionic strength using different buffer concentration of 1, 10 and 100 mM on the interfacial pressure was studied. It is affected weakly at pH 5, whereas it has a strong impact by increasing buffer concentration at pH 3 and 7. In conclusion, the structure formation of BLG adsorbed layer in the early stage of adsorption at the W/TD interface is similar to those of the solution/air (W/A) surface. However, the equation of state at the W/TD interface provides an adsorption activity constant which is almost two orders of magnitude higher than that for the solution/air surface. At the end of this work, a new experimental tool called Drop and Bubble Micro Manipulator DBMM (SINTERFACE Technologies, Germany) has been introduced to study the stability of protein covered bubbles against coalescence. Among the available protocols the lifetime between the moment of contact and coalescence of two contacting bubble is determined for different BLG concentrations. The adsorbed amount of BLG is determined as a function of time and concentration and correlates with the observed coalescence behaviour of the contacting bubbles.}, language = {en} } @phdthesis{BreakellFernandez2016, author = {Breakell Fernandez, Leigh}, title = {Investigating word order processing using pupillometry and event-related potentials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91438}, school = {Universit{\"a}t Potsdam}, pages = {x, 122}, year = {2016}, abstract = {In this thesis sentence processing was investigated using a psychophysiological measure known as pupillometry as well as Event-Related Potentials (ERP). The scope of the the- sis was broad, investigating the processing of several different movement constructions with native speakers of English and second language learners of English, as well as word order and case marking in German speaking adults and children. Pupillometry and ERP allowed us to test competing linguistic theories and use novel methodologies to investigate the processing of word order. In doing so we also aimed to establish pupillometry as an effective way to investigate the processing of word order thus broadening the methodological spectrum.}, language = {en} } @phdthesis{Gutsch2016, author = {Gutsch, Martin}, title = {Model-based analysis of climate change impacts on the productivity of oak-pine forests in Brandenburg}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-97241}, school = {Universit{\"a}t Potsdam}, pages = {vii, 148}, year = {2016}, abstract = {The relationship between climate and forest productivity is an intensively studied subject in forest science. This Thesis is embedded within the general framework of future forest growth under climate change and its implications for the ongoing forest conversion. My objective is to investigate the future forest productivity at different spatial scales (from a single specific forest stand to aggregated information across Germany) with focus on oak-pine forests in the federal state of Brandenburg. The overarching question is: how are the oak-pine forests affected by climate change described by a variety of climate scenarios. I answer this question by using a model based analysis of tree growth processes and responses to different climate scenarios with emphasis on drought events. In addition, a method is developed which considers climate change uncertainty of forest management planning. As a first 'screening' of climate change impacts on forest productivity, I calculated the change in net primary production on the base of a large set of climate scenarios for different tree species and the total area of Germany. Temperature increases up to 3 K lead to positive effects on the net primary production of all selected tree species. But, in water-limited regions this positive net primary production trend is dependent on the length of drought periods which results in a larger uncertainty regarding future forest productivity. One of the regions with the highest uncertainty of net primary production development is the federal state of Brandenburg. To enhance the understanding and ability of model based analysis of tree growth sensitivity to drought stress two water uptake approaches in pure pine and mixed oak-pine stands are contrasted. The first water uptake approach consists of an empirical function for root water uptake. The second approach is more mechanistic and calculates the differences of soil water potential along a soil-plant-atmosphere continuum. I assumed the total root resistance to vary at low, medium and high total root resistance levels. For validation purposes three data sets on different tree growth relevant time scales are used. Results show that, except the mechanistic water uptake approach with high total root resistance, all transpiration outputs exceeded observed values. On the other hand high transpiration led to a better match of observed soil water content. The strongest correlation between simulated and observed annual tree ring width occurred with the mechanistic water uptake approach and high total root resistance. The findings highlight the importance of severe drought as a main reason for small diameter increment, best supported by the mechanistic water uptake approach with high root resistance. However, if all aspects of the data sets are considered no approach can be judged superior to the other. I conclude that the uncertainty of future productivity of water-limited forest ecosystems under changing environmental conditions is linked to simulated root water uptake. Finally my study aimed at the impacts of climate change combined with management scenarios on an oak-pine forest to evaluate growth, biomass and the amount of harvested timber. The pine and the oak trees are 104 and 9 years old respectively. Three different management scenarios with different thinning intensities and different climate scenarios are used to simulate the performance of management strategies which explicitly account for the risks associated with achieving three predefined objectives (maximum carbon storage, maximum harvested timber, intermediate). I found out that in most cases there is no general management strategy which fits best to different objectives. The analysis of variance in the growth related model outputs showed an increase of climate uncertainty with increasing climate warming. Interestingly, the increase of climate-induced uncertainty is much higher from 2 to 3 K than from 0 to 2 K.}, language = {en} } @phdthesis{Waetzoldt2016, author = {W{\"a}tzoldt, Sebastian}, title = {Modeling collaborations in adaptive systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-97494}, school = {Universit{\"a}t Potsdam}, pages = {XII, 380}, year = {2016}, abstract = {Recently, due to an increasing demand on functionality and flexibility, beforehand isolated systems have become interconnected to gain powerful adaptive Systems of Systems (SoS) solutions with an overall robust, flexible and emergent behavior. The adaptive SoS comprises a variety of different system types ranging from small embedded to adaptive cyber-physical systems. On the one hand, each system is independent, follows a local strategy and optimizes its behavior to reach its goals. On the other hand, systems must cooperate with each other to enrich the overall functionality to jointly perform on the SoS level reaching global goals, which cannot be satisfied by one system alone. Due to difficulties of local and global behavior optimizations conflicts may arise between systems that have to be solved by the adaptive SoS. This thesis proposes a modeling language that facilitates the description of an adaptive SoS by considering the adaptation capabilities in form of feedback loops as first class entities. Moreover, this thesis adopts the Models@runtime approach to integrate the available knowledge in the systems as runtime models into the modeled adaptation logic. Furthermore, the modeling language focuses on the description of system interactions within the adaptive SoS to reason about individual system functionality and how it emerges via collaborations to an overall joint SoS behavior. Therefore, the modeling language approach enables the specification of local adaptive system behavior, the integration of knowledge in form of runtime models and the joint interactions via collaboration to place the available adaptive behavior in an overall layered, adaptive SoS architecture. Beside the modeling language, this thesis proposes analysis rules to investigate the modeled adaptive SoS, which enables the detection of architectural patterns as well as design flaws and pinpoints to possible system threats. Moreover, a simulation framework is presented, which allows the direct execution of the modeled SoS architecture. Therefore, the analysis rules and the simulation framework can be used to verify the interplay between systems as well as the modeled adaptation effects within the SoS. This thesis realizes the proposed concepts of the modeling language by mapping them to a state of the art standard from the automotive domain and thus, showing their applicability to actual systems. Finally, the modeling language approach is evaluated by remodeling up to date research scenarios from different domains, which demonstrates that the modeling language concepts are powerful enough to cope with a broad range of existing research problems.}, language = {en} } @phdthesis{Couturier2016, author = {Couturier, Jean-Philippe}, title = {New inverse opal hydrogels as platform for detecting macromolecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98412}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 132, XXXVIII}, year = {2016}, abstract = {In this thesis, a route to temperature-, pH-, solvent-, 1,2-diol-, and protein-responsive sensors made of biocompatible and low-fouling materials is established. These sensor devices are based on the sensitivemodulation of the visual band gap of a photonic crystal (PhC), which is induced by the selective binding of analytes, triggering a volume phase transition. The PhCs introduced by this work show a high sensitivity not only for small biomolecules, but also for large analytes, such as glycopolymers or proteins. This enables the PhC to act as a sensor that detects analytes without the need of complex equipment. Due to their periodical dielectric structure, PhCs prevent the propagation of specific wavelengths. A change of the periodicity parameters is thus indicated by a change in the reflected wavelengths. In the case explored, the PhC sensors are implemented as periodically structured responsive hydrogels in formof an inverse opal. The stimuli-sensitive inverse opal hydrogels (IOHs) were prepared using a sacrificial opal template of monodispersed silica particles. First, monodisperse silica particles were assembled with a hexagonally packed structure via vertical deposition onto glass slides. The obtained silica crystals, also named colloidal crystals (CCs), exhibit structural color. Subsequently, the CCs templates were embedded in polymer matrix with low-fouling properties. The polymer matrices were composed of oligo(ethylene glycol) methacrylate derivatives (OEGMAs) that render the hydrogels thermoresponsive. Finally, the silica particles were etched, to produce highly porous hydrogel replicas of the CC. Importantly, the inner structure and thus the ability for light diffraction of the IOHs formed was maintained. The IOH membrane was shown to have interconnected pores with a diameter as well as interconnections between the pores of several hundred nanometers. This enables not only the detection of small analytes, but also, the detection of even large analytes that can diffuse into the nanostructured IOH membrane. Various recognition unit - analyte model systems, such as benzoboroxole - 1,2-diols, biotin - avidin and mannose - concanavalin A, were studied by incorporating functional comonomers of benzoboroxole, biotin and mannose into the copolymers. The incorporated recognition units specifically bind to certain low and highmolar mass biomolecules, namely to certain saccharides, catechols, glycopolymers or proteins. Their specific binding strongly changes the overall hydrophilicity, thus modulating the swelling of the IOH matrices, and in consequence, drastically changes their internal periodicity. This swelling is amplified by the thermoresponsive properties of the polymer matrix. The shift of the interference band gap due to the specific molecular recognition is easily visible by the naked eye (up to 150 nm shifts). Moreover, preliminary trial were attempted to detect even larger entities. Therefore anti-bodies were immobilized on hydrogel platforms via polymer-analogous esterification. These platforms incorporate comonomers made of tri(ethylene glycol) methacrylate end-functionalized with a carboxylic acid. In these model systems, the bacteria analytes are too big to penetrate into the IOH membranes, but can only interact with their surfaces. The selected model bacteria, as Escherichia coli, show a specific affinity to anti-body-functionalized hydrogels. Surprisingly in the case functionalized IOHs, this study produced weak color shifts, possibly opening a path to detect directly living organism, which will need further investigations.}, language = {en} } @phdthesis{Lenz2016, author = {Lenz, Josefine}, title = {Thermokarst dynamics in central-eastern Beringia}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101364}, school = {Universit{\"a}t Potsdam}, pages = {XII, 128, A-47}, year = {2016}, abstract = {Widespread landscape changes are presently observed in the Arctic and are most likely to accelerate in the future, in particular in permafrost regions which are sensitive to climate warming. To assess current and future developments, it is crucial to understand past environmental dynamics in these landscapes. Causes and interactions of environmental variability can hardly be resolved by instrumental records covering modern time scales. However, long-term environmental variability is recorded in paleoenvironmental archives. Lake sediments are important archives that allow reconstruction of local limnogeological processes as well as past environmental changes driven directly or indirectly by climate dynamics. This study aims at reconstructing Late Quaternary permafrost and thermokarst dynamics in central-eastern Beringia, the terrestrial land mass connecting Eurasia and North America during glacial sea-level low stands. In order to investigate development, processes and influence of thermokarst dynamics, several sediment cores from extant lakes and drained lake basins were analyzed to answer the following research questions: 1. When did permafrost degradation and thermokarst lake development take place and what were enhancing and inhibiting environmental factors? 2. What are the dominant processes during thermokarst lake development and how are they reflected in proxy records? 3. How did, and still do, thermokarst dynamics contribute to the inventory and properties of organic matter in sediments and the carbon cycle? Methods applied in this study are based upon a multi-proxy approach combining sedimentological, geochemical, geochronological, and micropaleontological analyses, as well as analyses of stable isotopes and hydrochemistry of pore-water and ice. Modern field observations of water quality and basin morphometrics complete the environmental investigations. The investigated sediment cores reveal permafrost degradation and thermokarst dynamics on different time scales. The analysis of a sediment core from GG basin on the northern Seward Peninsula (Alaska) shows prevalent terrestrial accumulation of yedoma throughout the Early to Mid Wisconsin with intermediate wet conditions at around 44.5 to 41.5 ka BP. This first wetland development was terminated by the accumulation of a 1-meter-thick airfall tephra most likely originating from the South Killeak Maar eruption at 42 ka BP. A depositional hiatus between 22.5 and 0.23 ka BP may indicate thermokarst lake formation in the surrounding of the site which forms a yedoma upland till today. The thermokarst lake forming GG basin initiated 230 ± 30 cal a BP and drained in Spring 2005 AD. Four years after drainage the lake talik was still unfrozen below 268 cm depth. A permafrost core from Mama Rhonda basin on the northern Seward Peninsula preserved a full lacustrine record including several lake phases. The first lake generation developed at 11.8 cal ka BP during the Lateglacial-Early Holocene transition; its old basin (Grandma Rhonda) is still partially preserved at the southern margin of the study basin. Around 9.0 cal ka BP a shallow and more dynamic thermokarst lake developed with actively eroding shorelines and potentially intermediate shallow water or wetland phases (Mama Rhonda). Mama Rhonda lake drainage at 1.1 cal ka BP was followed by gradual accumulation of terrestrial peat and top-down refreezing of the lake talik. A significant lower organic carbon content was measured in Grandma Rhonda deposits (mean TOC of 2.5 wt\%) than in Mama Rhonda deposits (mean TOC of 7.9 wt\%) highlighting the impact of thermokarst dynamics on biogeochemical cycling in different lake generations by thawing and mobilization of organic carbon into the lake system. Proximal and distal sediment cores from Peatball Lake on the Arctic Coastal Plain of Alaska revealed young thermokarst dynamics since about 1,400 years along a depositional gradient based on reconstructions from shoreline expansion rates and absolute dating results. After its initiation as a remnant pond of a previous drained lake basin, a rapidly deepening lake with increasing oxygenation of the water column is evident from laminated sediments, and higher Fe/Ti and Fe/S ratios in the sediment. The sediment record archived characterizing shifts in depositional regimes and sediment sources from upland deposits and re-deposited sediments from drained thaw lake basins depending on the gradually changing shoreline configuration. These changes are evident from alternating organic inputs into the lake system which highlights the potential for thermokarst lakes to recycle old carbon from degrading permafrost deposits of its catchment. The lake sediment record from Herschel Island in the Yukon (Canada) covers the full Holocene period. After its initiation as a thermokarst lake at 11.7 cal ka BP and intense thermokarst activity until 10.0 cal ka BP, the steady sedimentation was interrupted by a depositional hiatus at 1.6 cal ka BP which likely resulted from lake drainage or allochthonous slumping due to collapsing shore lines. The specific setting of the lake on a push moraine composed of marine deposits is reflected in the sedimentary record. Freshening of the maturing lake is indicated by decreasing electrical conductivity in pore-water. Alternation of marine to freshwater ostracods and foraminifera confirms decreasing salinity as well but also reflects episodical re-deposition of allochthonous marine sediments. Based on permafrost and lacustrine sediment records, this thesis shows examples of the Late Quaternary evolution of typical Arctic permafrost landscapes in central-eastern Beringia and the complex interaction of local disturbance processes, regional environmental dynamics and global climate patterns. This study confirms that thermokarst lakes are important agents of organic matter recycling in complex and continuously changing landscapes.}, language = {en} } @phdthesis{Hildebrand2016, author = {Hildebrand, Viet}, title = {Twofold switchable block copolymers based on new polyzwitterions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101372}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 170, LXXX}, year = {2016}, abstract = {In complement to the well-established zwitterionic monomers 3-((2-(methacryloyloxy)ethyl)dimethylammonio)propane-1-sulfonate ("SPE") and 3-((3-methacrylamidopropyl)dimethylammonio)propane-1-sulfonate ("SPP"), the closely related sulfobetaine monomers were synthesized and polymerized by reversible addition-fragmentation chain transfer (RAFT) polymerization, using a fluorophore labeled RAFT agent. The polyzwitterions of systematically varied molar mass were characterized with respect to their solubility in water, deuterated water, and aqueous salt solutions. These poly(sulfobetaine)s show thermoresponsive behavior in water, exhibiting upper critical solution temperatures (UCST). Phase transition temperatures depend notably on the molar mass and polymer concentration, and are much higher in D2O than in H2O. Also, the phase transition temperatures are effectively modulated by the addition of salts. The individual effects can be in parts correlated to the Hofmeister series for the anions studied. Still, they depend in a complex way on the concentration and the nature of the added electrolytes, on the one hand, and on the detailed structure of the zwitterionic side chain, on the other hand. For the polymers with the same zwitterionic side chain, it is found that methacrylamide-based poly(sulfobetaine)s exhibit higher UCST-type transition temperatures than their methacrylate analogs. The extension of the distance between polymerizable unit and zwitterionic groups from 2 to 3 methylene units decreases the UCST-type transition temperatures. Poly(sulfobetaine)s derived from aliphatic esters show higher UCST-type transition temperatures than their analogs featuring cyclic ammonium cations. The UCST-type transition temperatures increase markedly with spacer length separating the cationic and anionic moieties from 3 to 4 methylene units. Thus, apparently small variations of their chemical structure strongly affect the phase behavior of the polyzwitterions in specific aqueous environments. Water-soluble block copolymers were prepared from the zwitterionic monomers and the non-ionic monomer N-isopropylmethacrylamide ("NIPMAM") by the RAFT polymerization. Such block copolymers with two hydrophilic blocks exhibit twofold thermoresponsive behavior in water. The poly(sulfobetaine) block shows an UCST, whereas the poly(NIPMAM) block exhibits a lower critical solution temperature (LCST). This constellation induces a structure inversion of the solvophobic aggregate, called "schizophrenic micelle". Depending on the relative positions of the two different phase transitions, the block copolymer passes through a molecularly dissolved or an insoluble intermediate regime, which can be modulated by the polymer concentration or by the addition of salt. Whereas, at low temperature, the poly(sulfobetaine) block forms polar aggregates that are kept in solution by the poly(NIPMAM) block, at high temperature, the poly(NIPMAM) block forms hydrophobic aggregates that are kept in solution by the poly(sulfobetaine) block. Thus, aggregates can be prepared in water, which switch reversibly their "inside" to the "outside", and vice versa.}, language = {en} } @phdthesis{Schmidt2016, author = {Schmidt, Peter}, title = {Contributions to EU regional policy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90837}, school = {Universit{\"a}t Potsdam}, pages = {xii, 137}, year = {2016}, abstract = {This cumulative dissertation contains four self-contained articles which are related to EU regional policy and its structural funds as the overall research topic. In particular, the thesis addresses the question if EU regional policy interventions can at all be scientifically justified and legitimated on theoretical and empirical grounds from an economics point of view. The first two articles of the thesis ("The EU structural funds as a means to hamper migration" and "Internal migration and EU regional policy transfer payments: a panel data analysis for 28 EU member countries") enter into one particular aspect of the debate regarding the justification and legitimisation of EU regional policy. They theoretically and empirically analyse as to whether regional policy or the market force of the free flow of labour (migration) in the internal European market is the better instrument to improve and harmonise the living and working conditions of EU citizens. Based on neoclassical market failure theory, the first paper argues that the structural funds of the EU are inhibiting internal migration, which is one of the key measures in achieving convergence among the nations in the single European market. It becomes clear that European regional policy aiming at economic growth and cohesion among the member states cannot be justified and legitimated if the structural funds hamper instead of promote migration. The second paper, however, shows that the empirical evidence on the migration and regional policy nexus is not unambiguous, i.e. different empirical investigations show that EU structural funds hamper and promote EU internal migration. Hence, the question of the scientific justification and legitimisation of EU regional policy cannot be readily and unambiguously answered on empirical grounds. This finding is unsatisfying but is in line with previous theoretical and empirical literature. That is why, I take a step back and reconsider the theoretical beginnings of the thesis, which took for granted neoclassical market failure theory as the starting point for the positive explanation as well as the normative justification and legitimisation of EU regional policy. The third article of the thesis ("EU regional policy: theoretical foundations and policy conclusions revisited") deals with the theoretical explanation and legitimisation of EU regional policy as well as the policy recommendations given to EU regional policymakers deduced from neoclassical market failure theory. The article elucidates that neoclassical market failure is a normative concept, which justifies and legitimates EU regional policy based on a political and thus subjective goal or value-judgement. It can neither be used, therefore, to give a scientifically positive explanation of the structural funds nor to obtain objective and practically applicable policy instruments. Given this critique of neoclassical market failure theory, the third paper consequently calls into question the widely prevalent explanation and justification of EU regional policy given in static neoclassical equilibrium economics. It argues that an evolutionary non-equilibrium economics perspective on EU regional policy is much more appropriate to provide a realistic understanding of one of the largest policies conducted by the EU. However, this does neither mean that evolutionary economic theory can be unreservedly seen as the panacea to positively explain EU regional policy nor to derive objective policy instruments for EU regional policymakers. This issue is discussed in the fourth article of the thesis ("Market failure vs. system failure as a rationale for economic policy? A critique from an evolutionary perspective"). This article reconsiders the explanation of economic policy from an evolutionary economics perspective. It contrasts the neoclassical equilibrium notions of market and government failure with the dominant evolutionary neo-Schumpeterian and Austrian-Hayekian perceptions. Based on this comparison, the paper criticises the fact that neoclassical failure reasoning still prevails in non-equilibrium evolutionary economics when economic policy issues are examined. This is surprising, since proponents of evolutionary economics usually view their approach as incompatible with its neoclassical counterpart. The paper therefore argues that in order to prevent the otherwise fruitful and more realistic evolutionary approach from undermining its own criticism of neoclassical economics and to create a consistent as well as objective evolutionary policy framework, it is necessary to eliminate the equilibrium spirit. Taken together, the main finding of this thesis is that European regional policy and its structural funds can neither theoretically nor empirically be justified and legitimated from an economics point of view. Moreover, the thesis finds that the prevalent positive and instrumental explanation of EU regional policy given in the literature needs to be reconsidered, because these theories can neither scientifically explain the emergence and development of this policy nor are they appropriate to derive objective and scientific policy instruments for EU regional policymakers.}, language = {en} } @phdthesis{Gomez2016, author = {Gomez, David}, title = {Mechanisms of biochemical reactions within crowded environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94593}, school = {Universit{\"a}t Potsdam}, pages = {vii, 112}, year = {2016}, abstract = {The cell interior is a highly packed environment in which biological macromolecules evolve and function. This crowded media has effects in many biological processes such as protein-protein binding, gene regulation, and protein folding. Thus, biochemical reactions that take place in such crowded conditions differ from diluted test tube conditions, and a considerable effort has been invested in order to understand such differences. In this work, we combine different computationally tools to disentangle the effects of molecular crowding on biochemical processes. First, we propose a lattice model to study the implications of molecular crowding on enzymatic reactions. We provide a detailed picture of how crowding affects binding and unbinding events and how the separate effects of crowding on binding equilibrium act together. Then, we implement a lattice model to study the effects of molecular crowding on facilitated diffusion. We find that obstacles on the DNA impair facilitated diffusion. However, the extent of this effect depends on how dynamic obstacles are on the DNA. For the scenario in which crowders are only present in the bulk solution, we find that at some conditions presence of crowding agents can enhance specific-DNA binding. Finally, we make use of structure-based techniques to look at the impact of the presence of crowders on the folding a protein. We find that polymeric crowders have stronger effects on protein stability than spherical crowders. The strength of this effect increases as the polymeric crowders become longer. The methods we propose here are general and can also be applied to more complicated systems.}, language = {en} } @phdthesis{Schroth2016, author = {Schroth, Maximilian}, title = {Microfinance and the enhancement of economic development in less developed countries}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94735}, school = {Universit{\"a}t Potsdam}, pages = {XII, 287}, year = {2016}, abstract = {It is the intention of this study to contribute to further rethinking and innovating in the Microcredit business which stands at a turning point - after around 40 years of practice it is endangered to fail as a tool for economic development and to become a doubtful finance product with a random scope instead. So far, a positive impact of Microfinance on the improvement of the lives of the poor could not be confirmed. Over-indebtment of borrowers due to the pre-dominance of consumption Microcredits has become a widespread problem. Furthermore, a rising number of abusive and commercially excessive practices have been reported. In fact, the Microfinance sector appears to suffer from a major underlying deficit: there does not exist a coherent and transparent understanding of its meaning and objectives so that Microfinance providers worldwide follow their own approaches of Microfinance which tend to differ considerably from each other. In this sense the study aims at consolidating the multi-faced and very often confusingly different Microcredit profiles that exist nowadays. Subsequently, in this study, the Microfinance spectrum will be narrowed to one clear-cut objective, in fact away from the mere monetary business transactions to poor people it has gradually been reduced to back towards a tool for economic development as originally envisaged by its pioneers. Hence, the fundamental research question of this study is whether, and under which conditions, Microfinance may attain a positive economic impact leading to an improvement of the living of the poor. The study is structured in five parts: the three main parts (II.-IV.) are surrounded by an introduction (I.) and conclusion (V.). In part II., the Microfinance sector is analysed critically aiming to identify the challenges persisting as well as their root causes. In the third part, a change to the macroeconomic perspective is undertaken in oder to learn about the potential and requirements of small-scale finance to enhance economic development, particularly within the economic context of less developed countries. By consolidating the insights gained in part IV., the elements of a new concept of Microfinance with the objecitve to achieve economic development of its borrowers are elaborated. Microfinance is a rather sensitive business the great fundamental idea of which is easily corruptible and, additionally, the recipients of which are predestined victims of abuse due to their limited knowledge in finance. It therefore needs to be practiced responsibly, but also according to clear cut definitions of its meaning and objectives all institutions active in the sector should be devoted to comply with. This is especially relevant as the demand for Microfinance services is expected to rise further within the years coming. For example, the recent refugee migration movement towards Europe entails a vast potential for Microfinance to enable these people to make a new start into economic life. This goes to show that Microfinance may no longer mainly be associated with a less developed economic context, but that it will gain importance as a financial instrument in the developed economies, too.}, language = {en} } @phdthesis{Makower2016, author = {Makower, Katharina}, title = {The roles of secondary metabolites in microcystis inter-strain interactions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93916}, school = {Universit{\"a}t Potsdam}, pages = {X, 131}, year = {2016}, abstract = {Among the bloom-forming and potentially harmful cyanobacteria, the genus Microcystis represents a most diverse taxon, on the genomic as well as on morphological and secondary metabolite levels. Microcystis communities are composed of a variety of diversified strains. The focus of this study lies on potential interactions between Microcystis representatives and the roles of secondary metabolites in these interaction processes. The role of secondary metabolites functioning as signaling molecules in the investigated interactions is demonstrated exemplary for the prevalent hepatotoxin microcystin. The extracellular and intracellular roles of microcystin are tested in microarray-based transcriptomic approaches. While an extracellular effect of microcystin on Microcystis transcription is confirmed and connected to a specific gene cluster of another secondary metabolite in this study, the intracellularly occurring microcystin is related with several pathways of the primary metabolism. A clear correlation of a microcystin knockout and the SigE-mediated regulation of carbon metabolism is found. According to the acquired transcriptional data, a model is proposed that postulates the regulating effect of microcystin on transcriptional regulators such as the alternative sigma factor SigE, which in return captures an essential role in sugar catabolism and redox-state regulation. For the purpose of simulating community conditions as found in the field, Microcystis colonies are isolated from the eutrophic lakes near Potsdam, Germany and established as stably growing under laboratory conditions. In co-habitation simulations, the recently isolated field strain FS2 is shown to specifically induce nearly immediate aggregation reactions in the axenic lab strain Microcystis aeruginosa PCC 7806. In transcriptional studies via microarrays, the induced expression program in PCC 7806 after aggregation induction is shown to involve the reorganization of cell envelope structures, a highly altered nutrient uptake balance and the reorientation of the aggregating cells to a heterotrophic carbon utilization, e.g. via glycolysis. These transcriptional changes are discussed as mechanisms of niche adaptation and acclimation in order to prevent competition for resources.}, language = {en} } @phdthesis{Draeger2016, author = {Dr{\"a}ger, Nadine}, title = {Holocene climate and environmental variability in NE Germany inferred from annually laminated lake sediments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103037}, school = {Universit{\"a}t Potsdam}, pages = {xv, 144 Seiten}, year = {2016}, abstract = {Understanding the role of natural climate variability under the pressure of human induced changes of climate and landscapes, is crucial to improve future projections and adaption strategies. This doctoral thesis aims to reconstruct Holocene climate and environmental changes in NE Germany based on annually laminated lake sediments. The work contributes to the ICLEA project (Integrated CLimate and Landscape Evolution Analyses). ICLEA intends to compare multiple high-resolution proxy records with independent chronologies from the N central European lowlands, in order to disentangle the impact of climate change and human land use on landscape development during the Lateglacial and Holocene. In this respect, two study sites in NE Germany are investigated in this doctoral project, Lake Tiefer See and palaeolake Wukenfurche. While both sediment records are studied with a combination of high-resolution sediment microfacies and geochemical analyses (e.g. µ-XRF, carbon geochemistry and stable isotopes), detailed proxy understanding mainly focused on the continuous 7.7 m long sediment core from Lake Tiefer See covering the last ~6000 years. Three main objectives are pursued at Lake Tiefer See: (1) to perform a reliable and independent chronology, (2) to establish microfacies and geochemical proxies as indicators for climate and environmental changes, and (3) to trace the effects of climate variability and human activity on sediment deposition. Addressing the first aim, a reliable chronology of Lake Tiefer See is compiled by using a multiple-dating concept. Varve counting and tephra findings form the chronological framework for the last ~6000 years. The good agreement with independent radiocarbon dates of terrestrial plant remains verifies the robustness of the age model. The resulting reliable and independent chronology of Lake Tiefer See and, additionally, the identification of nine tephras provide a valuable base for detailed comparison and synchronization of the Lake Tiefer See data set with other climate records. The sediment profile of Lake Tiefer See exhibits striking alternations between well-varved and non-varved sediment intervals. The combination of microfacies, geochemical and microfossil (i.e. Cladocera and diatom) analyses indicates that these changes of varve preservation are caused by variations of lake circulation in Lake Tiefer See. An exception is the well-varved sediment deposited since AD 1924, which is mainly influenced by human-induced lake eutrophication. Well-varved intervals before the 20th century are considered to reflect phases of reduced lake circulation and, consequently, stronger anoxic conditions. Instead, non-varved intervals indicate increased lake circulation in Lake Tiefer See, leading to more oxygenated conditions at the lake ground. Furthermore, lake circulation is not only influencing sediment deposition, but also geochemical processes in the lake. As, for example, the proxy meaning of δ13COM varies in time in response to changes of the oxygen regime in the lake hypolinion. During reduced lake circulation and stronger anoxic conditions δ13COM is influenced by microbial carbon cycling. In contrast, organic matter degradation controls δ13COM during phases of intensified lake circulation and more oxygenated conditions. The varve preservation indicates an increasing trend of lake circulation at Lake Tiefer See after ~4000 cal a BP. This trend is superimposed by decadal to centennial scale variability of lake circulation intensity. Comparison to other records in Central Europe suggests that the long-term trend is probably related to gradual changes in Northern Hemisphere orbital forcing, which induced colder and windier conditions in Central Europe and, therefore, reinforced lake circulation. Decadal to centennial scale periods of increased lake circulation coincide with settlement phases at Lake Tiefer See, as inferred from pollen data of the same sediment record. Deforestation reduced the wind shelter of the lake, which probably increased the sensitivity of lake circulation to wind stress. However, results of this thesis also suggest that several of these phases of increased lake circulation are additionally reinforced by climate changes. A first indication is provided by the comparison to the Baltic Sea record, which shows striking correspondence between major non-varved intervals at Lake Tiefer See and bioturbated sediments in the Baltic Sea. Furthermore, a preliminary comparison to the ICLEA study site Lake Czechowskie (N central Poland) shows a coincidence of at least three phases of increased lake circulation in both lakes, which concur with periods of known climate changes (2.8 ka event, 'Migration Period' and 'Little Ice Age'). These results suggest an additional over-regional climate forcing also on short term increased of lake circulation in Lake Tiefer See. In summary, the results of this thesis suggest that lake circulation at Lake Tiefer See is driven by a combination of long-term and short-term climate changes as well as of anthropogenic deforestation phases. Furthermore, the lake circulation drives geochemical cycles in the lake affecting the meaning of proxy data. Therefore, the work presented here expands the knowledge of climate and environmental variability in NE Germany. Furthermore, the integration of the Lake Tiefer See multi-proxy record in a regional comparison with another ICLEA side, Lake Czechowskie, enabled to better decipher climate changes and human impact on the lake system. These first results suggest a huge potential for further detailed regional comparisons to better understand palaeoclimate dynamics in N central Europe.}, language = {en} } @phdthesis{Sin2016, author = {Sin, Celine}, title = {Post-transcriptional control of gene expression}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102469}, school = {Universit{\"a}t Potsdam}, pages = {xxv, 238}, year = {2016}, abstract = {Gene expression describes the process of making functional gene products (e.g. proteins or special RNAs) from instructions encoded in the genetic information (e.g. DNA). This process is heavily regulated, allowing cells to produce the appropriate gene products necessary for cell survival, adapting production as necessary for different cell environments. Gene expression is subject to regulation at several levels, including transcription, mRNA degradation, translation and protein degradation. When intact, this system maintains cell homeostasis, keeping the cell alive and adaptable to different environments. Malfunction in the system can result in disease states and cell death. In this dissertation, we explore several aspects of gene expression control by analyzing data from biological experiments. Most of the work following uses a common mathematical model framework based on Markov chain models to test hypotheses, predict system dynamics or elucidate network topology. Our work lies in the intersection between mathematics and biology and showcases the power of statistical data analysis and math modeling for validation and discovery of biological phenomena.}, language = {en} } @phdthesis{Shikangalah2016, author = {Shikangalah, Rosemary Ndawapeka}, title = {An ecohydrological impact assessment in urban areas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102356}, school = {Universit{\"a}t Potsdam}, pages = {ii, 105}, year = {2016}, abstract = {Over the last decades, the world's population has been growing at a faster rate, resulting in increased urbanisation, especially in developing countries. More than half of the global population currently lives in urbanised areas with an increasing tendency. The growth of cities results in a significant loss of vegetation cover, soil compaction and sealing of the soil surface which in turn results in high surface runoff during high-intensity storms and causes the problem of accelerated soil water erosion on streets and building grounds. Accelerated soil water erosion is a serious environmental problem in cities as it gives rise to the contamination of aquatic bodies, reduction of ground water recharge and increase in land degradation, and also results in damages to urban infrastructures, including drainage systems, houses and roads. Understanding the problem of water erosion in urban settings is essential for the sustainable planning and management of cities prone to water erosion. However, in spite of the vast existence of scientific literature on water erosion in rural regions, a concrete understanding of the underlying dynamics of urban erosion still remains inadequate for the urban dryland environments. This study aimed at assessing water erosion and the associated socio-environmental determinants in a typical dryland urban area and used the city of Windhoek, Namibia, as a case study. The study used a multidisciplinary approach to assess the problem of water erosion. This included an in depth literature review on current research approaches and challenges of urban erosion, a field survey method for the quantification of the spatial extent of urban erosion in the dryland city of Windhoek, and face to face interviews by using semi-structured questionnaires to analyse the perceptions of stakeholders on urban erosion. The review revealed that around 64\% of the literatures reviewed were conducted in the developed world, and very few researches were carried out in regions with extreme climate, including dryland regions. Furthermore, the applied methods for erosion quantification and monitoring are not inclusive of urban typical features and they are not specific for urban areas. The reviewed literature also lacked aspects aimed at addressing the issues of climate change and policies regarding erosion in cities. In a field study, the spatial extent and severity of an urban dryland city, Windhoek, was quantified and the results show that nearly 56\% of the city is affected by water erosion showing signs of accelerated erosion in the form of rills and gullies, which occurred mainly in the underdeveloped, informal and semi-formal areas of the city. Factors influencing the extent of erosion in Windhoek included vegetation cover and type, socio-urban factors and to a lesser extent slope estimates. A comparison of an interpolated field survey erosion map with a conventional erosion assessment tool (the Universal Soil Loss Equation) depicted a large deviation in spatial patterns, which underlines the inappropriateness of traditional non-urban erosion tools to urban settings and emphasises the need to develop new erosion assessment and management methods for urban environments. It was concluded that measures for controlling water erosion in the city need to be site-specific as the extent of erosion varied largely across the city. The study also analysed the perceptions and understanding of stakeholders of urban water erosion in Windhoek, by interviewing 41 stakeholders using semi-structured questionnaires. The analysis addressed their understanding of water erosion dynamics, their perceptions with regards to the causes and the seriousness of erosion damages, and their attitudes towards the responsibilities for urban erosion. The results indicated that there is less awareness of the process as a phenomenon, instead there is more awareness of erosion damages and the factors contributing to the damages. About 69\% of the stakeholders considered erosion damages to be ranging from moderate to very serious. However, there were notable disparities between the private householders and public authority groups. The study further found that the stakeholders have no clear understanding of their responsibilities towards the management of the control measures and payment for the damages. The private householders and local authority sectors pointed fingers at each other for the responsibilities for erosion damage payments and for putting up prevention measures. The reluctance to take responsibility could create a predicament for areas affected, specifically in the informal settlements where land management is not carried out by the local authority and land is not owned by the occupants. The study concluded that in order to combat urban erosion, it is crucial to understand diverse dynamics aggravating the process of urbanisation from different scales. Accordingly, the study suggests that there is an urgent need for the development of urban-specific approaches that aim at: (a) incorporating the diverse socio-economic-environmental aspects influencing erosion, (b) scientifically improving natural cycles that influence water storages and nutrients for plants in urbanised dryland areas in order to increase the amount of vegetation cover, (c) making use of high resolution satellite images to improve the adopted methods for assessing urban erosion, (d) developing water erosion policies, and (e) continuously monitoring the impact of erosion and the influencing processes from local, national and international levels.}, language = {en} } @phdthesis{Reschke2016, author = {Reschke, Antje}, title = {Effectiveness of a foot orthosis on muscular activity in functional ankle instability}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-104366}, school = {Universit{\"a}t Potsdam}, pages = {XII, 155, XXXVIII}, year = {2016}, abstract = {A majority of studies documented a reduced ankle muscle activity, particularly of the peroneus longus muscle (PL), in patients with functional ankle instability (FI). It is considered valid that foot orthoses as well as sensorimotor training have a positive effect on ankle muscle activity in healthy individuals and those with lower limb overuse injuries or flat arched feet (reduced reaction time by sensorimotor exercises; increased ankle muscle amplitude by orthoses use). However, the acute- and long-term influence of foot orthoses on ankle muscle activity in individuals with FI is unknown. AIMS: The present thesis addressed (1a) acute- and (1b) long-term effects of foot orthoses compared to sensorimotor training on ankle muscle activity in patients with FI. (2) Further, it was investigated if the orthosis intervention group demonstrate higher ankle muscle activity by additional short-term use of a measurement in-shoe orthosis (compared to short-term use of "shoe only") after intervention. (3) As prerequisite, it was evaluated if ankle muscle activity can be tested reliably and (4) if this differs between healthy individuals and those with FI. METHODS: Three intervention groups (orthosis group [OG], sensorimotor training group [SMTG], control group [CG]), consisting of both, healthy individuals and those with FI, underwent one longitudinal investigation (randomised controlled trial). Throughout 6 weeks of intervention, OG wore an in-shoe orthosis with a specific "PL stimulation module", whereas SMTG conducted home-based exercises. CG served to measure test-retest reliability of ankle muscle activity (PL, M. tibialis anterior [TA] and M. gastrocnemius medialis [GM]). Pre- and post-intervention, ankle muscle activity (EMG amplitude) was recorded during "normal" unperturbed (NW) and perturbed walking (PW) on a split-belt treadmill (stimulus 200 ms post initial heel contact [IC]) as well as during side cutting (SC), each while wearing "shoes only" and additional measurement in-shoe orthoses (randomized order). Normalized RMS values (100\% MVC, mean±SD) were calculated pre- (100-50 ms) and post (200-400 ms) - IC. RESULTS: (3) Test-retest reliability showed a high range of values in healthy individuals and those with FI. (4) Compared to healthy individuals, patients with FI demonstrated lower PL pre-activity during SC, however higher PL pre-activity for NW and PW. (1a) Acute orthoses use did not influence ankle muscle activity. (1b) For most conditions, sensorimotor training was more effective in individuals with FI than long-term orthotic intervention (increased: PL and GM pre-activity and TA reflex-activity for NW, PL pre-activity and TA, PL and GM reflex-activity for SC, PL reflex-activity for PW). However, prolonged orthoses use was more beneficial in terms of an increase in GM pre-activity during SC. For some conditions, long-term orthoses intervention was as effective as sensorimotor training for individuals with FI (increased: PL pre-activity for PW, TA pre-activity for SC, PL and GM reflex-activity for NW). Prolonged orthoses use was also advantageous in healthy individuals (increased: PL and GM pre-activity for NW and PW, PL pre-activity for SC, TA and PL reflex-activity for NW, PL and GM reflex-activity for PW). (2) The orthosis intervention group did not present higher ankle muscle activity by the additional short-term use of a measurement in-shoe orthosis at re-test after intervention. CONCLUSION: High variations of reproducibility reflect physiological variability in muscle activity during gait and therefore deemed acceptable. The main findings confirm the presence of sensorimotor long-term effects of specific foot orthoses in healthy individuals (primary preventive effect) and those with FI (therapeutic effect). Neuromuscular compensatory feedback- as well as anticipatory feedforward adaptation mechanism to prolonged orthoses use, specifically of the PL muscle, underpins the key role of PL in providing essential dynamic ankle joint stability. Due to its advantages over sensorimotor training (positive subjective feedback in terms of comfort, time-and-cost-effectiveness), long-term foot orthoses use can be recommended as an applicable therapy alternative in the treatment of FI. Long-term effect of foot orthoses in a population with FI must be validated in a larger sample size with longer follow-up periods to substantiate the generalizability of the existing outcomes.}, language = {en} } @phdthesis{Chen2016, author = {Chen, Kejie}, title = {Real-time GNSS for fast seismic source inversion and tsunami early warning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93174}, school = {Universit{\"a}t Potsdam}, pages = {xii, 81}, year = {2016}, abstract = {Over the past decades, rapid and constant advances have motivated GNSS technology to approach the ability to monitor transient ground motions with mm to cm accuracy in real-time. As a result, the potential of using real-time GNSS for natural hazards prediction and early warning has been exploited intensively in recent years, e.g., landslides and volcanic eruptions monitoring. Of particular note, compared with traditional seismic instruments, GNSS does not saturate or tilt in terms of co-seismic displacement retrieving, which makes it especially valuable for earthquake and earthquake induced tsunami early warning. In this thesis, we focus on the application of real-time GNSS to fast seismic source inversion and tsunami early warning. Firstly, we present a new approach to get precise co-seismic displacements using cost effective single-frequency receivers. As is well known, with regard to high precision positioning, the main obstacle for single-frequency GPS receiver is ionospheric delay. Considering that over a few minutes, the change of ionospheric delay is almost linear, we constructed a linear model for each satellite to predict ionospheric delay. The effectiveness of this method has been validated by an out-door experiment and 2011 Tohoku event, which confirms feasibility of using dense GPS networks for geo-hazard early warning at an affordable cost. Secondly, we extended temporal point positioning from GPS-only to GPS/GLONASS and assessed the potential benefits of multi-GNSS for co-seismic displacement determination. Out-door experiments reveal that when observations are conducted in an adversary environment, adding a couple of GLONASS satellites could provide more reliable results. The case study of 2015 Illapel Mw 8.3 earthquake shows that the biases between co-seismic displacements derived from GPS-only and GPS/GLONASS vary from station to station, and could be up to 2 cm in horizontal direction and almost 3 cm in vertical direction. Furthermore, slips inverted from GPS/GLONASS co-seismic displacements using a layered crust structure on a curved plane are shallower and larger for the Illapel event. Thirdly, we tested different inversion tools and discussed the uncertainties of using real-time GNSS for tsunami early warning. To be exact, centroid moment tensor inversion, uniform slip inversion using a single Okada fault and distributed slip inversion in layered crust on a curved plane were conducted using co-seismic displacements recorded during 2014 Pisagua earthquake. While the inversion results give similar magnitude and the rupture center, there are significant differences in depth, strike, dip and rake angles, which lead to different tsunami propagation scenarios. Even though, resulting tsunami forecasting along the Chilean coast is close to each other for all three models. Finally, based on the fact that the positioning performance of BDS is now equivalent to GPS in Asia-Pacific area and Manila subduction zone has been identified as a zone of potential tsunami hazard, we suggested a conceptual BDS/GPS network for tsunami early warning in South China Sea. Numerical simulations with two earthquakes (Mw 8.0 and Mw 7.5) and induced tsunamis demonstrate the viability of this network. In addition, the advantage of BDS/GPS over a single GNSS system by source inversion grows with decreasing earthquake magnitudes.}, language = {en} } @phdthesis{Semmo2016, author = {Semmo, Amir}, title = {Design and implementation of non-photorealistic rendering techniques for 3D geospatial data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99525}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 155}, year = {2016}, abstract = {Geospatial data has become a natural part of a growing number of information systems and services in the economy, society, and people's personal lives. In particular, virtual 3D city and landscape models constitute valuable information sources within a wide variety of applications such as urban planning, navigation, tourist information, and disaster management. Today, these models are often visualized in detail to provide realistic imagery. However, a photorealistic rendering does not automatically lead to high image quality, with respect to an effective information transfer, which requires important or prioritized information to be interactively highlighted in a context-dependent manner. Approaches in non-photorealistic renderings particularly consider a user's task and camera perspective when attempting optimal expression, recognition, and communication of important or prioritized information. However, the design and implementation of non-photorealistic rendering techniques for 3D geospatial data pose a number of challenges, especially when inherently complex geometry, appearance, and thematic data must be processed interactively. Hence, a promising technical foundation is established by the programmable and parallel computing architecture of graphics processing units. This thesis proposes non-photorealistic rendering techniques that enable both the computation and selection of the abstraction level of 3D geospatial model contents according to user interaction and dynamically changing thematic information. To achieve this goal, the techniques integrate with hardware-accelerated rendering pipelines using shader technologies of graphics processing units for real-time image synthesis. The techniques employ principles of artistic rendering, cartographic generalization, and 3D semiotics—unlike photorealistic rendering—to synthesize illustrative renditions of geospatial feature type entities such as water surfaces, buildings, and infrastructure networks. In addition, this thesis contributes a generic system that enables to integrate different graphic styles—photorealistic and non-photorealistic—and provide their seamless transition according to user tasks, camera view, and image resolution. Evaluations of the proposed techniques have demonstrated their significance to the field of geospatial information visualization including topics such as spatial perception, cognition, and mapping. In addition, the applications in illustrative and focus+context visualization have reflected their potential impact on optimizing the information transfer regarding factors such as cognitive load, integration of non-realistic information, visualization of uncertainty, and visualization on small displays.}, language = {en} } @phdthesis{GonzalezCamargo2016, author = {Gonzalez Camargo, Rodolfo}, title = {Insulin resistance in cancer cachexia and metabolic syndrome}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100973}, school = {Universit{\"a}t Potsdam}, pages = {104}, year = {2016}, abstract = {The ever-increasing fat content in Western diet, combined with decreased levels of physical activity, greatly enhance the incidence of metabolic-related diseases. Cancer cachexia (CC) and Metabolic syndrome (MetS) are both multifactorial highly complex metabolism related syndromes, whose etiology is not fully understood, as the mechanisms underlying their development are not completely unveiled. Nevertheless, despite being considered "opposite sides", MetS and CC share several common issues such as insulin resistance and low-grade inflammation. In these scenarios, tissue macrophages act as key players, due to their capacity to produce and release inflammatory mediators. One of the main features of MetS is hyperinsulinemia, which is generally associated with an attempt of the β-cell to compensate for diminished insulin sensitivity (insulin resistance). There is growing evidence that hyperinsulinemia per se may contribute to the development of insulin resistance, through the establishment of low grade inflammation in insulin responsive tissues, especially in the liver (as insulin is secreted by the pancreas into the portal circulation). The hypothesis of the present study was that insulin may itself provoke an inflammatory response culminating in diminished hepatic insulin sensitivity. To address this premise, firstly, human cell line U937 differentiated macrophages were exposed to insulin, LPS and PGE2. In these cells, insulin significantly augmented the gene expression of the pro-inflammatory mediators IL-1β, IL-8, CCL2, Oncostatin M (OSM) and microsomal prostaglandin E2 synthase (mPGES1), and of the anti-inflammatory mediator IL-10. Moreover, the synergism between insulin and LPS enhanced the induction provoked by LPS in IL-1β, IL-8, IL-6, CCL2 and TNF-α gene. When combined with PGE2, insulin enhanced the induction provoked by PGE2 in IL-1β, mPGES1 and COX2, and attenuated the inhibition induced by PGE2 in CCL2 and TNF-α gene expression contributing to an enhanced inflammatory response by both mechanisms. Supernatants of insulin-treated U937 macrophages reduced the insulin-dependent induction of glucokinase in hepatocytes by 50\%. Cytokines contained in the supernatant of insulin-treated U937 macrophages also activated hepatocytes ERK1/2, resulting in inhibitory serine phosphorylation of the insulin receptor substrate. Additionally, the transcription factor STAT3 was activated by phosphorylation resulting in the induction of SOCS3, which is capable of interrupting the insulin receptor signal chain. MicroRNAs, non-coding RNAs linked to protein expression regulation, nowadays recognized as active players in the generation of several inflammatory disorders such as cancer and type II diabetes are also of interest. Considering that in cancer cachexia, patients are highly affected by insulin resistance and inflammation, control, non-cachectic and cachectic cancer patients were selected and the respective circulating levels of pro-inflammatory mediators and microRNA-21-5p, a posttranscriptional regulator of STAT3 expression, assessed and correlated. Cachectic patients circulating cytokines IL-6 and IL-8 levels were significantly higher than those of non-cachectic and controls, and the expression of microRNA-21-5p was significantly lower. Additionally, microRNA-21-5p reduced expression correlated negatively with IL-6 plasma levels. These results indicate that hyperinsulinemia per se might contribute to the low grade inflammation prevailing in MetS patients and thereby promote the development of insulin resistance particularly in the liver. Diminished MicroRNA-21-5p expression may enhance inflammation and STAT3 expression in cachectic patients, contributing to the development of insulin resistance.}, language = {en} } @phdthesis{Berner2016, author = {Berner, Nadine}, title = {Deciphering multiple changes in complex climate time series using Bayesian inference}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100065}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 135}, year = {2016}, abstract = {Change points in time series are perceived as heterogeneities in the statistical or dynamical characteristics of the observations. Unraveling such transitions yields essential information for the understanding of the observed system's intrinsic evolution and potential external influences. A precise detection of multiple changes is therefore of great importance for various research disciplines, such as environmental sciences, bioinformatics and economics. The primary purpose of the detection approach introduced in this thesis is the investigation of transitions underlying direct or indirect climate observations. In order to develop a diagnostic approach capable to capture such a variety of natural processes, the generic statistical features in terms of central tendency and dispersion are employed in the light of Bayesian inversion. In contrast to established Bayesian approaches to multiple changes, the generic approach proposed in this thesis is not formulated in the framework of specialized partition models of high dimensionality requiring prior specification, but as a robust kernel-based approach of low dimensionality employing least informative prior distributions. First of all, a local Bayesian inversion approach is developed to robustly infer on the location and the generic patterns of a single transition. The analysis of synthetic time series comprising changes of different observational evidence, data loss and outliers validates the performance, consistency and sensitivity of the inference algorithm. To systematically investigate time series for multiple changes, the Bayesian inversion is extended to a kernel-based inference approach. By introducing basic kernel measures, the weighted kernel inference results are composed into a proxy probability to a posterior distribution of multiple transitions. The detection approach is applied to environmental time series from the Nile river in Aswan and the weather station Tuscaloosa, Alabama comprising documented changes. The method's performance confirms the approach as a powerful diagnostic tool to decipher multiple changes underlying direct climate observations. Finally, the kernel-based Bayesian inference approach is used to investigate a set of complex terrigenous dust records interpreted as climate indicators of the African region of the Plio-Pleistocene period. A detailed inference unravels multiple transitions underlying the indirect climate observations, that are interpreted as conjoint changes. The identified conjoint changes coincide with established global climate events. In particular, the two-step transition associated to the establishment of the modern Walker-Circulation contributes to the current discussion about the influence of paleoclimate changes on the environmental conditions in tropical and subtropical Africa at around two million years ago.}, language = {en} } @phdthesis{Schoellner2016, author = {Schoellner, Karsten}, title = {Towards a Wittgensteinian metaethics}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409288}, school = {Universit{\"a}t Potsdam}, pages = {309}, year = {2016}, abstract = {This doctoral thesis seeks to elaborate how Wittgenstein's very sparse writings on ethics and ethical thought, together with his later work on the more general problem of normativity and his approach to philosophical problems as a whole, can be applied to contemporary meta-ethical debates about the nature of moral thought and language and the sources of moral obligation. I begin with a discussion of Wittgenstein's early "Lecture on Ethics", distinguishing the thesis of a strict fact/value dichotomy that Wittgenstein defends there from the related thesis that all ethical discourse is essentially and intentionally nonsensical, an attempt to go beyond the limits of sense. The first chapter discusses and defends Wittgenstein's argument that moral valuation always goes beyond any ascertaining of fact; the second chapter seeks to draw out the valuable insights from Wittgenstein's (early) insistence that value discourse is nonsensical while also arguing that this thesis is ultimately untenable and also incompatible with later Wittgensteinian understanding of language. On the basis of this discussion I then take up the writings of the American philosopher Cora Diamond, who has worked out an ethical approach in a very closely Wittgensteinian spirit, and show how this approach shares many of the valuable insights of the moral expressivism and constructivism of contemporary authors such as Blackburn and Korsgaard while suggesting a way to avoid some of the problems and limitations of their approaches. Subsequently I turn to a criticism of the attempts by Lovibond and McDowell to enlist Wittgenstein in the support for a non-naturalist moral realism. A concluding chapter treats the ways that a broadly Wittgensteinian conception expands the subject of metaethics itself by questioning the primacy of discursive argument in moral thought and of moral propositions as the basic units of moral belief.}, language = {en} } @phdthesis{Prokopović2016, author = {Prokopović, Vladimir Z.}, title = {Light-triggered release of bioactive compounds from HA/PLL multilayer films for stimulation of cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-97927}, school = {Universit{\"a}t Potsdam}, pages = {91}, year = {2016}, abstract = {The concept of targeting cells and tissues by controlled delivery of molecules is essential in the field of biomedicine. The layer-by-layer (LbL) technology for the fabrication of polymer multilayer films is widely implemented as a powerful tool to assemble tailor-made materials for controlled drug delivery. The LbL films can as well be engineered to act as mimics of the natural cellular microenvironment. Thus, due to the myriad possibilities such as controlled cellular adhesion and drug delivery offered by LbL films, it becomes easily achievable to direct the fate of cells by growing them on the films. The aim of this work was to develop an approach for non-invasive and precise control of the presentation of bioactive molecules to cells. The strategy is based on employment of the LbL films, which function as support for cells and at the same time as reservoirs for bioactive molecules to be released in a controlled manner. UV light is used to trigger the release of the stored ATP with high spatio-temporal resolution. Both physico-chemical (competitive intermolecular interactions in the film) and biological aspects (cellular response and viability) are addressed in this study. Biopolymers hyaluronic acid (HA) and poly-L-lysine (PLL) were chosen as the building blocks for the LbL film assembly. Poor cellular adhesion to native HA/PLL films as well as significant degradation by cells within a few days were shown. However, coating the films with gold nanoparticles not only improved cellular adhesion and protected the films from degradation, but also formed a size-exclusion barrier with adjustable cut-off in the size range of a few tens of kDa. The films were shown to have high reservoir capacity for small charged molecules (reaching mM levels in the film). Furthermore, they were able to release the stored molecules in a sustained manner. The loading and release are explained by a mechanism based on interactions between charges of the stored molecules and uncompensated charges of the biopolymers in the film. Charge balance and polymer dynamics in the film play the pivotal role. Finally, the concept of light-triggered release from the films has been proven using caged ATP loaded into the films from which ATP was released on demand. ATP induces a fast cellular response, i.e. increase in intracellular [Ca2+], which was monitored in real-time. Limitations of the cellular stimulation by the proposed approach are highlighted by studying the stimulation as a function of irradiation parameters (time, distance, light power). Moreover, caging molecules bind to the film stronger than ATP does, which opens new perspectives for the use of the most diverse chemical compounds as caging molecules. Employment of HA/PLL films as a nouvelle support for cellular growth and hosting of bioactive molecules, along with the possibility to stimulate individual cells using focused light renders this approach highly efficient and unique in terms of precision and spatio-temporal resolution among those previously described. With its high potential, the concept presented herein provides the foundation for the design of new intelligent materials for single cell studies, with the focus on tissue engineering, diagnostics, and other cell-based applications.}, language = {en} } @phdthesis{Brauer2016, author = {Brauer, Doroth{\´e}e}, title = {Chemo-kinematic constraints on Milky Way models from the spectroscopic surveys SEGUE \& RAVE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403968}, school = {Universit{\"a}t Potsdam}, pages = {vii, 197}, year = {2016}, abstract = {The Milky Way is only one out of billions of galaxies in the universe. However, it is a special galaxy because it allows to explore the main mechanisms involved in its evolution and formation history by unpicking the system star-by-star. Especially, the chemical fingerprints of its stars provide clues and evidence of past events in the Galaxy's lifetime. These information help not only to decipher the current structure and building blocks of the Milky Way, but to learn more about the general formation process of galaxies. In the past decade a multitude of stellar spectroscopic Galactic surveys have scanned millions of stars far beyond the rim of the solar neighbourhood. The obtained spectroscopic information provide unprecedented insights to the chemo-dynamics of the Milky Way. In addition analytic models and numerical simulations of the Milky Way provide necessary descriptions and predictions suited for comparison with observations in order to decode the physical properties that underlie the complex system of the Galaxy. In the thesis various approaches are taken to connect modern theoretical modelling of galaxy formation and evolution with observations from Galactic stellar surveys. With its focus on the chemo-kinematics of the Galactic disk this work aims to determine new observational constraints on the formation of the Milky Way providing also proper comparisons with two different models. These are the population synthesis model TRILEGAL based on analytical distribution functions, which aims to simulate the number and distribution of stars in the Milky Way and its different components, and a hybrid model (MCM) that combines an N-body simulation of a Milky Way like galaxy in the cosmological framework with a semi-analytic chemical evolution model for the Milky Way. The major observational data sets in use come from two surveys, namely the "Radial Velocity Experiment" (RAVE) and the "Sloan Extension for Galactic Understanding and Exploration" (SEGUE). In the first approach the chemo-kinematic properties of the thin and thick disk of the Galaxy as traced by a selection of about 20000 SEGUE G-dwarf stars are directly compared to the predictions by the MCM model. As a necessary condition for this, SEGUE's selection function and its survey volume are evaluated in detail to correct the spectroscopic observations for their survey specific selection biases. Also, based on a Bayesian method spectro-photometric distances with uncertainties below 15\% are computed for the selection of SEGUE G-dwarfs that are studied up to a distance of 3 kpc from the Sun. For the second approach two synthetic versions of the SEGUE survey are generated based on the above models. The obtained synthetic stellar catalogues are then used to create mock samples best resembling the compiled sample of observed SEGUE G-dwarfs. Generally, mock samples are not only ideal to compare predictions from various models. They also allow validation of the models' quality and improvement as with this work could be especially achieved for TRILEGAL. While TRILEGAL reproduces the statistical properties of the thin and thick disk as seen in the observations, the MCM model has shown to be more suitable in reproducing many chemo-kinematic correlations as revealed by the SEGUE stars. However, evidence has been found that the MCM model may be missing a stellar component with the properties of the thick disk that the observations clearly show. While the SEGUE stars do indicate a thin-thick dichotomy of the stellar Galactic disk in agreement with other spectroscopic stellar studies, no sign for a distinct metal-poor disk is seen in the MCM model. Usually stellar spectroscopic surveys are limited to a certain volume around the Sun covering different regions of the Galaxy's disk. This often prevents to obtain a global view on the chemo-dynamics of the Galactic disk. Hence, a suitable combination of stellar samples from independent surveys is not only useful for the verification of results but it also helps to complete the picture of the Milky Way. Therefore, the thesis closes with a comparison of the SEGUE G-dwarfs and a sample of RAVE giants. The comparison reveals that the chemo-kinematic relations agree in disk regions where the samples of both surveys show a similar number of stars. For those parts of the survey volumes where one of the surveys lacks statistics they beautifully complement each other. This demonstrates that the comparison of theoretical models on the one side, and the combined observational data gathered by multiple surveys on the other side, are key ingredients to understand and disentangle the structure and formation history of the Milky Way.}, language = {en} } @phdthesis{Vacogne2016, author = {Vacogne, Charlotte D.}, title = {New synthetic routes towards well-defined polypeptides, morphologies and hydrogels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396366}, school = {Universit{\"a}t Potsdam}, pages = {xii, 175}, year = {2016}, abstract = {Proteins are natural polypeptides produced by cells; they can be found in both animals and plants, and possess a variety of functions. One of these functions is to provide structural support to the surrounding cells and tissues. For example, collagen (which is found in skin, cartilage, tendons and bones) and keratin (which is found in hair and nails) are structural proteins. When a tissue is damaged, however, the supporting matrix formed by structural proteins cannot always spontaneously regenerate. Tailor-made synthetic polypeptides can be used to help heal and restore tissue formation. Synthetic polypeptides are typically synthesized by the so-called ring opening polymerization (ROP) of α-amino acid N-carboxyanhydrides (NCA). Such synthetic polypeptides are generally non-sequence-controlled and thus less complex than proteins. As such, synthetic polypeptides are rarely as efficient as proteins in their ability to self-assemble and form hierarchical or structural supramolecular assemblies in water, and thus, often require rational designing. In this doctoral work, two types of amino acids, γ-benzyl-L/D-glutamate (BLG / BDG) and allylglycine (AG), were selected to synthesize a series of (co)polypeptides of different compositions and molar masses. A new and versatile synthetic route to prepare polypeptides was developed, and its mechanism and kinetics were investigated. The polypeptide properties were thoroughly studied and new materials were developed from them. In particular, these polypeptides were able to aggregate (or self-assemble) in solution into microscopic fibres, very similar to those formed by collagen. By doing so, they formed robust physical networks and organogels which could be processed into high water-content, pH-responsive hydrogels. Particles with highly regular and chiral spiral morphologies were also obtained by emulsifying these polypeptides. Such polypeptides and the materials derived from them are, therefore, promising candidates for biomedical applications.}, language = {en} } @phdthesis{Prinz2016, author = {Prinz, Julia}, title = {DNA origami substrates as a versatile tool for surface-enhanced Raman scattering (SERS)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-104089}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 184 Seiten}, year = {2016}, abstract = {Surface-enhanced Raman scattering (SERS) is a promising tool to obtain rich chemical information about analytes at trace levels. However, in order to perform selective experiments on individual molecules, two fundamental requirements have to be fulfilled. On the one hand, areas with high local field enhancement, so-called "hot spots", have to be created by positioning the supporting metal surfaces in close proximity to each other. In most cases hot spots are formed in the gap between adjacent metal nanoparticles (NPs). On the other hand, the analyte has to be positioned directly in the hot spot in order to profit from the highest signal amplification. The use of DNA origami substrates provides both, the arrangement of AuNPs with nm precision as well as the ability to bind analyte molecules at predefined positions. Consequently, the present cumulative doctoral thesis aims at the development of a novel SERS substrate based on a DNA origami template. To this end, two DNA-functionalized gold nanoparticles (AuNPs) are attached to one DNA origami substrate resulting in the formation of a AuNP dimer and thus in a hot spot within the corresponding gap. The obtained structures are characterized by correlated atomic force microscopy (AFM) and SERS imaging which allows for the combination of structural and chemical information. Initially, the proof-of principle is presented which demonstrates the potential of the novel approach. It is shown that the Raman signal of 15 nm AuNPs coated with dye-modified DNA (dye: carboxytetramethylrhodamine (TAMRA)) is significantly higher for AuNP dimers arranged on a DNA origami platform in comparison to single AuNPs. Furthermore, by attaching single TAMRA molecules in the hot spot between two 5 nm AuNPs and optimizing the size of the AuNPs by electroless gold deposition, SERS experiments at the few-molecule level are presented. The initially used DNA origami-AuNPs design is further optimized in many respects. On the one hand, larger AuNPs up to a diameter of 60 nm are used which are additionally treated with a silver enhancement solution to obtain Au-Ag-core-shell NPs. On the other hand, the arrangement of both AuNPs is altered to improve the position of the dye molecule within the hot spot as well as to decrease the gap size between the two particles. With the optimized design the detection of single dye molecules (TAMRA and cyanine 3 (Cy3)) by means of SERS is demonstrated. Quantitatively, enhancement factors up to 10^10 are estimated which is sufficiently high to detect single dye molecules. In the second part, the influence of graphene as an additional component of the SERS substrate is investigated. Graphene is a two-dimensional material with an outstanding combination of electronical, mechanical and optical properties. Here, it is demonstrated that single layer graphene (SLG) replicates the shape of underlying non-modified DNA origami substrates very well, which enables the monitoring of structural alterations by AFM imaging. In this way, it is shown that graphene encapsulation significantly increases the structural stability of bare DNA origami substrates towards mechanical force and prolonged exposure to deionized water. Furthermore, SLG is used to cover DNA origami substrates which are functionalized with a 40 nm AuNP dimer. In this way, a novel kind of hybrid material is created which exhibits several advantages compared to the analogue non-covered SERS substrates. First, the fluorescence background of dye molecules that are located in between the AuNP surface and SLG is efficiently reduced. Second, the photobleaching rate of the incorporated dye molecules is decreased up to one order of magnitude. Third, due to the increased photostability of the investigated dye molecules, the performance of polarization-dependent series measurements on individual structures is enabled. This in turn reveals extensive information about the dye molecules in the hot spot as well as about the strain induced within the graphene lattice. Although SLG can significantly influence the SERS substrate in the aforementioned ways, all those effects are strongly related to the extent of contact with the underlying AuNP dimer.}, language = {en} } @phdthesis{Pampel2016, author = {Pampel, Jonas}, title = {Ionothermal carbon materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101323}, school = {Universit{\"a}t Potsdam}, pages = {122, xlv}, year = {2016}, abstract = {Alternative concepts for energy storage and conversion have to be developed, optimized and employed to fulfill the dream of a fossil-independent energy economy. Porous carbon materials play a major role in many energy-related devices. Among different characteristics, distinct porosity features, e.g., specific surface area (SSA), total pore volume (TPV), and the pore size distribution (PSD), are important to maximize the performance in the final device. In order to approach the aim to synthesize carbon materials with tailor-made porosity in a sustainable fashion, the present thesis focused on biomass-derived precursors employing and developing the ionothermal carbonization. During the ionothermal carbonization, a salt melt simultaneously serves as solvent and porogen. Typically, eutectic mixtures containing zinc chloride are employed as salt phase. The first topic of the present thesis addressed the possibility to precisely tailor the porosity of ionothermal carbon materials by an experimentally simple variation of the molar composition of the binary salt mixture. The developed pore tuning tool allowed the synthesis of glucose derived carbon materials with predictable SSAs in the range of ~ 900 to ~ 2100 m2 g-1. Moreover, the nucleobase adenine was employed as precursor introducing nitrogen functionalities in the final material. Thereby, the chemical properties of the carbon materials are varied leading to new application fields. Nitrogen doped carbons (NDCs) are able to catalyze the oxygen reduction reaction (ORR) which takes place on the cathodic site of a fuel cell. The herein developed porosity tailoring allowed the synthesis of adenine derived NDCs with outstanding SSAs of up to 2900 m2 g-1 and very large TPV of 5.19 cm3 g-1. Furthermore, the influence of the porosity on the ORR could be directly investigated enabling the precise optimization of the porosity characteristics of NDCs for this application. The second topic addressed the development of a new method to investigate the not-yet unraveled mechanism of the oxygen reduction reaction using a rotating disc electrode setup. The focus was put on noble-metal free catalysts. The results showed that the reaction pathway of the investigated catalysts is pH-dependent indicating different active species at different pH-values. The third topic addressed the expansion of the used salts for the ionothermal approach towards hydrated calcium and magnesium chloride. It was shown that hydrated salt phases allowed the introduction of a secondary templating effect which was connected to the coexistence of liquid and solid salt phases. The method enabled the synthesis of fibrous NDCs with SSAs of up to 2780 m2 g-1 and very large TPV of 3.86 cm3 g-1. Moreover, the concept of active site implementation by a facile low-temperature metalation employing the obtained NDCs as solid ligands could be shown for the first time in the context of ORR. Overall, the thesis may pave the way towards highly porous carbon with tailor-made porosity materials prepared by an inexpensive and sustainable pathway, which can be applied in energy related field thereby supporting the needed expansion of the renewable energy sector.}, language = {en} } @phdthesis{Steeples2016, author = {Steeples, Elliot}, title = {Amino acid-derived imidazolium salts: platform molecules for N-Heterocyclic carbene metal complexes and organosilica materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101861}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2016}, abstract = {In the interest of producing functional catalysts from sustainable building-blocks, 1, 3-dicarboxylate imidazolium salts derived from amino acids were successfully modified to be suitable as N-Heterocyclic carbene (NHC) ligands within metal complexes. Complexes of Ag(I), Pd(II), and Ir(I) were successfully produced using known procedures using ligands derived from glycine, alanine, β-alanine and phenylalanine. The complexes were characterized in solid state using X-Ray crystallography, which allowed for the steric and electronic comparison of these ligands to well-known NHC ligands within analogous metal complexes. The palladium complexes were tested as catalysts for aqueous-phase Suzuki-Miyaura cross-coupling. Water-solubility could be induced via ester hydrolysis of the N-bound groups in the presence of base. The mono-NHC-Pd complexes were seen to be highly active in the coupling of aryl bromides with phenylboronic acid; the active catalyst of which was determined to be mostly Pd(0) nanoparticles. Kinetic studies determined that reaction proceeds quickly in the coupling of bromoacetophenone, for both pre-hydrolyzed and in-situ hydrolysis catalyst dissolution. The catalyst could also be recycled for an extra run by simply re-using the aqueous layer. The imidazolium salts were also used to produce organosilica hybrid materials. This was attempted via two methods: by post-grafting onto a commercial organosilica, and co-condensation of the corresponding organosilane. The co-condensation technique harbours potential for the production of solid-support catalysts.}, language = {en} } @phdthesis{Ledendecker2016, author = {Ledendecker, Marc}, title = {En route towards advanced catalyst materials for the electrocatalytic water splitting reaction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93296}, school = {Universit{\"a}t Potsdam}, pages = {II, 148}, year = {2016}, abstract = {The thesis on hand deals with the development of new types of catalysts based on pristine metals and ceramic materials and their application as catalysts for the electrocatalytic water splitting reaction. In order to breathe life into this technology, cost-efficient, stable and efficient catalysts are imploringly desired. In this manner, the preparation of Mn-, N-, S-, P-, and C-containing nickel materials has been investigated together with the theoretical and electrochemical elucidation of their activity towards the hydrogen (and oxygen) evolution reaction. The Sabatier principle has been used as the principal guideline towards successful tuning of catalytic sites. Furthermore, two pathways have been chosen to ameliorate the electrocatalytic performance, namely, the direct improvement of intrinsic properties through appropriate material selection and secondly the increase of surface area of the catalytic material with an increased amount of active sites. In this manner, bringing materials with optimized hydrogen adsorption free energy onto high surface area support, catalytic performances approaching the golden standards of noble metals were feasible. Despite varying applied synthesis strategies (wet chemistry in organic solvents, ionothermal reaction, gas phase reaction), one goal has been systematically pursued: to understand the driving mechanism of the growth. Moreover, deeper understanding of inherent properties and kinetic parameters of the catalytic materials has been gained.}, language = {en} } @phdthesis{Cajar2016, author = {Cajar, Anke}, title = {Eye-movement control during scene viewing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-395536}, school = {Universit{\"a}t Potsdam}, pages = {vii, 133}, year = {2016}, abstract = {Eye movements serve as a window into ongoing visual-cognitive processes and can thus be used to investigate how people perceive real-world scenes. A key issue for understanding eye-movement control during scene viewing is the roles of central and peripheral vision, which process information differently and are therefore specialized for different tasks (object identification and peripheral target selection respectively). Yet, rather little is known about the contributions of central and peripheral processing to gaze control and how they are coordinated within a fixation during scene viewing. Additionally, the factors determining fixation durations have long been neglected, as scene perception research has mainly been focused on the factors determining fixation locations. The present thesis aimed at increasing the knowledge on how central and peripheral vision contribute to spatial and, in particular, to temporal aspects of eye-movement control during scene viewing. In a series of five experiments, we varied processing difficulty in the central or the peripheral visual field by attenuating selective parts of the spatial-frequency spectrum within these regions. Furthermore, we developed a computational model on how foveal and peripheral processing might be coordinated for the control of fixation duration. The thesis provides three main findings. First, the experiments indicate that increasing processing demands in central or peripheral vision do not necessarily prolong fixation durations; instead, stimulus-independent timing is adapted when processing becomes too difficult. Second, peripheral vision seems to play a prominent role in the control of fixation durations, a notion also implemented in the computational model. The model assumes that foveal and peripheral processing proceed largely in parallel and independently during fixation, but can interact to modulate fixation duration. Thus, we propose that the variation in fixation durations can in part be accounted for by the interaction between central and peripheral processing. Third, the experiments indicate that saccadic behavior largely adapts to processing demands, with a bias of avoiding spatial-frequency filtered scene regions as saccade targets. We demonstrate that the observed saccade amplitude patterns reflect corresponding modulations of visual attention. The present work highlights the individual contributions and the interplay of central and peripheral vision for gaze control during scene viewing, particularly for the control of fixation duration. Our results entail new implications for computational models and for experimental research on scene perception.}, language = {en} } @phdthesis{Riebe2016, author = {Riebe, Daniel}, title = {Experimental and theoretical investigations of molecular ions by spectroscopy as well as ion mobility and mass spectrometry}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94632}, school = {Universit{\"a}t Potsdam}, pages = {143}, year = {2016}, abstract = {The aim of this thesis was the elucidation of different ionization methods (resonance-enhanced multiphoton ionization - REMPI, electrospray ionization - ESI, atmospheric pressure chemical ionization - APCI) in ion mobility (IM) spectrometry. In order to gain a better understanding of the ionization processes, several spectroscopic, mass spectrometric and theoretical methods were also used. Another focus was the development of experimental techniques, including a high resolution spectrograph and various combinations of IM and mass spectrometry. The novel high resolution 2D spectrograph facilitates spectroscopic resolutions in the range of commercial echelle spectrographs. The lowest full width at half maximum of a peak achieved was 25 pm. The 2D spectrograph is based on the wavelength separation of light by the combination of a prism and a grating in one dimension, and an etalon in the second dimension. This instrument was successfully employed for the acquisition of Raman and laser-induced breakdown spectra. Different spectroscopic methods (light scattering and fluorescence spectroscopy) permitting a spatial as well as spectral resolution, were used to investigate the release of ions in the electrospray. The investigation is based on the 50 nm shift of the fluorescence band of rhodamine 6G ions of during the transfer from the electrospray droplets to the gas phase. A newly developed ionization chamber operating at reduced pressure (0.5 mbar) was coupled to a time-of-flight mass spectrometer. After REMPI of H2S, an ionization chemistry analogous to H2O was observed with this instrument. Besides H2S+ and its fragments, H3S+ and protonated analyte ions could be observed as a result of proton-transfer reactions. For the elucidation of the peaks in IM spectra, a combination of IM spectrometer and linear quadrupole ion trap mass spectrometer was developed. The instrument can be equipped with various ionization sources (ESI, REMPI, APCI) and was used for the characterization of the peptide bradykinin and the neuroleptic promazine. The ionization of explosive compounds in an APCI source based on soft x-radiation was investigated in a newly developed ionization chamber attached to the ion trap mass spectrometer. The major primary and secondary reactions could be characterized and explosive compound ions could be identified and assigned to the peaks in IM spectra. The assignment is based on the comparison of experimentally determined and calculated IM. The methods of calculation currently available exhibit large deviations, especially in the case of anions. Therefore, on the basis of an assessment of available methods, a novel hybrid method was developed and characterized.}, language = {en} } @phdthesis{Ulaganathan2016, author = {Ulaganathan, Vamseekrishna}, title = {Molecular fundamentals of foam fractionation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94263}, school = {Universit{\"a}t Potsdam}, pages = {ix, 136}, year = {2016}, abstract = {Foam fractionation of surfactant and protein solutions is a process dedicated to separate surface active molecules from each other due to their differences in surface activities. The process is based on forming bubbles in a certain mixed solution followed by detachment and rising of bubbles through a certain volume of this solution, and consequently on the formation of a foam layer on top of the solution column. Therefore, systematic analysis of this whole process comprises of at first investigations dedicated to the formation and growth of single bubbles in solutions, which is equivalent to the main principles of the well-known bubble pressure tensiometry. The second stage of the fractionation process includes the detachment of a single bubble from a pore or capillary tip and its rising in a respective aqueous solution. The third and final stage of the process is the formation and stabilization of the foam created by these bubbles, which contains the adsorption layers formed at the growing bubble surface, carried up and gets modified during the bubble rising and finally ends up as part of the foam layer. Bubble pressure tensiometry and bubble profile analysis tensiometry experiments were performed with protein solutions at different bulk concentrations, solution pH and ionic strength in order to describe the process of accumulation of protein and surfactant molecules at the bubble surface. The results obtained from the two complementary methods allow understanding the mechanism of adsorption, which is mainly governed by the diffusional transport of the adsorbing protein molecules to the bubble surface. This mechanism is the same as generally discussed for surfactant molecules. However, interesting peculiarities have been observed for protein adsorption kinetics at sufficiently short adsorption times. First of all, at short adsorption times the surface tension remains constant for a while before it decreases as expected due to the adsorption of proteins at the surface. This time interval is called induction time and it becomes shorter with increasing protein bulk concentration. Moreover, under special conditions, the surface tension does not stay constant but even increases over a certain period of time. This so-called negative surface pressure was observed for BCS and BLG and discussed for the first time in terms of changes in the surface conformation of the adsorbing protein molecules. Usually, a negative surface pressure would correspond to a negative adsorption, which is of course impossible for the studied protein solutions. The phenomenon, which amounts to some mN/m, was rather explained by simultaneous changes in the molar area required by the adsorbed proteins and the non-ideality of entropy of the interfacial layer. It is a transient phenomenon and exists only under dynamic conditions. The experiments dedicated to the local velocity of rising air bubbles in solutions were performed in a broad range of BLG concentration, pH and ionic strength. Additionally, rising bubble experiments were done for surfactant solutions in order to validate the functionality of the instrument. It turns out that the velocity of a rising bubble is much more sensitive to adsorbing molecules than classical dynamic surface tension measurements. At very low BLG or surfactant concentrations, for example, the measured local velocity profile of an air bubble is changing dramatically in time scales of seconds while dynamic surface tensions still do not show any measurable changes at this time scale. The solution's pH and ionic strength are important parameters that govern the measured rising velocity for protein solutions. A general theoretical description of rising bubbles in surfactant and protein solutions is not available at present due to the complex situation of the adsorption process at a bubble surface in a liquid flow field with simultaneous Marangoni effects. However, instead of modelling the complete velocity profile, new theoretical work has been started to evaluate the maximum values in the profile as characteristic parameter for dynamic adsorption layers at the bubble surface more quantitatively. The studies with protein-surfactant mixtures demonstrate in an impressive way that the complexes formed by the two compounds change the surface activity as compared to the original native protein molecules and therefore lead to a completely different retardation behavior of rising bubbles. Changes in the velocity profile can be interpreted qualitatively in terms of increased or decreased surface activity of the formed protein-surfactant complexes. It was also observed that the pH and ionic strength of a protein solution have strong effects on the surface activity of the protein molecules, which however, could be different on the rising bubble velocity and the equilibrium adsorption isotherms. These differences are not fully understood yet but give rise to discussions about the structure of protein adsorption layer under dynamic conditions or in the equilibrium state. The third main stage of the discussed process of fractionation is the formation and characterization of protein foams from BLG solutions at different pH and ionic strength. Of course a minimum BLG concentration is required to form foams. This minimum protein concentration is a function again of solution pH and ionic strength, i.e. of the surface activity of the protein molecules. Although at the isoelectric point, at about pH 5 for BLG, the hydrophobicity and hence the surface activity should be the highest, the concentration and ionic strength effects on the rising velocity profile as well as on the foamability and foam stability do not show a maximum. This is another remarkable argument for the fact that the interfacial structure and behavior of BLG layers under dynamic conditions and at equilibrium are rather different. These differences are probably caused by the time required for BLG molecules to adapt respective conformations once they are adsorbed at the surface. All bubble studies described in this work refer to stages of the foam fractionation process. Experiments with different systems, mainly surfactant and protein solutions, were performed in order to form foams and finally recover a solution representing the foamed material. As foam consists to a large extent of foam lamella - two adsorption layers with a liquid core - the concentration in a foamate taken from foaming experiments should be enriched in the stabilizing molecules. For determining the concentration of the foamate, again the very sensitive bubble rising velocity profile method was applied, which works for any type of surface active materials. This also includes technical surfactants or protein isolates for which an accurate composition is unknown.}, language = {en} } @phdthesis{Bendre2016, author = {Bendre, Abhijit B.}, title = {Growth and saturation of dynamo in spiral galaxies via direct simulations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407517}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 133}, year = {2016}, abstract = {We do magnetohydrodynamic (MHD) simulations of local box models of turbulent Interstellar Medium (ISM) and analyse the process of amplification and saturation of mean magnetic fields with methods of mean field dynamo theory. It is shown that the process of saturation of mean fields can be partially described by the prolonged diffusion time scales in presence of the dynamically significant magnetic fields. However, the outward wind also plays an essential role in the saturation in higher SN rate case. Algebraic expressions for the back reaction of the magnetic field onto the turbulent transport coefficients are derived, which allow a complete description of the nonlinear dynamo. We also present the effects of dynamically significant mean fields on the ISM configuration and pressure distribution. We further add the cosmic ray component in the simulations and investigate the kinematic growth of mean fields with a dynamo perspective.}, language = {en} } @phdthesis{Renans2016, author = {Renans, Agata Maria}, title = {Exhaustivity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89501}, school = {Universit{\"a}t Potsdam}, pages = {221}, year = {2016}, abstract = {The dissertation proposes an answer to the question of how to model exhaustive inferences and what the meaning of the linguistic material that triggers these inferences is. In particular, it deals with the semantics of exclusive particles, clefts, and progressive aspect in Ga, an under-researched language spoken in Ghana. Based on new data coming from the author's original fieldwork in Accra, the thesis points to a previously unattested variation in the semantics of exclusives in a cross-linguistic perspective, analyzes the connections between exhaustive interpretation triggered by clefts and the aspectual interpretation of the sentence, and identifies a cross-categorial definite determiner. By that it sheds new light on several exhaustivity-related phenomena in both the nominal and the verbal domain and shows that both domains are closely connected.}, language = {en} } @phdthesis{Markova2016, author = {Markova, Mariya}, title = {Metabolic and molecular effects of two different isocaloric high protein diets in subjects with type 2 diabetes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394310}, school = {Universit{\"a}t Potsdam}, pages = {x, 127}, year = {2016}, abstract = {Ern{\"a}hrung stellt ein wichtiger Faktor in der Pr{\"a}vention und Therapie von Typ-2-Diabetes dar. Fr{\"u}here Studien haben gezeigt, dass Hochproteindi{\"a}ten sowohl positive als auch negative Effekte auf den Metabolismus hervorrufen. Jedoch ist unklar, ob die Herkunft des Proteins dabei eine Rolle spielt. In der LeguAN-Studie wurden die Effekte von zwei unterschiedlichen Hochproteindi{\"a}ten, entweder tierischer oder pflanzlicher Herkunft, bei Typ-2-Diabetes Patienten untersucht. Beide Di{\"a}ten enthielten 30 EN\% Proteine, 40 EN\% Kohlenhydrate und 30 EN\% Fette. Der Anteil an Ballaststoffen, der glyk{\"a}mischer Index und die Fettkomposition waren in beiden Di{\"a}ten {\"a}hnlich. Die Proteinaufnahme war h{\"o}her, w{\"a}hrend die Fettaufnahme niedriger im Vergleich zu den fr{\"u}heren Ern{\"a}hrungsgewohnheiten der Probanden war. Insgesamt f{\"u}hrten beide Di{\"a}tinterventionen zu einer Verbesserung der glyk{\"a}mischen Kontrolle, der Insulinsensitivit{\"a}t, des Leberfettgehalts und kardiovaskul{\"a}rer Risikomarkern ohne wesentliche Unterschiede zwischen den Proteintypen. In beiden Interventionsgruppen wurden die n{\"u}chternen Glukosewerte zusammen mit Indizes von Insulinresistenz in einem unterschiedlichen Ausmaß, jedoch ohne signifikante Unterschiede zwischen beiden Di{\"a}ten verbessert. Die Reduktion von HbA1c war ausgepr{\"a}gter in der pflanzlichen Gruppe, w{\"a}hrend sich die Insulinsensitivit{\"a}t mehr in der tierischen Gruppe erh{\"o}hte. Die Hochproteindi{\"a}ten hatten nur einen geringf{\"u}gigen Einfluss auf den postprandialen Metabolismus. Dies zeigte sich durch eine leichte Verbesserung der Indizes f{\"u}r Insulinsekretion, -sensitivit{\"a}t und -degradation sowie der Werte der freien Fetts{\"a}uren. Mit Ausnahme des Einflusses auf die GIP-Sekretion riefen die tierische und die pflanzliche Testmahlzeit {\"a}hnliche metabolische und hormonelle Antworten, trotz unterschiedlicher Aminos{\"a}urenzusammensetzung. Die tierische Hochproteindi{\"a}t f{\"u}hrte zu einer selektiven Zunahme der fettfreien Masse und Abnahme der Fettmasse, was nicht signifikant unterschiedlich von der pflanzlichen Gruppe war. Dar{\"u}ber hinaus reduzierten die Hochproteindi{\"a}ten den Leberfettgehalt um durchschnittlich 42\%. Die Reduktion des Leberfettgehaltes ging mit einer Verminderung der Lipogenese, der Lipolyse und des freien Fetts{\"a}ure Flux einher. Beide Interventionen induzierten einen moderaten Abfall von Leberenzymen im Blut. Die Reduktion des Leberfetts war mit einer verbesserten Glukosehom{\"o}ostase und Insulinsensitivit{\"a}t assoziiert. Blutlipide sanken in allen Probanden, was eventuell auf die niedrigere Fettaufnahme zur{\"u}ckzuf{\"u}hren war. Weiterhin waren die Spiegel an Harns{\"a}ure und Inflammationsmarkern erniedrigt unabh{\"a}ngig von der Proteinquelle. Die Werte des systolischen und diastolischen Blutdrucks sanken nur in der pflanzlichen Gruppe, was auf eine potentielle Rolle von Arginin hinweist. Es wurden keine Hinweise auf eine beeintr{\"a}chtigte Nierenfunktion durch die 6-w{\"o}chige Hochproteindi{\"a}ten beobachtet unabh{\"a}ngig von der Herkunft der Proteine. Serumkreatinin war nur in der pflanzlichen Gruppe signifikant reduziert, was eventuell an dem geringen Kreatingehalt der pflanzlichen Nahrungsmittel liegen k{\"o}nnte. Jedoch sind l{\"a}ngere Studien n{\"o}tig, um die Sicherheit von Hochproteindi{\"a}ten vollkommen aufkl{\"a}ren zu k{\"o}nnen. Des Weiteren verursachte keine der Di{\"a}ten eine Induktion des mTOR Signalwegs weder im Fettgewebe noch in Blutzellen. Die Verbesserung der Ganzk{\"o}rper-Insulinsensitivit{\"a}t deutete auch auf keine Aktivierung von mTOR und keine Verschlechterung der Insulinsensitivit{\"a}t im Skeletmuskel hin. Ein nennenswerter Befund war die erhebliche Reduktion von FGF21, einem wichtigen Regulator metabolischer Prozesse, um ungef{\"a}hr 50\% bei beiden Proteinarten. Ob hepatischer ER-Stress, Ammoniumniveau oder die Makron{\"a}hrstoffpr{\"a}ferenz hinter dem paradoxen Ergebnis stehen, sollte weiter im Detail untersucht werden. Entgegen der anf{\"a}nglichen Erwartung und der bisherigen Studienlage zeigte die pflanzlich-betonte Hochproteindi{\"a}t keine klaren Vorteile gegen{\"u}ber der tierischen Di{\"a}t. Der ausgepr{\"a}gte g{\"u}nstige Effekt des tierischen Proteins auf Insulinhom{\"o}ostase trotz des hohen BCAA-Gehaltes war sicherlich unerwartet und deutet darauf hin, dass bei dem l{\"a}ngeren Verzehr andere komplexe metabolische Adaptationen stattfinden. Einen weiteren Aspekt stellt der niedrigere Fettverzehr dar, der eventuell auch zu den Verbesserungen in beiden Gruppen beigetragen hat. Zusammenfassend l{\"a}sst sich sagen, dass eine 6-w{\"o}chige Di{\"a}t mit 30 EN\% Proteinen (entweder pflanzlich oder tierisch), 40 EN\% Kohlenhydraten und 30 EN\% Fetten mit weniger ges{\"a}ttigten Fetten zu metabolischen Verbesserungen bei Typ-2-Diabetes Patienten unabh{\"a}ngig von Proteinherkunft f{\"u}hrt.}, language = {en} } @phdthesis{Breitling2016, author = {Breitling, Frank}, title = {Propagation of energetic electrons in the solar corona observed with LOFAR}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396893}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 101}, year = {2016}, abstract = {This work reports about new high-resolution imaging and spectroscopic observations of solar type III radio bursts at low radio frequencies in the range from 30 to 80 MHz. Solar type III radio bursts are understood as result of the beam-plasma interaction of electron beams in the corona. The Sun provides a unique opportunity to study these plasma processes of an active star. Its activity appears in eruptive events like flares, coronal mass ejections and radio bursts which are all accompanied by enhanced radio emission. Therefore solar radio emission carries important information about plasma processes associated with the Sun's activity. Moreover, the Sun's atmosphere is a unique plasma laboratory with plasma processes under conditions not found in terrestrial laboratories. Because of the Sun's proximity to Earth, it can be studied in greater detail than any other star but new knowledge about the Sun can be transfer to them. This "solar stellar connection" is important for the understanding of processes on other stars. The novel radio interferometer LOFAR provides imaging and spectroscopic capabilities to study these processes at low frequencies. Here it was used for solar observations. LOFAR, the characteristics of its solar data and the processing and analysis of the latter with the Solar Imaging Pipeline and Solar Data Center are described. The Solar Imaging Pipeline is the central software that allows using LOFAR for solar observations. So its development was necessary for the analysis of solar LOFAR data and realized here. Moreover a new density model with heat conduction and Alfv{\´e}n waves was developed that provides the distance of radio bursts to the Sun from dynamic radio spectra. Its application to the dynamic spectrum of a type III burst observed on March 16, 2016 by LOFAR shows a nonuniform radial propagation velocity of the radio emission. The analysis of an imaging observation of type III bursts on June 23, 2012 resolves a burst as bright, compact region localized in the corona propagating in radial direction along magnetic field lines with an average velocity of 0.23c. A nonuniform propagation velocity is revealed. A new beam model is presented that explains the nonuniform motion of the radio source as a propagation effect of an electron ensemble with a spread velocity distribution and rules out a monoenergetic electron distribution. The coronal electron number density is derived in the region from 1.5 to 2.5 R☉ and fitted with the newly developed density model. It determines the plasma density for the interplanetary space between Sun and Earth. The values correspond to a 1.25- and 5-fold Newkirk model for harmonic and fundamental emission, respectively. In comparison to data from other radio instruments the LOFAR data shows a high sensitivity and resolution in space, time and frequency. The new results from LOFAR's high resolution imaging spectroscopy are consistent with current theories of solar type III radio bursts and demonstrate its capability to track fast moving radio sources in the corona. LOFAR solar data is found to be a valuable source for solar radio physics and opens a new window for studying plasma processes associated with highly energetic electrons in the solar corona.}, language = {en} } @phdthesis{Krohmer2016, author = {Krohmer, Anton}, title = {Structures \& algorithms in hyperbolic random graphs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-395974}, school = {Universit{\"a}t Potsdam}, pages = {xii, 102}, year = {2016}, abstract = {Complex networks are ubiquitous in nature and society. They appear in vastly different domains, for instance as social networks, biological interactions or communication networks. Yet in spite of their different origins, these networks share many structural characteristics. For instance, their degree distribution typically follows a power law. This means that the fraction of vertices of degree k is proportional to k^(-β) for some constant β; making these networks highly inhomogeneous. Furthermore, they also typically have high clustering, meaning that links between two nodes are more likely to appear if they have a neighbor in common. To mathematically study the behavior of such networks, they are often modeled as random graphs. Many of the popular models like inhomogeneous random graphs or Preferential Attachment excel at producing a power law degree distribution. Clustering, on the other hand, is in these models either not present or artificially enforced. Hyperbolic random graphs bridge this gap by assuming an underlying geometry to the graph: Each vertex is assigned coordinates in the hyperbolic plane, and two vertices are connected if they are nearby. Clustering then emerges as a natural consequence: Two nodes joined by an edge are close by and therefore have many neighbors in common. On the other hand, the exponential expansion of space in the hyperbolic plane naturally produces a power law degree sequence. Due to the hyperbolic geometry, however, rigorous mathematical treatment of this model can quickly become mathematically challenging. In this thesis, we improve upon the understanding of hyperbolic random graphs by studying its structural and algorithmical properties. Our main contribution is threefold. First, we analyze the emergence of cliques in this model. We find that whenever the power law exponent β is 2 < β < 3, there exists a clique of polynomial size in n. On the other hand, for β >= 3, the size of the largest clique is logarithmic; which severely contrasts previous models with a constant size clique in this case. We also provide efficient algorithms for finding cliques if the hyperbolic node coordinates are known. Second, we analyze the diameter, i. e., the longest shortest path in the graph. We find that it is of order O(polylog(n)) if 2 < β < 3 and O(logn) if β > 3. To complement these findings, we also show that the diameter is of order at least Ω(logn). Third, we provide an algorithm for embedding a real-world graph into the hyperbolic plane using only its graph structure. To ensure good quality of the embedding, we perform extensive computational experiments on generated hyperbolic random graphs. Further, as a proof of concept, we embed the Amazon product recommendation network and observe that products from the same category are mapped close together.}, language = {en} } @phdthesis{Janetschek2016, author = {Janetschek, Hannah}, title = {Water development programs in India}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401337}, school = {Universit{\"a}t Potsdam}, pages = {279}, year = {2016}, abstract = {In the past decades, development cooperation (DC) led by conventional bi- and multilateral donors has been joined by a large number of small, private or public-private donors. This pluralism of actors raises questions as to whether or not these new donors are able to implement projects more or less effectively than their conventional counterparts. In contrast to their predecessors, the new donors have committed themselves to be more pragmatic, innovative and flexible in their development cooperation measures. However, they are also criticized for weakening the function of local civil society and have the reputation of being an intransparent and often controversial alternative to public services. With additional financial resources and their new approach to development, the new donors have been described in the literature as playing a controversial role in transforming development cooperation. This dissertation compares the effectiveness of initiatives by new and conventional donors with regard to the provision of public goods and services to the poor in the water and sanitation sector in India. India is an emerging country but it is experiencing high poverty rates and poor water supply in predominantly rural areas. It lends itself for analyzing this research theme as it is currently being confronted by a large number of actors and approaches that aim to find solutions for these challenges . In the theoretical framework of this dissertation, four governance configurations are derived from the interaction of varying actor types with regard to hierarchical and non-hierarchical steering of their interactions. These four governance configurations differ in decision-making responsibilities, accountability and delegation of tasks or direction of information flow. The assumption on actor relationships and steering is supplemented by possible alternative explanations in the empirical investigation, such as resource availability, the inheritance of structures and institutions from previous projects in a project context, gaining acceptance through beneficiaries (local legitimacy) as a door opener, and asymmetries of power in the project context. Case study evidence from seven projects reveals that the actors' relationship is important for successful project delivery. Additionally, the results show that there is a systematic difference between conventional and new donors. Projects led by conventional donors were consistently more successful, due to an actor relationship that placed the responsibility in the hands of the recipient actors and benefited from the trust and reputation of a long-term cooperation. The trust and reputation of conventional donors always went along with a back-up from federal level and trickled down as reputation also at local level implementation. Furthermore, charismatic leaders, as well as the acquired structures and institutions of predecessor projects, also proved to be a positive influencing factor for successful project implementation. Despite the mixed results of the seven case studies, central recommendations for action can be derived for the various actors involved in development cooperation. For example, new donors could fulfill a supplementary function with conventional donors by developing innovative project approaches through pilot studies and then implementing them as a supplement to the projects of conventional donors on the ground. In return, conventional donors would have to make room the new donors by integrating their approaches into already programs in order to promote donor harmonization. It is also important to identify and occupy niches for activities and to promote harmonization among donors on state and federal sides. The empirical results demonstrate the need for a harmonization strategy of different donor types in order to prevent duplication, over-experimentation and the failure of development programs. A transformation to successful and sustainable development cooperation can only be achieved through more coordination processes and national self-responsibility.}, language = {en} } @phdthesis{Fournier2016, author = {Fournier, Yori}, title = {Dynamics of the rise of magnetic flux tubes in stellar interiors}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394533}, school = {Universit{\"a}t Potsdam}, pages = {xii, 98}, year = {2016}, abstract = {In sonnen{\"a}hnlichen Sternen erh{\"a}lt ein Dynamo-Mechanismus die Magnetfelder. Der Babcock-Leighton-Dynamo beruht auf einem solchen Mechanismus und erfordert insbesondere die Existenz von magnetischen Flussr{\"o}hren. Man nimmt an, dass magnetische Flussr{\"o}hren am Boden der Konvetionszone entstehen und durch Auftrieb bis zur Oberfl{\"a}che steigen. Es wird ein spezielles Dynamomodell vorgeschlagen, in dem der Verz{\"o}gerungseffekt durch das Aufsteigen der Flussr{\"o}hren ber{\"u}cksichtigt wird. Die vorliegende Dissertation besch{\"a}ftigt sich mit der Anwendbarkeit des Babcock-Leighton-Dynamos auf andere Sterne. Zu diesem Zweck versuchen wir, die Aufstiegszeiten von magnetischen Flussr{\"o}hren mit Hilfe von kompressiblen MHD-Simulationen in sp{\"a}rischen Kugelschalen mit Dichteschichtung zu bestimmen und einzugrenzen. Derartige Simulationen sind allerdings nur in einem unrealistischen Parameterbereich m{\"o}glich. Deshalb ist eine Skalierungsrelation n{\"o}tig, die die Ergebnisse auf realistische physikalische Regimes {\"u}bertr{\"a}gt. Wir erweitern fr{\"u}here Arbeiten zu Skalierungsrelationen in 2D und leiten ein allgemeines Skalierungsgesetz ab, das f{\"u}r 2D- und 3D-Flussr{\"o}hren g{\"u}ltig ist. In einem umfangreichen Satz von numerischen Simulationen zeigen wir, dass die abgeleitete Skalierungsrelation auch im vollst{\"a}ndig nichtlinearen Fall gilt. Wir haben damit ein Gesetz f{\"u}r die Aufstiegszeit von magnetischen Flussr{\"o}hren gefunden, dass in jedem sonnen{\"a}hnlichen Stern G{\"u}ltigkeit hat. Schließlich implementieren wir dieses Gesetz in einem Dynamomodell mit Verz{\"o}gerungsterm. Die Simulationen eines solchen verz{\"o}gerten Flussr{\"o}hren/Babcock-Leighton-Dynamos auf der Basis der Meanfield-Formulierung f{\"u}hrten auf ein neues Dynamo-Regime, das nur bei Anwesenheit der Verz{\"o}gerung existiert. Die erforderlichen Verz{\"o}gerungen sind von der Gr{\"o}{\"y}enordnung der Zyklusl{\"a}nge, die resultierenden Magnetfelder sind schw{\"a}cher als die {\"A}quipartitions-Feldst{\"a}rke. Dieses neue Regime zeigt, dass auch bei sehr langen Aufstiegszeiten der Flussr{\"o}hren/Babcock-Leighton-Dynamo noch nichtzerfallende L{\"o}sungen liefern und daher auf ein breites Spektrum von Sternen anwendbar sein kann.}, language = {en} } @phdthesis{Kaethner2016, author = {K{\"a}thner, Jana}, title = {Interaction of spatial variability characterized by soil electrical conductivity and plant water status related to generative growth of fruit trees}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397666}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 104, IV}, year = {2016}, abstract = {Precision horticulture beschreibt ein neues Bewirtschaftungskonzept im Gartenbau, bei dem teilfl{\"a}chenspezifisch oder an den Einzelbaum angepasste Maßnahmen eine ressourcenschonende, intensitve Produktion erm{\"o}glichen. Die Datengrundlage wird aus r{\"a}umlich aufgel{\"o}sten Messungen aus der Produktionsanlage gewonnen, wobei sowohl kurzfristige Faktoren wie der effektive Pflanzenwasserzustand als auch langfristige Faktoren wie die Bodenvariabilit{\"a}t zur Informationsgewinnung genutzt werden k{\"o}nnen. Die vorliegende Arbeit umfasst eine Untersuchung der scheinbaren elektrischen Leitf{\"a}higkeit des Bodens (ECa), des Pflanzenwasserzustandes und der Fruchtqualit{\"a}t (zum Beispiel: Fruchtgr{\"o}ße) bei Prunus domestica L. (Pflaume) und Citrus x aurantium, Syn. Citrus paradisi (Grapefruit). Zielsetzungen der vorliegenden Arbeit waren (i) die Charakterisierung der 3D-Verteilung der scheinbaren elektrischen Leitf{\"a}higkeit des Bodens und Variabilit{\"a}t des Pflanzenwasserzustandes; (ii) die Untersuchung der Interaktion zwischen ECa, kumulativer Wassernutzungseffizienz (WUEc) und des crop water stress index (CWSI) bezogen auf die Fruchtqualit{\"a}t sowie (iii) eine M{\"o}glichkeit zur Einteilung von einzelnen B{\"a}umen hinsichtlich der Bew{\"a}sserung. Dazu fanden die Hauptuntersuchungen in der Pflaumenanlage statt. Diese Obstanlage befindet sich in Hanglage (3°) auf pleistoz{\"a}nen und postpleistoz{\"a}nen Substraten in semi-humiden Klima (Potsdam, Deutschland) und umfasst eine Fl{\"a}che von 0,37 ha mit 156 B{\"a}umen der Kultursorte ˈTophit Plusˈ auf der Unterlage Wavit. Die Anlage wurde 2009 mit ein und zwei-j{\"a}hrigen B{\"a}umen in einem Pflanzabstand von 4 m entlang der Bew{\"a}sserung und 5 m zwischen den Reihen angelegt. Dreimal pro Woche wurden die B{\"a}ume mit einer 50 cm {\"u}ber dem Boden installierten Tr{\"o}pfchenbew{\"a}sserung mit 1,6 l pro Baum bew{\"a}ssert. Mit Hilfe geoelektrischer Messungen wurde die scheinbare elektrische Leitf{\"a}higkeit des Oberbodens (0,25 m) mit einem Elektrodenabstand von 0,5 m (4-point light hp) an jedem Baum gemessen. Dadurch wurde die Anlage hinsichtlich ECa r{\"a}umlich charakterisiert. Zus{\"a}tzlich erfolgten Tomographiemessungen zur 3D-Charakterisierung der ECa und punktuell die Beprobung von Bohrlochprofilen bis 1 m Tiefe. Die vegetativen, generativen und Fruchtqualit{\"a}tsdaten wurden an jedem Baum erhoben. Der momentane Pflanzenwasserzustand wurde mit der etablierten Scholander-Methode zur Wasserpotentialanalyse (Scholander Bombe) punktuell und mit Thermalaufnahmen fl{\"a}chendeckend bestimmt. Die Thermalaufnahmen erfolgten mit einer Infrarot-Kamera (ThermaCam SC 500), die auf einem Traktor in 3,3 m H{\"o}he {\"u}ber dem Boden montiert war. Die Thermalaufnahmen (320 x 240 Pixel) der Kronenoberfl{\"a}che wurden mit einem {\"O}ffnungswinkel von 45° und einer geometrischen Aufl{\"o}sung von 6,41 mm x 8,54 mm aufgenommen. Mit Hilfe der Kronentemperatur aus den Thermalbildern und den Temperaturen eines nassen und trockenen Referenzblattes wurde der CWSI berechnet. Es wurde die Anpassung des CWSI f{\"u}r die Messung in semi-humidem Klima erarbeitet, wobei die Erhebung der Referenztemperaturen automatisiert aus den Thermalbildern erfolgte. Die Boniturdaten wurden mit Hilfe eines Varianz-Stabilisierungsverfahrens in eine Normalverteilung transformiert. Die statistischen Analysen sowie die automatisierte Auswertungsroutine erfolgten mit eigenen Skripten in MATLAB® (R2010b sowie R2016a) und einem freien Programm (spatialtoolbox). Die Hot-spot Analysen dienten der Pr{\"u}fung, ob ein beobachtetes Muster statistisch signifikant ist. Evaluiert wurde die Methode mit der etablierten k-mean Analyse. Zum Testen der Hot-spot Analyse wurden ECa, Stammumfang und Ertrag Daten aus einer Grapefruitanlage (Adana, T{\"u}rkei) mit 179 B{\"a}umen auf einem Boden vom Typ Xerofkuvent mit toniger und tonig-lehmiger Textur herangezogen. Die {\"U}berpr{\"u}fung der Interaktion zwischen den kritischen Werten aus den Boden- und Pflanzenwasserzustandsinformationen zu den vegetativen und generativen Pflanzenwachtumsvariablen erfolgte durch die Anwendung der ANOVA und die Ermittlung des Korrelationskoeffizienten. In der Arbeit konnte gezeigt werden, dass die Variabilit{\"a}t der Boden- und Pflanzeninformationen in Obstanlagen auch kleinr{\"a}umig hoch ist. Es konnte gezeigt werden, dass die r{\"a}umlich gefundenen Muster in den ECa {\"u}ber die Jahre zwischen 2011-2012 (r = 0.88) beziehungsweise 2012-2013 (r = 0.71) stabil geblieben sind. Zum anderen wurde gezeigt, dass eine CWSI-Bestimmung auch im semi-humiden Klima m{\"o}glich ist. Es wurde ein Zusammenhang (r = - 0.65, p < 0.0001) mit der etablierten Methode der Blattwasser-potentialanalyse ermittelt. Die Interaktion zwischen der ECa aus verschiedenen Tiefen und den Pflanzenvariablen ergab einen hoch signifikanten Zusammenhang mit dem Oberboden, in dem das Bew{\"a}sserungswasser zu finden war. Es wurde eine Korrelation zwischen Ertrag und ECatopsoil von r = 0.52 ermittelt. Durch die Anwendung der Hot-spot Analyse konnten Extremwerte in den r{\"a}umlichen Daten ermittelt werden. Diese Extrema dienten zur Einteilung der Zonen in cold-spot, random und hot-spot. Die random Zone weist die h{\"o}chsten Korrelationen zu den Pflanzenvariablen auf. Ferner konnte gezeigt werden, dass bereits im semi-humiden Klima der Pflanzenwasserstatus entscheidend zur Fruchtqualit{\"a}t beitr{\"a}gt. Zusammenfassend l{\"a}sst sich sagen, dass die r{\"a}umliche Variabilit{\"a}t der Fruchtqualit{\"a}t durch die Interaktion von Wassernutzungseffizienz und CWSI sowie in geringerem Maße durch den ECa des Bodens. In der Pflaumenanlage im semi-humiden Klima war die Bew{\"a}sserung ausschlaggebend f{\"u}r die Produktion von qualitativ hochwertigen Fr{\"u}chten.}, language = {en} } @phdthesis{Linnik2016, author = {Linnik, Anastasia}, title = {Coherence and structure in aphasic and non-aphasic spoken discourse}, doi = {10.25932/publishup-42320}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423202}, school = {Universit{\"a}t Potsdam}, pages = {xii, 106}, year = {2016}, abstract = {Discourse production is crucial for communicative success and is in the core of aphasia assessment and treatment. Coherence differentiates discourse from a series of utterances/sentences; it is internal unity and connectedness, and, as such, perhaps the most inherent property of discourse. It is unclear whether people with aphasia, who experience various language production difficulties, preserve the ability to produce coherent discourse. A more general question of how coherence is established and represented linguistically has been addressed in the literature, yet remains unanswered. This dissertation presents an investigation of discourse production in aphasia and the linguistic mechanisms of establishing coherence.}, language = {en} } @phdthesis{Lontsi2016, author = {Lontsi, Agostiny Marrios}, title = {1D shallow sedimentary subsurface imaging using ambient noise and active seismic data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103807}, school = {Universit{\"a}t Potsdam}, pages = {xix, 119}, year = {2016}, abstract = {The Earth's shallow subsurface with sedimentary cover acts as a waveguide to any incoming wavefield. Within the framework of my thesis, I focused on the characterization of this shallow subsurface within tens to few hundreds of meters of sediment cover. I imaged the seismic 1D shear wave velocity (and possibly the 1D compressional wave velocity). This information is not only required for any seismic risk assessment, geotechnical engineering or microzonation activities, but also for exploration and global seismology where site effects are often neglected in seismic waveform modeling. First, the conventional frequency-wavenumber (f - k) technique is used to derive the dispersion characteristic of the propagating surface waves recorded using distinct arrays of seismometers in 1D and 2D configurations. Further, the cross-correlation technique is applied to seismic array data to estimate the Green's function between receivers pairs combination assuming one is the source and the other the receiver. With the consideration of a 1D media, the estimated cross-correlation Green's functions are sorted with interstation distance in a virtual 1D active seismic experiment. The f - k technique is then used to estimate the dispersion curves. This integrated analysis is important for the interpretation of a large bandwidth of the phase velocity dispersion curves and therefore improving the resolution of the estimated 1D Vs profile. Second, the new theoretical approach based on the Diffuse Field Assumption (DFA) is used for the interpretation of the observed microtremors H/V spectral ratio. The theory is further extended in this research work to include not only the interpretation of the H/V measured at the surface, but also the H/V measured at depths and in marine environments. A modeling and inversion of synthetic H/V spectral ratio curves on simple predefined geological structures shows an almost perfect recovery of the model parameters (mainly Vs and to a lesser extent Vp). These results are obtained after information from a receiver at depth has been considered in the inversion. Finally, the Rayleigh wave phase velocity information, estimated from array data, and the H/V(z, f) spectral ratio, estimated from a single station data, are combined and inverted for the velocity profile information. Obtained results indicate an improved depth resolution in comparison to estimations using the phase velocity dispersion curves only. The overall estimated sediment thickness is comparable to estimations obtained by inverting the full micortremor H/V spectral ratio.}, language = {en} } @phdthesis{Mueller2016, author = {Mueller, Stefanie}, title = {Interacting with personal fabrication devices}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100908}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 108}, year = {2016}, abstract = {Personal fabrication tools, such as 3D printers, are on the way of enabling a future in which non-technical users will be able to create custom objects. However, while the hardware is there, the current interaction model behind existing design tools is not suitable for non-technical users. Today, 3D printers are operated by fabricating the object in one go, which tends to take overnight due to the slow 3D printing technology. Consequently, the current interaction model requires users to think carefully before printing as every mistake may imply another overnight print. Planning every step ahead, however, is not feasible for non-technical users as they lack the experience to reason about the consequences of their design decisions. In this dissertation, we propose changing the interaction model around personal fabrication tools to better serve this user group. We draw inspiration from personal computing and argue that the evolution of personal fabrication may resemble the evolution of personal computing: Computing started with machines that executed a program in one go before returning the result to the user. By decreasing the interaction unit to single requests, turn-taking systems such as the command line evolved, which provided users with feedback after every input. Finally, with the introduction of direct-manipulation interfaces, users continuously interacted with a program receiving feedback about every action in real-time. In this dissertation, we explore whether these interaction concepts can be applied to personal fabrication as well. We start with fabricating an object in one go and investigate how to tighten the feedback-cycle on an object-level: We contribute a method called low-fidelity fabrication, which saves up to 90\% fabrication time by creating objects as fast low-fidelity previews, which are sufficient to evaluate key design aspects. Depending on what is currently being tested, we propose different conversions that enable users to focus on different parts: faBrickator allows for a modular design in the early stages of prototyping; when users move on WirePrint allows quickly testing an object's shape, while Platener allows testing an object's technical function. We present an interactive editor for each technique and explain the underlying conversion algorithms. By interacting on smaller units, such as a single element of an object, we explore what it means to transition from systems that fabricate objects in one go to turn-taking systems. We start with a 2D system called constructable: Users draw with a laser pointer onto the workpiece inside a laser cutter. The drawing is captured with an overhead camera. As soon as the the user finishes drawing an element, such as a line, the constructable system beautifies the path and cuts it--resulting in physical output after every editing step. We extend constructable towards 3D editing by developing a novel laser-cutting technique for 3D objects called LaserOrigami that works by heating up the workpiece with the defocused laser until the material becomes compliant and bends down under gravity. While constructable and LaserOrigami allow for fast physical feedback, the interaction is still best described as turn-taking since it consists of two discrete steps: users first create an input and afterwards the system provides physical output. By decreasing the interaction unit even further to a single feature, we can achieve real-time physical feedback: Input by the user and output by the fabrication device are so tightly coupled that no visible lag exists. This allows us to explore what it means to transition from turn-taking interfaces, which only allow exploring one option at a time, to direct manipulation interfaces with real-time physical feedback, which allow users to explore the entire space of options continuously with a single interaction. We present a system called FormFab, which allows for such direct control. FormFab is based on the same principle as LaserOrigami: It uses a workpiece that when warmed up becomes compliant and can be reshaped. However, FormFab achieves the reshaping not based on gravity, but through a pneumatic system that users can control interactively. As users interact, they see the shape change in real-time. We conclude this dissertation by extrapolating the current evolution into a future in which large numbers of people use the new technology to create objects. We see two additional challenges on the horizon: sustainability and intellectual property. We investigate sustainability by demonstrating how to print less and instead patch physical objects. We explore questions around intellectual property with a system called Scotty that transfers objects without creating duplicates, thereby preserving the designer's copyright.}, language = {en} } @phdthesis{Mazzonetto2016, author = {Mazzonetto, Sara}, title = {On the exact simulation of (skew) Brownian diffusions with discontinuous drift}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102399}, school = {Universit{\"a}t Potsdam}, pages = {ii, 100}, year = {2016}, abstract = {This thesis is focused on the study and the exact simulation of two classes of real-valued Brownian diffusions: multi-skew Brownian motions with constant drift and Brownian diffusions whose drift admits a finite number of jumps. The skew Brownian motion was introduced in the sixties by It{\^o} and McKean, who constructed it from the reflected Brownian motion, flipping its excursions from the origin with a given probability. Such a process behaves as the original one except at the point 0, which plays the role of a semipermeable barrier. More generally, a skew diffusion with several semipermeable barriers, called multi-skew diffusion, is a diffusion everywhere except when it reaches one of the barriers, where it is partially reflected with a probability depending on that particular barrier. Clearly, a multi-skew diffusion can be characterized either as solution of a stochastic differential equation involving weighted local times (these terms providing the semi-permeability) or by its infinitesimal generator as Markov process. In this thesis we first obtain a contour integral representation for the transition semigroup of the multiskew Brownian motion with constant drift, based on a fine analysis of its complex properties. Thanks to this representation we write explicitly the transition densities of the two-skew Brownian motion with constant drift as an infinite series involving, in particular, Gaussian functions and their tails. Then we propose a new useful application of a generalization of the known rejection sampling method. Recall that this basic algorithm allows to sample from a density as soon as one finds an - easy to sample - instrumental density verifying that the ratio between the goal and the instrumental densities is a bounded function. The generalized rejection sampling method allows to sample exactly from densities for which indeed only an approximation is known. The originality of the algorithm lies in the fact that one finally samples directly from the law without any approximation, except the machine's. As an application, we sample from the transition density of the two-skew Brownian motion with or without constant drift. The instrumental density is the transition density of the Brownian motion with constant drift, and we provide an useful uniform bound for the ratio of the densities. We also present numerical simulations to study the efficiency of the algorithm. The second aim of this thesis is to develop an exact simulation algorithm for a Brownian diffusion whose drift admits several jumps. In the literature, so far only the case of a continuous drift (resp. of a drift with one finite jump) was treated. The theoretical method we give allows to deal with any finite number of discontinuities. Then we focus on the case of two jumps, using the transition densities of the two-skew Brownian motion obtained before. Various examples are presented and the efficiency of our approach is discussed.}, language = {en} } @phdthesis{Laux2016, author = {Laux, Eva-Maria}, title = {Electric field-assisted immobilization and alignment of biomolecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90271}, school = {Universit{\"a}t Potsdam}, pages = {IX, 120}, year = {2016}, abstract = {In this dissertation, an electric field-assisted method was developed and applied to achieve immobilization and alignment of biomolecules on metal electrodes in a simple one-step experiment. Neither modifications of the biomolecule nor of the electrodes were needed. The two major electrokinetic effects that lead to molecule motion in the chosen electrode configurations used were identified as dielectrophoresis and AC electroosmotic flow. To minimize AC electroosmotic flow, a new 3D electrode configuration was designed. Thus, the influence of experimental parameters on the dielectrophoretic force and the associated molecule movement could be studied. Permanent immobilization of proteins was examined and quantified absolutely using an atomic force microscope. By measuring the volumes of the immobilized protein deposits, a maximal number of proteins contained therein was calculated. This was possible since the proteins adhered to the tungsten electrodes even after switching off the electric field. The permanent immobilization of functional proteins on surfaces or electrodes is one crucial prerequisite for the fabrication of biosensors. Furthermore, the biofunctionality of the proteins must be retained after immobilization. Due to the chemical or physical modifications on the proteins caused by immobilization, their biofunctionality is sometimes hampered. The activity of dielectrophoretically immobilized proteins, however, was proven here for an enzyme for the first time. The enzyme horseradish peroxidase was used exemplarily, and its activity was demonstrated with the oxidation of dihydrorhodamine 123, a non-fluorescent precursor of the fluorescence dye rhodamine 123. Molecular alignment and immobilization - reversible and permanent - was achieved under the influence of inhomogeneous AC electric fields. For orientational investigations, a fluorescence microscope setup, a reliable experimental procedure and an evaluation protocol were developed and validated using self-made control samples of aligned acridine orange molecules in a liquid crystal. Lambda-DNA strands were stretched and aligned temporarily between adjacent interdigitated electrodes, and the orientation of PicoGreen molecules, which intercalate into the DNA strands, was determined. Similarly, the aligned immobilization of enhanced Green Fluorescent Protein was demonstrated exploiting the protein's fluorescence and structural properties. For this protein, the angle of the chromophore with respect to the protein's geometrical axis was determined in good agreement with X-ray crystallographic data. Permanent immobilization with simultaneous alignment of the proteins was achieved along the edges, tips and on the surface of interdigitated electrodes. This was the first demonstration of aligned immobilization of proteins by electric fields. Thus, the presented electric field-assisted immobilization method is promising with regard to enhanced antibody binding capacities and enzymatic activities, which is a requirement for industrial biosensor production, as well as for general interaction studies of proteins.}, language = {en} } @phdthesis{Schintgen2016, author = {Schintgen, Tom Vincent}, title = {The geothermal potential of Luxembourg}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-87110}, school = {Universit{\"a}t Potsdam}, pages = {XXII, 313}, year = {2016}, abstract = {The aim of this work is the evaluation of the geothermal potential of Luxembourg. The approach consists in a joint interpretation of different types of information necessary for a first rather qualitative assessment of deep geothermal reservoirs in Luxembourg and the adjoining regions in the surrounding countries of Belgium, France and Germany. For the identification of geothermal reservoirs by exploration, geological, thermal, hydrogeological and structural data are necessary. Until recently, however, reliable information about the thermal field and the regional geology, and thus about potential geothermal reservoirs, was lacking. Before a proper evaluation of the geothermal potential can be performed, a comprehensive survey of the geology and an assessment of the thermal field are required. As a first step, the geology and basin structure of the Mesozoic Trier-Luxembourg Basin (TLB) is reviewed and updated using recently published information on the geology and structures as well as borehole data available in Luxembourg and the adjoining regions. A Bouguer map is used to get insight in the depth, morphology and structures in the Variscan basement buried beneath the Trier-Luxembourg Basin. The geological section of the old Cessange borehole is reinterpreted and provides, in combination with the available borehole data, consistent information for the production of isopach maps. The latter visualize the synsedimentary evolution of the Trier-Luxembourg Basin. Complementary, basin-wide cross sections illustrate the evolution and structure of the Trier-Luxembourg Basin. The knowledge gained does not support the old concept of the Weilerbach Mulde. The basin-wide cross sections, as well as the structural and sedimentological observations in the Trier-Luxembourg Basin suggest that the latter probably formed above a zone of weakness related to a buried Rotliegend graben. The inferred graben structure designated by SE-Luxembourg Graben (SELG) is located in direct southwestern continuation of the Wittlicher Rotliegend-Senke. The lack of deep boreholes and subsurface temperature prognosis at depth is circumnavigated by using thermal modelling for inferring the geothermal resource at depth. For this approach, profound structural, geological and petrophysical input data are required. Conceptual geological cross sections encompassing the entire crust are constructed and further simplified and extended to lithospheric scale for their utilization as thermal models. The 2-D steady state and conductive models are parameterized by means of measured petrophysical properties including thermal conductivity, radiogenic heat production and density. A surface heat flow of 75 ∓ 7 (2δ) mW m-2 for verification of the thermal models could be determined in the area. The models are further constrained by the geophysically-estimated depth of the lithosphere-asthenosphere boundary (LAB) defined by the 1300 °C isotherm. A LAB depth of 100 km, as seismically derived for the Ardennes, provides the best fit with the measured surface heat flow. The resulting mantle heat flow amounts to ∼40 mW m-2. Modelled temperatures are in the range of 120-125 °C at 5 km depth and of 600-650 °C at the crust/mantle discontinuity (Moho). Possible thermal consequences of the 10-20 Ma old Eifel plume, which apparently caused upwelling of the asthenospheric mantle to 50-60 km depth, were modelled in a steady-state thermal scenario resulting in a surface heat flow of at least 91 mW m-2 (for the plume top at 60 km) in the Eifel region. Available surface heat-flow values are significantly lower (65-80 mW m-2) and indicate that the plume-related heating has not yet entirely reached the surface. Once conceptual geological models are established and the thermal regime is assessed, the geothermal potential of Luxembourg and the surrounding areas is evaluated by additional consideration of the hydrogeology, the stress field and tectonically active regions. On the one hand, low-enthalpy hydrothermal reservoirs in Mesozoic reservoirs in the Trier-Luxembourg Embayment (TLE) are considered. On the other hand, petrothermal reservoirs in the Lower Devonian basement of the Ardennes and Eifel regions are considered for exploitation by Enhanced/Engineered Geothermal Systems (EGS). Among the Mesozoic aquifers, the Buntsandstein aquifer characterized by temperatures of up to 50 °C is a suitable hydrothermal reservoir that may be exploited by means of heat pumps or provide direct heat for various applications. The most promising area is the zone of the SE-Luxembourg Graben. The aquifer is warmest underneath the upper Alzette River valley and the limestone plateau in Lorraine, where the Buntsandstein aquifer lies below a thick Mesozoic cover. At the base of an inferred Rotliegend graben in the same area, temperatures of up to 75 °C are expected. However, geological and hydraulic conditions are uncertain. In the Lower Devonian basement, thick sandstone-/quartzite-rich formations with temperatures >90 °C are expected at depths >3.5 km and likely offer the possibility of direct heat use. The setting of the S{\"u}deifel (South Eifel) region, including the M{\"u}llerthal region near Echternach, as a tectonically active zone may offer the possibility of deep hydrothermal reservoirs in the fractured Lower Devonian basement. Based on the recent findings about the structure of the Trier-Luxembourg Basin, the new concept presents the M{\"u}llerthal-S{\"u}deifel Depression (MSD) as a Cenozoic structure that remains tectonically active and subsiding, and therefore is relevant for geothermal exploration. Beyond direct use of geothermal heat, the expected modest temperatures at 5 km depth (about 120 °C) and increased permeability by EGS in the quartzite-rich Lochkovian could prospectively enable combined geothermal heat production and power generation in Luxembourg and the western realm of the Eifel region.}, language = {en} } @phdthesis{Bande2016, author = {Bande, Alejandro}, title = {The tectonic evolution of the western Tien Shan}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398933}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 119}, year = {2016}, abstract = {Intracontinental deformation usually is a result of tectonic forces associated with distant plate collisions. In general, the evolution of mountain ranges and basins in this environment is strongly controlled by the distribution and geometries of preexisting structures. Thus, predictive models usually fail in forecasting the deformation evolution in these kinds of settings. Detailed information on each range and basin-fill is vital to comprehend the evolution of intracontinental mountain belts and basins. In this dissertation, I have investigated the complex Cenozoic tectonic evolution of the western Tien Shan in Central Asia, which is one of the most active intracontinental ranges in the world. The work presented here combines a broad array of datasets, including thermo- and geochronology, paleoenvironmental interpretations, sediment provenance and subsurface interpretations in order to track changes in tectonic deformation. Most of the identified changes are connected and can be related to regional-scale processes that governed the evolution of the western Tien Shan. The NW-SE trending Talas-Fergana fault (TFF) separates the western from the central Tien Shan and constitutes a world-class example of the influence of preexisting anisotropies on the subsequent structural development of a contractile orogen. While to the east most of ranges and basins have a sub-parallel E-W trend, the triangular-shaped Fergana basin forms a substantial feature in the western Tien Shan morphology with ranges on all three sides. In this thesis, I present 55 new thermochronologic ages (apatite fission track and zircon (U-Th)/He)) used to constrain exhumation histories of several mountain ranges in the western Tien Shan. At the same time, I analyzed the Fergana basin-fill looking for progressive changes in sedimentary paleoenvironments, source areas and stratal geometrical configurations in the subsurface and outcrops. The data presented in this thesis suggests that low cooling rates (<1°C Myr-1), calm depositional environments, and low depositional rates (<10 m Myr-1) were widely distributed across the western Tien Shan, describing a quiescent tectonic period throughout the Paleogene. Increased cooling rates in the late Cenozoic occurred diachronously and with variable magnitudes in different ranges. This rapid cooling stage is interpreted to represent increased erosion caused by active deformation and constrains the onset of Cenozoic deformation in the western Tien Shan. Time-temperature histories derived from the northwestern Tien Shan samples show an increase in cooling rates by ~25 Ma. This event is correlated with a synchronous pulse iv in the South Tien Shan. I suggest that strike-slip motion along the TFF commenced at the Oligo-Miocene boundary, facilitating CCW rotation of the Fergana basin and enabling exhumation of the linked horsetail splays. Higher depositional rates (~150 m Myr-1) in the Oligo-Miocene section (Massaget Fm.) of the Fergana basin suggest synchronous deformation in the surrounding ranges. The central Alai Range also experienced rapid cooling around this time, suggesting that the onset of intramontane basin fragmentation and isolation is coeval. These results point to deformation starting simultaneously in the late Oligocene - early Miocene in geographically distant mountain ranges. I suggest that these early uplifts are controlled by reactivated structures (like the TFF), which are probably the frictionally weakest and most-suitably oriented for accommodating and transferring N-S horizontal shortening along the western Tien Shan. Afterwards, in the late Miocene (~10 Ma), a period of renewed rapid cooling affected the Tien Shan and most mountain ranges and inherited structures started to actively deform. This episode is widely distributed and an increase in exhumation is interpreted in most of the sampled ranges. Moreover, the Pliocene section in the basin subsurface shows the higher depositional rates (>180 m Myr-1) and higher energy facies. The deformation and exhumation increase further contributed to intramontane basin partitioning. Overall, the interpretation is that the Tien Shan and much of Central Asia suffered a global increase in the rate of horizontal crustal shortening. Previously, stress transfer along the rigid Tarim block or Pamir indentation has been proposed to account for Himalayan hinterland deformation. However, the extent of the episode requires a different and broader geodynamic driver.}, language = {en} } @phdthesis{Rohlf2016, author = {Rohlf, Helena L.}, title = {The development of aggression in middle childhood}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-95457}, school = {Universit{\"a}t Potsdam}, pages = {242}, year = {2016}, abstract = {Background: The engagement in aggressive behavior in middle childhood is linked to the development of severe problems in later life. Thus, identifying factors and processes that con-tribute to the continuity and increase of aggression in middle childhood is essential in order to facilitate the development of intervention programs. The present PhD thesis aimed at expand-ing the understanding of the development of aggression in middle childhood by examining risk factors in the intrapersonal and interpersonal domains as well as the interplay between these factors: Maladaptive anger regulation was examined as an intrapersonal risk factor; processes that occur in the peer context (social rejection and peer socialization) were included as interpersonal risk factors. In addition, in order to facilitate the in situ assessment of anger regulation strategies, an observational measure of anger regulation was developed and validated. Method: The research aims were addressed within the scope of four articles. Data from two measurement time points about ten months apart were available for the analyses. Participants were elementary school children aged from 6 to 10 years at T1 and 7 to 11 years at T2. The first article was based on cross-sectional analyses including only the first time point; in the remaining three articles longitudinal associations across the two time points were analyzed. The first two articles were concerned with the development and cross-sectional as well as longitudinal validation of observational measure of anger regulation in middle childhood in a sample of 599 children. Using the same sample, the third article investigated the longitudinal link between maladaptive anger regulation and aggression considering social rejection as a mediating variable. The frequency as well as different functions of aggression (reactive and proactive) were included as outcomes measures. The fourth article examined the influence of class-level aggression on the development of different forms of aggression (relational and physical) over time under consideration of differences in initial individual aggression in a sample of 1,284 children. In addition, it was analyzed if the path from aggression to social rejection varies as a function of class-level aggression. Results: The first two articles revealed that the observational measure of anger regulation developed for the purpose of this research was cross-sectionally related to anger reactivity, aggression and social rejection as well as longitudinally related to self-reported anger regula-tion. In the third article it was found that T1 maladaptive anger regulation showed no direct link to T2 aggression, but an indirect link through T1 social rejection. This indirect link was found for the frequency of aggression as well as for reactive and proactive aggression. The fourth article revealed that with regard to relational aggression, a high level of classroom ag-gression predicted an increase of individual aggression only among children with initially low levels of aggression. For physical aggression, it was found that the overall level of aggression in the class affected all children equally. In addition, physical aggression increased the likelihood of social rejection irrespective of the class-level of aggression whereas relational aggression caused social rejection only in classes with a generally low level of relational aggression. The analyses of gender-specific effects showed that children were mainly influenced by their same-gender peers and that the effect on the opposite gender was higher if children engaged in gender-atypical forms of aggressive behavior. Conclusion: The results provided evidence for the construct and criterion validity of the observational measure of maladaptive anger regulation that was developed within the scope of this research. Furthermore, the findings indicated that maladaptive anger regulation constitutes an important risk factor of aggression through the influence of social rejection. Finally, the results demonstrated that the level of aggression among classmates is relevant for the development of individual aggression over time and that the children´s evaluation of relationally aggressive behavior varies as a function of the normativity of relational aggression in the class. The study findings have implications for the measurement of anger regulation in middle childhood as well as for the prevention of aggression and social rejection.}, language = {en} } @phdthesis{Jung2016, author = {Jung, Janis Moritz}, title = {The role of dysfunctional peer relationships and academic failure in the development and progression of aggression}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394477}, school = {Universit{\"a}t Potsdam}, pages = {163}, year = {2016}, abstract = {Background: Aggression is a severe behavioral problem that interferes with many developmental challenges individuals face in middle childhood and adolescence. Particularly in the peer and in the academic domain, aggression inhibits the individual from making important learning experiences that are predictive for a healthy transition into adulthood. Furthermore, the resulting developmental deficits have the propensity to feedback and to promote aggression at later developmental stages. The aim of the present PhD thesis was to investigate pathways and processes involved in the etiology of aggression by examining the interrelation between multiple developmental problems in the peer and in the academic domain. More specifically, the relevance of affiliation with deviant peers as a driving mechanism for the development of aggression, factors promoting the affiliation with deviant peers (social rejection; academic failure), and mechanisms by which affiliation with deviant peers leads to aggression (external locus of control) were investigated. Method: The research questions were addressed by three studies. Three data waves were available for the first study, the second and third study were based on two data waves. The first study specified pathways to antisocial behavior by investigating the temporal interrelation between social rejection, academic failure, and affiliation with deviant peers in a sample of 1,657 male and female children and adolescents aged between 6 and 15 years. The second study examined the role of external control beliefs as a potential mediator in the link between affiliation with deviant peers and aggression in a sample of 1,466 children and adolescents in the age of 9 to 19 years, employing a half-longitudinal design. The third study aimed to expand the findings of Study 1 and Study 2 by examining the differential predictivity of combinations of developmental risks for different functions of aggression, using a sample of 1,479 participants in the age between 9 and 19 years. First, profiles of social rejection, academic failure, and affiliation with deviant peers were identified, using latent profile analysis. Second, prospective pathways between risk-profiles and reactive and proactive aggression were investigated, using latent path analysis. Results: The first study revealed that antisocial behavior at T1 was associated with social rejection and academic failure at T2. Both mechanisms promoted affiliation with deviant peers at the same data wave, which predicted deviancy at T3. Furthermore, both an indirect pathway via social rejection and affiliation with deviant peers and an indirect pathway via academic failure and affiliation with deviant peers significantly mediated the link between antisocial behavior at the first and the third data wave. Additionally, the proposed pathways generalized across genders and different age groups. The second study yielded that external control beliefs significantly mediated the link between affiliation with deviant peers and aggression, with affiliation with deviant peers at T1 predicting external control beliefs at T2 and external control beliefs at T1 predicting aggressive behavior at T2. Again, the analyses provided no evidence for gender and age specific variations in the proposed pathways. In the third study, three distinct risk groups were identified, made up of a large non-risk group, with low scores on all risk measures, a group characterized by high scores on social rejection (SR group), and a group with the highest scores on measures of affiliation with deviant peers and academic failure (APAF group). Importantly, risk group membership was differentially associated with reactive and proactive aggression. Only membership in the SR group at T1 was associated with the development of reactive aggression at T2 and only membership in the APAF group at T1 predicted proactive aggression at T2. Additionally, proactive aggression at T1 predicted membership in the APAF group at T2, indicating a reciprocal relationship between both constructs. Conclusion: The results demonstrated that aggression causes severe behavioral deficits in social and academic domains which promote future aggression by increasing individuals' tendency to affiliate with deviant peers. The stimulation of external control beliefs provides an explanation for deviant peers' effect on the progression and intensification of aggression. Finally, multiple developmental risks were shown to co-occur within individuals and to be differentially predictive of reactive and proactive aggression. The findings of this doctoral dissertation have possible implications for the conceptualization of prevention and intervention programs aimed to reduce aggression in middle childhood and adolescence.}, language = {en} } @phdthesis{Olonscheck2016, author = {Olonscheck, Mady}, title = {Climate change impacts on electricity and residential energy demand}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98378}, school = {Universit{\"a}t Potsdam}, pages = {XXIV, 127}, year = {2016}, abstract = {The energy sector is both affected by climate change and a key sector for climate protection measures. Energy security is the backbone of our modern society and guarantees the functioning of most critical infrastructure. Thus, decision makers and energy suppliers of different countries should be familiar with the factors that increase or decrease the susceptibility of their electricity sector to climate change. Susceptibility means socioeconomic and structural characteristics of the electricity sector that affect the demand for and supply of electricity under climate change. Moreover, the relevant stakeholders are supposed to know whether the given national energy and climate targets are feasible and what needs to be done in order to meet these targets. In this regard, a focus should be on the residential building sector as it is one of the largest energy consumers and therefore emitters of anthropogenic CO 2 worldwide. This dissertation addresses the first aspect, namely the susceptibility of the electricity sector, by developing a ranked index which allows for quantitative comparison of the electricity sector susceptibility of 21 European countries based on 14 influencing factors. Such a ranking has not been completed to date. We applied a sensitivity analysis to test the relative effect of each influencing factor on the susceptibility index ranking. We also discuss reasons for the ranking position and thus the susceptibility of selected countries. The second objective, namely the impact of climate change on the energy demand of buildings, is tackled by means of a new model with which the heating and cooling energy demand of residential buildings can be estimated. We exemplarily applied the model to Germany and the Netherlands. It considers projections of future changes in population, climate and the insulation standards of buildings, whereas most of the existing studies only take into account fewer than three different factors that influence the future energy demand of buildings. Furthermore, we developed a comprehensive retrofitting algorithm with which the total residential building stock can be modeled for the first time for each year in the past and future. The study confirms that there is no correlation between the geographical location of a country and its position in the electricity sector susceptibility ranking. Moreover, we found no pronounced pattern of susceptibility influencing factors between countries that ranked higher or lower in the index. We illustrate that Luxembourg, Greece, Slovakia and Italy are the countries with the highest electricity sector susceptibility. The electricity sectors of Norway, the Czech Republic, Portugal and Denmark were found to be least susceptible to climate change. Knowledge about the most important factors for the poor and good ranking positions of these countries is crucial for finding adequate adaptation measures to reduce the susceptibility of the electricity sector. Therefore, these factors are described within this study. We show that the heating energy demand of residential buildings will strongly decrease in both Germany and the Netherlands in the future. The analysis for the Netherlands focused on the regional level and a finer temporal resolution which revealed strong variations in the future heating energy demand changes by province and by month. In the German study, we additionally investigated the future cooling energy demand and could demonstrate that it will only slightly increase up to the middle of this century. Thus, increases in the cooling energy demand are not expected to offset reductions in heating energy demand. The main factor for substantial heating energy demand reductions is the retrofitting of buildings. We are the first to show that the given German and Dutch energy and climate targets in the building sector can only be met if the annual retrofitting rates are substantially increased. The current rate of only about 1 \% of the total building stock per year is insufficient for reaching a nearly zero-energy demand of all residential buildings by the middle of this century. To reach this target, it would need to be at least tripled. To sum up, this thesis emphasizes that country-specific characteristics are decisive for the electricity sector susceptibility of European countries. It also shows for different scenarios how much energy is needed in the future to heat and cool residential buildings. With this information, existing climate mitigation and adaptation measures can be justified or new actions encouraged.}, language = {en} } @phdthesis{Daschewski2016, author = {Daschewski, Maxim}, title = {Thermophony in real gases}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98866}, school = {Universit{\"a}t Potsdam}, pages = {79}, year = {2016}, abstract = {A thermophone is an electrical device for sound generation. The advantages of thermophones over conventional sound transducers such as electromagnetic, electrostatic or piezoelectric transducers are their operational principle which does not require any moving parts, their resonance-free behavior, their simple construction and their low production costs. In this PhD thesis, a novel theoretical model of thermophonic sound generation in real gases has been developed. The model is experimentally validated in a frequency range from 2 kHz to 1 MHz by testing more then fifty thermophones of different materials, including Carbon nano-wires, Titanium, Indium-Tin-Oxide, different sizes and shapes for sound generation in gases such as air, argon, helium, oxygen, nitrogen and sulfur hexafluoride. Unlike previous approaches, the presented model can be applied to different kinds of thermophones and various gases, taking into account the thermodynamic properties of thermophone materials and of adjacent gases, degrees of freedom and the volume occupied by the gas atoms and molecules, as well as sound attenuation effects, the shape and size of the thermophone surface and the reduction of the generated acoustic power due to photonic emission. As a result, the model features better prediction accuracy than the existing models by a factor up to 100. Moreover, the new model explains previous experimental findings on thermophones which can not be explained with the existing models. The acoustic properties of the thermophones have been tested in several gases using unique, highly precise experimental setups comprising a Laser-Doppler-Vibrometer combined with a thin polyethylene film which acts as a broadband and resonance-free sound-pressure detector. Several outstanding properties of the thermophones have been demonstrated for the first time, including the ability to generate arbitrarily shaped acoustic signals, a greater acoustic efficiency compared to conventional piezoelectric and electrostatic airborne ultrasound transducers, and applicability as powerful and tunable sound sources with a bandwidth up to the megahertz range and beyond. Additionally, new applications of thermophones such as the study of physical properties of gases, the thermo-acoustic gas spectroscopy, broad-band characterization of transfer functions of sound and ultrasound detection systems, and applications in non-destructive materials testing are discussed and experimentally demonstrated.}, language = {en} } @phdthesis{Raeling2016, author = {R{\"a}ling, Romy}, title = {Age of acquisition and semantic typicality effects}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-95943}, school = {Universit{\"a}t Potsdam}, pages = {x, 133}, year = {2016}, abstract = {Age of acquisition (AOA) is a psycholinguistic variable that significantly influences behavioural measures (response times and accuracy rates) in tasks that require lexical and semantic processing. Its origin is - unlike the origin of semantic typicality (TYP), which is assumed at the semantic level - controversially discussed. Different theories propose AOA effects to originate either at the semantic level or at the link between semantics and phonology (lemma-level). The dissertation aims at investigating the influence of AOA and its interdependence with the semantic variable TYP on particularly semantic processing in order to pinpoint the origin of AOA effects. Therefore, three studies have been conducted that considered the variables AOA and TYP in semantic processing tasks (category verifications and animacy decisions) by means of behavioural and partly electrophysiological (ERP) data and in different populations (healthy young and elderly participants and in semantically impaired individuals with aphasia (IWA)). The behavioural and electrophysiological data of the three studies provide evidence for distinct processing levels of the variables AOA and TYP. The data further support previous assumptions on a semantic origin for TYP but question the same for AOA. The findings, however, support an origin of AOA effects at the transition between the word form (phonology) and the semantic level that can be captured at the behavioural but not at the electrophysiological level.}, language = {en} } @phdthesis{Klier2016, author = {Klier, Dennis Tobias}, title = {Upconversion luminescence in Er-codoped NaYF4 nanoparticles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98486}, school = {Universit{\"a}t Potsdam}, pages = {ix, 89}, year = {2016}, abstract = {In the context of an increasing population of aging people and a shift of medical paradigm towards an individualized medicine in health care, nanostructured lanthanides doped sodium yttrium fluoride (NaYF4) represents an exciting class of upconversion nanomaterials (UCNM) which are suitable to bring forward developments in biomedicine and -biodetection. Despite the fact that among various fluoride based upconversion (UC) phosphors lanthanide doped NaYF4 is one of the most studied upconversion nanomaterial, many open questions are still remaining concerning the interplay of the population routes of sensitizer and activator electronic states involved in different luminescence upconversion photophysics as well as the role of phonon coupling. The collective work aims to explore a detailed understanding of the upconversion mechanism in nanoscaled NaYF4 based materials co-doped with several lanthanides, e.g. Yb3+ and Er3+ as the "standard" type upconversion nanoparticles (UCNP) up to advanced UCNP with Gd3+ and Nd3+. Especially the impact of the crystal lattice structure as well as the resulting lattice phonons on the upconversion luminescence was investigated in detail based on different mixtures of cubic and hexagonal NaYF4 nanoscaled crystals. Three synthesis methods, depending on the attempt of the respective central spectroscopic questions, could be accomplished in the following work. NaYF4 based upconversion nanoparticles doped with several combination of lanthanides (Yb3+, Er3+, Gd3+ and Nd3+) were synthesized successfully using a hydrothermal synthesis method under mild conditions as well as a co-precipitation and a high temperature co-precipitation technique. Structural information were gathered by means of X-ray diffraction (XRD), electron microscopy (TEM), dynamic light scattering (DLS), Raman spectroscopy and inductively coupled plasma atomic emission spectrometry (ICP-OES). The results were discussed in detail with relation to the spectroscopic results. A variable spectroscopic setup was developed for multi parameter upconversion luminescence studies at various temperature 4 K to 328 K. Especially, the study of the thermal behavior of upconversion luminescence as well as time resolved area normalized emission spectra were a prerequisite for the detailed understanding of intramolecular deactivation processes, structural changes upon annealing or Gd3+ concentration, and the role of phonon coupling for the upconversion efficiency. Subsequently it became possible to synthesize UCNP with tailored upconversion luminescence properties. In the end, the potential of UCNP for life science application should be enunciated in context of current needs and improvements of a nanomaterial based optical sensors, whereas the "standard" UCNP design was attuned according to the special conditions in the biological matrix. In terms of a better biocompatibility due to a lower impact on biological tissue and higher penetrability for the excitation light. The first step into this direction was to use Nd3+ ions as a new sensitizer in tridoped NaYF4 based UCNP, whereas the achieved absolute and relative temperature sensitivity is comparable to other types of local temperature sensors in the literature.}, language = {en} } @phdthesis{Hoffmann2016, author = {Hoffmann, Bernd}, title = {Plant organic matter mobilization and export in fluvial systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99336}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 131}, year = {2016}, abstract = {The global carbon cycle is closely linked to Earth's climate. In the context of continuously unchecked anthropogenic CO₂ emissions, the importance of natural CO₂ bond and carbon storage is increasing. An important biogenic mechanism of natural atmospheric CO₂ drawdown is the photosynthetic carbon fixation in plants and the subsequent longterm deposition of plant detritus in sediments. The main objective of this thesis is to identify factors that control mobilization and transport of plant organic matter (pOM) through rivers towards sedimentation basins. I investigated this aspect in the eastern Nepalese Arun Valley. The trans-Himalayan Arun River is characterized by a strong elevation gradient (205 - 8848 m asl) that is accompanied by strong changes in ecology and climate ranging from wet tropical conditions in the Himalayan forelad to high alpine tundra on the Tibetan Plateau. Therefore, the Arun is an excellent natural laboratory, allowing the investigation of the effect of vegetation cover, climate, and topography on plant organic matter mobilization and export in tributaries along the gradient. Based on hydrogen isotope measurements of plant waxes sampled along the Arun River and its tributaries, I first developed a model that allows for an indirect quantification of pOM contributed to the mainsetm by the Arun's tributaries. In order to determine the role of climatic and topographic parameters of sampled tributary catchments, I looked for significant statistical relations between the amount of tributary pOM export and tributary characteristics (e.g. catchment size, plant cover, annual precipitation or runoff, topographic measures). On one hand, I demonstrated that pOMsourced from the Arun is not uniformly derived from its entire catchment area. On the other, I showed that dense vegetation is a necessary, but not sufficient, criterion for high tributary pOM export. Instead, I identified erosion and rainfall and runoff as key factors controlling pOM sourcing in the Arun Valley. This finding is supported by terrestrial cosmogenic nuclide concentrations measured on river sands along the Arun and its tributaries in order to quantify catchment wide denudation rates. Highest denudation rates corresponded well with maximum pOM mobilization and export also suggesting the link between erosion and pOM sourcing. The second part of this thesis focusses on the applicability of stable isotope records such as plant wax n-alkanes in sediment archives as qualitative and quantitative proxy for the variability of past Indian Summer Monsoon (ISM) strength. First, I determined how ISM strength affects the hydrogen and oxygen stable isotopic composition (reported as δD and δ18O values vs. Vienna Standard Mean Ocean Water) of precipitation in the Arun Valley and if this amount effect (Dansgaard, 1964) is strong enough to be recorded in potential paleo-ISM isotope proxies. Second, I investigated if potential isotope records across the Arun catchment reflect ISM strength dependent precipitation δD values only, or if the ISM isotope signal is superimposed by winter precipitation or glacial melt. Furthermore, I tested if δD values of plant waxes in fluvial deposits reflect δD values of environmental waters in the respective catchments. I showed that surface water δD values in the Arun Valley and precipitation δD from south of the Himalaya both changed similarly during two consecutive years (2011 \& 2012) with distinct ISM rainfall amounts (~20\% less in 2012). In order to evaluate the effect of other water sources (Winter-Westerly precipitation, glacial melt) and evapotranspiration in the Arun Valley, I analysed satellite remote sensing data of rainfall distribution (TRMM 3B42V7), snow cover (MODIS MOD10C1), glacial coverage (GLIMSdatabase, Global Land Ice Measurements from Space), and evapotranspiration (MODIS MOD16A2). In addition to the predominant ISM in the entire catchment I found through stable isotope analysis of surface waters indications for a considerable amount of glacial melt derived from high altitude tributaries and the Tibetan Plateau. Remotely sensed snow cover data revealed that the upper portion of the Arun also receives considerable winter precipitation, but the effect of snow melt on the Arun Valley hydrology could not be evaluated as it takes place in early summer, several months prior to our sampling campaigns. However, I infer that plant wax records and other potential stable isotope proxy archives below the snowline are well-suited for qualitative, and potentially quantitative, reconstructions of past changes of ISM strength.}, language = {en} } @phdthesis{AlSaffar2016, author = {Al-Saffar, Loay Talib Ahmed}, title = {Analysing prerequisites, expectations, apprehensions, and attitudes of university students studying Computer science}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98437}, school = {Universit{\"a}t Potsdam}, pages = {xii, 131}, year = {2016}, abstract = {The main objective of this dissertation is to analyse prerequisites, expectations, apprehensions, and attitudes of students studying computer science, who are willing to gain a bachelor degree. The research will also investigate in the students' learning style according to the Felder-Silverman model. These investigations fall in the attempt to make an impact on reducing the "dropout"/shrinkage rate among students, and to suggest a better learning environment. The first investigation starts with a survey that has been made at the computer science department at the University of Baghdad to investigate the attitudes of computer science students in an environment dominated by women, showing the differences in attitudes between male and female students in different study years. Students are accepted to university studies via a centrally controlled admission procedure depending mainly on their final score at school. This leads to a high percentage of students studying subjects they do not want. Our analysis shows that 75\% of the female students do not regret studying computer science although it was not their first choice. And according to statistics over previous years, women manage to succeed in their study and often graduate on top of their class. We finish with a comparison of attitudes between the freshman students of two different cultures and two different university enrolment procedures (University of Baghdad, in Iraq, and the University of Potsdam, in Germany) both with opposite gender majority. The second step of investigation took place at the department of computer science at the University of Potsdam in Germany and analyzes the learning styles of students studying the three major fields of study offered by the department (computer science, business informatics, and computer science teaching). Investigating the differences in learning styles between the students of those study fields who usually take some joint courses is important to be aware of which changes are necessary to be adopted in the teaching methods to address those different students. It was a two stage study using two questionnaires; the main one is based on the Index of Learning Styles Questionnaire of B. A. Solomon and R. M. Felder, and the second questionnaire was an investigation on the students' attitudes towards the findings of their personal first questionnaire. Our analysis shows differences in the preferences of learning style between male and female students of the different study fields, as well as differences between students with the different specialties (computer science, business informatics, and computer science teaching). The third investigation looks closely into the difficulties, issues, apprehensions and expectations of freshman students studying computer science. The study took place at the computer science department at the University of Potsdam with a volunteer sample of students. The goal is to determine and discuss the difficulties and issues that they are facing in their study that may lead them to think in dropping-out, changing the study field, or changing the university. The research continued with the same sample of students (with business informatics students being the majority) through more than three semesters. Difficulties and issues during the study were documented, as well as students' attitudes, apprehensions, and expectations. Some of the professors and lecturers opinions and solutions to some students' problems were also documented. Many participants had apprehensions and difficulties, especially towards informatics subjects. Some business informatics participants began to think of changing the university, in particular when they reached their third semester, others thought about changing their field of study. Till the end of this research, most of the participants continued in their studies (the study they have started with or the new study they have changed to) without leaving the higher education system.}, language = {en} } @phdthesis{Teshebaeva2016, author = {Teshebaeva, Kanayim}, title = {SAR interferometry analysis of surface processes in the Pamir - Tien Shan active orogens - emphasis on coseismic deformation and landslides}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-96743}, school = {Universit{\"a}t Potsdam}, pages = {128}, year = {2016}, abstract = {This thesis presents new approaches of SAR methods and their application to tectonically active systems and related surface deformation. With 3 publications two case studies are presented: (1) The coseismic deformation related to the Nura earthquake (5th October 2008, magnitude Mw 6.6) at the eastern termination of the intramontane Alai valley. Located between the southern Tien Shan and the northern Pamir the coseismic surface displacements are analysed using SAR (Synthetic Aperture RADAR) data. The results show clear gradients in the vertical and horizontal directions along a complex pattern of surface ruptures and active faults. To integrate and to interpret these observations in the context of the regional active tectonics a SAR data analysis is complemented with seismological data and geological field observations. The main moment release of the Nura earthquake appears to be on the Pamir Frontal thrust, while the main surface displacements and surface rupture occurred in the footwall and along of the NE-SW striking Irkeshtam fault. With InSAR data from ascending and descending satellite tracks along with pixel offset measurements the Nura earthquake source is modelled as a segmented rupture. One fault segment corresponds to high-angle brittle faulting at the Pamir Frontal thrust and two more fault segments show moderate-angle and low-friction thrusting at the Irkeshtam fault. The integrated analysis of the coseismic deformation argues for a rupture segmentation and strain partitioning associated to the earthquake. It possibly activated an orogenic wedge in the easternmost segment of the Pamir-Alai collision zone. Further, the style of the segmentation may be associated with the presence of Paleogene evaporites. (2) The second focus is put on slope instabilities and consequent landslides in the area of prominent topographic transition between the Fergana basin and high-relief Alai range. The Alai range constitutes an active orogenic wedge of the Pamir - Tien Shan collision zone that described as a progressively northward propagating fold-and-thrust belt. The interferometric analysis of ALOS/PALSAR radar data integrates a period of 4 years (2007-2010) based on the Small Baseline Subset (SBAS) time-series technique to assess surface deformation with millimeter surface change accuracy. 118 interferograms are analyzed to observe spatially-continuous movements with downslope velocities up to 71 mm/yr. The obtained rates indicate slow movement of the deep-seated landslides during the observation time. We correlated these movements with precipitation and seismic records. The results suggest that the deformation peaks correlate with rainfall in the 3 preceding months and with one earthquake event. In the next step, to understand the spatial pattern of landslide processes, the tectonic morphologic and lithologic settings are combined with the patterns of surface deformation. We demonstrate that the lithological and tectonic structural patterns are the main controlling factors for landslide occurrence and surface deformation magnitudes. Furthermore active contractional deformation in the front of the orogenic wedge is the main mechanism to sustain relief. Some of the slower but continuously moving slope instabilities are directly related to tectonically active faults and unconsolidated young Quaternary syn-orogenic sedimentary sequences. The InSAR observed slow moving landslides represent active deep-seated gravitational slope deformation phenomena which is first time observed in the Tien Shan mountains. Our approach offers a new combination of InSAR techniques and tectonic aspects to localize and understand enhanced slope instabilities in tectonically active mountain fronts in the Kyrgyz Tien Shan.}, language = {en} } @phdthesis{Prasse2016, author = {Prasse, Paul}, title = {Pattern recognition for computer security}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100251}, school = {Universit{\"a}t Potsdam}, pages = {VI, 75}, year = {2016}, abstract = {Computer Security deals with the detection and mitigation of threats to computer networks, data, and computing hardware. This thesis addresses the following two computer security problems: email spam campaign and malware detection. Email spam campaigns can easily be generated using popular dissemination tools by specifying simple grammars that serve as message templates. A grammar is disseminated to nodes of a bot net, the nodes create messages by instantiating the grammar at random. Email spam campaigns can encompass huge data volumes and therefore pose a threat to the stability of the infrastructure of email service providers that have to store them. Malware -software that serves a malicious purpose- is affecting web servers, client computers via active content, and client computers through executable files. Without the help of malware detection systems it would be easy for malware creators to collect sensitive information or to infiltrate computers. The detection of threats -such as email-spam messages, phishing messages, or malware- is an adversarial and therefore intrinsically difficult problem. Threats vary greatly and evolve over time. The detection of threats based on manually-designed rules is therefore difficult and requires a constant engineering effort. Machine-learning is a research area that revolves around the analysis of data and the discovery of patterns that describe aspects of the data. Discriminative learning methods extract prediction models from data that are optimized to predict a target attribute as accurately as possible. Machine-learning methods hold the promise of automatically identifying patterns that robustly and accurately detect threats. This thesis focuses on the design and analysis of discriminative learning methods for the two computer-security problems under investigation: email-campaign and malware detection. The first part of this thesis addresses email-campaign detection. We focus on regular expressions as a syntactic framework, because regular expressions are intuitively comprehensible by security engineers and administrators, and they can be applied as a detection mechanism in an extremely efficient manner. In this setting, a prediction model is provided with exemplary messages from an email-spam campaign. The prediction model has to generate a regular expression that reveals the syntactic pattern that underlies the entire campaign, and that a security engineers finds comprehensible and feels confident enough to use the expression to blacklist further messages at the email server. We model this problem as two-stage learning problem with structured input and output spaces which can be solved using standard cutting plane methods. Therefore we develop an appropriate loss function, and derive a decoder for the resulting optimization problem. The second part of this thesis deals with the problem of predicting whether a given JavaScript or PHP file is malicious or benign. Recent malware analysis techniques use static or dynamic features, or both. In fully dynamic analysis, the software or script is executed and observed for malicious behavior in a sandbox environment. By contrast, static analysis is based on features that can be extracted directly from the program file. In order to bypass static detection mechanisms, code obfuscation techniques are used to spread a malicious program file in many different syntactic variants. Deobfuscating the code before applying a static classifier can be subjected to mostly static code analysis and can overcome the problem of obfuscated malicious code, but on the other hand increases the computational costs of malware detection by an order of magnitude. In this thesis we present a cascaded architecture in which a classifier first performs a static analysis of the original code and -based on the outcome of this first classification step- the code may be deobfuscated and classified again. We explore several types of features including token \$n\$-grams, orthogonal sparse bigrams, subroutine-hashings, and syntax-tree features and study the robustness of detection methods and feature types against the evolution of malware over time. The developed tool scans very large file collections quickly and accurately. Each model is evaluated on real-world data and compared to reference methods. Our approach of inferring regular expressions to filter emails belonging to an email spam campaigns leads to models with a high true-positive rate at a very low false-positive rate that is an order of magnitude lower than that of a commercial content-based filter. Our presented system -REx-SVMshort- is being used by a commercial email service provider and complements content-based and IP-address based filtering. Our cascaded malware detection system is evaluated on a high-quality data set of almost 400,000 conspicuous PHP files and a collection of more than 1,00,000 JavaScript files. From our case study we can conclude that our system can quickly and accurately process large data collections at a low false-positive rate.}, language = {en} } @phdthesis{Hohenbrink2016, author = {Hohenbrink, Tobias Ludwig}, title = {Turning a problem into a solution: heterogeneities in soil hydrology}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101485}, school = {Universit{\"a}t Potsdam}, pages = {x, 123}, year = {2016}, abstract = {It is commonly recognized that soil moisture exhibits spatial heterogeneities occurring in a wide range of scales. These heterogeneities are caused by different factors ranging from soil structure at the plot scale to land use at the landscape scale. There is an urgent need for effi-cient approaches to deal with soil moisture heterogeneity at large scales, where manage-ment decisions are usually made. The aim of this dissertation was to test innovative ap-proaches for making efficient use of standard soil hydrological data in order to assess seep-age rates and main controls on observed hydrological behavior, including the role of soil het-erogeneities. As a first step, the applicability of a simplified Buckingham-Darcy method to estimate deep seepage fluxes from point information of soil moisture dynamics was assessed. This was done in a numerical experiment considering a broad range of soil textures and textural het-erogeneities. The method performed well for most soil texture classes. However, in pure sand where seepage fluxes were dominated by heterogeneous flow fields it turned out to be not applicable, because it simply neglects the effect of water flow heterogeneity. In this study a need for new efficient approaches to handle heterogeneities in one-dimensional water flux models was identified. As a further step, an approach to turn the problem of soil moisture heterogeneity into a solu-tion was presented: Principal component analysis was applied to make use of the variability among soil moisture time series for analyzing apparently complex soil hydrological systems. It can be used for identifying the main controls on the hydrological behavior, quantifying their relevance, and describing their particular effects by functional averaged time series. The ap-proach was firstly tested with soil moisture time series simulated for different texture classes in homogeneous and heterogeneous model domains. Afterwards, it was applied to 57 mois-ture time series measured in a multifactorial long term field experiment in Northeast Germa-ny. The dimensionality of both data sets was rather low, because more than 85 \% of the total moisture variance could already be explained by the hydrological input signal and by signal transformation with soil depth. The perspective of signal transformation, i.e. analyzing how hydrological input signals (e.g., rainfall, snow melt) propagate through the vadose zone, turned out to be a valuable supplement to the common mass flux considerations. Neither different textures nor spatial heterogeneities affected the general kind of signal transfor-mation showing that complex spatial structures do not necessarily evoke a complex hydro-logical behavior. In case of the field measured data another 3.6\% of the total variance was unambiguously explained by different cropping systems. Additionally, it was shown that dif-ferent soil tillage practices did not affect the soil moisture dynamics at all. The presented approach does not require a priori assumptions about the nature of physical processes, and it is not restricted to specific scales. Thus, it opens various possibilities to in-corporate the key information from monitoring data sets into the modeling exercise and thereby reduce model uncertainties.}, language = {en} } @phdthesis{Dannberg2016, author = {Dannberg, Juliane}, title = {Dynamics of mantle plumes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91024}, school = {Universit{\"a}t Potsdam}, pages = {162}, year = {2016}, abstract = {Mantle plumes are a link between different scales in the Earth's mantle: They are an important part of large-scale mantle convection, transporting material and heat from the core-mantle boundary to the surface, but also affect processes on a smaller scale, such as melt generation and transport and surface magmatism. When they reach the base of the lithosphere, they cause massive magmatism associated with the generation of large igneous provinces, and they can be related to mass extinction events (Wignall, 2001) and continental breakup (White and McKenzie, 1989). Thus, mantle plumes have been the subject of many previous numerical modelling studies (e.g. Farnetani and Richards, 1995; d'Acremont et al., 2003; Lin and van Keken, 2005; Sobolev et al., 2011; Ballmer et al., 2013). However, complex mechanisms, such as the development and implications of chemical heterogeneities in plumes, their interaction with mid-ocean ridges and global mantle flow, and melt ascent from the source region to the surface are still not very well understood; and disagreements between observations and the predictions of classical plume models have led to a challenge of the plume concept in general (Czamanske et al., 1998; Anderson, 2000; Foulger, 2011). Hence, there is a need for more sophisticated models that can explain the underlying physics, assess which properties and processes are important, explain how they cause the observations visible at the Earth's surface and provide a link between the different scales. In this work, integrated plume models are developed that investigate the effect of dense recycled oceanic crust on the development of mantle plumes, plume-ridge interaction under the influence of global mantle flow and melting and melt migration in form of two-phase flow. The presented analysis of these models leads to a new, updated picture of mantle plumes: Models considering a realistic depth-dependent density of recycled oceanic crust and peridotitic mantle material show that plumes with excess temperatures of up to 300 K can transport up to 15\% of recycled oceanic crust through the whole mantle. However, due to the high density of recycled crust, plumes can only advance to the base of the lithosphere directly if they have high excess temperatures, high plume volumes and the lowermost mantle is subadiabatic, or plumes rise from the top or edges of thermo-chemical piles. They might only cause minor surface uplift, and instead of the classical head-tail structure, these low-buoyancy plumes are predicted to be broad features in the lower mantle with much less pronounced plume heads. They can form a variety of shapes and regimes, including primary plumes directly advancing to the base of the lithosphere, stagnating plumes, secondary plumes rising from the core-mantle boundary or a pool of eclogitic material in the upper mantle and failing plumes. In the upper mantle, plumes are tilted and deflected by global mantle flow, and the shape, size and stability of the melting region is influenced by the distance from nearby plate boundaries, the speed of the overlying plate and the movement of the plume tail arriving from the lower mantle. Furthermore, the structure of the lithosphere controls where hot material is accumulated and melt is generated. In addition to melting in the plume tail at the plume arrival position, hot plume material flows upwards towards opening rifts, towards mid-ocean ridges and towards other regions of thinner lithosphere, where it produces additional melt due to decompression. This leads to the generation of either broad ridges of thickened magmatic crust or the separation into multiple thinner lines of sea mount chains at the surface. Once melt is generated within the plume, it influences its dynamics, lowering the viscosity and density, and while it rises the melt volume is increased up to 20\% due to decompression. Melt has the tendency to accumulate at the top of the plume head, forming diapirs and initiating small-scale convection when the plume reaches the base of the lithosphere. Together with the introduced unstable, high-density material produced by freezing of melt, this provides an efficient mechanism to thin the lithosphere above plume heads. In summary, this thesis shows that mantle plumes are more complex than previously considered, and linking the scales and coupling the physics of different processes occurring in mantle plumes can provide insights into how mantle plumes are influenced by chemical heterogeneities, interact with the lithosphere and global mantle flow, and are affected by melting and melt migration. Including these complexities in geodynamic models shows that plumes can also have broad plume tails, might produce only negligible surface uplift, can generate one or several volcanic island chains in interaction with a mid-ocean ridge, and can magmatically thin the lithosphere.}, language = {en} } @phdthesis{Antoniewicz2016, author = {Antoniewicz, Franziska}, title = {Automatic evaluations of exercising}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-92280}, school = {Universit{\"a}t Potsdam}, year = {2016}, abstract = {Changing the perspective sometimes offers completely new insights to an already well-known phenomenon. Exercising behavior, defined as planned, structured and repeated bodily movements with the intention to maintain or increase the physical fitness (Caspersen, Powell, \& Christenson, 1985), can be thought of as such a well-known phenomenon that has been in the scientific focus for many decades (Dishman \& O'Connor, 2005). Within these decades a perspective that assumes rational and controlled evaluations as the basis for decision making, was predominantly used to understand why some people engage in physical activity and others do not (Ekkekakis \& Zenko, 2015). Dual-process theories (Ekkekakis \& Zenko, 2015; Payne \& Gawronski, 2010) provide another perspective, that is not exclusively influenced by rational reasoning. These theories differentiate two different processes that guide behavior "depending on whether they operate automatically or in a controlled fashion" (Gawronski \& Creighton, 2012, p. 282). Following this line of thought, exercise behavior is not solely influenced by thoughtful deliberations (e.g. concluding that exercising is healthy) but also by spontaneous affective reactions (e.g. disliking being sweaty while exercising). The theoretical frameworks of dual-process models are not new in psychology (Chaiken \& Trope, 1999) and have already been used for the explanation of numerous behaviors (e.g. Hofmann, Friese, \& Wiers, 2008; Huijding, de Jong, Wiers, \& Verkooijen, 2005). However, they have only rarely been used for the explanation of exercise behavior (e.g. Bluemke, Brand, Schweizer, \& Kahlert, 2010; Conroy, Hyde, Doerksen, \& Ribeiro, 2010; Hyde, Doerksen, Ribeiro, \& Conroy, 2010). The assumption of two dissimilar behavior influencing processes, differs fundamentally from previous theories and thus from the research that has been conducted in the last decades in exercise psychology. Research mainly concentrated on predictors of the controlled processes and addressed the identified predictors in exercise interventions (Ekkekakis \& Zenko, 2015; Hagger, Chatzisarantis, \& Biddle, 2002). Predictors arising from the described automatic processes, for example automatic evaluations for exercising (AEE), have been neglected in exercise psychology for many years. Until now, only a few researchers investigated the influence of these AEE for exercising behavior (Bluemke et al., 2010; Brand \& Schweizer, 2015; Markland, Hall, Duncan, \& Simatovic, 2015). Marginally more researchers focused on the impact of AEE for physical activity behavior (Calitri, Lowe, Eves, \& Bennett, 2009; Conroy et al., 2010; Hyde et al., 2010; Hyde, Elavsky, Doerksen, \& Conroy, 2012). The extant studies mainly focused on the quality of AEE and the associated quantity of exercise (exercise much or little; Bluemke et al., 2010; Calitri et al., 2009; Conroy et al., 2010; Hyde et al., 2012). In sum, there is still a dramatic lack of empirical knowledge, when applying dual-process theories to exercising behavior, even though these theories have proven to be successful in explaining behavior in many other health-relevant domains like eating, drinking or smoking behavior (e.g. Hofmann et al., 2008). The main goal of the present dissertation was to collect empirical evidence for the influence of AEE on exercise behavior and to expand the so far exclusively correlational studies by experimentally controlled studies. By doing so, the ongoing debate on a paradigm shift from controlled and deliberative influences of exercise behavior towards approaches that consider automatic and affective influences (Ekkekakis \& Zenko, 2015) should be encouraged. All three conducted publications are embedded in dual-process theorizing (Gawronski \& Bodenhausen, 2006, 2014; Strack \& Deutsch, 2004). These theories offer a theoretical framework that could integrate the established controlled variables of exercise behavior explanation and additionally consider automatic factors for exercise behavior like AEE. Taken together, the empirical findings collected suggest that AEE play an important and diverse role for exercise behavior. They represent exercise setting preferences, are a cause for short-term exercise decisions and are decisive for long-term exercise adherence. Adding to the few already present studies in this field, the influence of (positive) AEE for exercise behavior was confirmed in all three presented publications. Even though the available set of studies needs to be extended in prospectively studies, first steps towards a more complete picture have been taken. Closing with the beginning of the synopsis: I think that time is right for a change of perspectives! This means a careful extension of the present theories with controlled evaluations explaining exercise behavior. Dual-process theories including controlled and automatic evaluations could provide such a basis for future research endeavors in exercise psychology.}, language = {en} } @phdthesis{Wichitsanguan2016, author = {Wichitsa-nguan, Korakot}, title = {Modifications and extensions of the logistic regression and Cox model}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90033}, school = {Universit{\"a}t Potsdam}, pages = {x, 131}, year = {2016}, abstract = {In many statistical applications, the aim is to model the relationship between covariates and some outcomes. A choice of the appropriate model depends on the outcome and the research objectives, such as linear models for continuous outcomes, logistic models for binary outcomes and the Cox model for time-to-event data. In epidemiological, medical, biological, societal and economic studies, the logistic regression is widely used to describe the relationship between a response variable as binary outcome and explanatory variables as a set of covariates. However, epidemiologic cohort studies are quite expensive regarding data management since following up a large number of individuals takes long time. Therefore, the case-cohort design is applied to reduce cost and time for data collection. The case-cohort sampling collects a small random sample from the entire cohort, which is called subcohort. The advantage of this design is that the covariate and follow-up data are recorded only on the subcohort and all cases (all members of the cohort who develop the event of interest during the follow-up process). In this thesis, we investigate the estimation in the logistic model for case-cohort design. First, a model with a binary response and a binary covariate is considered. The maximum likelihood estimator (MLE) is described and its asymptotic properties are established. An estimator for the asymptotic variance of the estimator based on the maximum likelihood approach is proposed; this estimator differs slightly from the estimator introduced by Prentice (1986). Simulation results for several proportions of the subcohort show that the proposed estimator gives lower empirical bias and empirical variance than Prentice's estimator. Then the MLE in the logistic regression with discrete covariate under case-cohort design is studied. Here the approach of the binary covariate model is extended. Proving asymptotic normality of estimators, standard errors for the estimators can be derived. The simulation study demonstrates the estimation procedure of the logistic regression model with a one-dimensional discrete covariate. Simulation results for several proportions of the subcohort and different choices of the underlying parameters indicate that the estimator developed here performs reasonably well. Moreover, the comparison between theoretical values and simulation results of the asymptotic variance of estimator is presented. Clearly, the logistic regression is sufficient for the binary outcome refers to be available for all subjects and for a fixed time interval. Nevertheless, in practice, the observations in clinical trials are frequently collected for different time periods and subjects may drop out or relapse from other causes during follow-up. Hence, the logistic regression is not appropriate for incomplete follow-up data; for example, an individual drops out of the study before the end of data collection or an individual has not occurred the event of interest for the duration of the study. These observations are called censored observations. The survival analysis is necessary to solve these problems. Moreover, the time to the occurence of the event of interest is taken into account. The Cox model has been widely used in survival analysis, which can effectively handle the censored data. Cox (1972) proposed the model which is focused on the hazard function. The Cox model is assumed to be λ(t|x) = λ0(t) exp(β^Tx) where λ0(t) is an unspecified baseline hazard at time t and X is the vector of covariates, β is a p-dimensional vector of coefficient. In this thesis, the Cox model is considered under the view point of experimental design. The estimability of the parameter β0 in the Cox model, where β0 denotes the true value of β, and the choice of optimal covariates are investigated. We give new representations of the observed information matrix In(β) and extend results for the Cox model of Andersen and Gill (1982). In this way conditions for the estimability of β0 are formulated. Under some regularity conditions, ∑ is the inverse of the asymptotic variance matrix of the MPLE of β0 in the Cox model and then some properties of the asymptotic variance matrix of the MPLE are highlighted. Based on the results of asymptotic estimability, the calculation of local optimal covariates is considered and shown in examples. In a sensitivity analysis, the efficiency of given covariates is calculated. For neighborhoods of the exponential models, the efficiencies have then been found. It is appeared that for fixed parameters β0, the efficiencies do not change very much for different baseline hazard functions. Some proposals for applicable optimal covariates and a calculation procedure for finding optimal covariates are discussed. Furthermore, the extension of the Cox model where time-dependent coefficient are allowed, is investigated. In this situation, the maximum local partial likelihood estimator for estimating the coefficient function β(·) is described. Based on this estimator, we formulate a new test procedure for testing, whether a one-dimensional coefficient function β(·) has a prespecified parametric form, say β(·; ϑ). The score function derived from the local constant partial likelihood function at d distinct grid points is considered. It is shown that the distribution of the properly standardized quadratic form of this d-dimensional vector under the null hypothesis tends to a Chi-squared distribution. Moreover, the limit statement remains true when replacing the unknown ϑ0 by the MPLE in the hypothetical model and an asymptotic α-test is given by the quantiles or p-values of the limiting Chi-squared distribution. Finally, we propose a bootstrap version of this test. The bootstrap test is only defined for the special case of testing whether the coefficient function is constant. A simulation study illustrates the behavior of the bootstrap test under the null hypothesis and a special alternative. It gives quite good results for the chosen underlying model. References P. K. Andersen and R. D. Gill. Cox's regression model for counting processes: a large samplestudy. Ann. Statist., 10(4):1100{1120, 1982. D. R. Cox. Regression models and life-tables. J. Roy. Statist. Soc. Ser. B, 34:187{220, 1972. R. L. Prentice. A case-cohort design for epidemiologic cohort studies and disease prevention trials. Biometrika, 73(1):1{11, 1986.}, language = {en} } @phdthesis{Wittenberg2016, author = {Wittenberg, Eva}, title = {With Light Verb Constructions from Syntax to Concepts}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-329-9}, issn = {2190-4545}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82361}, school = {Universit{\"a}t Potsdam}, pages = {ii, 139}, year = {2016}, abstract = {This dissertation uses a common grammatical phenomenon, light verb constructions (LVCs) in English and German, to investigate how syntax-semantics mapping defaults influence the relationships between language processing, representation and conceptualization. LVCs are analyzed as a phenomenon of mismatch in the argument structure. The processing implication of this mismatch are experimentally investigated, using ERPs and a dual task. Data from these experiments point to an increase in working memory. Representational questions are investigated using structural priming. Data from this study suggest that while the syntax of LVCs is not different from other structures', the semantics and mapping are represented differently. This hypothesis is tested with a new categorization paradigm, which reveals that the conceptual structure that LVC evoke differ in interesting, and predictable, ways from non-mismatching structures'.}, language = {en} } @phdthesis{Kuehn2016, author = {K{\"u}hn, Jane}, title = {Functionally-driven language change}, doi = {10.25932/publishup-42207}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-422079}, school = {Universit{\"a}t Potsdam}, pages = {369}, year = {2016}, abstract = {Since the 1960ies, Germany has been host to a large Turkish immigrant community. While migrant communities often shift to the majority language over the course of time, Turkish is a very vital minority language in Germany and bilingualism in this community is an obvious fact which has been subject to several studies. The main focus usually is on German, the second language (L2) of these speakers (e.g. Hinnenkamp 2000, Keim 2001, Auer 2003, Cindark \& Aslan (2004), Kern \& Selting 2006, Selting 2009, Kern 2013). Research on the Turkish spoken by Turkish bilinguals has also attracted attention although to a lesser extend mainly in the framework of so called heritage language research (cf. Polinski 2011). Bilingual Turkish has been investigated under the perspective of code-switching and codemixing (e.g. Kallmeyer \& Keim 2003, Keim 2003, 2004, Keim \& Cindark 2003, Hinnenkamp 2003, 2005, 2008, Dirim \& Auer 2004), and with respect to changes in the morphologic, the syntactic and the orthographic system (e.g. Rehbein \& Karako{\c{c}} 2004, Schroeder 2007). Attention to the changes in the prosodic system of bilingual Turkish on the other side has been exceptional so far (Queen 2001, 2006). With the present dissertation, I provide a study on contact induced linguistic changes on the prosodic level in the Turkish heritage language of adult early German-Turkish bilinguals. It describes structural changes in the L1 Turkish intonation of yes/no questions of a representative sample of bilingual Turkish speakers. All speakers share a similar sociolinguistic background. All acquired Turkish as their first language from their families and the majority language German as an early L2 at latest in the kinder garden by the age of 3. A study of changes in bilingual varieties requires a previous cross-linguistic comparison of both of the involved languages in language contact in order to draw conclusions on the contact-induced language change in delimitation to language-internal development. While German is one of the best investigated languages with respect to its prosodic system, research on Turkish intonational phonology is not as progressed. To this effect, the analysis of bilingual Turkish, as elicited for the present dissertation, is preceded by an experimental study on monolingual Turkish. In this regard an additional experiment with 11 monolingual university students of non-linguistic subjects was conducted at the Ege University in Izmir in 2013. On these grounds the present dissertation additionally contributes new insights with respect to Turkish intonational phonology and typology. The results of the contrastive analysis of German and Turkish bring to light that the prosodic systems of both languages differ with respect to the use of prosodic cues in the marking of information structure (IS) and sentence type. Whereas German distinguishes in the prosodic marking between explicit categories for focus and givenness, Turkish uses only one prosodic cue to mark IS. Furthermore it is shown that Turkish in contrast to German does not use a prosodic correlate to mark yes/no questions, but a morphological question marker. To elicit Turkish yes/no questions in a bilingual context which differ with respect to their information structure in a further step the methodology of Xu (1999) to elicit in-situ focus on different constituents was adapted in the experimental study. A data set of 400 Turkish yes/no questions of 20 bilingual Turkish speakers was compiled at the Zentrum f{\"u}r Allgemeine Sprachwissenschaft (ZAS) in Berlin and at the University of Potsdam in 2013. The prosodic structure of the yes/no questions was phonologically and phonetically analyzed with respect to changes in the f0 contour according to IS modifications and the use of prosodic cues to indicate sentence type. The results of the analyses contribute surprising observations to the research of bilingual prosody. Studies on bilingual language change and language acquisition have repeatedly shown that the use of prosodic features that are considered as marked by means of lower and implicational use across and within a language cause difficulties in language contact and second language acquisition. Especially, they are not expected to pass from one language to another through language contact. However, this structurally determined expectation on language development is refuted by the results of the present study. Functionally related prosody, such as the cues to indicate IS, are transferred from German L2 to the Turkish L1 of German-Turkish bilingual speakers. This astonishing observation provides the base for an approach to language change centered on functional motivation. Based on Matras' (2007, 2010) assumption of functionality in language change, Paradis' (1993, 2004, 2008) approach of Language Activation and the Subsystem Theory and the Theory of Language as a Dynamic System (Heredina \& Jessner 2002), it will be shown that prosodic features which are absent in one of the languages of bilingual speech communities are transferred from the respective language to the other when they contribute to the contextualization of a pragmatic concept which is not expressed by other linguistic means in the target language. To this effect language interaction is based on language activation and inhibition mechanisms dealing with differences in the implicit pragmatic knowledge between bilinguals and monolinguals. The motivator for this process of language change is the contextualization of the message itself and not the structure of the respective feature on the surface. It is shown that structural consideration may influence language change but that bilingual language change does not depend on structural restrictions nor does the structure cause a change. The conclusions drawn on the basis of empirical facts can especially contribute to a better understanding of the processes of bilingual language development as it combines methodologies and theoretical aspects of different linguistic subfields.}, language = {en} } @phdthesis{Dick2016, author = {Dick, Uwe}, title = {Discriminative Classification Models for Internet Security}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102593}, school = {Universit{\"a}t Potsdam}, pages = {x, 57}, year = {2016}, abstract = {Services that operate over the Internet are under constant threat of being exposed to fraudulent use. Maintaining good user experience for legitimate users often requires the classification of entities as malicious or legitimate in order to initiate countermeasures. As an example, inbound email spam filters decide for spam or non-spam. They can base their decision on both the content of each email as well as on features that summarize prior emails received from the sending server. In general, discriminative classification methods learn to distinguish positive from negative entities. Each decision for a label may be based on features of the entity and related entities. When labels of related entities have strong interdependencies---as can be assumed e.g. for emails being delivered by the same user---classification decisions should not be made independently and dependencies should be modeled in the decision function. This thesis addresses the formulation of discriminative classification problems that are tailored for the specific demands of the following three Internet security applications. Theoretical and algorithmic solutions are devised to protect an email service against flooding of user inboxes, to mitigate abusive usage of outbound email servers, and to protect web servers against distributed denial of service attacks. In the application of filtering an inbound email stream for unsolicited emails, utilizing features that go beyond each individual email's content can be valuable. Information about each sending mail server can be aggregated over time and may help in identifying unwanted emails. However, while this information will be available to the deployed email filter, some parts of the training data that are compiled by third party providers may not contain this information. The missing features have to be estimated at training time in order to learn a classification model. In this thesis an algorithm is derived that learns a decision function that integrates over a distribution of values for each missing entry. The distribution of missing values is a free parameter that is optimized to learn an optimal decision function. The outbound stream of emails of an email service provider can be separated by the customer IDs that ask for delivery. All emails that are sent by the same ID in the same period of time are related, both in content and in label. Hijacked customer accounts may send batches of unsolicited emails to other email providers, which in turn might blacklist the sender's email servers after detection of incoming spam emails. The risk of being blocked from further delivery depends on the rate of outgoing unwanted emails and the duration of high spam sending rates. An optimization problem is developed that minimizes the expected cost for the email provider by learning a decision function that assigns a limit on the sending rate to customers based on the each customer's email stream. Identifying attacking IPs during HTTP-level DDoS attacks allows to block those IPs from further accessing the web servers. DDoS attacks are usually carried out by infected clients that are members of the same botnet and show similar traffic patterns. HTTP-level attacks aim at exhausting one or more resources of the web server infrastructure, such as CPU time. If the joint set of attackers cannot increase resource usage close to the maximum capacity, no effect will be experienced by legitimate users of hosted web sites. However, if the additional load raises the computational burden towards the critical range, user experience will degrade until service may be unavailable altogether. As the loss of missing one attacker depends on block decisions for other attackers---if most other attackers are detected, not blocking one client will likely not be harmful---a structured output model has to be learned. In this thesis an algorithm is developed that learns a structured prediction decoder that searches the space of label assignments, guided by a policy. Each model is evaluated on real-world data and is compared to reference methods. The results show that modeling each classification problem according to the specific demands of the task improves performance over solutions that do not consider the constraints inherent to an application.}, language = {en} } @phdthesis{Samaras2016, author = {Samaras, Stefanos}, title = {Microphysical retrieval of non-spherical aerosol particles using regularized inversion of multi-wavelength lidar data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396528}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 190}, year = {2016}, abstract = {Numerous reports of relatively rapid climate changes over the past century make a clear case of the impact of aerosols and clouds, identified as sources of largest uncertainty in climate projections. Earth's radiation balance is altered by aerosols depending on their size, morphology and chemical composition. Competing effects in the atmosphere can be further studied by investigating the evolution of aerosol microphysical properties, which are the focus of the present work. The aerosol size distribution, the refractive index, and the single scattering albedo are commonly used such properties linked to aerosol type, and radiative forcing. Highly advanced lidars (light detection and ranging) have reduced aerosol monitoring and optical profiling into a routine process. Lidar data have been widely used to retrieve the size distribution through the inversion of the so-called Lorenz-Mie model (LMM). This model offers a reasonable treatment for spherically approximated particles, it no longer provides, though, a viable description for other naturally occurring arbitrarily shaped particles, such as dust particles. On the other hand, non-spherical geometries as simple as spheroids reproduce certain optical properties with enhanced accuracy. Motivated by this, we adapt the LMM to accommodate the spheroid-particle approximation introducing the notion of a two-dimensional (2D) shape-size distribution. Inverting only a few optical data points to retrieve the shape-size distribution is classified as a non-linear ill-posed problem. A brief mathematical analysis is presented which reveals the inherent tendency towards highly oscillatory solutions, explores the available options for a generalized solution through regularization methods and quantifies the ill-posedness. The latter will improve our understanding on the main cause fomenting instability in the produced solution spaces. The new approach facilitates the exploitation of additional lidar data points from depolarization measurements, associated with particle non-sphericity. However, the generalization of LMM vastly increases the complexity of the problem. The underlying theory for the calculation of the involved optical cross sections (T-matrix theory) is computationally so costly, that would limit a retrieval analysis to an unpractical point. Moreover the discretization of the model equation by a 2D collocation method, proposed in this work, involves double integrations which are further time consuming. We overcome these difficulties by using precalculated databases and a sophisticated retrieval software (SphInX: Spheroidal Inversion eXperiments) especially developed for our purposes, capable of performing multiple-dataset inversions and producing a wide range of microphysical retrieval outputs. Hybrid regularization in conjunction with minimization processes is used as a basis for our algorithms. Synthetic data retrievals are performed simulating various atmospheric scenarios in order to test the efficiency of different regularization methods. The gap in contemporary literature in providing full sets of uncertainties in a wide variety of numerical instances is of major concern here. For this, the most appropriate methods are identified through a thorough analysis on an overall-behavior basis regarding accuracy and stability. The general trend of the initial size distributions is captured in our numerical experiments and the reconstruction quality depends on data error level. Moreover, the need for more or less depolarization points is explored for the first time from the point of view of the microphysical retrieval. Finally, our approach is tested in various measurement cases giving further insight for future algorithm improvements.}, language = {en} } @phdthesis{Synodinos2016, author = {Synodinos, Alexios D.}, title = {Savanna dynamics under extreme conditions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-395000}, school = {Universit{\"a}t Potsdam}, pages = {x, 168}, year = {2016}, abstract = {Savannas cover a broad geographical range across continents and are a biome best described by a mix of herbaceous and woody plants. The former create a more or less continuous layer while the latter should be sparse enough to leave an open canopy. What has long intrigued ecologists is how these two competing plant life forms of vegetation coexist. Initially attributed to resource competition, coexistence was considered the stable outcome of a root niche differentiation between trees and grasses. The importance of environmental factors became evident later, when data from moister environments demonstrated that tree cover was often lower than what the rainfall conditions would allow for. Our current understanding relies on the interaction of competition and disturbances in space and time. Hence, the influence of grazing and fire and the corresponding feedbacks they generate have been keenly investigated. Grazing removes grass cover, initiating a self-reinforcing process propagating tree cover expansion. This is known as the encroachment phenomenon. Fire, on the other hand, imposes a bottleneck on the tree population by halting the recruitment of young trees into adulthood. Since grasses fuel fires, a feedback linking grazing, grass cover, fire, and tree cover is created. In African savannas, which are the focus of this dissertation, these feedbacks play a major role in the dynamics. The importance of these feedbacks came into sharp focus when the notion of alternative states began to be applied to savannas. Alternative states in ecology arise when different states of an ecosystem can occur under the same conditions. According to this an open savanna and a tree-dominated savanna can be classified as alternative states, since they can both occur under the same climatic conditions. The aforementioned feedbacks are critical in the creation of alternative states. The grass-fire feedback can preserve an open canopy as long as fire intensity and frequency remain above a certain threshold. Conversely, crossing a grazing threshold can force an open savanna to shift to a tree-dominated state. Critically, transitions between such alternative states can produce hysteresis, where a return to pre-transition conditions will not suffice to restore the ecosystem to its original state. In the chapters that follow, I will cover aspects relating to the coexistence mechanisms and the role of feedbacks in tree-grass interactions. Coming back to the coexistence question, due to the overwhelming focus on competition and disturbance another important ecological process was neglected: facilitation. Therefore, in the first study within this dissertation I examine how facilitation can expand the tree-grass coexistence range into drier conditions. For the second study I focus on another aspect of savanna dynamics which remains underrepresented in the literature: the impacts of inter-annual rainfall variability upon savanna trees and the resilience of the savanna state. In the third and final study within this dissertation I approach the well-researched encroachment phenomenon from a new perspective: I search for an early warning indicator of the process to be used as a prevention tool for savanna conservation. In order to perform all this work I developed a mathematical ecohydrological model of Ordinary Differential Equations (ODEs) with three variables: soil moisture content, grass cover and tree cover. Facilitation: Results showed that the removal of grass cover through grazing was detrimental to trees under arid conditions, contrary to expectation based on resource competition. The reason was that grasses preserved moisture in the soil through infiltration and shading, thus ameliorating the harsh conditions for trees in accordance with the Stress Gradient Hypothesis. The exclusion of grasses from the model further demonstrated this: tree cover was lower in the absence of grasses, indicating that the benefits of grass facilitation outweighed the costs of grass competition for trees. Thus, facilitation expanded the climatic range where savannas persisted into drier conditions. Rainfall variability: By adjusting the model to current rainfall patterns in East Africa, I simulated conditions of increasing inter-annual rainfall variability for two distinct mean rainfall scenarios: semi-arid and mesic. Alternative states of tree-less grassland and tree-dominated savanna emerged in both cases. Increasing variability reduced semi-arid savanna tree cover to the point that at high variability the savanna state was eliminated, because variability intensified resource competition and strengthened the fire disturbance during high rainfall years. Mesic savannas, on the other hand, became more resilient along the variability gradient: increasing rainfall variability created more opportunities for the rapid growth of trees to overcome the fire disturbance, boosting the chances of savannas persisting and thus increasing mesic savanna resilience. Preventing encroachment: The breakdown in the grass-fire feedback caused by heavy grazing promoted the expansion of woody cover. This could be irreversible due to the presence of alternative states of encroached and open savanna, which I found along a simulated grazing gradient. When I simulated different short term heavy grazing treatments followed by a reduction to the original grazing conditions, certain cases converged to the encroached state. Utilising woody cover changes only during the heavy grazing treatment, I developed an early warning indicator which identified these cases with a high risk of such hysteresis and successfully distinguished them from those with a low risk. Furthermore, after validating the indicator on encroachment data, I demonstrated that it appeared early enough for encroachment to be prevented through realistic grazing-reduction treatments. Though this dissertation is rooted in the theory of savanna dynamics, its results can have significant applications in savanna conservation. Facilitation has only recently become a topic of interest within savanna literature. Given the threat of increasing droughts and a general anticipation of drier conditions in parts of Africa, insights stemming from this research may provide clues for preserving arid savannas. The impacts of rainfall variability on savannas have not yet been thoroughly studied, either. Conflicting results appear as a result of the lack of a robust theoretical understanding of plant interactions under variable conditions. . My work and other recent studies argue that such conditions may increase the importance of fast resource acquisition creating a 'temporal niche'. Woody encroachment has been extensively studied as phenomenon, though not from the perspective of its early identification and prevention. The development of an encroachment forecasting tool, as the one presented in this work, could protect both the savanna biome and societies dependent upon it for (economic) survival. All studies which follow are bound by the attempt to broaden the horizons of savanna-related research in order to deal with extreme conditions and phenomena; be it through the enhancement of the coexistence debate or the study of an imminent external threat or the development of a management-oriented tool for the conservation of savannas.}, language = {en} } @phdthesis{Ata2016, author = {Ata, Metin}, title = {Phase-space reconstructions of cosmic velocities and the cosmic web}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403565}, school = {Universit{\"a}t Potsdam}, pages = {xi, 155}, year = {2016}, abstract = {In the current paradigm of cosmology, the formation of large-scale structures is mainly driven by non-radiating dark matter, making up the dominant part of the matter budget of the Universe. Cosmological observations however, rely on the detection of luminous galaxies, which are biased tracers of the underlying dark matter. In this thesis I present cosmological reconstructions of both, the dark matter density field that forms the cosmic web, and cosmic velocities, for which both aspects of my work are delved into, the theoretical formalism and the results of its applications to cosmological simulations and also to a galaxy redshift survey.The foundation of our method is relying on a statistical approach, in which a given galaxy catalogue is interpreted as a biased realization of the underlying dark matter density field. The inference is computationally performed on a mesh grid by sampling from a probability density function, which describes the joint posterior distribution of matter density and the three dimensional velocity field. The statistical background of our method is described in Chapter "Implementation of argo", where the introduction in sampling methods is given, paying special attention to Markov Chain Monte-Carlo techniques. In Chapter "Phase-Space Reconstructions with N-body Simulations", I introduce and implement a novel biasing scheme to relate the galaxy number density to the underlying dark matter, which I decompose into a deterministic part, described by a non-linear and scale-dependent analytic expression, and a stochastic part, by presenting a negative binomial (NB) likelihood function that models deviations from Poissonity. Both bias components had already been studied theoretically, but were so far never tested in a reconstruction algorithm. I test these new contributions againstN-body simulations to quantify improvements and show that, compared to state-of-the-art methods, the stochastic bias is inevitable at wave numbers of k≥0.15h Mpc^-1 in the power spectrum in order to obtain unbiased results from the reconstructions. In the second part of Chapter "Phase-Space Reconstructions with N-body Simulations" I describe and validate our approach to infer the three dimensional cosmic velocity field jointly with the dark matter density. I use linear perturbation theory for the large-scale bulk flows and a dispersion term to model virialized galaxy motions, showing that our method is accurately recovering the real-space positions of the redshift-space distorted galaxies. I analyze the results with the isotropic and also the two-dimensional power spectrum.Finally, in Chapter "Phase-space Reconstructions with Galaxy Redshift Surveys", I show how I combine all findings and results and apply the method to the CMASS (for Constant (stellar) Mass) galaxy catalogue of the Baryon Oscillation Spectroscopic Survey (BOSS). I describe how our method is accounting for the observational selection effects inside our reconstruction algorithm. Also, I demonstrate that the renormalization of the prior distribution function is mandatory to account for higher order contributions in the structure formation model, and finally a redshift-dependent bias factor is theoretically motivated and implemented into our method. The various refinements yield unbiased results of the dark matter until scales of k≤0.2 h Mpc^-1in the power spectrum and isotropize the galaxy catalogue down to distances of r∼20h^-1 Mpc in the correlation function. We further test the results of our cosmic velocity field reconstruction by comparing them to a synthetic mock galaxy catalogue, finding a strong correlation between the mock and the reconstructed velocities. The applications of both, the density field without redshift-space distortions, and the velocity reconstructions, are very broad and can be used for improved analyses of the baryonic acoustic oscillations, environmental studies of the cosmic web, the kinematic Sunyaev-Zel'dovic or integrated Sachs-Wolfe effect.}, language = {en} } @phdthesis{Mey2016, author = {Mey, J{\"u}rgen}, title = {Intermontane valley fills}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103158}, school = {Universit{\"a}t Potsdam}, pages = {xii, 111}, year = {2016}, abstract = {Sedimentary valley fills are a widespread characteristic of mountain belts around the world. They transiently store material over time spans ranging from thousands to millions of years and therefore play an important role in modulating the sediment flux from the orogen to the foreland and to oceanic depocenters. In most cases, their formation can be attributed to specific fluvial conditions, which are closely related to climatic and tectonic processes. Hence, valley-fill deposits constitute valuable archives that offer fundamental insight into landscape evolution, and their study may help to assess the impact of future climate change on sediment dynamics. In this thesis I analyzed intermontane valley-fill deposits to constrain different aspects of the climatic and tectonic history of mountain belts over multiple timescales. First, I developed a method to estimate the thickness distribution of valley fills using artificial neural networks (ANNs). Based on the assumption of geometrical similarity between exposed and buried parts of the landscape, this novel and highly automated technique allows reconstructing fill thickness and bedrock topography on the scale of catchments to entire mountain belts. Second, I used the new method for estimating the spatial distribution of post-glacial sediments that are stored in the entire European Alps. A comparison with data from exploratory drillings and from geophysical surveys revealed that the model reproduces the measurements with a root mean squared error (RMSE) of 70m and a coefficient of determination (R2) of 0.81. I used the derived sediment thickness estimates in combination with a model of the Last Glacial Maximum (LGM) icecap to infer the lithospheric response to deglaciation, erosion and deposition, and deduce their relative contribution to the present-day rock-uplift rate. For a range of different lithospheric and upper mantle-material properties, the results suggest that the long-wavelength uplift signal can be explained by glacial isostatic adjustment with a small erosional contribution and a substantial but localized tectonic component exceeding 50\% in parts of the Eastern Alps and in the Swiss Rh{\^o}ne Valley. Furthermore, this study reveals the particular importance of deconvolving the potential components of rock uplift when interpreting recent movements along active orogens and how this can be used to constrain physical properties of the Earth's interior. In a third study, I used the ANN approach to estimate the sediment thickness of alluviated reaches of the Yarlung Tsangpo River, upstream of the rapidly uplifting Namche Barwa massif. This allowed my colleagues and me to reconstruct the ancient river profile of the Yarlung Tsangpo, and to show that in the past, the river had already been deeply incised into the eastern margin of the Tibetan Plateau. Dating of basal sediments from drill cores that reached the paleo-river bed to 2-2.5 Ma are consistent with mineral cooling ages from the Namche Barwa massif, which indicate initiation of rapid uplift at ~4 Ma. Hence, formation of the Tsangpo gorge and aggradation of the voluminous valley fill was most probably a consequence of rapid uplift of the Namche Barwa massif and thus tectonic activity. The fourth and last study focuses on the interaction of fluvial and glacial processes at the southeastern edge of the Karakoram. Paleo-ice-extent indicators and remnants of a more than 400-m-thick fluvio-lacustrine valley fill point to blockage of the Shyok River, a main tributary of the upper Indus, by the Siachen Glacier, which is the largest glacier in the Karakoram Range. Field observations and 10Be exposure dating attest to a period of recurring lake formation and outburst flooding during the penultimate glaciation prior to ~110 ka. The interaction of Rivers and Glaciers all along the Karakorum is considered a key factor in landscape evolution and presumably promoted headward erosion of the Indus-Shyok drainage system into the western margin of the Tibetan Plateau. The results of this thesis highlight the strong influence of glaciation and tectonics on valley-fill formation and how this has affected the evolution of different mountain belts. In the Alps valley-fill deposition influenced the magnitude and pattern of rock uplift since ice retreat approximately 17,000 years ago. Conversely, the analyzed valley fills in the Himalaya are much older and reflect environmental conditions that prevailed at ~110 ka and ~2.5 Ma, respectively. Thus, the newly developed method has proven useful for inferring the role of sedimentary valley-fill deposits in landscape evolution on timescales ranging from 1,000 to 10,000,000 years.}, language = {en} } @phdthesis{Dey2016, author = {Dey, Saptarshi}, title = {Tectonic and climatic control on the evolution of the Himalayan mountain front}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103390}, school = {Universit{\"a}t Potsdam}, pages = {xii, 118}, year = {2016}, abstract = {Variations in the distribution of mass within an orogen may lead to transient sediment storage, which in turn might affect the state of stress and the level of fault activity. Distinguishing between different forcing mechanisms causing variations of sediment flux and tectonic activity, is therefore one of the most challenging tasks in understanding the spatiotemporal evolution of active mountain belts. The Himalayan mountain belt is one of the most significant Cenozoic collisional mountain belt, formed due to collision between northward-bound Indian Plate and the Eurasian Plate during the last 55-50 Ma. Ongoing convergence of these two tectonic plates is accommodated by faulting and folding within the Himalayan arc-shaped orogen and the continued lateral and vertical growth of the Tibetan Plateau and mountain belts adjacent to the plateau as well as regions farther north. Growth of the Himalayan orogen is manifested by the development of successive south-vergent thrust systems. These thrust systems divide the orogen into different morphotectonic domains. From north to south these thrusts are the Main Central Thrust (MCT), the Main Boundary Thrust (MBT) and the Main Frontal Thrust (MFT). The growing topography interacts with moisture-bearing monsoonal winds, which results in pronounced gradients in rainfall, weathering, erosion and sediment transport toward the foreland and beyond. However, a fraction of this sediment is trapped and transiently stored within the intermontane valleys or 'dun's within the lower-elevation foothills of the range. Improved understanding of the spatiotemporal evolution of these sediment archives could provide a unique opportunity to decipher the triggers of variations in sediment production, delivery and storage in an actively deforming mountain belt and support efforts to test linkages between sediment volumes in intermontane basins and changes in the shallow crustal stress field. As sediment redistribution in mountain belts on timescales of 102-104 years can effect cultural characteristics and infrastructure in the intermontane valleys and may even impact the seismotectonics of a mountain belt, there is a heightened interest in understanding sediment-routing processes and causal relationships between tectonism, climate and topography. It is here at the intersection between tectonic processes and superposed climatic and sedimentary processes in the Himalayan orogenic wedge, where my investigation is focused on. The study area is the intermontane Kangra Basin in the northwestern Sub-Himalaya, because the characteristics of the different Himalayan morphotectonic provinces are well developed, the area is part of a region strongly influenced by monsoonal forcing, and the existence of numerous fluvial terraces provides excellent strain markers to assess deformation processes within the Himalayan orogenic wedge. In addition, being located in front of the Dhauladhar Range the region is characterized by pronounced gradients in past and present-day erosion and sediment processes associated with repeatedly changing climatic conditions. In light of these conditions I analysed climate-driven late Pleistocene-Holocene sediment cycles in this tectonically active region, which may be responsible for triggering the tectonic re-organization within the Himalayan orogenic wedge, leading to out-of-sequence thrusting, at least since early Holocene. The Kangra Basin is bounded by the MBT and the Sub-Himalayan Jwalamukhi Thrust (JMT) in the north and south, respectively and transiently stores sediments derived from the Dhauladhar Range. The Basin contains ~200-m-thick conglomerates reflecting two distinct aggradation phases; following aggradation, several fluvial terraces were sculpted into these fan deposits. 10Be CRN surface exposure dating of these terrace levels provides an age of 53.4±3.2 ka for the highest-preserved terrace (AF1); subsequently, this surface was incised until ~15 ka, when the second fan (AF2) began to form. AF2 fan aggradation was superseded by episodic Holocene incision, creating at least four terrace levels. We find a correlation between variations in sediment transport and ∂18O records from regions affected by the Indian Summer Monsoon (ISM). During strengthened ISMs sand post-LGM glacial retreat, aggradation occurred in the Kangra Basin, likely due to high sediment flux, whereas periods of a weakened ISM coupled with lower sediment supply coincided with renewed re-incision. However, the evolution of fluvial terraces along Sub-Himalayan streams in the Kangra sector is also forced by tectonic processes. Back-tilted, folded terraces clearly document tectonic activity of the JMT. Offset of one of the terrace levels indicates a shortening rate of 5.6±0.8 to 7.5±1.0 mm.a-1 over the last ~10 ka. Importantly, my study reveals that late Pleistocene/Holocene out-of-sequence thrusting accommodates 40-60\% of the total 14±2 mm.a-1 shortening partitioned throughout the Sub-Himalaya. Importantly, the JMT records shortening at a lower rate over longer timescales hints towards out-of-sequence activity within the Sub-Himalaya. Re-activation of the JMT could be related to changes in the tectonic stress field caused by large-scale sediment removal from the basin. I speculate that the deformation processes of the Sub-Himalaya behave according to the predictions of critical wedge model and assume the following: While >200m of sediment aggradation would trigger foreland-ward propagation of the deformation front, re-incision and removal of most of the stored sediments (nearly 80-85\% of the optimum basin-fill) would again create a sub-critical condition of the wedge taper and trigger the retreat of the deformation front. While tectonism is responsible for the longer-term processes of erosion associated with steepening hillslopes, sediment cycles in this environment are mainly the result of climatic forcing. My new 10Be cosmogenic nuclide exposure dates and a synopsis of previous studies show the late Pleistocene to Holocene alluvial fills and fluvial terraces studied here record periodic fluctuations of sediment supply and transport capacity on timescales of 1000-100000 years. To further evaluate the potential influence of climate change on these fluctuations, I compared the timing of aggradation and incision phases recorded within remnant alluvial fans and terraces with continental climate archives such as speleothems in neighboring regions affected by monsoonal precipitation. Together with previously published OSL ages yielding the timing of aggradation, I find a correlation between variations in sediment transport with oxygen-isotope records from regions affected by the Indian Summer Monsoon (ISM). Accordingly, during periods of increased monsoon intensity (transitions from dry and cold to wet and warm periods - MIS4 to MIS3 and MIS2 to MIS1) (MIS=marine isotope stage) and post-Last Glacial Maximum glacial retreat, aggradation occurred in the Kangra Basin, likely due to high sediment flux. Conversely, periods of weakened monsoon intensity or lower sediment supply coincide with re-incision of the existing basin-fill. Finally, my study entails part of a low-temperature thermochronology study to assess the youngest exhumation history of the Dhauladhar Range. Zircon helium (ZHe) ages and existing low-temperature data sets (ZHe, apatite fission track (AFT)) across this range, together with 3D thermokinematic modeling (PECUBE) reveals constraints on exhumation and activity of the range-bounding Main Boundary Thrust (MBT) since at least mid-Miocene time. The modeling results indicate mean slip rates on the MBT-fault ramp of ~2 - 3 mm.a-1 since its activation. This has lead to the growth of the >5-km-high frontal Dhauladhar Range and continuous deep-seated exhumation and erosion. The obtained results also provide interesting constraints of deformation patterns and their variation along strike. The results point towards the absence of the time-transient 'mid-crustal ramp' in the basal decollement and duplexing of the Lesser Himalayan sequence, unlike the nearby regions or even the central Nepal domain. A fraction of convergence (~10-15\%) is accommodated along the deep-seated MBT-ramp, most likely merging into the MHT. This finding is crucial for a rigorous assessment of the overall level of tectonic activity in the Himalayan morphotectonic provinces as it contradicts recently-published geodetic shortening estimates. In these studies, it has been proposed that the total Himalayan shortening in the NW Himalaya is accommodated within the Sub-Himalaya whereas no tectonic activity is assigned to the MBT.}, language = {en} } @phdthesis{Olen2016, author = {Olen, Stephanie M.}, title = {Understanding Himalayan denudation at the catchment and orogen scale}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91423}, school = {Universit{\"a}t Potsdam}, pages = {xx, 174}, year = {2016}, abstract = {Understanding the rates and processes of denudation is key to unraveling the dynamic processes that shape active orogens. This includes decoding the roles of tectonic and climate-driven processes in the long-term evolution of high- mountain landscapes in regions with pronounced tectonic activity and steep climatic and surface-process gradients. Well-constrained denudation rates can be used to address a wide range of geologic problems. In steady-state landscapes, denudation rates are argued to be proportional to tectonic or isostatic uplift rates and provide valuable insight into the tectonic regimes underlying surface denudation. The use of denudation rates based on terrestrial cosmogenic nuclide (TCN) such as 10Beryllium has become a widely-used method to quantify catchment-mean denudation rates. Because such measurements are averaged over timescales of 102 to 105 years, they are not as susceptible to stochastic changes as shorter-term denudation rate estimates (e.g., from suspended sediment measurements) and are therefore considered more reliable for a comparison to long-term processes that operate on geologic timescales. However, the impact of various climatic, biotic, and surface processes on 10Be concentrations and the resultant denudation rates remains unclear and is subject to ongoing discussion. In this thesis, I explore the interaction of climate, the biosphere, topography, and geology in forcing and modulating denudation rates on catchment to orogen scales. There are many processes in highly dynamic active orogens that may effect 10Be concentrations in modern river sands and therefore impact 10Be-derived denudation rates. The calculation of denudation rates from 10Be concentrations, however, requires a suite of simplifying assumptions that may not be valid or applicable in many orogens. I investigate how these processes affect 10Be concentrations in the Arun Valley of Eastern Nepal using 34 new 10Be measurements from the main stem Arun River and its tributaries. The Arun Valley is characterized by steep gradients in climate and topography, with elevations ranging from <100 m asl in the foreland basin to >8,000 asl in the high sectors to the north. This is coupled with a five-fold increase in mean annual rainfall across strike of the orogen. Denudation rates from tributary samples increase toward the core of the orogen, from <0.2 to >5 mm/yr from the Lesser to Higher Himalaya. Very high denudation rates (>2 mm/yr), however, are likely the result of 10Be TCN dilution by surface and climatic processes, such as large landsliding and glaciation, and thus may not be representative of long-term denudation rates. Mainstem Arun denudation rates increase downstream from ~0.2 mm/yr at the border with Tibet to 0.91 mm/yr at its outlet into the Sapt Kosi. However, the downstream 10Be concentrations may not be representative of the entire upstream catchment. Instead, I document evidence for downstream fining of grains from the Tibetan Plateau, resulting in an order-of-magnitude apparent decrease in the measured 10Be concentration. In the Arun Valley and across the Himalaya, topography, climate, and vegetation are strongly interrelated. The observed increase in denudation rates at the transition from the Lesser to Higher Himalaya corresponds to abrupt increases in elevation, hillslope gradient, and mean annual rainfall. Thus, across strike (N-S), it is difficult to decipher the potential impacts of climate and vegetation cover on denudation rates. To further evaluate these relationships I instead took advantage of an along-strike west-to-east increase of mean annual rainfall and vegetation density in the Himalaya. An analysis of 136 published 10Be denudation rates from along strike of the revealed that median denudation rates do not vary considerably along strike of the Himalaya, ~1500 km E-W. However, the range of denudation rates generally decreases from west to east, with more variable denudation rates in the northwestern regions of the orogen than in the eastern regions. This denudation rate variability decreases as vegetation density increases (R=- 0.90), and increases proportionately to the annual seasonality of vegetation (R=0.99). Moreover, rainfall and vegetation modulate the relationship between topographic steepness and denudation rates such that in the wet, densely vegetated regions of the Himalaya, topography responds more linearly to changes in denudation rates than in dry, sparsely vegetated regions, where the response of topographic steepness to denudation rates is highly nonlinear. Understanding the relationships between denudation rates, topography, and climate is also critical for interpreting sedimentary archives. However, there is a lack of understanding of how terrestrial organic matter is transported out of orogens and into sedimentary archives. Plant wax lipid biomarkers derived from terrestrial and marine sedimentary records are commonly used as paleo- hydrologic proxy to help elucidate these problems. I address the issue of how to interpret the biomarker record by using the plant wax isotopic composition of modern suspended and riverbank organic matter to identify and quantify organic matter source regions in the Arun Valley. Topographic and geomorphic analysis, provided by the 10Be catchment-mean denudation rates, reveals that a combination of topographic steepness (as a proxy for denudation) and vegetation density is required to capture organic matter sourcing in the Arun River. My studies highlight the importance of a rigorous and careful interpretation of denudation rates in tectonically active orogens that are furthermore characterized by strong climatic and biotic gradients. Unambiguous information about these issues is critical for correctly decoding and interpreting the possible tectonic and climatic forces that drive erosion and denudation, and the manifestation of the erosion products in sedimentary archives.}, language = {en} } @phdthesis{Grewe2016, author = {Grewe, Sina}, title = {Hydro- and biogeochemical investigations of lake sediments in the Kenyan Rift Valley}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98342}, school = {Universit{\"a}t Potsdam}, pages = {110}, year = {2016}, abstract = {Die Seen im kenianischen Riftsystem bieten die einmalige Gelegenheit eine große Bandbreite an hydrochemischen Umweltbedingungen zu studieren, die von S{\"u}ßwasserseen bis hin zu hochsalinen und alkalinen Seen reichen. Da wenig {\"u}ber die hydro- und biogeochemischen Bedingungen in den darunterliegenden Seesedimenten bekannt ist, war es das Ziel dieser Arbeit, bereits existierende Datens{\"a}tze mit Daten aus der Porenwasser- und Biomarker-Analyse zu erweitern. Zus{\"a}tzlich wurden reduzierte Schwefelkomponenten und Sulfatreduktionsraten in den Sedimenten bestimmt. Mit den neu gewonnenen Daten wurde der anthropogene und mikrobielle Einfluss auf die Seesedimente untersucht sowie der Einfluss der Wasserchemie auf den Abbau und den Erhalt von organischem Material im Sediment. Zu den untersuchten Seen geh{\"o}rten: Logipi, Eight (ein kleiner Kratersee in der Region Kangirinyang), Baringo, Bogoria, Naivasha, Oloiden und Sonachi. Die Biomarker-Zusammensetzungen in den untersuchten Seesedimenten waren {\"a}hnlich; allerdings gab es einige Unterschiede zwischen den salinen Seen und den S{\"u}ßwasserseen. Einer dieser Unterschiede war das Vorkommen eines mit β-Carotin verwandten Molek{\"u}ls, das nur in den salinen Seen gefunden wurde. Dieses Molek{\"u}l stammt wahrscheinlich von Cyanobakterien, Einzellern die in großer Anzahl in salinen Seen vorkommen. In den beiden S{\"u}ßwasserseen wurde Stigmasterol gefunden, ein f{\"u}r S{\"u}ßwasseralgen charakteristisches Sterol. In dieser Studie hat sich gezeigt, dass Bogoria und Sonachi f{\"u}r Umweltrekonstruktionen mit Biomarkern besonders gut geeignet sind, da die Abwesenheit von Sauerstoff an deren Seegr{\"u}nden den Abbau von organischem Material verlangsamt. Andere Seen, wie zum Beispiel Naivasha, sind aufgrund des großen anthropogenen Einflusses weniger gut f{\"u}r solche Rekonstruktionen geeignet. Die Biomarker-Analyse bot jedoch die M{\"o}glichkeit, den menschlichen Einfluss auf den See zu studieren. Desweiteren zeigte diese Studie, dass sich Horizonte mit einem hohen Anteil an elementarem Schwefel als temporale Marker nutzen lassen. Diese Horizonte wurden zu einer Zeit abgelagert, als die Wasserpegel sehr niedrig waren. Der Schwefel wurde von Mikroorganismen abgelagert, die zu anoxygener Photosynthese oder Sulfidoxidation f{\"a}hig sind.}, language = {en} } @phdthesis{Castino2016, author = {Castino, Fabiana}, title = {Climate variability and extreme hydro-meteorological events in the Southern Central Andes, NW Argentina}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396815}, school = {Universit{\"a}t Potsdam}, pages = {xi, 144}, year = {2016}, abstract = {Extreme hydro-meteorological events, such as severe droughts or heavy rainstorms, constitute primary manifestations of climate variability and exert a critical impact on the natural environment and human society. This is particularly true for high-mountain areas, such as the eastern flank of the southern Central Andes of NW Argentina, a region impacted by deep convection processes that form the basis of extreme events, often resulting in floods, a variety of mass movements, and hillslope processes. This region is characterized by pronounced E-W gradients in topography, precipitation, and vegetation cover, spanning low to medium-elevation, humid and densely vegetated areas to high-elevation, arid and sparsely vegetated environments. This strong E-W gradient is mirrored by differences in the efficiency of surface processes, which mobilize and transport large amounts of sediment through the fluvial system, from the steep hillslopes to the intermontane basins and further to the foreland. In a highly sensitive high-mountain environment like this, even small changes in the spatiotemporal distribution, magnitude and rates of extreme events may strongly impact environmental conditions, anthropogenic activity, and the well-being of mountain communities and beyond. However, although the NW Argentine Andes comprise the catchments for the La Plata river that traverses one of the most populated and economically relevant areas of South America, there are only few detailed investigations of climate variability and extreme hydro-meteorological events. In this thesis, I focus on deciphering the spatiotemporal variability of rainfall and river discharge, with particular emphasis on extreme hydro-meteorological events in the subtropical southern Central Andes of NW Argentina during the past seven decades. I employ various methods to assess and quantify statistically significant trend patterns of rainfall and river discharge, integrating high-quality daily time series from gauging stations (40 rainfall and 8 river discharge stations) with gridded datasets (CPC-uni and TRMM 3B42 V7), for the period between 1940 and 2015. Evidence for a general intensification of the hydrological cycle at intermediate elevations (~ 0.5 - 3 km asl) at the eastern flank of the southern Central Andes is found both from rainfall and river-discharge time-series analysis during the period from 1940 to 2015. This intensification is associated with the increase of the annual total amount of rainfall and the mean annual discharge. However, most pronounced trends are found at high percentiles, i.e. extreme hydro-meteorological events, particularly during the wet season from December to February.An important outcome of my studies is the recognition of a rapid increase in the amount of river discharge during the period between 1971 and 1977, most likely linked to the 1976-77 global climate shift, which is associated with the North Pacific Ocean sea surface temperature variability. Interestingly, after this rapid increase, both rainfall and river discharge decreased at low and intermediate elevations along the eastern flank of the Andes. In contrast, during the same time interval, at high elevations, extensive areas on the arid Puna de Atacama plateau have recorded increasing annual rainfall totals. This has been associated with more intense extreme hydro-meteorological events from 1979 to 2014. This part of the study reveals that low-, intermediate, and high-elevation sectors in the Andes of NW Argentina respond differently to changing climate conditions. Possible forcing mechanisms of the pronounced hydro-meteorological variability observed in the study area are also investigated. For the period between 1940 and 2015, I analyzed modes of oscillation of river discharge from small to medium drainage basins (102 to 104 km2), located on the eastern flank of the orogen. First, I decomposed the relevant monthly time series using the Hilbert-Huang Transform, which is particularly appropriate for non-stationary time series that result from non-linear natural processes. I observed that in the study region discharge variability can be described by five quasi-periodic oscillatory modes on timescales varying from 1 to ~20 years. Secondly, I tested the link between river-discharge variations and large-scale climate modes of variability, using different climate indices, such as the BEST ENSO (Bivariate El Ni{\~n}o-Southern Oscillation Time-series) index. This analysis reveals that, although most of the variance on the annual timescale is associated with the South American Monsoon System, a relatively large part of river-discharge variability is linked to Pacific Ocean variability (PDO phases) at multi-decadal timescales (~20 years). To a lesser degree, river discharge variability is also linked to the Tropical South Atlantic (TSA) sea surface temperature anomaly at multi-annual timescales (~2-5 years). Taken together, these findings exemplify the high degree of sensitivity of high-mountain environments with respect to climatic variability and change. This is particularly true for the topographic transitions between the humid, low-moderate elevations and the semi-arid to arid highlands of the southern Central Andes. Even subtle changes in the hydro-meteorological regime of these areas of the mountain belt react with major impacts on erosional hillslope processes and generate mass movements that fundamentally impact the transport capacity of mountain streams. Despite more severe storms in these areas, the fluvial system is characterized by pronounced variability of the stream power on different timescales, leading to cycles of sediment aggradation, the loss of agriculturally used land and severe impacts on infrastructure.}, language = {en} } @phdthesis{Georgieva2016, author = {Georgieva, Viktoria}, title = {Neotectonics \& Cooling History of the Southern Patagonian Andes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-104185}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 200 Seiten}, year = {2016}, abstract = {The collision of bathymetric anomalies, such as oceanic spreading centers, at convergent plate margins can profoundly affect subduction dynamics, magmatism, and the structural and geomorphic evolution of the overriding plate. The Southern Patagonian Andes of South America are a prime example for sustained oceanic ridge collision and the successive formation and widening of an extensive asthenospheric slab window since the Middle Miocene. Several of the predicted upper-plate geologic manifestations of such deep-seated geodynamic processes have been studied in this region, but many topics remain highly debated. One of the main controversial topics is the interpretation of the regional low-temperature thermochronology exhumational record and its relationship with tectonic and/or climate-driven processes, ultimately manifested and recorded in the landscape evolution of the Patagonian Andes. The prominent along-strike variance in the topographic characteristics of the Andes, combined with coupled trends in low-temperature thermochronometer cooling ages have been interpreted in very contrasting ways, considering either purely climatic (i.e. glacial erosion) or geodynamic (slab-window related) controlling factors. This thesis focuses on two main aspects of these controversial topics. First, based on field observations and bedrock low-temperature thermochronology data, the thesis addresses an existing research gap with respect to the neotectonic activity of the upper plate in response to ridge collision - a mechanism that has been shown to affect the upper plate topography and exhumational patterns in similar tectonic settings. Secondly, the qualitative interpretation of my new and existing thermochronological data from this region is extended by inverse thermal modelling to define thermal histories recorded in the data and evaluate the relative importance of surface vs. geodynamic factors and their possible relationship with the regional cooling record. My research is centered on the Northern Patagonian Icefield (NPI) region of the Southern Patagonian Andes. This site is located inboard of the present-day location of the Chile Triple Junction - the juncture between the colliding Chile Rise spreading center and the Nazca and Antarctic Plates along the South American convergent margin. As such this study area represents the region of most recent oceanic-ridge collision and associated slab window formation. Importantly, this location also coincides with the abrupt rise in summit elevations and relief characteristics in the Southern Patagonian Andes. Field observations, based on geological, structural and geomorphic mapping, are combined with bedrock apatite (U-Th)/He and apatite fission track (AHe and AFT) cooling ages sampled along elevation transects across the orogen. This new data reveals the existence of hitherto unrecognized neotectonic deformation along the flanks of the range capped by the NPI. This deformation is associated with the closely spaced oblique collision of successive oceanic-ridge segments in this region over the past 6 Ma. I interpret that this has caused a crustal-scale partitioning of deformation and the decoupling, margin-parallel migration, and localized uplift of a large crustal sliver (the NPI block) along the subduction margin. The location of this uplift coincides with a major increase of summit elevations and relief at the northern edge of the NPI massif. This mechanism is compatible with possible extensional processes along the topographically subdued trailing edge of the NPI block as documented by very recent and possibly still active normal faulting. Taken together, these findings suggest a major structural control on short-wavelength variations in topography in the Southern Patagonian Andes - the region affected by ridge collision and slab window formation. The second research topic addressed here focuses on using my new and existing bedrock low-temperature cooling ages in forward and inverse thermal modeling. The data was implemented in the HeFTy and QTQt modeling platforms to constrain the late Cenozoic thermal history of the Southern Patagonian Andes in the region of the most recent upper-plate sectors of ridge collision. The data set combines AHe and AFT data from three elevation transects in the region of the Northern Patagonian Icefield. Previous similar studies claimed far-reaching thermal effects of the approaching ridge collision and slab window to affect patterns of Late Miocene reheating in the modelled thermal histories. In contrast, my results show that the currently available data can be explained with a simpler thermal history than previously proposed. Accordingly, a reheating event is not needed to reproduce the observations. Instead, the analyzed ensemble of modelled thermal histories defines a Late Miocene protracted cooling and Pliocene-to-recent stepwise exhumation. These findings agree with the geological record of this region. Specifically, this record indicates an Early Miocene phase of active mountain building associated with surface uplift and an active fold-and-thrust belt, followed by a period of stagnating deformation, peneplanation, and lack of synorogenic deposition in the Patagonian foreland. The subsequent period of stepwise exhumation likely resulted from a combination of pulsed glacial erosion and coeval neotectonic activity. The differences between the present and previously published interpretation of the cooling record can be reconciled with important inconsistencies of previously used model setup. These include mainly the insufficient convergence of the models and improper assumptions regarding the geothermal conditions in the region. This analysis puts a methodological emphasis on the prime importance of the model setup and the need for its thorough examination to evaluate the robustness of the final outcome.}, language = {en} } @phdthesis{Tamasi2016, author = {Tamasi, Katalin}, title = {Measuring children's sensitivity to phonological detail using eye tracking and pupillometry}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-395954}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 165}, year = {2016}, abstract = {Infants' lexical processing is modulated by featural manipulations made to words, suggesting that early lexical representations are sufficiently specified to establish a match with the corresponding label. However, the precise degree of detail in early words requires further investigation due to equivocal findings. We studied this question by assessing children's sensitivity to the degree of featural manipulation (Chapters 2 and 3), and sensitivity to the featural makeup of homorganic and heterorganic consonant clusters (Chapter 4). Gradient sensitivity on the one hand and sensitivity to homorganicity on the other hand would suggest that lexical processing makes use of sub-phonemic information, which in turn would indicate that early words contain sub-phonemic detail. The studies presented in this thesis assess children's sensitivity to sub-phonemic detail using minimally demanding online paradigms suitable for infants: single-picture pupillometry and intermodal preferential looking. Such paradigms have the potential to uncover lexical knowledge that may be masked otherwise due to cognitive limitations. The study reported in Chapter 2 obtained a differential response in pupil dilation to the degree of featural manipulation, a result consistent with gradient sensitivity. The study reported in Chapter 3 obtained a differential response in proportion of looking time and pupil dilation to the degree of featural manipulation, a result again consistent with gradient sensitivity. The study reported in Chapter 4 obtained a differential response to the manipulation of homorganic and heterorganic consonant clusters, a result consistent with sensitivity to homorganicity. These results suggest that infants' lexical representations are not only specific, but also detailed to the extent that they contain sub-phonemic information.}, language = {en} } @phdthesis{Harding2016, author = {Harding, Eleanor Elizabeth}, title = {Neurocognitive entrainment to meter influences syntactic comprehension in music and language}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102258}, school = {Universit{\"a}t Potsdam}, pages = {vi, 318}, year = {2016}, abstract = {Meter and syntax have overlapping elements in music and speech domains, and individual differences have been documented in both meter perception and syntactic comprehension paradigms. Previous evidence insinuated but never fully explored the relationship that metrical structure has to syntactic comprehension, the comparability of these processes across music and language domains, and the respective role of individual differences. This dissertation aimed to investigate neurocognitive entrainment to meter in music and language, the impact that neurocognitive entrainment had on syntactic comprehension, and whether individual differences in musical expertise, temporal perception and working memory played a role during these processes. A theoretical framework was developed, which linked neural entrainment, cognitive entrainment, and syntactic comprehension while detailing previously documented effects of individual differences on meter perception and syntactic comprehension. The framework was developed in both music and language domains and was tested using behavioral and EEG methods across three studies (seven experiments). In order to satisfy empirical evaluation of neurocognitive entrainment and syntactic aspects of the framework, original melodies and sentences were composed. Each item had four permutations: regular and irregular metricality, based on the hierarchical organization of strong and weak notes and syllables, and preferred and non-preferred syntax, based on structurally alternate endings. The framework predicted — for both music and language domains — greater neurocognitive entrainment in regular compared to irregular metricality conditions, and accordingly, better syntactic integration in regular compared to irregular metricality conditions. Individual differences among participants were expected for both entrainment and syntactic processes. Altogether, the dissertation was able to support a holistic account of neurocognitive entrainment to musical meter and its subsequent influence on syntactic integration of melodies, with musician participants. The theoretical predictions were not upheld in the language domain with musician participants, but initial behavioral evidence in combination with previous EEG evidence suggest that perhaps non-musician language EEG data would support the framework's predictions. Musicians' deviation from hypothesized results in the language domain were suspected to reflect heightened perception of acoustic features stemming from musical training, which caused current 'overly' regular stimuli to distract the cognitive system. The individual-differences approach was vindicated by the surfacing of two factors scores, Verbal Working Memory and Time and Pitch Discrimination, which in turn correlated with multiple experimental data across the three studies.}, language = {en} } @phdthesis{Schroen2016, author = {Schr{\"o}n, Martin}, title = {Cosmic-ray neutron sensing and its applications to soil and land surface hydrology}, publisher = {Verlag Dr. Hut GmbH}, address = {M{\"u}nchen}, isbn = {978-3-8439-3139-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-395433}, school = {Universit{\"a}t Potsdam}, pages = {223}, year = {2016}, abstract = {Water scarcity, adaption on climate change, and risk assessment of droughts and floods are critical topics for science and society these days. Monitoring and modeling of the hydrological cycle are a prerequisite to understand and predict the consequences for weather and agriculture. As soil water storage plays a key role for partitioning of water fluxes between the atmosphere, biosphere, and lithosphere, measurement techniques are required to estimate soil moisture states from small to large scales. The method of cosmic-ray neutron sensing (CRNS) promises to close the gap between point-scale and remote-sensing observations, as its footprint was reported to be 30 ha. However, the methodology is rather young and requires highly interdisciplinary research to understand and interpret the response of neutrons to soil moisture. In this work, the signal of nine detectors has been systematically compared, and correction approaches have been revised to account for meteorological and geomagnetic variations. Neutron transport simulations have been consulted to precisely characterize the sensitive footprint area, which turned out to be 6--18 ha, highly local, and temporally dynamic. These results have been experimentally confirmed by the significant influence of water bodies and dry roads. Furthermore, mobile measurements on agricultural fields and across different land use types were able to accurately capture the various soil moisture states. It has been further demonstrated that the corresponding spatial and temporal neutron data can be beneficial for mesoscale hydrological modeling. Finally, first tests with a gyrocopter have proven the concept of airborne neutron sensing, where increased footprints are able to overcome local effects. This dissertation not only bridges the gap between scales of soil moisture measurements. It also establishes a close connection between the two worlds of observers and modelers, and further aims to combine the disciplines of particle physics, geophysics, and soil hydrology to thoroughly explore the potential and limits of the CRNS method.}, language = {en} } @phdthesis{Lyu2016, author = {Lyu, Xiaojing}, title = {Operators on singular manifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103643}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2016}, abstract = {We study the interplay between analysis on manifolds with singularities and complex analysis and develop new structures of operators based on the Mellin transform and tools for iterating the calculus for higher singularities. We refer to the idea of interpreting boundary value problems (BVPs) in terms of pseudo-differential operators with a principal symbolic hierarchy, taking into account that BVPs are a source of cone and edge operator algebras. The respective cone and edge pseudo-differential algebras in turn are the starting point of higher corner theories. In addition there are deep relationships between corner operators and complex analysis. This will be illustrated by the Mellin symbolic calculus.}, language = {en} } @phdthesis{Osudar2016, author = {Osudar, Roman}, title = {Methane distribution and oxidation across aquatic interfaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-96799}, school = {Universit{\"a}t Potsdam}, pages = {vii, 170}, year = {2016}, abstract = {The increase in atmospheric methane concentration, which is determined by an imbalance between its sources and sinks, has led to investigations of the methane cycle in various environments. Aquatic environments are of an exceptional interest due to their active involvement in methane cycling worldwide and in particular in areas sensitive to climate change. Furthermore, being connected with each other aquatic environments form networks that can be spread on vast areas involving marine, freshwater and terrestrial ecosystems. Thus, aquatic systems have a high potential to translate local or regional environmental and subsequently ecosystem changes to a bigger scale. Many studies neglect this connectivity and focus on individual aquatic or terrestrial ecosystems. The current study focuses on environmental controls of the distribution and aerobic oxidation of methane at the example of two aquatic ecosystems. These ecosystems are Arctic fresh water bodies and the Elbe estuary which represent interfaces between freshwater-terrestrial and freshwater-marine environments, respectively. Arctic water bodies are significant atmospheric sources of methane. At the same time the methane cycle in Arctic water bodies is strongly affected by the surrounding permafrost environment, which is characterized by high amounts of organic carbon. The results of this thesis indicate that the methane concentrations in Arctic lakes and streams substantially vary between each other being regulated by local landscape features (e.g. floodplain area) and the morphology of the water bodies (lakes, streams and river). The highest methane concentrations were detected in the lake outlets and in a floodplain lake complex. In contrast, the methane concentrations measured at different sites of the Lena River did not vary substantially. The lake complexes in comparison to the Lena River, thus, appear as more individual and heterogeneous systems with a pronounced imprint of the surrounding soil environment. Furthermore, connected with each other Arctic aquatic environments have a large potential to transport methane from methane-rich water bodies such as streams and floodplain lakes to aquatic environments relatively poor in methane such as the Lena River. Estuaries represent hot spots of oceanic methane emissions. Also, estuaries are intermediate zones between methane-rich river water and methane depleted marine water. Substantiated through this thesis at the example of the Elbe estuary, the methane distribution in estuaries, however, cannot entirely be described by the conservative mixing model i.e. gradual decrease from the freshwater end-member to the marine water end-member. In addition to the methane-rich water from the Elbe River mouth substantial methane input occurs from tidal flats, areas of significant interaction between aquatic and terrestrial environments. Thus, this study demonstrates the complex interactions and their consequences for the methane distribution within estuaries. Also it reveals how important it is to investigate estuaries at larger spatial scales. Methane oxidation (MOX) rates are commonly correlated with methane concentrations. This was shown in previous research studies and was substantiated by the present thesis. In detail, the highest MOX rates in the Arctic water bodies were detected in methane-rich streams and in the floodplain area while in the Elbe estuary the highest MOX rates were observed at the coastal stations. However, in these bordering environments, MOX rates are affected not only via the regulation through methane concentrations. The MOX rates in the Arctic lakes were shown to be also dependent on the abundance and community composition of methane-oxidising bacteria (MOB), that in turn are controlled by local landscape features (regardless of the methane concentrations) and by the transport of MOB between neighbouring environments. In the Elbe estuary, the MOX rates in addition to the methane concentrations are largely affected by the salinity, which is in turn regulated by the mixing of fresh- and marine waters. The magnitude of the salinity impact on MOX rates thereby depends on the MOB community composition and on the rate of the salinity change. This study extends our knowledge of environmental controls of methane distribution and aerobic methane oxidation in aquatic environments. It also illustrates how important it is to investigate complex ecosystems rather than individual ecosystems to better understand the functioning of whole biomes.}, language = {en} } @phdthesis{Marc2016, author = {Marc, Odin}, title = {Earthquake-induced landsliding}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-96808}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 171}, year = {2016}, abstract = {Earthquakes deform Earth's surface, building long-lasting topographic features and contributing to landscape and mountain formation. However, seismic waves produced by earthquakes may also destabilize hillslopes, leading to large amounts of soil and bedrock moving downslope. Moreover, static deformation and shaking are suspected to damage the surface bedrock and therefore alter its future properties, affecting hydrological and erosional dynamics. Thus, earthquakes participate both in mountain building and stimulate directly or indirectly their erosion. Moreover, the impact of earthquakes on hillslopes has important implications for the amount of sediment and organic matter delivered to rivers, and ultimately to oceans, during episodic catastrophic seismic crises, the magnitude of life and property losses associated with landsliding, the perturbation and recovery of landscape properties after shaking, and the long term topographic evolution of mountain belts. Several of these aspects have been addressed recently through individual case studies but additional data compilation as well as theoretical or numerical modelling are required to tackle these issues in a more systematic and rigorous manner. This dissertation combines data compilation of earthquake characteristics, landslide mapping, and seismological data interpretation with physically-based modeling in order to address how earthquakes impact on erosional processes and landscape evolution. Over short time scales (10-100 s) and intermediate length scales (10 km), I have attempted to improve our understanding and ability to predict the amount of landslide debris triggered by seismic shaking in epicentral areas. Over long time scales (1-100 ky) and across a mountain belt (100 km) I have modeled the competition between erosional unloading and building of topography associated with earthquakes. Finally, over intermediate time scales (1-10 y) and at the hillslope scale (0.1-1 km) I have collected geomorphological and seismological data that highlight persistent effects of earthquakes on landscape properties and behaviour. First, I compiled a database on earthquakes that produced significant landsliding, including an estimate of the total landslide volume and area, and earthquake characteristics such as seismic moment and source depth. A key issue is the accurate conversion of landslide maps into volume estimates. Therefore I also estimated how amalgamation - when mapping errors lead to the bundling of multiple landslide into a single polygon - affects volume estimates from various earthquake-induced landslide inventories and developed an algorithm to automatically detect this artifact. The database was used to test a physically-based prediction of the total landslide area and volume caused by earthquakes, based on seismological scaling relationships and a statistical description of the landscape properties. The model outperforms empirical fits in accuracy, with 25 out of 40 cases well predicted, and allows interpretation of many outliers in physical terms. Apart from seismological complexities neglected by the model I found that exceptional rock strength properties or antecedent conditions may explain most outliers. Second, I assessed the geomorphic effects of large earthquakes on landscape dynamics by surveying the temporal evolution of precipitation-normalized landslide rate. I found strongly elevated landslide rates following earthquakes that progressively recover over 1 to 4 years, indicating that regolith strength drops and recovers. The relaxation is clearly non-linear for at least one case, and does not seem to correlate with coseismic landslide reactivation, water table level increase or tree root-system recovery. I suggested that shallow bedrock is damaged by the earthquake and then heals on annual timescales. Such variations in ground strength must be translated into shallow subsurface seismic velocities that are increasingly surveyed with ambient seismic noise correlations. With seismic noise autocorrelation I computed the seismic velocity in the epicentral areas of three earthquakes where I constrained a change in landslide rate. We found similar recovery dynamics and timescales, suggesting that seismic noise correlation techniques could be further developed to meaningfully assess ground strength variations for landscape dynamics. These two measurements are also in good agreement with the temporal dynamics of post-seismic surface displacement measured by GPS. This correlation suggests that the surface healing mechanism may be driven by tectonic deformation, and that the surface regolith and fractured bedrock may behave as a granular media that slowly compacts as it is sheared or vibrated. Last, I compared our model of earthquake-induced landsliding with a standard formulation of surface deformation caused by earthquakes to understand which parameters govern the competition between the building and destruction of topography caused by earthquakes. In contrast with previous studies I found that very large (Mw>8) earthquakes always increase the average topography, whereas only intermediate (Mw ~ 7) earthquakes in steep landscapes may reduce topography. Moreover, I illustrated how the net effect of earthquakes varies with depth or landscape steepness implying a complex and ambivalent role through the life of a mountain belt. Further I showed that faults producing a Gutenberg-Richter distribution of earthquake sizes, will limit topography over a larger range of fault sizes than faults producing repeated earthquakes with a characteristic size.}, language = {en} } @phdthesis{JaraMunoz2016, author = {Jara Mu{\~n}oz, Julius}, title = {Quantifying forearc deformation patterns using coastal geomorphic markers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102652}, school = {Universit{\"a}t Potsdam}, pages = {XXV, 213}, year = {2016}, abstract = {Rapidly uplifting coastlines are frequently associated with convergent tectonic boundaries, like subduction zones, which are repeatedly breached by giant megathrust earthquakes. The coastal relief along tectonically active realms is shaped by the effect of sea-level variations and heterogeneous patterns of permanent tectonic deformation, which are accumulated through several cycles of megathrust earthquakes. However, the correlation between earthquake deformation patterns and the sustained long-term segmentation of forearcs, particularly in Chile, remains poorly understood. Furthermore, the methods used to estimate permanent deformation from geomorphic markers, like marine terraces, have remained qualitative and are based on unrepeatable methods. This contrasts with the increasing resolution of digital elevation models, such as Light Detection and Ranging (LiDAR) and high-resolution bathymetric surveys. Throughout this thesis I study permanent deformation in a holistic manner: from the methods to assess deformation rates, to the processes involved in its accumulation. My research focuses particularly on two aspects: Developing methodologies to assess permanent deformation using marine terraces, and comparing permanent deformation with seismic cycle deformation patterns under different spatial scales along the M8.8 Maule earthquake (2010) rupture zone. Two methods are developed to determine deformation rates from wave-built and wave-cut terraces respectively. I selected an archetypal example of a wave-built terrace at Santa Maria Island studying its stratigraphy and recognizing sequences of reoccupation events tied with eleven radiocarbon sample ages (14C ages). I developed a method to link patterns of reoccupation with sea-level proxies by iterating relative sea level curves for a range of uplift rates. I find the best fit between relative sea-level and the stratigraphic patterns for an uplift rate of 1.5 +- 0.3 m/ka. A Graphical User Interface named TerraceM® was developed in Matlab®. This novel software tool determines shoreline angles in wave-cut terraces under different geomorphic scenarios. To validate the methods, I select test sites in areas of available high-resolution LiDAR topography along the Maule earthquake rupture zone and in California, USA. The software allows determining the 3D location of the shoreline angle, which is a proxy for the estimation of permanent deformation rates. The method is based on linear interpolations to define the paleo platform and cliff on swath profiles. The shoreline angle is then located by intersecting these interpolations. The accuracy and precision of TerraceM® was tested by comparing its results with previous assessments, and through an experiment with students in a computer lab setting at the University of Potsdam. I combined the methods developed to analyze wave-built and wave-cut terraces to assess regional patterns of permanent deformation along the (2010) Maule earthquake rupture. Wave-built terraces are tied using 12 Infra Red Stimulated luminescence ages (IRSL ages) and shoreline angles in wave-cut terraces are estimated from 170 aligned swath profiles. The comparison of coseismic slip, interseismic coupling, and permanent deformation, leads to three areas of high permanent uplift, terrace warping, and sharp fault offsets. These three areas correlate with regions of high slip and low coupling, as well as with the spatial limit of at least eight historical megathrust ruptures (M8-9.5). I propose that the zones of upwarping at Arauco and Topocalma reflect changes in frictional properties of the megathrust, which result in discrete boundaries for the propagation of mega earthquakes. To explore the application of geomorphic markers and quantitative morphology in offshore areas I performed a local study of patterns of permanent deformation inferred from hitherto unrecognized drowned shorelines at the Arauco Bay, at the southern part of the (2010) Maule earthquake rupture zone. A multidisciplinary approach, including morphometry, sedimentology, paleontology, 3D morphoscopy, and a landscape Evolution Model is used to recognize, map, and assess local rates and patterns of permanent deformation in submarine environments. Permanent deformation patterns are then reproduced using elastic models to assess deformation rates of an active submarine splay fault defined as Santa Maria Fault System. The best fit suggests a reverse structure with a slip rate of 3.7 m/ka for the last 30 ka. The register of land level changes during the earthquake cycle at Santa Maria Island suggest that most of the deformation may be accrued through splay fault reactivation during mega earthquakes, like the (2010) Maule event. Considering a recurrence time of 150 to 200 years, as determined from historical and geological observations, slip between 0.3 and 0.7 m per event would be required to account for the 3.7 m/ka millennial slip rate. However, if the SMFS slips only every ~1000 years, representing a few megathrust earthquakes, then a slip of ~3.5 m per event would be required to account for the long- term rate. Such event would be equivalent to a magnitude ~6.7 earthquake capable to generate a local tsunami. The results of this thesis provide novel and fundamental information regarding the amount of permanent deformation accrued in the crust, and the mechanisms responsible for this accumulation at millennial time-scales along the M8.8 Maule earthquake (2010) rupture zone. Furthermore, the results of this thesis highlight the application of quantitative geomorphology and the use of repeatable methods to determine permanent deformation, improve the accuracy of marine terrace assessments, and estimates of vertical deformation rates in tectonically active coastal areas. This is vital information for adequate coastal-hazard assessments and to anticipate realistic earthquake and tsunami scenarios.}, language = {en} } @phdthesis{Falter2016, author = {Falter, Daniela}, title = {A novel approach for large-scale flood risk assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90239}, school = {Universit{\"a}t Potsdam}, pages = {95}, year = {2016}, abstract = {In the past, floods were basically managed by flood control mechanisms. The focus was set on the reduction of flood hazard. The potential consequences were of minor interest. Nowadays river flooding is increasingly seen from the risk perspective, including possible consequences. Moreover, the large-scale picture of flood risk became increasingly important for disaster management planning, national risk developments and the (re-) insurance industry. Therefore, it is widely accepted that risk-orientated flood management ap-proaches at the basin-scale are needed. However, large-scale flood risk assessment methods for areas of several 10,000 km² are still in early stages. Traditional flood risk assessments are performed reach wise, assuming constant probabilities for the entire reach or basin. This might be helpful on a local basis, but where large-scale patterns are important this approach is of limited use. Assuming a T-year flood (e.g. 100 years) for the entire river network is unrealistic and would lead to an overestimation of flood risk at the large scale. Due to the lack of damage data, additionally, the probability of peak discharge or rainfall is usually used as proxy for damage probability to derive flood risk. With a continuous and long term simulation of the entire flood risk chain, the spatial variability of probabilities could be consider and flood risk could be directly derived from damage data in a consistent way. The objective of this study is the development and application of a full flood risk chain, appropriate for the large scale and based on long term and continuous simulation. The novel approach of 'derived flood risk based on continuous simulations' is introduced, where the synthetic discharge time series is used as input into flood impact models and flood risk is directly derived from the resulting synthetic damage time series. The bottleneck at this scale is the hydrodynamic simu-lation. To find suitable hydrodynamic approaches for the large-scale a benchmark study with simplified 2D hydrodynamic models was performed. A raster-based approach with inertia formulation and a relatively high resolution of 100 m in combination with a fast 1D channel routing model was chosen. To investigate the suitability of the continuous simulation of a full flood risk chain for the large scale, all model parts were integrated into a new framework, the Regional Flood Model (RFM). RFM consists of the hydrological model SWIM, a 1D hydrodynamic river network model, a 2D raster based inundation model and the flood loss model FELMOps+r. Subsequently, the model chain was applied to the Elbe catchment, one of the largest catchments in Germany. For the proof-of-concept, a continuous simulation was per-formed for the period of 1990-2003. Results were evaluated / validated as far as possible with available observed data in this period. Although each model part introduced its own uncertainties, results and runtime were generally found to be adequate for the purpose of continuous simulation at the large catchment scale. Finally, RFM was applied to a meso-scale catchment in the east of Germany to firstly perform a flood risk assessment with the novel approach of 'derived flood risk assessment based on continuous simulations'. Therefore, RFM was driven by long term synthetic meteorological input data generated by a weather generator. Thereby, a virtual time series of climate data of 100 x 100 years was generated and served as input to RFM providing subsequent 100 x 100 years of spatially consistent river discharge series, inundation patterns and damage values. On this basis, flood risk curves and expected annual damage could be derived directly from damage data, providing a large-scale picture of flood risk. In contrast to traditional flood risk analysis, where homogenous return periods are assumed for the entire basin, the presented approach provides a coherent large-scale picture of flood risk. The spatial variability of occurrence probability is respected. Additionally, data and methods are consistent. Catchment and floodplain processes are repre-sented in a holistic way. Antecedent catchment conditions are implicitly taken into account, as well as physical processes like storage effects, flood attenuation or channel-floodplain interactions and related damage influencing effects. Finally, the simulation of a virtual period of 100 x 100 years and consequently large data set on flood loss events enabled the calculation of flood risk directly from damage distributions. Problems associated with the transfer of probabilities in rainfall or peak runoff to probabilities in damage, as often used in traditional approaches, are bypassed. RFM and the 'derived flood risk approach based on continuous simulations' has the potential to provide flood risk statements for national planning, re-insurance aspects or other questions where spatially consistent, large-scale assessments are required.}, language = {en} } @phdthesis{Nied2016, author = {Nied, Manuela}, title = {The role of soil moisture and weather patterns for flood occurrence and characteristics at the river basin scale}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94612}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 86}, year = {2016}, abstract = {Flood generation at the scale of large river basins is triggered by the interaction of the hydrological pre-conditions and the meteorological event conditions at different spatial and temporal scales. This interaction controls diverse flood generating processes and results in floods varying in magnitude and extent, duration as well as socio-economic consequences. For a process-based understanding of the underlying cause-effect relationships, systematic approaches are required. These approaches have to cover the complete causal flood chain, including the flood triggering meteorological event in combination with the hydrological (pre-)conditions in the catchment, runoff generation, flood routing, possible floodplain inundation and finally flood losses. In this thesis, a comprehensive probabilistic process-based understanding of the causes and effects of floods is advanced. The spatial and temporal dynamics of flood events as well as the geophysical processes involved in the causal flood chain are revealed and the systematic interconnections within the flood chain are deciphered by means of the classification of their associated causes and effects. This is achieved by investigating the role of the hydrological pre-conditions and the meteorological event conditions with respect to flood occurrence, flood processes and flood characteristics as well as their interconnections at the river basin scale. Broadening the knowledge about flood triggers, which up to now has been limited to linking large-scale meteorological conditions to flood occurrence, the influence of large-scale pre-event hydrological conditions on flood initiation is investigated. Using the Elbe River basin as an example, a classification of soil moisture, a key variable of pre-event conditions, is developed and a probabilistic link between patterns of soil moisture and flood occurrence is established. The soil moisture classification is applied to continuously simulated soil moisture data which is generated using the semi-distributed conceptual rainfall-runoff model SWIM. Applying successively a principal component analysis and a cluster analysis, days of similar soil moisture patterns are identified in the period November 1951 to October 2003. The investigation of flood triggers is complemented by including meteorological conditions described by a common weather pattern classification that represents the main modes of atmospheric state variability. The newly developed soil moisture classification thereby provides the basis to study the combined impact of hydrological pre-conditions and large-scale meteorological event conditions on flood occurrence at the river basin scale. A process-based understanding of flood generation and its associated probabilities is attained by classifying observed flood events into process-based flood types such as snowmelt floods or long-rain floods. Subsequently, the flood types are linked to the soil moisture and weather patterns. Further understanding of the processes is gained by modeling of the complete causal flood chain, incorporating a rainfall-runoff model, a 1D/2D hydrodynamic model and a flood loss model. A reshuffling approach based on weather patterns and the month of their occurrence is developed to generate synthetic data fields of meteorological conditions, which drive the model chain, in order to increase the flood sample size. From the large number of simulated flood events, the impact of hydro-meteorological conditions on various flood characteristics is detected through the analysis of conditional cumulative distribution functions and regression trees. The results show the existence of catchment-scale soil moisture patterns, which comprise of large-scale seasonal wetting and drying components as well as of smaller-scale variations related to spatially heterogeneous catchment processes. Soil moisture patterns frequently occurring before the onset of floods are identified. In winter, floods are initiated by catchment-wide high soil moisture, whereas in summer the flood-initiating soil moisture patterns are diverse and the soil moisture conditions are less stable in time. The combined study of both soil moisture and weather patterns shows that the flood favoring hydro-meteorological patterns as well as their interactions vary seasonally. In the analysis period, 18 \% of the weather patterns only result in a flood in the case of preceding soil saturation. The classification of 82 past events into flood types reveals seasonally varying flood processes that can be linked to hydro-meteorological patterns. For instance, the highest flood potential for long-rain floods is associated with a weather pattern that is often detected in the presence of so-called 'Vb' cyclones. Rain-on-snow and snowmelt floods are associated with westerly and north-westerly wind directions. The flood characteristics vary among the flood types and can be reproduced by the applied model chain. In total, 5970 events are simulated. They reproduce the observed event characteristics between September 1957 and August 2002 and provide information on flood losses. A regression tree analysis relates the flood processes of the simulated events to the hydro-meteorological (pre-)event conditions and highlights the fact that flood magnitude is primarily controlled by the meteorological event, whereas flood extent is primarily controlled by the soil moisture conditions. Describing flood occurrence, processes and characteristics as a function of hydro-meteorological patterns, this thesis is part of a paradigm shift towards a process-based understanding of floods. The results highlight that soil moisture patterns as well as weather patterns are not only beneficial to a probabilistic conception of flood initiation but also provide information on the involved flood processes and the resulting flood characteristics.}, language = {en} } @phdthesis{Intziegianni2016, author = {Intziegianni, Konstantina}, title = {Influence of age and pathology on Achilles tendon properties under functional tasks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-398732}, school = {Universit{\"a}t Potsdam}, pages = {ix, 85, xii}, year = {2016}, abstract = {Prevalence of Achilles tendinopathy increases with age, leading to a weaker tendon with predisposition to rupture. Previous studies, investigating Achilles tendon (AT) properties, are restricted to standardized isometric conditions. Knowledge regarding the influence of age and pa-thology on AT response under functional tasks remains limited. Therefore, the aim of the thesis was to investigate the influence of age and pathology on AT properties during a single-leg vertical jump. Healthy children, asymptomatic adults and patients with Achilles tendinopathy participated. Ultrasonography was used to assess AT-length, AT-cross-sectional area and AT-elongation. The reliability of the methodology used was evaluated both Intra- and inter-rater at rest and at maximal isometric plantar-flexion contraction and was further implemented to investigate tendon properties during functional task. During the functional task a single-leg vertical jump on a force plate was performed while simultaneously AT elongation and vertical ground reaction forces were recorded. AT compliance [mm/N] (elongation/force) and AT strain [\%] (elongation/length) were calculated. Differences between groups were evaluated with respect to age (children vs. adults) and pathology (asymptomatic adults vs. patients). Good to excellent reliability with low levels of variability was achieved in the assessment of AT properties. During the jumps AT elongation was found to be statistical significant higher in children. However, no statistical significant difference was found for force among the groups. AT compliance and strain were found to be statistical significant higher only in children. No significant differences were found between asymptomatic adults and patients with tendinopathy. The methodology used to assess AT properties is reliable, allowing its implementation into further investigations. Higher AT-compliance in children might be considered as a protective factor against load-related injuries. During functional task, when higher forces are acting on the AT, tendinopathy does not result in a weaker tendon.}, language = {en} } @phdthesis{MartinezValdes2016, author = {Mart{\´i}nez Vald{\´e}s, Eduardo Andr{\´e}s}, title = {Neuromuscular adaptations of either endurance or high-intensity interval training}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396383}, school = {Universit{\"a}t Potsdam}, pages = {VII, 140, XII}, year = {2016}, abstract = {During the last decade, high intensity interval training (HIIT) has been used as an alternative to endurance (END) exercise, since it requires less time to produce similar physiological adaptations. Previous literature has focused on HIIT changes in aerobic metabolism and cardiorespiratory fitness, however, there are currently no studies focusing on its neuromuscular adaptations. Therefore, this thesis aimed to compare the neuromuscular adaptations of both HIIT and END after a two-week training intervention, by using a novel technology called high-density surface electromyography (HDEMG) motor unit decomposition. This project consisted in two experiments, where healthy young men were recruited (aged between 18 to 35 years). In experiment one, the reliability of HDEMG motor unit variables (mean discharge rate, peak-to-peak amplitude, conduction velocity and discharge rate variability) was tested (Study 1), a new method to track the same motor units longitudinally was proposed (Study 2), and the level of low (<5Hz) and high (>5Hz) frequency motor unit coherence between vastus medialis (VM) and lateralis (VL) knee extensor muscles was measured (Study 4). In experiment two, a two-week HIIT and END intervention was conducted where cardiorespiratory fitness parameters (e.g. peak oxygen uptake) and motor unit variables from the VM and VL muscles were assessed pre and post intervention (Study 3). The results showed that HDEMG is reliable to monitor changes in motor unit activity and also allows the tracking of the same motor units across different testing sessions. As expected, both HIIT and END improved cardiorespiratory fitness parameters similarly. However, the neuromuscular adaptations of both types of training differed after the intervention, with HIIT showing a significant increase in knee extensor muscle strength that was accompanied by increased VM and VL motor unit discharge rates and HDEMG amplitude at the highest force levels [(50 and 70\% of the maximum voluntary contraction force (MVC)], while END training induced a marked increase in time to task failure at lower force levels (30\% MVC), without any influence on HDEMG amplitude and discharge rates. Additionally, the results showed that VM and VL muscles share most of their synaptic input since they present a large amount of low and high frequency motor unit coherence, which can explain the findings of the training intervention where both muscles showed similar changes in HDEMG amplitude and discharge rates. Taken together, the findings of the current thesis show that despite similar improvements in cardiopulmonary fitness, HIIT and END induced opposite adjustments in motor unit behavior. These results suggest that HIIT and END show specific neuromuscular adaptations, possibly related to their differences in exercise load intensity and training volume.}, language = {en} } @phdthesis{Engel2016, author = {Engel, Tilman}, title = {Motor control strategies in response to unexpected disturbances of dynamic postural control in people with and without low back pain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-400742}, school = {Universit{\"a}t Potsdam}, pages = {110}, year = {2016}, abstract = {Background: Low back pain (LBP) is one of the world wide leading causes of limited activity and disability. Impaired motor control has been found to be one of the possible factors related to the development or persistence of LBP. In particularly, motor control strategies seemed to be altered in situations requiring reactive responses of the trunk counteracting sudden external forces. However, muscular responses were mostly assessed in (quasi) static testing situations under simplified laboratory conditions. Comprehensive investigations in motor control strategies during dynamic everyday situations are lacking. The present research project aimed to investigate muscular compensation strategies following unexpected gait perturbations in people with and without LBP. A novel treadmill stumbling protocol was tested for its validity and reliability to provoke muscular reflex responses at the trunk and the lower extremities (study 1). Thereafter, motor control strategies in response to sudden perturbations were compared between people with LBP and asymptomatic controls (CTRL) (study 2). In accordance with more recent concepts of motor adaptation to pain, it was hypothesized that pain may have profound consequences on motor control strategies in LBP. Therefore, it was investigated whether differences in compensation strategies were either consisting of changes local to the painful area at the trunk, or also being present in remote areas such as at the lower extremities. Methods: All investigations were performed on a custom build split-belt treadmill simulating trip-like events by unexpected rapid deceleration impulses (amplitude: 2 m/s; duration: 100 ms; 200 ms after heel contact) at 1m/s baseline velocity. A total number of 5 (study 1) and 15 (study 2) right sided perturbations were applied during walking trials. Muscular activities were assessed by surface electromyography (EMG), recorded at 12 trunk muscles and 10 (study 1) respectively 5 (study 2) leg muscles. EMG latencies of muscle onset [ms] were retrieved by a semi-automatic detection method. EMG amplitudes (root mean square (RMS)) were assessed within 200 ms post perturbation, normalized to full strides prior to any perturbation [RMS\%]. Latency and amplitude investigations were performed for each muscle individually, as well as for pooled data of muscles grouped by location. Characteristic pain intensity scores (CPIS; 0-100 points, von Korff) based on mean intensity ratings reported for current, worst and average pain over the last three months were used to allocate participants into LBP (≥30 points) or CTRL (≤10 points). Test-retest reproducibility between measurements was determined by a compilation of measures of reliability. Differences in muscular activities between LBP and CTRL were analysed descriptively for individual muscles; differences based on grouped muscles were statistically tested by using a multivariate analysis of variance (MANOVA, α =0.05). Results: Thirteen individuals were included into the analysis of study 1. EMG latencies revealed reflex muscle activities following the perturbation (mean: 89 ms). Respective EMG amplitudes were on average 5-fold of those assessed in unperturbed strides, though being characterized by a high inter-subject variability. Test-retest reliability of muscle latencies showed a high reproducibility, both for muscles at the trunk and legs. In contrast, reproducibility of amplitudes was only weak to moderate for individual muscles, but increased when being assessed as a location specific outcome summary of grouped muscles. Seventy-six individuals were eligible for data analysis in study 2. Group allocation according to CPIS resulted in n=25 for LBP and n=29 for CTRL. Descriptive analysis of activity onsets revealed longer delays for all muscles within LBP compared to CTRL (trunk muscles: mean 10 ms; leg muscles: mean 3 ms). Onset latencies of grouped muscles revealed statistically significant differences between LBP and CTRL for right (p=0.009) and left (p=0.007) abdominal muscle groups. EMG amplitude analysis showed a high variability in activation levels between individuals, independent of group assignment or location. Statistical testing of grouped muscles indicated no significant difference in amplitudes between LBP and CTRL. Discussion: The present research project could show that perturbed treadmill walking is suitable to provoke comprehensive reflex responses at the trunk and lower extremities, both in terms of sudden onsets and amplitudes of reflex activity. Moreover, it could demonstrate that sudden loadings under dynamic conditions provoke an altered reflex timing of muscles surrounding the trunk in people with LBP compared to CTRL. In line with previous investigations, compensation strategies seemed to be deployed in a task specific manner, with differences between LBP and CTRL being evident predominately at ventral sides. No muscular alterations exceeding the trunk could be found when being assessed under the automated task of locomotion. While rehabilitation programs tailored towards LBP are still under debate, it is tempting to urge the implementation of dynamic sudden loading incidents of the trunk to enhance motor control and thereby to improve spinal protection. Moreover, in respect to the consistently observed task specificity of muscular compensation strategies, such a rehabilitation program should be rich in variety.}, language = {en} } @phdthesis{Kopinski2016, author = {Kopinski, Stephan}, title = {The neuromuscular efficiency of lower back muscles in low back pain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101241}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2016}, abstract = {BACKGROUND: The etiology of low back pain (LBP), one of the most prevalent and costly diseases of our time, is accepted to be multi-causal, placing functional factors in the focus of research. Thereby, pain models suggest a centrally controlled strategy of trunk stiffening in LBP. However, supporting biomechanical evidence is mostly limited to static measurements during maximum voluntary contractions (MVC), probably influenced by psychological factors in LBP. Alternatively, repeated findings indicate that the neuromuscular efficiency (NME), characterized by the strength-to-activation relationship (SAR), of lower back muscles is impaired in LBP. Therefore, a dynamic SAR protocol, consisting of normalized trunk muscle activation recordings during submaximal loads (SMVC) seems to be relevant. This thesis aimed to investigate the influence of LBP on the NME and activation pattern of trunk muscles during dynamic trunk extensions. METHODS: The SAR protocol consisted of an initial MVC reference trial (MVC1), followed by SMVCs at 20, 40, 60 and 80\% of MVC1 load. An isokinetic trunk dynamometer (Con-Trex TP, ROM: 45° flexion to 10° extension, velocity: 45°/s) and a trunk surface EMG setup (myon, up to 12 leads) was used. Extension torque output [Nm] and muscular activation [V] were assessed in all trials. Finally, another MVC trial was performed (MVC2) for reliability analysis. For SAR evaluation the SMVC trial values were normalized [\%MVC1] and compared inter- and intra-individually. The methodical validity of the approach was tested in an isometric SAR single-case pilot study (S1a: N = 2, female LBP patient vs. healthy male). In addition, the validity of the MVC reference method was verified by comparing different contraction modes (S1b: N = 17, healthy individuals). Next, the isokinetic protocol was validated in terms of content for its applicability to display known physiological differences between sexes in a cross-sectional study (S2: each n = 25 healthy males/females). Finally, the influence of acute pain on NME was investigated longitudinally by comparing N = 8 acute LBP patients with the retest after remission of pain (S3). The SAR analysis focused on normalized agonistic extensor activation and abdominal and synergistic extensor co-activation (t-tests, ANOVA, α = .05) as well as on reliability of MVC1/2 outcomes. RESULTS: During the methodological validation of the protocol (S1a), the isometric SAR was found to be descriptively different between individuals. Whereas torque output was highest during eccentric MVC, no relevant difference in peak EMG activation was found between contraction modes (S1b). The isokinetic SAR sex comparison (S2), though showing no significant overall effects, revealed higher normalized extensor activation at moderate submaximal loads in females (13 ± 4\%), primarily caused by pronounced thoracic activation. Similarly, co-activation analysis resulted in significantly higher antagonistic activation at moderate loads compared to males (33 ± 9\%). During intra-individual analysis of SAR in LBP patients (S3), a significant effect of pain status on the SAR has been identified, manifesting as increased normalized EMG activation of extensors during acute LBP (11 ± 8\%) particularly at high load. Abdominal co-activation tended to be elevated (27 ± 11\%) just as the thoracic extensor parts seemed to take over proportions of lumbar activation. All together, the M. erector spinae behaviour during the SAR protocol was rather linear with the tendency to rise exponentially during high loads. For the level of normalized EMG activation during SMVCs, a clear increasing trend from healthy males to females over to non-acute and acute LBP patients was discovered. This was associated by elevated antagonistic activation and a shift of synergistic towards lumbar extensor activation. The MVC data revealed overall good reliability, with clearly higher variability during acute LBP. DISCUSSION: The present thesis demonstrates that the NME of lower back muscles is impaired in LBP patients, especially during an acute pain episode. A new dynamic protocol has been developed that makes it possible to display the underlying SAR using normalized trunk muscle EMG during submaximal isokinetic loads. The protocol shows promise as a biomechanical tool for diagnostic analysis of NME in LBP patients and monitoring of rehabilitation progress. Furthermore, reliability not of maximum strength but rather of peak EMG of MVC measurements seems to be decreased in LBP patients. Meanwhile, the findings of this thesis largely substantiate the assumptions made by the recently presented 'motor adaptation to pain' model, suggesting a pain-related intra- and intermuscular activation redistribution affecting movement and stiffness of the trunk. Further research is needed to distinguish the grade of NME impairment between LBP subgroups.}, language = {en} } @phdthesis{Wang2016, author = {Wang, Victor-C.}, title = {Injury and illness risk factors for elite athletes in training environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100925}, school = {Universit{\"a}t Potsdam}, pages = {viii, 84, ix}, year = {2016}, abstract = {Since 1998, elite athletes' sport injuries have been monitored in single sport event, which leads to the development of first comprehensive injury surveillance system in multi-sport Olympic Games in 2008. However, injury and illness occurred in training phases have not been systematically studied due to its multi-facets, potentially interactive risk related factors. The present thesis aim to address issues of feasibility of establishing a validated measure for injury/illness, training environment and psychosocial risk factors by creating the evaluation tool namely risk of injury questionnaire (Risk-IQ) for elite athletes, which based on IOC consensus statement 2009 recommended content of preparticipation evaluation(PPE) and periodic health exam (PHE). A total of 335 top level athletes and a total of 88 medical care providers from Germany and Taiwan participated in tow "cross-sectional plus longitudinal" Risk-IQ and MCPQ surveys respectively. Four categories of injury/illness related risk factors questions were asked in Risk-IQ for athletes while injury risk and psychological related questions were asked in MCPQ for MCP cohorts. Answers were quantified scales wise/subscales wise before analyzed with other factors/scales. In addition, adapted variables such as sport format were introduced for difference task of analysis. Validated with 2-wyas translation and test-retest reliabilities, the Risk-IQ was proved to be in good standard which were further confirmed by analyzed results from official surveys in both Germany and Taiwan. The result of Risk-IQ revealed that elite athletes' accumulated total injuries, in general, were multi-factor dependent; influencing factors including but not limited to background experiences, medical history, PHE and PPE medical resources as well as stress from life events. Injuries of different body parts were sport format and location specific. Additionally, medical support of PPE and PHE indicated significant difference between German and Taiwan. The result of the present thesis confirmed that it is feasible to construct a comprehensive evalua-tion instrument for heterogeneous elite athletes cohorts' risk factor analysis for injury/illness oc-curred during their non-competition periods. In average and with many moderators involved, Ger-man elite athletes have superior medical care support yet suffered more severe injuries than Tai-wanese counterparts. Opinions of injury related psychological issues reflected differently on vari-ous MCP groups irrespective of different nationalities. In general, influencing factors and interac-tions existed among relevant factors in both studies which implied further investigation with multiple regression analysis is needed for better understanding.}, language = {en} } @phdthesis{Pavlenko2016, author = {Pavlenko, Elena}, title = {Hybrid nanolayer architectures for ultrafast acousto-plasmonics in soft matter}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-99544}, school = {Universit{\"a}t Potsdam}, pages = {85}, year = {2016}, abstract = {The goal of the presented work is to explore the interaction between gold nanorods (GNRs) and hyper-sound waves. For the generation of the hyper-sound I have used Azobenzene-containing polymer transducers. Multilayer polymer structures with well-defined thicknesses and smooth interfaces were built via layer-by-layer deposition. Anionic polyelectrolytes with Azobenzene side groups (PAzo) were alternated with cationic polymer PAH, for the creation of transducer films. PSS/PAH multilayer were built for spacer layers, which do not absorb in the visible light range. The properties of the PAzo/PAH film as a transducer are carefully characterized by static and transient optical spectroscopy. The optical and mechanical properties of the transducer are studied on the picosecond time scale. In particular the relative change of the refractive index of the photo-excited and expanded PAH/PAzo is Δn/n = - 2.6*10-4. Calibration of the generated strain is performed by ultrafast X-ray diffraction calibrated the strain in a Mica substrate, into which the hyper-sound is transduced. By simulating the X-ray data with a linear-chain-model the strain in the transducer under the excitation is derived to be Δd/d ~ 5*10-4. Additional to the investigation of the properties of the transducer itself, I have performed a series of experiments to study the penetration of the generated strain into various adjacent materials. By depositing the PAzo/PAH film onto a PAH/PSS structure with gold nanorods incorporated in it, I have shown that nanoscale impurities can be detected via the scattering of hyper-sound. Prior to the investigation of complex structures containing GNRs and the transducer, I have performed several sets of experiments on GNRs deposited on a small buffer of PSS/PAH. The static and transient response of GNRs is investigated for different fluence of the pump beam and for different dielectric environments (GNRs covered by PSS/PAH). A systematic analysis of sample architectures is performed in order to construct a sample with the desired effect of GNRs responding to the hyper-sound strain wave. The observed shift of a feature related to the longitudinal plasmon resonance in the transient reflection spectra is interpreted as the event of GNRs sensing the strain wave. We argue that the shift of the longitudinal plasmon resonance is caused by the viscoelastic deformation of the polymer around the nanoparticle. The deformation is induced by the out of plane difference in strain in the area directly under a particle and next to it. Simulations based on the linear chain model support this assumption. Experimentally this assumption is proven by investigating the same structure, with GNRs embedded in a PSS/PAH polymer layer. The response of GNRs to the hyper-sound wave is also observed for the sample structure with GNRs embedded in PAzo/PAH films. The response of GNRs in this case is explained to be driven by the change of the refractive index of PAzo during the strain propagation.}, language = {en} } @phdthesis{Gopalakrishnan2016, author = {Gopalakrishnan, Sathej}, title = {Mathematical modelling of host-disease-drug interactions in HIV disease}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100100}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2016}, abstract = {The human immunodeficiency virus (HIV) has resisted nearly three decades of efforts targeting a cure. Sustained suppression of the virus has remained a challenge, mainly due to the remarkable evolutionary adaptation that the virus exhibits by the accumulation of drug-resistant mutations in its genome. Current therapeutic strategies aim at achieving and maintaining a low viral burden and typically involve multiple drugs. The choice of optimal combinations of these drugs is crucial, particularly in the background of treatment failure having occurred previously with certain other drugs. An understanding of the dynamics of viral mutant genotypes aids in the assessment of treatment failure with a certain drug combination, and exploring potential salvage treatment regimens. Mathematical models of viral dynamics have proved invaluable in understanding the viral life cycle and the impact of antiretroviral drugs. However, such models typically use simplified and coarse-grained mutation schemes, that curbs the extent of their application to drug-specific clinical mutation data, in order to assess potential next-line therapies. Statistical models of mutation accumulation have served well in dissecting mechanisms of resistance evolution by reconstructing mutation pathways under different drug-environments. While these models perform well in predicting treatment outcomes by statistical learning, they do not incorporate drug effect mechanistically. Additionally, due to an inherent lack of temporal features in such models, they are less informative on aspects such as predicting mutational abundance at treatment failure. This limits their application in analyzing the pharmacology of antiretroviral drugs, in particular, time-dependent characteristics of HIV therapy such as pharmacokinetics and pharmacodynamics, and also in understanding the impact of drug efficacy on mutation dynamics. In this thesis, we develop an integrated model of in vivo viral dynamics incorporating drug-specific mutation schemes learned from clinical data. Our combined modelling approach enables us to study the dynamics of different mutant genotypes and assess mutational abundance at virological failure. As an application of our model, we estimate in vivo fitness characteristics of viral mutants under different drug environments. Our approach also extends naturally to multiple-drug therapies. Further, we demonstrate the versatility of our model by showing how it can be modified to incorporate recently elucidated mechanisms of drug action including molecules that target host factors. Additionally, we address another important aspect in the clinical management of HIV disease, namely drug pharmacokinetics. It is clear that time-dependent changes in in vivo drug concentration could have an impact on the antiviral effect, and also influence decisions on dosing intervals. We present a framework that provides an integrated understanding of key characteristics of multiple-dosing regimens including drug accumulation ratios and half-lifes, and then explore the impact of drug pharmacokinetics on viral suppression. Finally, parameter identifiability in such nonlinear models of viral dynamics is always a concern, and we investigate techniques that alleviate this issue in our setting.}, language = {en} } @phdthesis{Bojahr2016, author = {Bojahr, Andre}, title = {Hypersound interaction studied by time-resolved inelastic light and x-ray scattering}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93860}, school = {Universit{\"a}t Potsdam}, pages = {xxiii, 201}, year = {2016}, abstract = {This publications-based thesis summarizes my contribution to the scientific field of ultrafast structural dynamics. It consists of 16 publications, about the generation, detection and coupling of coherent gigahertz longitudinal acoustic phonons, also called hypersonic waves. To generate such high frequency phonons, femtosecond near infrared laser pulses were used to heat nanostructures composed of perovskite oxides on an ultrashort timescale. As a consequence the heated regions of such a nanostructure expand and a high frequency acoustic phonon pulse is generated. To detect such coherent acoustic sound pulses I use ultrafast variants of optical Brillouin and x-ray scattering. Here an incident optical or x-ray photon is scattered by the excited sound wave in the sample. The scattered light intensity measures the occupation of the phonon modes. The central part of this work is the investigation of coherent high amplitude phonon wave packets which can behave nonlinearly, quite similar to shallow water waves which show a steepening of wave fronts or solitons well known as tsunamis. Due to the high amplitude of the acoustic wave packets in the solid, the acoustic properties can change significantly in the vicinity of the sound pulse. This may lead to a shape change of the pulse. I have observed by time-resolved Brillouin scattering, that a single cycle hypersound pulse shows a wavefront steepening. I excited hypersound pulses with strain amplitudes until 1\% which I have calibrated by ultrafast x-ray diffraction (UXRD). On the basis of this first experiment we developed the idea of the nonlinear mixing of narrowband phonon wave packets which we call "nonlinear phononics" in analogy with the nonlinear optics, which summarizes a kaleidoscope of surprising optical phenomena showing up at very high electric fields. Such phenomena are for instance Second Harmonic Generation, four-wave-mixing or solitons. But in case of excited coherent phonons the wave packets have usually very broad spectra which make it nearly impossible to look at elementary scattering processes between phonons with certain momentum and energy. For that purpose I tested different techniques to excite narrowband phonon wave packets which mainly consist of phonons with a certain momentum and frequency. To this end epitaxially grown metal films on a dielectric substrate were excited with a train of laser pulses. These excitation pulses drive the metal film to oscillate with the frequency given by their inverse temporal displacement and send a hypersonic wave of this frequency into the substrate. The monochromaticity of these wave packets was proven by ultrafast optical Brillouin and x-ray scattering. Using the excitation of such narrowband phonon wave packets I was able to observe the Second Harmonic Generation (SHG) of coherent phonons as a first example of nonlinear wave mixing of nanometric phonon wave packets.}, language = {en} } @phdthesis{Haendler2016, author = {Haendler, Yair}, title = {Effects of embedded pronouns on relative clause processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396883}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 186}, year = {2016}, abstract = {Difficulties with object relative clauses (ORC), as compared to subject relative clauses (SR), are widely attested across different languages, both in adults and in children. This SR-ORC asymmetry is reduced, or even eliminated, when the embedded constituent in the ORC is a pronoun, rather than a lexical noun phrase. The studies included in this thesis were designed to explore under what circumstances the pronoun facilitation occurs; whether all pronouns have the same effect; whether SRs are also affected by embedded pronouns; whether children perform like adults on such structures; and whether performance is related to cognitive abilities such as memory or grammatical knowledge. Several theoretical approaches that explain the pronoun facilitation in relative clauses are evaluated. The experimental data have been collected in three languages-German, Italian and Hebrew-stemming from both children and adults. In the German study (Chapter 2), ORCs with embedded 1st- or 3rd-person pronouns are compared to ORCs with an embedded lexical noun phrase. Eye-movement data from 5-year-old children show that the 1st-person pronoun facilitates processing, but not the 3rd-person pronoun. Moreover, children's performance is modulated by additive effects of their memory and grammatical skills. In the Italian study (Chapter 3), the 1st-person pronoun advantage over the 3rd-person pronoun is tested in ORCs and SRs that display a similar word order. Eye-movement data from 5-year-olds and adult controls and reading times data from adults are pitted against the outcome of a corpus analysis, showing that the 1st-/3rd-person pronoun asymmetry emerges in the two relative clause types to an equal extent. In the Hebrew study (Chapter 4), the goal is to test the effect of a special kind of pronoun-a non-referential arbitrary subject pronoun-on ORC comprehension, in the light of potential confounds in previous studies that used this pronoun. Data from a referent-identification task with 4- to 5-year-olds indicate that, when the experimental material is controlled, the non-referential pronoun does not necessarily facilitate ORC comprehension. Importantly, however, children have even more difficulties when the embedded constituent is a referential pronoun. The non-referentiality / referentiality asymmetry is emphasized by the relation between children's performance on the experimental task and their memory skills. Together, the data presented in this thesis indicate that sentence processing is not only driven by structural (or syntactic) factors, but also by discourse-related ones, like pronouns' referential properties or their discourse accessibility mechanism, which is defined as the level of ease or difficulty with which referents of pronouns are identified and retrieved from the discourse model. Although independent in essence, these structural and discourse factors can in some cases interact in a way that affects sentence processing. Moreover, both types of factors appear to be strongly related to memory. The data also support the idea that, from early on, children are sensitive to the same factors that affect adults' sentence processing, and that the processing strategies of both populations are qualitatively similar. In sum, this thesis suggests that a comprehensive theory of human sentence processing needs to account for effects that are due to both structural and discourse-related factors, which operate as a function of memory capacity.}, language = {en} } @phdthesis{Engelmann2016, author = {Engelmann, Felix}, title = {Toward an integrated model of sentence processing in reading}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100864}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 143}, year = {2016}, abstract = {In experiments investigating sentence processing, eye movement measures such as fixation durations and regression proportions while reading are commonly used to draw conclusions about processing difficulties. However, these measures are the result of an interaction of multiple cognitive levels and processing strategies and thus are only indirect indicators of processing difficulty. In order to properly interpret an eye movement response, one has to understand the underlying principles of adaptive processing such as trade-off mechanisms between reading speed and depth of comprehension that interact with task demands and individual differences. Therefore, it is necessary to establish explicit models of the respective mechanisms as well as their causal relationship with observable behavior. There are models of lexical processing and eye movement control on the one side and models on sentence parsing and memory processes on the other. However, no model so far combines both sides with explicitly defined linking assumptions. In this thesis, a model is developed that integrates oculomotor control with a parsing mechanism and a theory of cue-based memory retrieval. On the basis of previous empirical findings and independently motivated principles, adaptive, resource-preserving mechanisms of underspecification are proposed both on the level of memory access and on the level of syntactic parsing. The thesis first investigates the model of cue-based retrieval in sentence comprehension of Lewis \& Vasishth (2005) with a comprehensive literature review and computational modeling of retrieval interference in dependency processing. The results reveal a great variability in the data that is not explained by the theory. Therefore, two principles, 'distractor prominence' and 'cue confusion', are proposed as an extension to the theory, thus providing a more adequate description of systematic variance in empirical results as a consequence of experimental design, linguistic environment, and individual differences. In the remainder of the thesis, four interfaces between parsing and eye movement control are defined: Time Out, Reanalysis, Underspecification, and Subvocalization. By comparing computationally derived predictions with experimental results from the literature, it is investigated to what extent these four interfaces constitute an appropriate elementary set of assumptions for explaining specific eye movement patterns during sentence processing. Through simulations, it is shown how this system of in itself simple assumptions results in predictions of complex, adaptive behavior. In conclusion, it is argued that, on all levels, the sentence comprehension mechanism seeks a balance between necessary processing effort and reading speed on the basis of experience, task demands, and resource limitations. Theories of linguistic processing therefore need to be explicitly defined and implemented, in particular with respect to linking assumptions between observable behavior and underlying cognitive processes. The comprehensive model developed here integrates multiple levels of sentence processing that hitherto have only been studied in isolation. The model is made publicly available as an expandable framework for future studies of the interactions between parsing, memory access, and eye movement control.}, language = {en} }