@phdthesis{Wettstein2015, author = {Wettstein, Christoph}, title = {Cytochrome c-DNA and cytochrome c-enzyme interactions for the construction of analytical signal chains}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78367}, school = {Universit{\"a}t Potsdam}, pages = {120}, year = {2015}, abstract = {Electron transfer (ET) reactions play a crucial role in the metabolic pathways of all organisms. In biotechnological approaches, the redox properties of the protein cytochrome c (cyt c), which acts as an electron shuttle in the respiratory chain, were utilized to engineer ET chains on electrode surfaces. With the help of the biopolymer DNA, the redox protein assembles into electroactive multilayer (ML) systems, providing a biocompatible matrix for the entrapment of proteins. In this study the characteristics of the cyt c and DNA interaction were defined on the molecular level for the first time and the binding sites of DNA on cyt c were identified. Persistent cyt c/DNA complexes were formed in solution under the assembly conditions of ML architectures, i.e. pH 5.0 and low ionic strength. At pH 7.0, no agglomerates were formed, permitting the characterization of the complexes by NMR spectroscopy. Using transverse relaxation-optimized spectroscopy (TROSY)-heteronuclear single quantum coherence (HSQC) experiments, DNA's binding sites on the protein were identified. In particular, negatively charged AA residues, which are known interaction sites in cyt c/protein binding were identified as the main contact points of cyt c and DNA. Moreover, the sophisticated task of arranging proteins on electrode surfaces to create functional ET chains was addressed. Therefore, two different enzyme types, the flavin dependent fructose dehydrogenase (FDH) and the pyrroloquinoline quinone dependent glucose dehydrogenase (PQQ-GDH), were tested as reaction partners of freely diffusing cyt c and cyt c immobilized on electrodes in mono- and MLs. 
The characterisation of the ET processes was performed by means of electrochemistry and the protein deposition was monitored by microgravimetric measurements. FDH and PQQ-GDH were found to be generally suitable for combination with the cyt c/DNA ML system, since both enzymes interact with cyt c in solution and in the immobilized state. The immobilization of FDH and cyt c was achieved with the enzyme on top of a cyt c monolayer electrode without the help of a polyelectrolyte. Combining FDH with the cyt c/DNA ML system did not succeed, yet. However, the basic conditions for this protein-protein interaction were defined. PQQ-GDH was successfully coupled with the ML system, demonstrating that the cyt c/DNA ML system provides a suitable interface for enzymes and that the creation of signal chains, based on the idea of co-immobilized proteins is feasible. Future work may be directed to the investigation of cyt c/DNA interaction under the precise conditions of ML assembly. Therefore, solid state NMR or X-ray crystallography may be required. Based on the results of this study, the combination of FDH with the ML system should be addressed. Moreover, alternative types of enzymes may be tested as catalytic component of the ML assembly, aiming at the development of innovative biosensor applications.}, language = {en} } @phdthesis{Westbury2018, author = {Westbury, Michael V.}, title = {Unraveling evolution through Next Generation Sequencing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409981}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2018}, abstract = {The sequencing of the human genome in the early 2000s led to an increased interest in cheap and fast sequencing technologies. This interest culminated in the advent of next generation sequencing (NGS). A number of different NGS platforms have arisen since then all promising to do the same thing, i.e. 
produce large amounts of genetic information for relatively low costs compared to more traditional methods such as Sanger sequencing. The capabilities of NGS meant that researchers were no longer bound to species for which a lot of previous work had already been done (e.g. model organisms and humans) enabling a shift in research towards more novel and diverse species of interest. This capability has greatly benefitted many fields within the biological sciences, one of which being the field of evolutionary biology. Researchers have begun to move away from the study of laboratory model organisms to wild, natural populations and species which has greatly expanded our knowledge of evolution. NGS boasts a number of benefits over more traditional sequencing approaches. The main benefit comes from the capability to generate information for drastically more loci for a fraction of the cost. This is hugely beneficial to the study of wild animals as, even when large numbers of individuals are unobtainable, the amount of data produced still allows for accurate, reliable population and species level results from a small selection of individuals. The use of NGS to study species for which little to no previous research has been carried out on and the production of novel evolutionary information and reference datasets for the greater scientific community were the focuses of this thesis. Two studies in this thesis focused on producing novel mitochondrial genomes from shotgun sequencing data through iterative mapping, bypassing the need for a close relative to serve as a reference sequence. These mitochondrial genomes were then used to infer species level relationships through phylogenetic analyses. The first of these studies involved reconstructing a complete mitochondrial genome of the bat eared fox (Otocyon megalotis). 
Phylogenetic analyses of the mitochondrial genome confidently placed the bat eared fox as sister to the clade consisting of the raccoon dog and true foxes within the canidae family. The next study also involved reconstructing a mitochondrial genome but in this case from the extinct Macrauchenia of South America. As this study utilised ancient DNA, it involved a lot of parameter testing, quality controls and strict thresholds to obtain a near complete mitochondrial genome devoid of contamination known to plague ancient DNA studies. Phylogenetic analyses confidently placed Macrauchenia as sister to all living representatives of Perissodactyla with a divergence time of ~66 million years ago. The third and final study of this thesis involved de novo assemblies of both nuclear and mitochondrial genomes from brown and striped hyena and focussed on demographic, genetic diversity and population genomic analyses within the brown hyena. Previous studies of the brown hyena hinted at very low levels of genomic diversity and, perhaps due to this, were unable to find any notable population structure across its range. By incorporating a large number of genetic loci, in the form of complete nuclear genomes, population structure within the brown hyena was uncovered. On top of this, genomic diversity levels were compared to a number of other species. Results showed the brown hyena to have the lowest genomic diversity out of all species included in the study which was perhaps caused by a continuous and ongoing decline in effective population size that started about one million years ago and dramatically accelerated towards the end of the Pleistocene. The studies within this thesis show the power NGS sequencing has and its utility within evolutionary biology. 
The most notable capabilities outlined in this thesis involve the study of species for which no reference data is available and in the production of large amounts of data, providing evolutionary answers at the species and population level that data produced using more traditional techniques simply could not.}, language = {en} } @phdthesis{Werth2010, author = {Werth, Susanna}, title = {Calibration of the global hydrological model WGHM with water mass variations from GRACE gravity data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41738}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Since the start-up of the GRACE (Gravity Recovery And Climate Experiment) mission in 2002 time dependent global maps of the Earth's gravity field are available to study geophysical and climatologically-driven mass redistributions on the Earth's surface. In particular, GRACE observations of total water storage changes (TWSV) provide a comprehensive data set for analysing the water cycle on large scales. Therefore they are invaluable for validation and calibration of large-scale hydrological models as the WaterGAP Global Hydrology Model (WGHM) which simulates the continental water cycle including its most important components, such as soil, snow, canopy, surface- and groundwater. Hitherto, WGHM exhibits significant differences to GRACE, especially for the seasonal amplitude of TWSV. The need for a validation of hydrological models is further highlighted by large differences between several global models, e.g. WGHM, the Global Land Data Assimilation System (GLDAS) and the Land Dynamics model (LaD). For this purpose, GRACE links geodetic and hydrological research aspects. This link demands the development of adequate data integration methods on both sides, forming the main objectives of this work. 
They include the derivation of accurate GRACE-based water storage changes, the development of strategies to integrate GRACE data into a global hydrological model as well as a calibration method, followed by the re-calibration of WGHM in order to analyse process and model responses. To achieve these aims, GRACE filter tools for the derivation of regionally averaged TWSV were evaluated for specific river basins. Here, a decorrelation filter using GRACE orbits for its design is most efficient among the tested methods. Consistency in data and equal spatial resolution between observed and simulated TWSV were realised by the inclusion of all most important hydrological processes and an equal filtering of both data sets. Appropriate calibration parameters were derived by a WGHM sensitivity analysis against TWSV. Finally, a multi-objective calibration framework was developed to constrain model predictions by both river discharge and GRACE TWSV, realised with a respective evolutionary method, the ε-Non-dominated-Sorting-Genetic-Algorithm-II (ε-NSGAII). Model calibration was done for the 28 largest river basins worldwide and for most of them improved simulation results were achieved with regard to both objectives. From the multi-objective approach more reliable and consistent simulations of TWSV within the continental water cycle were gained and possible model structure errors or mis-modelled processes for specific river basins detected. For tropical regions as such, the seasonal amplitude of water mass variations has increased. The findings lead to an improved understanding of hydrological processes and their representation in the global model. Finally, the robustness of the results is analysed with respect to GRACE and runoff measurement errors. As a main conclusion obtained from the results, not only soil water and snow storage but also groundwater and surface water storage have to be included in the comparison of the modelled and GRACE-derived total water budget data. 
Regarding model calibration, the regional varying distribution of parameter sensitivity suggests to tune only parameter of important processes within each region. Furthermore, observations of single storage components beside runoff are necessary to improve signal amplitudes and timing of simulated TWSV as well as to evaluate them with higher accuracy. The results of this work highlight the valuable nature of GRACE data when merged into large-scale hydrological modelling and depict methods to improve large-scale hydrological models.}, language = {en} } @phdthesis{Werhahn2023, author = {Werhahn, Maria}, title = {Simulating galaxy evolution with cosmic rays: the multi-frequency view}, doi = {10.25932/publishup-57285}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-572851}, school = {Universit{\"a}t Potsdam}, pages = {5, 220}, year = {2023}, abstract = {Cosmic rays (CRs) constitute an important component of the interstellar medium (ISM) of galaxies and are thought to play an essential role in governing their evolution. In particular, they are able to impact the dynamics of a galaxy by driving galactic outflows or heating the ISM and thereby affecting the efficiency of star-formation. Hence, in order to understand galaxy formation and evolution, we need to accurately model this non-thermal constituent of the ISM. But except in our local environment within the Milky Way, we do not have the ability to measure CRs directly in other galaxies. However, there are many ways to indirectly observe CRs via the radiation they emit due to their interaction with magnetic and interstellar radiation fields as well as with the ISM. In this work, I develop a numerical framework to calculate the spectral distribution of CRs in simulations of isolated galaxies where a steady-state between injection and cooling is assumed. 
Furthermore, I calculate the non-thermal emission processes arising from the modelled CR proton and electron spectra ranging from radio wavelengths up to the very high-energy gamma-ray regime. I apply this code to a number of high-resolution magneto-hydrodynamical (MHD) simulations of isolated galaxies, where CRs are included. This allows me to study their CR spectra and compare them to observations of the CR proton and electron spectra by the Voyager-1 satellite and the AMS-02 instrument in order to reveal the origin of the measured spectral features. Furthermore, I provide detailed emission maps, luminosities and spectra of the non-thermal emission from our simulated galaxies that range from dwarfs to Milky Way analogues to starburst galaxies at different evolutionary stages. I successfully reproduce the observed relations between the radio and gamma-ray luminosities with the far-infrared (FIR) emission of star-forming (SF) galaxies, respectively, where the latter is a good tracer of the star-formation rate. I find that highly SF galaxies are close to the limit where their CR population would lose all of their energy due to the emission of radiation, whereas CRs tend to escape low SF galaxies more quickly. On top of that, I investigate the properties of CR transport that are needed in order to match the observed gamma-ray spectra. Furthermore, I uncover the underlying processes that enable the FIR-radio correlation (FRC) to be maintained even in starburst galaxies and find that thermal free-free emission naturally explains the observed radio spectra in SF galaxies like M82 and NGC 253 thus solving the riddle of flat radio spectra that have been proposed to contradict the observed tight FRC. 
Lastly, I scrutinise the steady-state modelling of the CR proton component by investigating for the first time the influence of spectrally resolved CR transport in MHD simulations on the hadronic gamma-ray emission of SF galaxies revealing new insights into the observational signatures of CR transport both spectrally and spatially.}, language = {en} } @phdthesis{Wendi2018, author = {Wendi, Dadiyorto}, title = {Recurrence Plots and Quantification Analysis of Flood Runoff Dynamics}, doi = {10.25932/publishup-43191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431915}, school = {Universit{\"a}t Potsdam}, pages = {114}, year = {2018}, abstract = {This paper introduces a novel measure to assess similarity between event hydrographs. It is based on Cross Recurrence Plots and Recurrence Quantification Analysis which have recently gained attention in a range of disciplines when dealing with complex systems. The method attempts to quantify the event runoff dynamics and is based on the time delay embedded phase space representation of discharge hydrographs. A phase space trajectory is reconstructed from the event hydrograph, and pairs of hydrographs are compared to each other based on the distance of their phase space trajectories. Time delay embedding allows considering the multi-dimensional relationships between different points in time within the event. Hence, the temporal succession of discharge values is taken into account, such as the impact of the initial conditions on the runoff event. We provide an introduction to Cross Recurrence Plots and discuss their parameterization. An application example based on flood time series demonstrates how the method can be used to measure the similarity or dissimilarity of events, and how it can be used to detect events with rare runoff dynamics. 
It is argued that this method provides a more comprehensive approach to quantify hydrograph similarity compared to conventional hydrological signatures.}, language = {en} } @phdthesis{Wen2020, author = {Wen, Xi}, title = {Distribution patterns and environmental drivers of methane-cycling microorganisms in natural environments and restored wetlands}, doi = {10.25932/publishup-47177}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471770}, school = {Universit{\"a}t Potsdam}, pages = {VIII, iii, 152}, year = {2020}, abstract = {Methane is an important greenhouse gas contributing to global climate change. Natural environments and restored wetlands contribute a large proportion to the global methane budget. Methanogenic archaea (methanogens) and methane oxidizing bacteria (methanotrophs), the biogenic producers and consumers of methane, play key roles in the methane cycle in those environments. A large number of studies revealed the distribution, diversity and composition of these microorganisms in individual habitats. However, uncertainties exist in predicting the response and feedback of methane-cycling microorganisms to future climate changes and related environmental changes due to the limited spatial scales considered so far, and due to a poor recognition of the biogeography of these important microorganisms combining global and local scales. With the aim of improving our understanding about whether and how methane-cycling microbial communities will be affected by a series of dynamic environmental factors in response to climate change, this PhD thesis investigates the biogeographic patterns of methane-cycling communities, and the driving factors which define these patterns at different spatial scales. 
At the global scale, a meta-analysis was performed by implementing 94 globally distributed public datasets together with environmental data from various natural environments including soils, lake sediments, estuaries, marine sediments, hydrothermal sediments and mud volcanos. In combination with a global biogeographic map of methanogenic archaea from multiple natural environments, this thesis revealed that biogeographic patterns of methanogens exist. The terrestrial habitats showed higher alpha diversities than marine environments. Methanoculleus and Methanosaeta (Methanothrix) are the most frequently detected taxa in marine habitats, while Methanoregula prevails in terrestrial habitats. Estuary ecosystems, the transition zones between marine and terrestrial/limnic ecosystems, have the highest methanogenic richness but comparably low methane emission rates. At the local scale, this study compared two rewetted fens with known high methane emissions in northeastern Germany, a coastal brackish fen (H{\"u}telmoor) and a freshwater riparian fen (Polder Zarnekow). Consistent with different geochemical conditions and land-use history, the two rewetted fens exhibit dissimilar methanogenic and, especially, methanotrophic community compositions. The methanotrophic community was generally under-represented among the prokaryotic communities and both fens show similarly low ratios of methanotrophic to methanogenic abundances. Since few studies have characterized methane-cycling microorganisms in rewetted fens, this study provides first evidence that the rapid and well re-established methanogenic community in combination with the low and incomplete re-establishment of the methanotrophic community after rewetting contributes to elevated sustained methane fluxes following rewetting. Finally, this thesis demonstrates that dispersal limitation only slightly regulates the biogeographic distribution patterns of methanogenic microorganisms in natural environments and restored wetlands. 
Instead, their existence, adaption and establishment are more associated with the selective pressures under different environmental conditions. Salinity, pH and temperature are identified as the most important factors in shaping microbial community structure at different spatial scales (global versus terrestrial environments). Predicted changes in climate, such as increasing temperature, changes in precipitation patterns and increasing frequency of flooding events, are likely to induce a series of environmental alterations, which will either directly or indirectly affect the driving environmental forces of methanogenic communities, leading to changes in their community composition and thus potentially also in methane emission patterns in the future.}, language = {en} } @phdthesis{Welsch2022, author = {Welsch, Maryna}, title = {Investigation of the stress tolerance regulatory network integration of the NAC transcription factor JUNGBRUNNEN1 (JUB1)}, doi = {10.25932/publishup-54731}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-547310}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 116}, year = {2022}, abstract = {The NAC transcription factor (TF) JUNGBRUNNEN1 (JUB1) is an important negative regulator of plant senescence, as well as of gibberellic acid (GA) and brassinosteroid (BR) biosynthesis in Arabidopsis thaliana. Overexpression of JUB1 promotes longevity and enhances tolerance to drought and other abiotic stresses. A similar role of JUB1 has been observed in other plant species, including tomato and banana. Our data show that JUB1 overexpressors (JUB1-OXs) accumulate higher levels of proline than WT plants under control conditions, during the onset of drought stress, and thereafter. We identified that overexpression of JUB1 induces key proline biosynthesis and suppresses key proline degradation genes. 
Furthermore, bZIP63, the transcription factor involved in proline metabolism, was identified as a novel downstream target of JUB1 by Yeast One-Hybrid (Y1H) analysis and Chromatin immunoprecipitation (ChIP). However, based on Electrophoretic Mobility Shift Assay (EMSA), direct binding of JUB1 to bZIP63 could not be confirmed. Our data indicate that JUB1-OX plants exhibit reduced stomatal conductance under control conditions. However, selective overexpression of JUB1 in guard cells did not improve drought stress tolerance in Arabidopsis. Moreover, the drought-tolerant phenotype of JUB1 overexpressors does not solely depend on the transcriptional control of the DREB2A gene. Thus, our data suggest that JUB1 confers tolerance to drought stress by regulating multiple components. Until today, none of the previous studies on JUB1's regulatory network focused on identifying protein-protein interactions. We, therefore, performed a yeast two-hybrid screen (Y2H) which identified several protein interactors of JUB1, two of which are the calcium-binding proteins CaM1 and CaM4. Both proteins interact with JUB1 in the nucleus of Arabidopsis protoplasts. Moreover, JUB1 is expressed with CaM1 and CaM4 under the same conditions. Since CaM1.1 and CaM4.1 encode proteins with identical amino acid sequences, all further experiments were performed with constructs involving the CaM4 coding sequence. Our data show that JUB1 harbors multiple CaM-binding sites, which are localized in both the N-terminal and C-terminal regions of the protein. One of the CaM-binding sites, localized in the DNA-binding domain of JUB1, was identified as a functional CaM-binding site since its mutation strongly reduced the binding of CaM4 to JUB1. Furthermore, JUB1 transactivates expression of the stress-related gene DREB2A in mesophyll cells; this effect is significantly reduced when the calcium-binding protein CaM4 is expressed as well. 
Overexpression of both genes in Arabidopsis results in early senescence observed through lower chlorophyll content and an enhanced expression of senescence-associated genes (SAGs) when compared with single JUB1 overexpressors. Our data also show that JUB1 and CaM4 proteins interact in senescent leaves, which have increased Ca2+ levels when compared to young leaves. Collectively, our data indicate that JUB1 activity towards its downstream targets is fine-tuned by calcium-binding proteins during leaf senescence.}, language = {en} } @phdthesis{Wellmann2023, author = {Wellmann, Caroline}, title = {Early sensitivity to prosodic phrase boundary cues: Behavioral evidence from German-learning infants}, doi = {10.25932/publishup-57393}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-573937}, school = {Universit{\"a}t Potsdam}, pages = {xii, 136}, year = {2023}, abstract = {This dissertation seeks to shed light on the relation of phrasal prosody and developmental speech perception in German-learning infants. Three independent empirical studies explore the role of acoustic correlates of major prosodic boundaries, specifically pitch change, final lengthening, and pause, in infant boundary perception. Moreover, it was examined whether the sensitivity to prosodic phrase boundary markings changes during the first year of life as a result of perceptual attunement to the ambient language (Aslin \& Pisoni, 1980). Using the headturn preference procedure six- and eight-month-old monolingual German-learning infants were tested on their discrimination of two different prosodic groupings of the same list of coordinated names either with or without an internal IPB after the second name, that is, [Moni und Lilli] [und Manu] or [Moni und Lilli und Manu]. The boundary marking was systematically varied with respect to single prosodic cues or specific cue combinations. 
Results revealed that six- and eight-month-old German-learning infants successfully detect the internal prosodic boundary when it is signaled by all the three main boundary cues pitch change, final lengthening, and pause. For eight-, but not for six-month-olds, the combination of pitch change and final lengthening, without the occurrence of a pause, is sufficient. This mirrors an adult-like perception by eight-months (Holzgrefe-Lang et al., 2016). Six-month-olds detect a prosodic phrase boundary signaled by final lengthening and pause. The findings suggest a developmental change in German prosodic boundary cue perception from a strong reliance on the pause cue at six months to a differentiated sensitivity to the more subtle cues pitch change and final lengthening at eight months. Neither for six- nor for eight-month-olds the occurrence of pitch change or final lengthening as single cues is sufficient, similar to what has been observed for adult speakers of German (Holzgrefe-Lang et al., 2016). The present dissertation provides new scientific knowledge on infants' sensitivity to individual prosodic phrase boundary cues in the first year of life. Methodologically, the studies are pathbreaking since they used exactly the same stimulus materials - phonologically thoroughly controlled lists of names - that have also been used with adults (Holzgrefe-Lang et al., 2016) and with infants in a neurophysiological paradigm (Holzgrefe-Lang, Wellmann, H{\"o}hle, \& Wartenburger, 2018), allowing for comparisons across age (six/ eight months and adults) and method (behavioral vs. neurophysiological methods). Moreover, materials are suited to be transferred to other languages allowing for a crosslinguistic comparison. 
Taken together with a study with similar French materials (van Ommen et al., 2020) the observed change in sensitivity in German-learning infants can be interpreted as a language-specific one, from an initial language-general processing mechanism that primarily focuses on the presence of pauses to a language-specific processing that takes into account prosodic properties available in the ambient language. The developmental pattern is discussed as an interplay of acoustic salience, prosodic typology (prosodic regularity) and cue reliability.}, language = {en} } @phdthesis{Weisshuhn2020, author = {Weißhuhn, Peter}, title = {Assessing biotope vulnerability to landscape changes}, doi = {10.25932/publishup-44277}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442777}, school = {Universit{\"a}t Potsdam}, pages = {v, 134}, year = {2020}, abstract = {Largescale patterns of global land use change are very frequently accompanied by natural habitat loss. To assess the consequences of habitat loss for the remaining natural and semi-natural biotopes, inclusion of cumulative effects at the landscape level is required. The interdisciplinary concept of vulnerability constitutes an appropriate assessment framework at the landscape level, though with few examples of its application for ecological assessments. A comprehensive biotope vulnerability analysis allows identification of areas most affected by landscape change and at the same time with the lowest chances of regeneration. To this end, a series of ecological indicators were reviewed and developed. They measured spatial attributes of individual biotopes as well as some ecological and conservation characteristics of the respective resident species community. The final vulnerability index combined seven largely independent indicators, which covered exposure, sensitivity and adaptive capacity of biotopes to landscape changes. Results for biotope vulnerability were provided at the regional level. 
This seems to be an appropriate extent with relevance for spatial planning and designing the distribution of nature reserves. Using the vulnerability scores calculated for the German federal state of Brandenburg, hot spots and clusters within and across the distinguished types of biotopes were analysed. Biotope types with high dependence on water availability, as well as biotopes of the open landscape containing woody plants (e.g., orchard meadows) are particularly vulnerable to landscape changes. In contrast, the majority of forest biotopes appear to be less vulnerable. Despite the appeal of such generalised statements for some biotope types, the distribution of values suggests that conservation measures for the majority of biotopes should be designed specifically for individual sites. Taken together, size, shape and spatial context of individual biotopes often had a dominant influence on the vulnerability score. The implementation of biotope vulnerability analysis at the regional level indicated that large biotope datasets can be evaluated with high level of detail using geoinformatics. Drawing on previous work in landscape spatial analysis, the reproducible approach relies on transparent calculations of quantitative and qualitative indicators. At the same time, it provides a synoptic overview and information on the individual biotopes. It is expected to be most useful for nature conservation in combination with an understanding of population, species, and community attributes known for specific sites. The biotope vulnerability analysis facilitates a foresighted assessment of different land uses, aiding in identifying options to slow habitat loss to sustainable levels. It can also be incorporated into planning of restoration measures, guiding efforts to remedy ecological damage. Restoration of any specific site could yield synergies with the conservation objectives of other sites, through enhancing the habitat network or buffering against future landscape change. 
Biotope vulnerability analysis could be developed in line with other important ecological concepts, such as resilience and adaptability, further extending the broad thematic scope of the vulnerability concept. Vulnerability can increasingly serve as a common framework for the interdisciplinary research necessary to solve major societal challenges.}, language = {en} } @phdthesis{Weissenberger2017, author = {Weißenberger, Martin}, title = {Start-up subsidies for the unemployed - New evaluation approaches and insights}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406362}, school = {Universit{\"a}t Potsdam}, pages = {viii, 239}, year = {2017}, abstract = {Start-up incentives targeted at unemployed individuals have become an important tool of the Active Labor Market Policy (ALMP) to fight unemployment in many countries in recent years. In contrast to traditional ALMP instruments like training measures, wage subsidies, or job creation schemes, which are aimed at reintegrating unemployed individuals into dependent employment, start-up incentives are a fundamentally different approach to ALMP, in that they intend to encourage and help unemployed individuals to exit unemployment by entering self-employment and, thus, by creating their own jobs. In this sense, start-up incentives for unemployed individuals serve not only as employment and social policy to activate job seekers and combat unemployment but also as business policy to promote entrepreneurship. The corresponding empirical literature on this topic so far has been mainly focused on the individual labor market perspective, however. The main part of the thesis at hand examines the new start-up subsidy ("Gr{\"u}ndungszuschuss") in Germany and consists of four empirical analyses that extend the existing evidence on start-up incentives for unemployed individuals from multiple perspectives and in the following directions: First, it provides the first impact evaluation of the new start-up subsidy in Germany. 
The results indicate that participation in the new start-up subsidy has significant positive and persistent effects on both reintegration into the labor market as well as the income profiles of participants, in line with previous evidence on comparable German and international programs, which emphasizes the general potential of start-up incentives as part of the broader ALMP toolset. Furthermore, a new innovative sensitivity analysis of the applied propensity score matching approach integrates findings from entrepreneurship and labor market research about the key role of an individual's personality on start-up decision, business performance, as well as general labor market outcomes, into the impact evaluation of start-up incentives. The sensitivity analysis with regard to the inclusion and exclusion of usually unobserved personality variables reveals that differences in the estimated treatment effects are small in magnitude and mostly insignificant. Consequently, concerns about potential overestimation of treatment effects in previous evaluation studies of similar start-up incentives due to usually unobservable personality variables are less justified, as long as the set of observed control variables is sufficiently informative (Chapter 2). Second, the thesis expands our knowledge about the longer-term business performance and potential of subsidized businesses arising from the start-up subsidy program. In absolute terms, the analysis shows that a relatively high share of subsidized founders successfully survives in the market with their original businesses in the medium to long run. The subsidy also yields a "double dividend" to a certain extent in terms of additional job creation. 
Compared to "regular", i.e., non-subsidized new businesses founded by non-unemployed individuals in the same quarter, however, the economic and growth-related impulses set by participants of the subsidy program are only limited with regard to employment growth, innovation activity, or investment. Further investigations of possible reasons for these differences show that differential business growth paths of subsidized founders in the longer run seem to be mainly limited by higher restrictions to access capital and by unobserved factors, such as less growth-oriented business strategies and intentions, as well as lower (subjective) entrepreneurial persistence. Taken together, the program has only limited potential as a business and entrepreneurship policy intended to induce innovation and economic growth (Chapters 3 and 4). And third, an empirical analysis on the level of German regional labor markets yields that there is a high regional variation in subsidized start-up activity relative to overall new business formation. The positive correlation between regular start-up intensity and the share among all unemployed individuals who participate in the start-up subsidy program suggests that (nascent) unemployed founders also profit from the beneficial effects of regional entrepreneurship capital. Moreover, the analysis of potential deadweight and displacement effects from an aggregated regional perspective emphasizes that the start-up subsidy for unemployed individuals represents a market intervention into existing markets, which affects incumbents and potentially produces inefficiencies and market distortions. 
This macro perspective deserves more attention and research in the future (Chapter 5).}, language = {en} } @phdthesis{Weiss2018, author = {Wei{\ss}, Katharina}, title = {Three Essays on EFRAG}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415355}, school = {Universit{\"a}t Potsdam}, pages = {II, 180}, year = {2018}, abstract = {This cumulative doctoral thesis consists of three papers that deal with the role of one specific European accounting player in the international accounting standard-setting, namely the European Financial Reporting Advisory Group (EFRAG). The first paper examines whether and how EFRAG generally fulfills its role in articulating Europe's interests toward the International Accounting Standards Board (IASB). The qualitative data from the conducted interviews reveal that EFRAG influences the IASB's decision making at a very early stage, long before other constituents are officially asked to comment on the IASB's proposals. The second paper uses quantitative data and investigates the formal participation behavior of European constituents that seek to determine EFRAG's voice. More precisely, this paper analyzes the nature of the constituents' participation in EFRAG's due process in terms of representation (constituent groups and geographical distribution) and the drivers of their participation behavior. EFRAG's official decision making process is dominated by some specific constituent groups (such as preparers and the accounting profession) and by constituents from some specific countries (e.g. those with effective enforcement regimes). The third paper investigates in a first step which of the European constituents choose which lobbying channel (participation only at IASB, only at EFRAG, or at both institutions) and unveils in a second step possible reasons for their lobbying choices. The paper comprises quantitative and qualitative data. 
It reveals that English skills, time issues, the size of the constituent, and the country of origin are factors that can explain why the majority participates only in the IASB's due process.}, language = {en} } @phdthesis{Weiss2011, author = {Weiß, Jan}, title = {Synthesis and self-assembly of multiple thermoresponsive amphiphilic block copolymers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53360}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the present thesis, the self-assembly of multi thermoresponsive block copolymers in dilute aqueous solution was investigated by a combination of turbidimetry, dynamic light scattering, TEM measurements, NMR as well as fluorescence spectroscopy. The successive conversion of such block copolymers from a hydrophilic into a hydrophobic state includes intermediate amphiphilic states with a variable hydrophilic-to-lipophilic balance. As a result, the self-organization is not following an all-or-none principle but a multistep aggregation in dilute solution was observed. The synthesis of double thermoresponsive diblock copolymers as well as triple thermoresponsive triblock copolymers was realized using twofold-TMS labeled RAFT agents which provide direct information about the average molar mass as well as residual end group functionality from a routine proton NMR spectrum. First a set of double thermosensitive diblock copolymers poly(N-n-propylacrylamide)-b-poly(N-ethylacrylamide) was synthesized which differed only in the relative size of the two blocks. Depending on the relative block lengths, different aggregation pathways were found. Furthermore, the complementary TMS-labeled end groups served as NMR-probes for the self-assembly of these diblock copolymers in dilute solution. Reversible, temperature sensitive peak splitting of the TMS-signals in NMR spectroscopy was indicative for the formation of mixed star-/flower-like micelles in some cases. 
Moreover, triple thermoresponsive triblock copolymers from poly(N-n-propylacrylamide) (A), poly(methoxydiethylene glycol acrylate) (B) and poly(N-ethylacrylamide) (C) were obtained from sequential RAFT polymerization in all possible block sequences (ABC, BAC, ACB). Their self-organization behavior in dilute aqueous solution was found to be rather complex and dependent on the positioning of the different blocks within the terpolymers. Especially the localization of the low-LCST block (A) had a large influence on the aggregation behavior. Above the first cloud point, aggregates were only observed when the A block was located at one terminus. Once placed in the middle, unimolecular micelles were observed which showed aggregation only above the second phase transition temperature of the B block. Carrier abilities of such triple thermosensitive triblock copolymers tested in fluorescence spectroscopy, using the solvatochromic dye Nile Red, suggested that the hydrophobic probe is less efficiently incorporated by the polymer with the BAC sequence as compared to ABC or ACB polymers above the first phase transition temperature. In addition, due to the problem of increasing loss of end group functionality during the subsequent polymerization steps, a novel concept for the one-step synthesis of multi thermoresponsive block copolymers was developed. This allowed to synthesize double thermoresponsive di- and triblock copolymers in a single polymerization step. The copolymerization of different N-substituted maleimides with a thermosensitive styrene derivative (4-vinylbenzyl methoxytetrakis(oxyethylene) ether) led to alternating copolymers with variable LCST. 
Consequently, an excess of this styrene-based monomer allowed the synthesis of double thermoresponsive tapered block copolymers in a single polymerization step.}, language = {en} } @phdthesis{Weidlich2011, author = {Weidlich, Matthias}, title = {Behavioural profiles : a relational approach to behaviour consistency}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55590}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business Process Management (BPM) emerged as a means to control, analyse, and optimise business operations. Conceptual models are of central importance for BPM. Most prominently, process models define the behaviour that is performed to achieve a business value. In essence, a process model is a mapping of properties of the original business process to the model, created for a purpose. Different modelling purposes, therefore, result in different models of a business process. Against this background, the misalignment of process models often observed in the field of BPM is no surprise. Even if the same business scenario is considered, models created for strategic decision making differ in content significantly from models created for process automation. Despite their differences, process models that refer to the same business process should be consistent, i.e., free of contradictions. Apparently, there is a trade-off between strictness of a notion of consistency and appropriateness of process models serving different purposes. Existing work on consistency analysis builds upon behaviour equivalences and hierarchical refinements between process models. Hence, these approaches are computationally hard and do not offer the flexibility to gradually relax consistency requirements towards a certain setting. This thesis presents a framework for the analysis of behaviour consistency that takes a fundamentally different approach. As a first step, an alignment between corresponding elements of related process models is constructed. 
Then, this thesis conducts behavioural analysis grounded on a relational abstraction of the behaviour of a process model, its behavioural profile. Different variants of these profiles are proposed, along with efficient computation techniques for a broad class of process models. Using behavioural profiles, consistency of an alignment between process models is judged by different notions and measures. The consistency measures are also adjusted to assess conformance of process logs that capture the observed execution of a process. Further, this thesis proposes various complementary techniques to support consistency management. It elaborates on how to implement consistent change propagation between process models, addresses the exploration of behavioural commonalities and differences, and proposes a model synthesis for behavioural profiles.}, language = {en} } @phdthesis{Weidenfeld2003, author = {Weidenfeld, Andrea}, title = {Interpretation of and reasoning with conditionals : probabilities, mental models, and causality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5207}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {In everyday conversation \"if\" is one of the most frequently used conjunctions. This dissertation investigates what meaning an everyday conditional transmits and what inferences it licenses. It is suggested that the nature of the relation between the two propositions in a conditional might play a major role for both questions. Thus, in the experiments reported here conditional statements that describe a causal relationship (e.g., \"If you touch that wire, you will receive an electric shock\") were compared to arbitrary conditional statements in which there is no meaningful relation between the antecedent and the consequent proposition (e.g., \"If Napoleon is dead, then Bristol is in England\"). 
Initially, central assumptions from several approaches to the meaning and the reasoning from causal conditionals will be integrated into a common model. In the model the availability of exceptional situations that have the power to generate exceptions to the rule described in the conditional (e.g., the electricity is turned off) reduces the subjective conditional probability of the consequent, given the antecedent (e.g., the probability of receiving an electric shock when touching the wire). This conditional probability determines people's degree of belief in the conditional, which in turn affects their willingness to accept valid inferences (e.g., \"Peter touches the wire, therefore he receives an electric shock\") in a reasoning task. In addition to this indirect pathway, the model contains a direct pathway: Cognitive availability of exceptional situations directly reduces the readiness to accept valid conclusions. The first experimental series tested the integrated model for conditional statements embedded in pseudo-natural cover stories that either established a causal relation between the antecedent and the consequent event (causal conditionals) or did not connect the propositions in a meaningful way (arbitrary conditionals). The model was supported for the causal, but not for the arbitrary conditional statements. Furthermore, participants assigned lower degrees of belief to arbitrary than to causal conditionals. Is this effect due to the presence versus absence of a semantic link between antecedent and consequent in the conditionals? This question was one of the starting points for the second experimental series. Here, the credibility of the conditionals was manipulated by adding explicit frequency information about possible combinations of presence or absence of antecedent and consequent events to the problems (i.e., frequencies of cases of 1. true antecedent with true consequent, 2. true antecedent with false consequent, 3. 
false antecedent with true consequent, 4. false antecedent with false consequent). This paradigm allows testing different approaches to the meaning of conditionals (Experiment 4) as well as theories of conditional reasoning against each other (Experiment 5). The results of Experiment 4 supported mainly the conditional probability approach to the meaning of conditionals (Edgington, 1995) according to which the degree of belief a listener has in a conditional statement equals the conditional probability that the consequent is true given the antecedent (e.g., the probability of receiving an electric shock when touching the wire). Participants again assigned lower degrees of belief to the arbitrary than the causal conditionals, although the conditional probability of the consequent given the antecedent was held constant within every condition of explicit frequency information. This supports the hypothesis that the mere presence of a causal link enhances the believability of a conditional statement. In Experiment 5 participants solved conditional reasoning tasks from problems that contained explicit frequency information about possible relevant cases. The data favored the probabilistic approach to conditional reasoning advanced by Oaksford, Chater, and Larkin (2000). The two experimental series reported in this dissertation provide strong support for recent probabilistic theories: for the conditional probability approach to the meaning of conditionals by Edgington (1995) and the probabilistic approach to conditional reasoning by Oaksford et al. (2000). In the domain of conditional reasoning, there was additionally support for the modified mental model approaches by Markovits and Barrouillet (2002) and Schroyens and Schaeken (2003). 
Probabilistic and mental model approaches could be reconciled within a dual-process-model as suggested by Verschueren, Schaeken, and d'Ydewalle (2003).}, subject = {Konditional}, language = {en} } @phdthesis{Wegerich2010, author = {Wegerich, Franziska}, title = {Engineered human cytochrome c : investigation of superoxide and protein-protein interaction and application in bioelectronic systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50782}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The aim of this thesis is the design, expression and purification of human cytochrome c mutants and their characterization with regard to electrochemical and structural properties as well as with respect to the reaction with the superoxide radical and the selected proteins sulfite oxidase from human and fungi bilirubin oxidase. All three interaction partners are studied here for the first time with human cyt c and with mutant forms of cyt c. A further aim is the incorporation of the different cyt c forms in two bioelectronic systems: an electrochemical superoxide biosensor with an enhanced sensitivity and a protein multilayer assembly with and without bilirubin oxidase on electrodes. The first part of the thesis is dedicated to the design, expression and characterization of the mutants. A focus is here the electrochemical characterization of the protein in solution and immobilized on electrodes. Further the reaction of these mutants with superoxide was investigated and the possible reaction mechanisms are discussed. In the second part of the work an amperometric superoxide biosensor with selected human cytochrome c mutants was constructed and the performance of the sensor electrodes was studied. The human wild-type and four of the five mutant electrodes could be applied successfully for the detection of the superoxide radical. 
In the third part of the thesis the reaction of horse heart cyt c, the human wild-type and seven human cyt c mutants with the two proteins sulfite oxidase and bilirubin oxidase was studied electrochemically and the influence of the mutations on the electron transfer reactions was discussed. Finally, protein multilayer electrodes with different cyt c forms including the mutant forms G77K and N70K which exhibit different reaction rates towards BOD were investigated and BOD together with the wild-type and engineered cyt c was embedded in the multilayer assembly. The relevant electron transfer steps and the kinetic behavior of the multilayer electrodes are investigated since the functionality of electroactive multilayer assemblies with incorporated redox proteins is often limited by the electron transfer abilities of the proteins within the multilayer. The formation via the layer-by-layer technique and the kinetic behavior of the mono- and bi-protein multilayer system are studied by SPR and cyclic voltammetry. In conclusion this thesis shows that protein engineering is a helpful instrument to study protein reactions as well as electron transfer mechanisms of complex bioelectronic systems (such as bi-protein multilayers). Furthermore, the possibility to design tailored recognition elements for the construction of biosensors with an improved performance is demonstrated.}, language = {en} } @phdthesis{WegerCoenen2021, author = {Weger Coenen, Lindsey}, title = {Exploring potential impacts from transitions in German and European energy on GHG and air pollutant emissions and on ozone air quality}, doi = {10.25932/publishup-49698}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-496986}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 161}, year = {2021}, abstract = {Energy is at the heart of the climate crisis—but also at the heart of any efforts for climate change mitigation. 
Energy consumption is namely responsible for approximately three quarters of global anthropogenic greenhouse gas (GHG) emissions. Therefore, central to any serious plans to stave off a climate catastrophe is a major transformation of the world's energy system, which would move society away from fossil fuels and towards a net-zero energy future. Considering that fossil fuels are also a major source of air pollutant emissions, the energy transition has important implications for air quality as well, and thus also for human and environmental health. Both Europe and Germany have set the goal of becoming GHG neutral by 2050, and moreover have demonstrated their deep commitment to a comprehensive energy transition. Two of the most significant developments in energy policy over the past decade have been the interest in expansion of shale gas and hydrogen, which accordingly have garnered great interest and debate among public, private and political actors. In this context, sound scientific information can play an important role by informing stakeholder dialogue and future research investments, and by supporting evidence-based decision-making. This thesis examines anticipated environmental impacts from possible, relevant changes in the European energy system, in order to impart valuable insight and fill critical gaps in knowledge. Specifically, it investigates possible future shale gas development in Germany and the United Kingdom (UK), as well as a hypothetical, complete transition to hydrogen mobility in Germany. Moreover, it assesses the impacts on GHG and air pollutant emissions, and on tropospheric ozone (O3) air quality. The analysis is facilitated by constructing emission scenarios and performing air quality modeling via the Weather Research and Forecasting model coupled with chemistry (WRF-Chem). The work of this thesis is presented in three research papers. 
The first paper finds that methane (CH4) leakage rates from upstream shale gas development in Germany and the UK would range between 0.35\% and 1.36\% in a realistic, business-as-usual case, while they would be significantly lower - between 0.08\% and 0.15\% - in an optimistic, strict regulation and high compliance case, thus demonstrating the value and potential of measures to substantially reduce emissions. Yet, while the optimistic case is technically feasible, it is unlikely that the practices and technologies assumed would be applied and accomplished on a systematic, regular basis, owing to economics and limited monitoring resources. The realistic CH4 leakage rates estimated in this study are comparable to values reported by studies carried out in the US and elsewhere. In contrast, the optimistic rates are similar to official CH4 leakage data from upstream gas production in Germany and in the UK. Considering that there is a lack of systematic, transparent and independent reports supporting the official values, this study further highlights the need for more research efforts in this direction. Compared with national energy sector emissions, this study suggests that shale gas emissions of volatile organic compounds (VOCs) could be significant, though relatively insignificant for other air pollutants. Similar to CH4, measures could be effective for reducing VOCs emissions. The second paper shows that VOC and nitrogen oxides (NOx) emissions from a future shale gas industry in Germany and the UK have potentially harmful consequences for European O3 air quality on both the local and regional scale. The results indicate a peak increase in maximum daily 8-hour average O3 (MDA8) ranging from 3.7 µg m-3 to 28.3 µg m-3. 
Findings suggest that shale gas activities could result in additional exceedances of MDA8 at a substantial percentage of regulatory measurement stations both locally and in neighboring and distant countries, with up to circa one third of stations in the UK and one fifth of stations in Germany experiencing additional exceedances. Moreover, the results reveal that the shale gas impact on the cumulative health-related metric SOMO35 (annual Sum of Ozone Means Over 35 ppb) could be substantial, with a maximum increase of circa 28\%. Overall, the findings suggest that shale gas VOC emissions could play a critical role in O3 enhancement, while NOx emissions would contribute to a lesser extent. Thus, the results indicate that stringent regulation of VOC emissions would be important in the event of future European shale gas development to minimize deleterious health outcomes. The third paper demonstrates that a hypothetical, complete transition of the German vehicle fleet to hydrogen fuel cell technology could contribute substantially to Germany's climate and air quality goals. The results indicate that if the hydrogen were to be produced via renewable-powered water electrolysis (green hydrogen), German carbon dioxide equivalent (CO2eq) emissions would decrease by 179 MtCO2eq annually, though if electrolysis were powered by the current electricity mix, emissions would instead increase by 95 MtCO2eq annually. The findings generally reveal a notable anticipated decrease in German energy emissions of regulated air pollutants. The results suggest that vehicular hydrogen demand is 1000 PJ annually, which would require between 446 TWh and 525 TWh for electrolysis, hydrogen transport and storage. 
When only the heavy duty vehicle segment (HDVs) is shifted to green hydrogen, the results of this thesis show that vehicular hydrogen demand drops to 371 PJ, while a deep emissions cut is still realized (-57 MtCO2eq), suggesting that HDVs are a low-hanging fruit for contributing to decarbonization of the German road transport sector with hydrogen energy.}, language = {en} } @phdthesis{Weege2017, author = {Weege, Stefanie}, title = {Climatic drivers of retrogressive thaw slump activity and resulting sediment and carbon release to the nearshore zone of Herschel Island, Yukon Territory, Canada}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397947}, school = {Universit{\"a}t Potsdam}, pages = {163}, year = {2017}, abstract = {The Yukon Coast in Canada is an ice-rich permafrost coast and highly sensitive to changing environmental conditions. Retrogressive thaw slumps are a common thermoerosion feature along this coast, and develop through the thawing of exposed ice-rich permafrost on slopes and removal of accumulating debris. They contribute large amounts of sediment, including organic carbon and nitrogen, to the nearshore zone. The objective of this study was to 1) identify the climatic and geomorphological drivers of sediment-meltwater release, 2) quantify the amount of released meltwater, sediment, organic carbon and nitrogen, and 3) project the evolution of sediment-meltwater release of retrogressive thaw slumps in a changing future climate. The analysis is based on data collected over 18 days in July 2013 and 18 days in August 2012. A cut-throat flume was set up in the main sediment-meltwater channel of the largest retrogressive thaw slump on Herschel Island. In addition, two weather stations, one on top of the undisturbed tundra and one on the slump floor, measured incoming solar radiation, air temperature, wind speed and precipitation. 
The discharge volume eroding from the ice-rich permafrost and retreating snowbanks was measured and compared to the meteorological data collected in real time with a resolution of one minute. The results show that the release of sediment-meltwater from thawing of the ice-rich permafrost headwall is strongly related to snowmelt, incoming solar radiation and air temperature. Snowmelt led to seasonal differences, especially due to the additional contribution of water to the eroding sediment-meltwater from headwall ablation, which led to dilution of the sediment-meltwater composition. Incoming solar radiation and air temperature were the main drivers for diurnal and inter-diurnal fluctuations. In July (2013), the retrogressive thaw slump released about 25 000 m³ of sediment-meltwater, containing 225 kg dissolved organic carbon and 2050 t of sediment, which in turn included 33 t organic carbon, and 4 t total nitrogen. In August (2012), just 15 600 m³ of sediment-meltwater was released, since there was no additional contribution from snowmelt. However, even without the additional dilution, 281 kg dissolved organic carbon was released. The sediment concentration was twice as high as in July, with sediment contents of up to 457 g l-1 and 3058 t of sediment, including 53 t organic carbon and 5 t nitrogen, being released. In addition, the data from the 36 days of observations from Slump D were upscaled to cover the main summer season of 1 July to 31 August (62 days) and to include all 229 active retrogressive thaw slumps along the Yukon Coast. In total, all retrogressive thaw slumps along the Yukon Coast contribute a minimum of 1.4 Mio. m³ sediment-meltwater each thawing season, containing a minimum of 172 000 t sediment with 3119 t organic carbon, 327 t nitrogen and 17 t dissolved organic carbon. Therefore, in addition to the coastal erosion input to the Beaufort Sea, retrogressive thaw slumps additionally release 3 \% of sediment and 8 \% of organic carbon into the ocean. 
Finally, the future evolution of retrogressive thaw slumps under a warming scenario with summer air temperatures increasing by 2-3 °C by 2081-2100, would lead to an increase of 109-114\% in release of sediment-meltwater. It can be concluded that retrogressive thaw slumps are sensitive to climatic conditions and under projected future Arctic warming will contribute larger amounts of thawed permafrost material (including organic carbon and nitrogen) into the environment.}, language = {en} } @phdthesis{Wechakama2013, author = {Wechakama, Maneenate}, title = {Multi-messenger constraints and pressure from dark matter annihilation into electron-positron pairs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67401}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Despite striking evidence for the existence of dark matter from astrophysical observations, dark matter has still escaped any direct or indirect detection until today. Therefore a proof for its existence and the revelation of its nature belongs to one of the most intriguing challenges of nowadays cosmology and particle physics. The present work tries to investigate the nature of dark matter through indirect signatures from dark matter annihilation into electron-positron pairs in two different ways, pressure from dark matter annihilation and multi-messenger constraints on the dark matter annihilation cross-section. We focus on dark matter annihilation into electron-positron pairs and adopt a model-independent approach, where all the electrons and positrons are injected with the same initial energy E_0 ~ m_dm*c^2. The propagation of these particles is determined by solving the diffusion-loss equation, considering inverse Compton scattering, synchrotron radiation, Coulomb collisions, bremsstrahlung, and ionization. 
The first part of this work, focusing on pressure from dark matter annihilation, demonstrates that dark matter annihilation into electron-positron pairs may affect the observed rotation curve by a significant amount. The injection rate of this calculation is constrained by INTEGRAL, Fermi, and H.E.S.S. data. The pressure of the relativistic electron-positron gas is computed from the energy spectrum predicted by the diffusion-loss equation. For values of the gas density and magnetic field that are representative of the Milky Way, it is estimated that the pressure gradients are strong enough to balance gravity in the central parts if E_0 < 1 GeV. The exact value depends somewhat on the astrophysical parameters, and it changes dramatically with the slope of the dark matter density profile. For very steep slopes, as those expected from adiabatic contraction, the rotation curves of spiral galaxies would be affected on kiloparsec scales for most values of E_0. By comparing the predicted rotation curves with observations of dwarf and low surface brightness galaxies, we show that the pressure from dark matter annihilation may improve the agreement between theory and observations in some cases, but it also imposes severe constraints on the model parameters (most notably, the inner slope of the halo density profile, as well as the mass and the annihilation cross-section of dark matter particles into electron-positron pairs). In the second part, upper limits on the dark matter annihilation cross-section into electron-positron pairs are obtained by combining observed data at different wavelengths (from Haslam, WMAP, and Fermi all-sky intensity maps) with recent measurements of the electron and positron spectra in the solar neighbourhood by PAMELA, Fermi, and H.E.S.S.. We consider synchrotron emission in the radio and microwave bands, as well as inverse Compton scattering and final-state radiation at gamma-ray energies. 
For most values of the model parameters, the tightest constraints are imposed by the local positron spectrum and synchrotron emission from the central regions of the Galaxy. According to our results, the annihilation cross-section should not be higher than the canonical value for a thermal relic if the mass of the dark matter candidate is smaller than a few GeV. In addition, we also derive a stringent upper limit on the inner logarithmic slope α of the density profile of the Milky Way dark matter halo (α < 1 if m_dm < 5 GeV, α < 1.3 if m_dm < 100 GeV and α < 1.5 if m_dm < 2 TeV) assuming a dark matter annihilation cross-section into electron-positron pairs (σv) = 3*10^-26 cm^3 s^-1, as predicted for thermal relics from the big bang.}, language = {en} } @phdthesis{Weber2004, author = {Weber, Michael H.}, title = {Robotic telescopes \& Doppler imaging : measuring differential rotation on long-period active stars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001834}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Auf der Sonne sind viele Ph{\"a}nomene zu sehen die mit der solaren magnetischen Aktivit{\"a}t zusammenh{\"a}ngen. Das daf{\"u}r zust{\"a}ndige Magnetfeld wird durch einen Dynamo erzeugt, der sich vermutlich am Boden der Konvektionszone in der sogenannten Tachocline befindet. Angetrieben wird der Dynamo teils von der differenziellen Rotation, teils von den magnetischen Turbulenzen in der Konvektionszone. Die differentielle Rotation kann an der Sonnenoberfl{\"a}che durch beobachten der Sonnenfleckbewegungen gemessen werden.Um einen gr{\"o}ßeren Parameterraum zum Testen von Dynamotheorien zu erhalten, kann man diese Messungen auch auf andere Sterne ausdehnen. Das prim{\"a}re Problem dabei ist, dass die Oberfl{\"a}chen von Sternen nicht direkt beobachtet werden k{\"o}nnen. 
Indirekt kann man dies jedoch mit Hilfe der Doppler-imaging Methode erreichen, die die Doppler-Verbreitung der Spektrallinien von schnell rotierenden Sternen ben{\"u}tzt. Um jedoch ein Bild der Sternoberfl{\"a}che zu erhalten, bedarf es vieler hochaufgel{\"o}ster spektroskopischer Beobachtungen, die gleichm{\"a}ßig {\"u}ber eine Sternrotation verteilt sein m{\"u}ssen. F{\"u}r Sterne mit langen Rotationsperioden sind diese Beobachtungen nur schwierig durchzuf{\"u}hren. Das neue robotische Observatorium STELLA adressiert dieses Problem und bietet eine auf Dopplerimaging abgestimmte Ablaufplanung der Beobachtungen an. Dies wird solche Beobachtungen nicht nur leichter durchf{\"u}hrbar machen, sondern auch effektiver gestalten.Als Vorschau welche Ergebnisse mit STELLA erwartet werden k{\"o}nnen dient eine Studie an sieben Sternen die allesamt eine lange (zwischen sieben und 25 Tagen) Rotationsperiode haben. Alle Sterne zeigen differentielle Rotation, allerdings sind die Messfehler aufgrund der nicht zufriedenstellenden Datenqualit{\"a}t von gleicher Gr{\"o}ßenordnung wie die Ergebnisse, ein Problem das bei STELLA nicht auftreten wird. Um die Konsistenz der Ergebnisse zu pr{\"u}fen wurde wenn m{\"o}glich sowohl eine Kreuzkorrelationsanalyse als auch die sheared-image Methode angewandt. Vier von diesen sieben Sternen weisen eine differentielle Rotation in umgekehrter Richtung auf als auf der Sonne zu sehen ist. Die restlichen drei Sterne weisen schwache, aber in der Richtung sonnen{\"a}hnliche differentielle Rotation auf.Abschließend werden diese neuen Messungen mit bereits publizierten Werten kombiniert, und die so erhaltenen Daten auf Korrelationen zwischen differentieller Rotation, Rotationsperiode, Evolutionsstaus, Spektraltyp und Vorhandensein eines Doppelsterns {\"u}berpr{\"u}ft. Alle Sterne zusammen zeigen eine signifikante Korrelation zwischen dem Betrag der differenziellen Rotation und der Rotationsperiode. 
Unterscheidet man zwischen den Richtungen der differentiellen Rotation, so bleibt nur eine Korrelation der Sterne mit antisolarem Verhalten. Dar{\"u}berhinaus zeigt sich auch, dass Doppelsterne schw{\"a}cher differentiell rotieren.}, language = {en} } @phdthesis{Wattenbach2008, author = {Wattenbach, Martin}, title = {The hydrological effects of changes in forest area and species composition in the federal state of Brandenburg, Germany}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27394}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This thesis aims to quantify the human impact on the natural resource water at the landscape scale. The drivers in the federal state of Brandenburg (Germany), the area under investigation, are land-use changes induced by policy decisions at European and federal state level. The water resources of the federal state are particularly sensitive to changes in land-use due to low precipitation rates in the summer combined with sandy soils and high evapotranspiration rates. Key elements in landscape hydrology are forests because of their unique capacity to transport water from the soil to the atmosphere. Given these circumstances, decisions made at any level of administration that may have effects on the forest sector in the state are critical in relation to the water cycle. It is therefore essential to evaluate any decision that may change forest area and structure in such a sensitive region. Thus, as a first step, it was necessary to develop and implement a model able to simulate possible interactions and feedbacks between forested surfaces and the hydrological cycle at the landscape scale. The result is a model for simulating the hydrological properties of forest stands based on a robust computation of the temporal and spatial LAI (leaf area index) dynamics. The approach allows the simulation of all relevant hydrological processes with a low parameter demand. 
It includes the interception of precipitation and transpiration of forest stands with and without groundwater in the rooting zone. The model also considers phenology, biomass allocation, as well as mortality and simple management practices. It has been implemented as a module in the eco-hydrological model SWIM (Soil and Water Integrated Model). This model has been tested in two pre-studies to verify the applicability of its hydrological process description for the hydrological conditions typical for the state. The newly implemented forest module has been tested for Scots Pine (Pinus sylvestris) and in parts for Common Oak (Quercus robur and Q. petraea) in Brandenburg. For Scots Pine the results demonstrate a good simulation of annual biomass increase and LAI in addition to the satisfactory simulation of litter production. A comparison of the simulated and measured data of the May sprout for Scots pine and leaf unfolding for Oak, as well as the evaluation against daily transpiration measurements for Scots Pine, does support the applicability of the approach. The interception of precipitation has also been simulated and compared with weekly observed data for a Scots Pine stand which displays satisfactory results in both the vegetation periods and annual sums. After the development and testing phase, the model is used to analyse the effects of two scenarios. The first scenario is an increase in forest area on abandoned agricultural land that is triggered by a decrease in European agricultural production support. The second one is a shift in species composition from predominant Scots Pine to Common Oak that is based on decisions of the regional forestry authority to support a more natural species composition. The scenario effects are modelled for the federal state of Brandenburg on a 50m grid utilising spatially explicit land-use patterns. 
The results, for the first scenario, suggest a negative impact of an increase in forest area (9.4\% total state area) on the regional water balance, causing an increase in mean long-term annual evapotranspiration of 3.7\% at 100\% afforestation when compared to no afforestation. The relatively small annual change conceals a much more pronounced seasonal effect of a mean long-term evapotranspiration increase by 25.1\% in the spring causing a pronounced reduction in groundwater recharge and runoff. The reduction causes a lag effect that aggravates the scarcity of water resources in the summer. In contrast, in the second scenario, a change in species composition in existing forests (29.2\% total state area) from predominantly Scots Pine to Common Oak decreases the long-term annual mean evapotranspiration by 3.4\%, accompanied by a much weaker, but apparent, seasonal pattern. Both scenarios exhibit a high spatial heterogeneity because of the distinct natural conditions in the different regions of the state. Areas with groundwater levels near the surface are particularly sensitive to changes in forest area and regions with relatively high proportion of forest respond strongly to the change in species composition. In both cases this regional response is masked by a smaller linear mean effect for the total state area. Two critical sources of uncertainty in the model results have been investigated. The first one originates from the model calibration parameters estimated in the pre-study for lowland regions, such as the federal state. The combined effect of the parameters, when changed within their physical meaningful limits, unveils an overestimation of the mean water balance by 1.6\%. However, the distribution has a wide spread with 14.7\% for the 90th percentile and -9.9\% for the 10th percentile. The second source of uncertainty emerges from the parameterisation of the forest module. 
The analysis exhibits a standard deviation of 0.6 \% over a ten year period in the mean of the simulated evapotranspiration as a result of variance in the key forest parameters. The analysis suggests that the combined uncertainty in the model results is dominated by the uncertainties of calibration parameters. Therefore, the effect of the first scenario might be underestimated because the calculated increase in evapotranspiration is too small. This may lead to an overestimation of the water balance towards runoff and groundwater recharge. The opposite can be assumed for the second scenario in which the decrease in evapotranspiration might be overestimated.}, language = {en} } @phdthesis{Wattebled2006, author = {Wattebled, Laurent}, title = {Oligomeric surfactants as novel type of amphiphiles : structure - property relationships and behaviour with additives}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12855}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The properties of a series of well-defined new surfactant oligomers (dimers to tetramers)were examined. From a molecular point of view, these oligomeric surfactants consist of simple monomeric cationic surfactant fragments coupled via the hydrophilic ammonium chloride head groups by spacer groups (different in nature and length). Properties of these cationic surfactant oligomers in aqueous solution such as solubility, micellization and surface activity, micellar size and aggregation number were discussed with respect to the two new molecular variables introduced, i.e. degree of oligomerization and spacer group, in order to establish structure - property relationships. Thus, increasing the degree of oligomerization results in a pronounced decrease of the critical micellization concentration (CMC). Both reduced spacer length and increased spacer hydrophobicity lead to a decrease of the CMC, but to a lesser extent. 
For these particular compounds, the formed micelles are relatively small and their aggregation number decreases with increasing the degree of oligomerization, increasing spacer length and sterical hindrance. In addition, pseudo-phase diagrams were established for the dimeric surfactants in more complex systems, namely inverse microemulsions, demonstrating again the important influence of the spacer group on the surfactant behaviour. Furthermore, the influence of additives on the property profile of the dimeric compounds was examined, in order to see if the solution properties can be improved while using less material. Strong synergistic effects were observed by adding special organic salts (e.g. sodium salicylate, sodium vinyl benzoate, etc.) to the surfactant dimers in stoichiometric amounts. For such mixtures, the critical aggregation concentration is strongly shifted to lower concentration, the effect being more pronounced for dimers than for analogous monomers. A sharp decrease of the surface tension can also be attained. Many of the organic anions produce viscoelastic solutions when added to the relatively short-chain dimers in aqueous solution, as evidenced by rheological measurements. This behaviour reflects the formation of entangled wormlike micelles due to strong interactions of the anions with the cationic surfactants, decreasing the curvature of the micellar aggregates. It is found that the associative behaviour is enhanced by dimerization. For a given counterion, the spacer group may also induce a stronger viscosifying effect depending on its length and hydrophobicity. Oppositely charged surfactants were combined with the cationic dimers, too. First, some mixtures with the conventional anionic surfactant SDS revealed vesicular aggregates in solution. Also, in view of these catanionic mixtures, a novel anionic dimeric surfactant based on EDTA was synthesized and studied. 
The synthesis route is relatively simple and the compound exhibits particularly appealing properties such as low CMC and σCMC values, good solubilization capacity of hydrophobic probes and high tolerance to hard water. Noteworthy, mixtures with particular cationic dimers gave rise to viscous solutions, reflecting the micelle growth.}, language = {en} } @phdthesis{Wasiolka2007, author = {Wasiolka, Bernd}, title = {The impact of overgrazing on reptile diversity and population dynamics of Pedioplanis l. lineoocellata in the southern Kalahari}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-16611}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Die Vegetationskomposition und -struktur, beispielsweise die unterschiedliche Architektur von B{\"a}umen, Str{\"a}uchern, Gr{\"a}sern und Kr{\"a}utern, bietet ein großes Spektrum an Habitaten und Nischen, die wiederum eine hohe Tierdiversit{\"a}t in den Savannensystemen des s{\"u}dlichen Afrikas erm{\"o}glichen. Dieses {\"O}kosystem wurde jedoch {\"u}ber Jahrzehnte weltweit durch intensive anthropogene Landnutzung (z.B. Viehwirtschaft) nachhaltig ver{\"a}ndert. Dabei wurden die Zusammensetzung, Diversit{\"a}t und Struktur der Vegetation stark ver{\"a}ndert. {\"U}berweidung in Savannensystemen f{\"u}hrt zu einer Degradation des Habitates einhergehend mit dem Verlust von perennierenden Gr{\"a}sern und krautiger Vegetation. Dies f{\"u}hrt zu einem Anstieg an vegetationsfreien Bodenfl{\"a}chen. Beides, sowohl der Verlust an perennierenden Gr{\"a}sern und krautiger Vegetation sowie der Anstieg an vegetationsfreien Fl{\"a}chen f{\"u}hrt zu verbesserten Etablierungsbedingungen f{\"u}r Str{\"a}ucher (z.B. Rhigozum trichotomum, Acacia mellifera) und auf lange Sicht zu stark verbuschten Fl{\"a}chen. Die Tierdiversit{\"a}t in Savannen ist hiervon entscheidend beeinflusst. Mit sinkender struktureller Diversit{\"a}t verringert sich auch die Tierdiversit{\"a}t. 
W{\"a}hrend der Einfluss von {\"U}berweidung auf die Vegetation relativ gut untersucht ist sind Informationen {\"u}ber den Einfluss von {\"U}berweidung auf die Tierdiversit{\"a}t, speziell f{\"u}r Reptilien, eher sp{\"a}rlich vorhanden. Zus{\"a}tzlich ist sehr wenig bekannt zum Einfluss auf die Populationsdynamik (z.B. Verhaltensanpassungen, Raumnutzung, {\"U}berlebensrate, Sterberate) einzelner Reptilienarten. Ziel meiner Doktorarbeit ist es den Einfluss von {\"U}berweidung durch kommerzielle Farmnutzung auf die Reptiliengemeinschaft und auf verschiedene Aspekte der Populationsdynamik der Echse Pedioplanis lineoocellata lineoocellata zu untersuchen. Hinsichtlich bestimmter Naturschutzmaßnahmen ist es einerseits wichtig zu verstehen welchen Auswirkungen {\"U}berweidung auf die gesamte Reptiliengemeinschaft hat. Und zum anderen wie entscheidende Faktoren der Populationsdynamik beeinflusst werden. Beides f{\"u}hrt zu einem besseren Verst{\"a}ndnis der Reaktion von Reptilien auf Habitatdegradation zu erlangen. Die Ergebnisse meiner Doktorarbeit zeigen eindeutig einen negativen Einfluss der {\"U}berweidung und der daraus resultierende Habitatdegradation auf (1) die gesamte Reptiliengemeinschaft und (2) auf einzelne Aspekte der Populationsdynamik von P. lineoocellata. Im Teil 1 wird die signifikante Reduzierung der Reptiliendiversit{\"a}t und Abundanz in degradierten Habitaten beschrieben. Im zweiten Teil wird gezeigt, dass P. lineoocellata das Verhalten an die verschlechterten Lebensbedingungen anpassen kann. Die Art bewegt sich sowohl h{\"a}ufiger als auch {\"u}ber einen l{\"a}ngeren Zeitraum und legt dabei gr{\"o}ßere Distanzen zur{\"u}ck. Zus{\"a}tzlich vergr{\"o}ßerte die Art ihr Revier (home range) (Teil 3). Im abschließenden Teil wird der negative Einfluss von {\"U}berweidung auf die Populationsdynamik von P. 
lineoocellata beschrieben: In degradierten Habitaten nimmt die Populationsgr{\"o}ße von adulten und juvenilen Echsen ab, die {\"U}berlebens- und Geburtenrate sinken, w{\"a}hren zus{\"a}tzlich das Pr{\"a}dationsrisiko ansteigt. Verantwortlich hierf{\"u}r ist zum einen die ebenfalls reduzierte Nahrungsverf{\"u}gbarkeit (Arthropoden) auf degradierten Fl{\"a}chen. Dies hat zur Folge, dass die Populationsgr{\"o}ße abnimmt und die Fitness der Individuen verringert wird, welches sich durch eine Reduzierung der {\"U}berlebens- und Geburtenrate bemerkbar macht. Und zum anderen ist es die Reduzierung der Vegetationsbedeckung und der R{\"u}ckgang an perennierenden Gr{\"a}sern welche sich negativ auswirken. Als Konsequenz hiervon gehen Nischen und Mikrohabitate verloren und die M{\"o}glichkeiten der Reptilien zur Thermoregulation sind verringert. Des Weiteren hat dieser Verlust an perennierender Grasbedeckung auch ein erh{\"o}htes Pr{\"a}dationsrisikos zur Folge. Zusammenfassend l{\"a}sst sich sagen, dass nicht nur B{\"a}ume und Str{\"a}ucher, wie in anderen Studien gezeigt, eine bedeutende Rolle f{\"u}r die Diversit{\"a}t spielen, sondern auch das perennierende Gras eine wichtige Rolle f{\"u}r die Faunendiversit{\"a}t spielt. Weiterhin zeigte sich, dass Habitatdegradation nicht nur die Population als gesamtes beeinflusst, sondern auch das Verhalten und Populationsparameter einzelner Arten. 
Des Weiteren ist es Reptilien m{\"o}glich durch Verhaltensflexibilit{\"a}t auf verschlechterte Umweltbedingen zu reagieren.}, language = {en} } @phdthesis{Washuettl2004, author = {Wash{\"u}ttl, Albert}, title = {EI Eridani and the art of doppler imaging : a long-term study}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001714}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Das Verst{\"a}ndnis magnetisch verursachter Aktivit{\"a}t auf Sternen sowie der zugrundeliegenden Dynamoprozesse ist von fundamentaler Bedeutung f{\"u}r das Verst{\"a}ndnis von Entstehung und Entwicklung von Sternen sowie des Lebens im Universum. Sichtbare Erscheinungen dieser stellaren Aktivit{\"a}t sind u.a. Sternflecken, welche als Indikatoren des zugrundeliegenden Magnetfeldes dienen. Solche Flecken k{\"o}nnen auf anderen Sternen als der Sonne nicht direkt beobachtet werden, zumal mit den heutigen technischen Mitteln eine Aufl{\"o}sung der Oberfl{\"a}che selbst der benachbarten Sterne unm{\"o}glich ist. Eine indirekte Rekonstruktionsmethode namens 'Doppler Imaging' erlaubt es jedoch, auf die Temperaturverteilung auf der Sternoberfl{\"a}che zu schließen. F{\"u}r diese Arbeit wurden elf Jahre kontinuierlicher spektroskopischer Beobachtungen des aktiven Doppelsterns EI Eridani herangezogen, um insgesamt 34 Dopplerkarten zu erstellen. In der Folge wird versucht, eine Grundlage zu schaffen f{\"u}r die Analyse des zweidimensionalen Informationsgehalts dieser Karten. Drei Oberfl{\"a}chenkartenparameter werden vorgeschlagen: gemittelte Temperatur, getrennt f{\"u}r verschiedenen stellare Breitenb{\"a}nder; relative Fleckenh{\"a}ufigkeit; und, zum Zwecke der Auswertung der strukturellen Temperaturverteilung, L{\"a}ngen- und Breiten-Ortsfunktion der Sternfleckenh{\"a}ufung. Die resultierenden Werte zeigen deutlich, daß kein zeitlicher Zusammenhang mit dem photometrischen Aktivit{\"a}tszyklus besteht. 
Die Morphologie der Fleckenverteilung bleibt w{\"a}hrend des kompletten Beobachtungszeitraums im wesentlichen konstant. Im Gegensatz zur Sonne gibt es also, im beobachteten Zeitraum und innerhalb der bestehenden Genauigkeit, keinen Fleckenzyklus auf dem aktiven Stern EI Eri. Dar{\"u}berhinaus wurde eine ausf{\"u}hrliche Studie der stellaren Parameter von EI Eri und eine vorl{\"a}ufige Absch{\"a}tzung der differentiellen Rotation auf EI Eri durchgef{\"u}hrt, die eine anti-solare Ausrichtung aufzuweisen scheint, d.h. der Pol rotiert schneller als der {\"A}quator.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Yongbo}, title = {Late glacial to Holocene climate and vegetation changes on the Tibetan Plateau inferred from fossil pollen records in lacustrine sediments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63155}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The past climate in central Asia, and especially on the Tibetan Plateau (TP), is of great importance for an understanding of global climate processes and for predicting the future climate. As a major influence on the climate in this region, the Asian Summer Monsoon (ASM) and its evolutionary history are of vital importance for accurate predictions. However, neither the evolutionary pattern of the summer monsoon nor the driving mechanisms behind it are yet clearly understood. For this research, I first synthesized previously published Late Glacial to Holocene climatic records from monsoonal central Asia in order to extract the general climate signals and the associated summer monsoon intensities. New climate and vegetation sequences were then established using improved quantitative methods, focusing on fossil pollen records recovered from Tibetan lakes and also incorporating new modern datasets. The pollen-vegetation and vegetation-climate relationships on the TP were also evaluated in order to achieve a better understanding of fossil pollen records. 
The synthesis of previously published moisture-related palaeoclimate records in monsoonal central Asia revealed generally different temporal patterns for the two monsoonal subsystems, i.e. the Indian Summer Monsoon (ISM) and East Asian Summer Monsoon (EASM). The ISM appears to have experienced maximum wet conditions during the early Holocene, while many records from the area affected by the EASM indicate relatively dry conditions at that time, particularly in north-central China where the maximum moisture levels occurred during the middle Holocene. A detailed consideration of possible driving factors affecting the summer monsoon, including summer solar insolation and sea surface temperatures, revealed that the ISM was primarily driven by variations in northern hemisphere solar insolation, and that the EASM may have been constrained by the ISM resulting in asynchronous patterns of evolution for these two subsystems. This hypothesis is further supported by modern monsoon indices estimated using the NCEP/NCAR Reanalysis data from the last 50 years, which indicate a significant negative correlation between the two summer monsoon subsystems. By analogy with the early Holocene, intensification of the ISM during coming decades could lead to increased aridification elsewhere as a result of the asynchronous nature of the monsoon subsystems, as can already be observed in the meteorological data from the last 15 years. A quantitative climate reconstruction using fossil pollen records was achieved through analysis of sediment core recovered from Lake Donggi Cona (in the north-eastern part of the TP) which has been dated back to the Last Glacial Maximum (LGM). A new data-set of modern pollen collected from large lakes in arid to semi-arid regions of central Asia is also presented herein. The concept of "pollen source area" was introduced to modern climate calibration based on pollen from large lakes, and was applied to the fossil pollen sequence from Lake Donggi Cona. 
Extremely dry conditions were found to have dominated the LGM, and a subsequent gradually increasing trend in moisture during the Late Glacial period was terminated by an abrupt reversion to a dry phase that lasted for about 1000 years and coincided with the first Heinrich Event of the northern Atlantic region. Subsequent periods corresponding to the warm B{\o}lling-Aller{\o}d period and the Younger Dryas cold event were followed by moist conditions during the early Holocene, with annual precipitation of up to about 400 mm. A slightly drier trend after 9 cal ka BP was then followed by a second wet phase during the middle Holocene that lasted until 4.5 cal ka BP. Relatively steady conditions with only slight fluctuations then dominated the late Holocene, resulting in the present climatic conditions. In order to investigate the relationship between vegetation and climate, temporal variations in the possible driving factors for vegetation change on the northern TP were examined using a high resolution late Holocene pollen record from Lake Kusai. Moving-window Redundancy Analyses (RDAs) were used to evaluate the correlations between pollen assemblages and individual sedimentary proxies. These analyses have revealed frequent fluctuations in the relative abundances of alpine steppe and alpine desert components, and in particular a decrease in the total vegetation cover at around 1500 cal a BP. The climate was found to have had an important influence on vegetation changes when conditions were relatively wet and stable. However, after the 1500 cal a BP threshold in vegetation cover was crossed the vegetation appears to have been affected more by extreme events such as dust storms or fluvial erosion than by the general climatic trends. 
In addition, pollen spectra over the last 600 years have been revealed by Procrustes analysis to be significantly different from those recovered from older samples, which is attributed to an increased human impact that resulted in unprecedented changes to the composition of the vegetation. Theoretical models that have been developed and widely applied to the European area (i.e. the Extended R-Value (ERV) model and the Regional Estimates of Vegetation Abundance from Large Sites (REVEALS) model) have been applied to the high alpine TP ecosystems in order to investigate the pollen-vegetation relationships, as well as for quantitative reconstructions of vegetation abundance. The modern pollen-vegetation relationships for four common pollen species on the TP have been investigated using Poaceae as the reference taxa. The ERV Submodel 2 yielded relatively high PPEs for the steppe and desert taxa (Artemisia Chenopodiaceae), and low PPEs for the Cyperaceae that are characteristic of the alpine Kobresia meadows. The plant abundances on the central and north-eastern TP were quantified by applying these PPEs to four post-Late Glacial fossil pollen sequences. The reconstructed vegetation assemblages for the four pollen sequences always yielded smaller compositional species turnovers than suggested by the pollen spectra, indicating that the strength of the previously-reported vegetation changes may therefore have been overestimated. 
In summary, the key findings of this thesis are that (a) the two ASM subsystems show asynchronous patterns during both the Holocene and modern time periods, (b) fossil pollen records from large lakes reflect regional signals for which the pollen source areas need to be taken into account, (c) climate is not always the main driver for vegetation change, and (d) previously reported vegetation changes on the TP may have been overestimated because they ignored inter-species variations in pollen productivity.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Xia}, title = {Reef ecosystem recovery following the Middle Permian (Capitanian) mass extinction}, doi = {10.25932/publishup-48750}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487502}, school = {Universit{\"a}t Potsdam}, pages = {XI, 144}, year = {2020}, abstract = {To find out the future of nowadays reef ecosystem turnover under the environmental stresses such as global warming and ocean acidification, analogue studies from the geologic past are needed. As a critical time of reef ecosystem innovation, the Permian-Triassic transition witnessed the most severe demise of Phanerozoic reef builders, and the establishment of modern style symbiotic relationships within the reef-building organisms. Being the initial stage of this transition, the Middle Permian (Capitanian) mass extinction coursed a reef eclipse in the early Late Permian, which lead to a gap of understanding in the post-extinction Wuchiapingian reef ecosystem, shortly before the radiation of Changhsingian reefs. Here, this thesis presents detailed biostratigraphic, sedimentological, and palaeoecological studies of the Wuchiapingian reef recovery following the Middle Permian (Capitanian) mass extinction, on the only recorded Wuchiapingian reef setting, outcropping in South China at the Tieqiao section. 
Conodont biostratigraphic zonations were revised from the Early Permian Artinskian to the Late Permian Wuchiapingian in the Tieqiao section. Twenty main and seven subordinate conodont zones are determined at Tieqiao section including two conodont zone below and above the Tieqiao reef complex. The age of Tieqiao reef was constrained as early to middle Wuchiapingian. After constraining the reef age, detailed two-dimensional outcrop mapping combined with lithofacies study were carried out on the Wuchiapingian Tieqiao Section to investigate the reef growth pattern stratigraphically as well as the lateral changes of reef geometry on the outcrop scale. Semi-quantitative studies of the reef-building organisms were used to find out their evolution pattern within the reef recovery. Six reef growth cycles were determined within six transgressive-regressive cycles in the Tieqiao section. The reefs developed within the upper part of each regressive phase and were dominated by different biotas. The timing of initial reef recovery after the Middle Permian (Capitanian) mass extinction was updated to the Clarkina leveni conodont zone, which is earlier than previous understanding. Metazoans such as sponges were not the major components of the Wuchiapingian reefs until the 5th and 6th cycles. So, the recovery of metazoan reef ecosystem after the Middle Permian (Capitanian) mass extinction was obviously delayed. In addition, although the importance of metazoan reef builders such as sponges did increase following the recovery process, encrusting organisms such as Archaeolithoporella and Tubiphytes, combined with microbial carbonate precipitation, still played significant roles to the reef building process and reef recovery after the mass extinction. 
Based on the results from outcrop mapping and sedimentological studies, quantitative composition analysis of the Tieqiao reef complex were applied on selected thin sections to further investigate the functioning of reef building components and the reef evolution after the Middle Permian (Capitanian) mass extinction. Data sets of skeletal grains and whole rock components were analyzed. The results show eleven biocommunity clusters/eight rock composition clusters dominated by different skeletal grains/rock components. Sponges, Archaeolithoporella and Tubiphytes were the most ecologically important components within the Wuchiapingian Tieqiao reef, while the clotted micrites and syndepositional cements are the additional important rock components for reef cores. The sponges were important within the whole reef recovery. Tubiphytes were broadly distributed in different environments and played a key-role in the initial reef communities. Archaeolithoporella concentrated in the shallower part of reef cycles (i.e., the upper part of reef core) and was functionally significant for the enlargement of reef volume. In general, the reef recovery after the Middle Permian (Capitanian) mass extinction has some similarities with the reef recovery following the end-Permian mass extinction. It shows a delayed recovery of metazoan reefs and a stepwise recovery pattern that was controlled by both ecological and environmental factors. The importance of encrusting organisms and microbial carbonates are also similar to most of the other post-extinction reef ecosystems. 
These findings can be instructive to extend our understanding of the reef ecosystem evolution under environmental perturbation or stresses.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Weishi}, title = {Influence of river reconstruction at a bank filtration site}, doi = {10.25932/publishup-49023}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490234}, school = {Universit{\"a}t Potsdam}, pages = {IIV, 120}, year = {2020}, abstract = {Bank filtration is an effective water treatment technique and is widely adopted in Europe along major rivers. It is the process where surface water penetrates the riverbed, flows through the aquifer, and then is extracted by near-bank production wells. By flowing in the subsurface flow passage, the water quality can be improved by a series of beneficial processes. Long-term riverbank filtration also produces colmation layers on the riverbed. The colmation layer may act as a bioactive zone that is governed by biochemical and physical processes owing to its enrichment of microbes and organic matter. Low permeability may strongly limit the surface water infiltration and further lead to a decreasing recoverable ratio of production wells.The removal of the colmation layer is therefore a trade-off between the treatment capacity and treatment efficiency. The goal of this Ph.D. thesis is to focus on the temporal and spatial change of the water quality and quantity along the flow path of a hydrogeological heterogeneous riverbank filtration site adjacent to an artificial-reconstructed (bottom excavation and bank reconstruction) canal in Potsdam, Germany. To quantify the change of the infiltration rate, travel time distribution, and the thermal field brought by the canal reconstruction, a three-dimensional flow and heat transport model was created. This model has two scenarios, 1) 'with' canal reconstruction, and 2) 'without' canal reconstruction. 
Overall, the model calibration results of both water heads and temperatures matched those observed in the field study. In comparison to the model without reconstruction, the reconstruction model led to more water being infiltrated into the aquifer on that section, on average 521 m3/d, which corresponded to around 9\% of the total pumping rate. Subsurface travel-time distribution substantially shifted towards shorter travel times. Flow paths with travel times <200 days increased by ~10\% and those with <300 days by 15\%. Furthermore, the thermal distribution in the aquifer showed that the seasonal variation in the scenario with reconstruction reaches deeper and laterally propagates further. By scatter plotting of δ18O versus δ 2H, the infiltrated river water could be differentiated from water flowing in the deep aquifer, which may contain remnant landside groundwater from further north. In contrast, the increase of river water contribution due to decolmation could be shown by piper plot. Geological heterogeneity caused a substantial spatial difference in redox zonation among different flow paths, both horizontally and vertically. Using the Wilcoxon rank test, the reconstruction changed the redox potential differently in observation wells. However, taking the small absolute concentration level, the change is also relatively minor. The treatment efficiency for both organic matter and inorganic matter is consistent after the reconstruction, except for ammonium. The inconsistent results for ammonium could be explained by changes in the Cation Exchange Capacity (CEC) in the newly paved riverbed. Because the bed is new, it was not yet capable of keeping the newly produced ammonium by sorption and further led to the breakthrough of the ammonium plume. 
By estimation, the peak of the ammonium plume would reach the most distant observation well before February 2024, while the peaking concentration could be further dampened by sorption and diluted by the afterward low ammonium flow. The consistent DOC and SUVA level suggests that there was no clear preference for the organic matter removal along the flow path.}, language = {en} } @phdthesis{Wang2016, author = {Wang, Victor-C.}, title = {Injury and illness risk factors for elite athletes in training environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100925}, school = {Universit{\"a}t Potsdam}, pages = {viii, 84, ix}, year = {2016}, abstract = {Since 1998, elite athletes' sport injuries have been monitored in single sport event, which leads to the development of first comprehensive injury surveillance system in multi-sport Olympic Games in 2008. However, injury and illness occurring in training phases have not been systematically studied due to its multi-facets, potentially interactive risk related factors. The present thesis aims to address issues of feasibility of establishing a validated measure for injury/illness, training environment and psychosocial risk factors by creating the evaluation tool namely risk of injury questionnaire (Risk-IQ) for elite athletes, which is based on the IOC consensus statement 2009 recommended content of preparticipation evaluation (PPE) and periodic health exam (PHE). A total of 335 top level athletes and a total of 88 medical care providers from Germany and Taiwan participated in two "cross-sectional plus longitudinal" Risk-IQ and MCPQ surveys respectively. Four categories of injury/illness related risk factors questions were asked in Risk-IQ for athletes while injury risk and psychological related questions were asked in MCPQ for MCP cohorts. Answers were quantified scales wise/subscales wise before analyzed with other factors/scales. 
In addition, adapted variables such as sport format were introduced for different tasks of analysis. Validated with two-way translation and test-retest reliabilities, the Risk-IQ was proved to be in good standard which was further confirmed by analyzed results from official surveys in both Germany and Taiwan. The result of Risk-IQ revealed that elite athletes' accumulated total injuries, in general, were multi-factor dependent; influencing factors including but not limited to background experiences, medical history, PHE and PPE medical resources as well as stress from life events. Injuries of different body parts were sport format and location specific. Additionally, medical support of PPE and PHE indicated significant difference between Germany and Taiwan. The result of the present thesis confirmed that it is feasible to construct a comprehensive evaluation instrument for heterogeneous elite athletes cohorts' risk factor analysis for injury/illness occurred during their non-competition periods. On average and with many moderators involved, German elite athletes have superior medical care support yet suffered more severe injuries than Taiwanese counterparts. Opinions of injury related psychological issues reflected differently on various MCP groups irrespective of different nationalities. In general, influencing factors and interactions existed among relevant factors in both studies which implied further investigation with multiple regression analysis is needed for better understanding.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expanding of the numbers of web sites and Internet users makes WWW the most important global information resource. 
From information publishing and electronic commerce to entertainment and social networking, the Web allows an inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distributions of the web users drive the urgent requirement from the web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs among various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" shows that our concentration is on locating and measuring the differences and changes among usage patterns. This thesis presents the methodologies on discovering usage interest on three kinds of web sites: the public information portal site, e-learning site that provides kinds of streaming lectures and social site that supplies the public discussions on IT issues. On different sites, we concentrate on different issues related with mining usage interest. The educational information portal sites were the first implementation scenarios on discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild the individual behaviors from usage history. We give a systematic study on how to rebuild individual behaviors. Besides, this thesis shows a new strategy on building content clusters based on pair browsing retrieved from usage logs. 
The difference between such clusters and the original web structure displays the distance between the destinations from usage side and the expectations from design side. Moreover, we study the problem on tracking the changes of usage patterns in their life cycles. The changes are described from internal side integrating conceptual and structure features, and from external side for the physical features; and described from local side measuring the difference between two time spans, and global side showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. E-learning site provides the teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest on streaming lectures, such as real medias, mp4 and flash clips. Compared to the information portal site, the usage on streaming lectures encapsulates the variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering 6 questions, which covers finding the relations between pieces of lectures and the preference among different forms of lectures. We prefer on detecting the changes of learning interest on the same course from different semesters. The differences on the content and structure between two courses leverage the changes on the learning interest. We give an algorithm on measuring the difference on learning interest integrated with similarity comparison between courses. A search engine, TASK-Moniminer, is created to help the teacher query the learning interest on their streaming lectures on tele-TASK site. Social site acts as an online community attracting web users to discuss the common topics and share their interesting information. 
Compared to the public information portal site and e-learning web site, the rich interactions among users and web content bring the wider range of content quality, on the other hand, provide more possibilities to express and model usage interest. We propose a framework on finding and recommending high reputation articles in a social site. We observed that the reputation is classified into global and local categories; the quality of the articles having high reputation is related with the content features. Based on these observations, our framework is implemented firstly by finding the articles having global or local reputation, and secondly clustering articles based on their content relations, and then the articles are selected and recommended from each cluster based on their reputation ranks.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Jingwen}, title = {Electret properties of polypropylene with surface chemical modification and crystalline reconstruction}, doi = {10.25932/publishup-47027}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470271}, school = {Universit{\"a}t Potsdam}, pages = {vi, 121}, year = {2020}, abstract = {As one of the most-produced commodity polymers, polypropylene draws considerable scientific and commercial interest as an electret material. In the present thesis, the influence of the surface chemical modification and crystalline reconstruction on the electret properties of the polypropylene thin films will be discussed. The chemical treatment with orthophosphoric acid can significantly improve the surface charge stability of the polypropylene electrets by introducing phosphorus- and oxygen-containing structures onto the modified surface. The thermally stimulated discharge measurement and charge profiling by means of piezoelectrically generated pressure steps are used to investigate the electret behaviour. 
It is concluded that deep traps of limited number density are created during the treatment with inorganic chemicals. Hence, the improvement dramatically decreases when the surface-charge density is substantially higher than ±1.2×10^(-3) C·m^(-2). The newly formed traps also show a higher trapping energy for negative charges. The energetic distributions of the traps in the non-treated and chemically treated samples offer an insight regarding the surface and foreign-chemical dominance on the charge storage and transport in the polypropylene electrets. Additionally, different electret properties are observed on the polypropylene films with the spherulitic and transcrystalline structures. It indicates the dependence of the charge storage and transport on the crystallite and molecular orientations in the crystalline phase. In general, a more diverse crystalline growth in the spherulitic samples can result in a more complex energetic trap distribution, in comparison to that in a transcrystalline polypropylene. The double-layer transcrystalline polypropylene film with a crystalline interface in the middle can be obtained by crystallising the film in contact with rough moulding surfaces on both sides. A layer of heterocharges appears on each side of the interface in the double-layer transcrystalline polypropylene electrets after the thermal poling. However, there is no charge captured within the transcrystalline layers. The phenomenon reveals the importance of the crystalline interface in terms of creating traps with the higher activation energy in polypropylene. 
The present studies highlight the fact that even slight variations in the polypropylene film may lead to dramatic differences in its electret properties.}, language = {en} } @phdthesis{Wallenta2015, author = {Wallenta, Daniel}, title = {Sequences of compact curvature}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-87489}, school = {Universit{\"a}t Potsdam}, pages = {viii, 73}, year = {2015}, abstract = {By perturbing the differential of a (cochain-)complex by "small" operators, one obtains what is referred to as quasicomplexes, i.e. a sequence whose curvature is not equal to zero in general. In this situation the cohomology is no longer defined. Note that it depends on the structure of the underlying spaces whether or not an operator is "small." This leads to a magical mix of perturbation and regularisation theory. In the general setting of Hilbert spaces compact operators are "small." In order to develop this theory, many elements of diverse mathematical disciplines, such as functional analysis, differential geometry, partial differential equation, homological algebra and topology have to be combined. All essential basics are summarised in the first chapter of this thesis. This contains classical elements of index theory, such as Fredholm operators, elliptic pseudodifferential operators and characteristic classes. Moreover we study the de Rham complex and introduce Sobolev spaces of arbitrary order as well as the concept of operator ideals. In the second chapter, the abstract theory of (Fredholm) quasicomplexes of Hilbert spaces will be developed. From the very beginning we will consider quasicomplexes with curvature in an ideal class. We introduce the Euler characteristic, the cone of a quasiendomorphism and the Lefschetz number. In particular, we generalise Euler's identity, which will allow us to develop the Lefschetz theory on nonseparable Hilbert spaces. 
Finally, in the third chapter the abstract theory will be applied to elliptic quasicomplexes with pseudodifferential operators of arbitrary order. We will show that the Atiyah-Singer index formula holds true for those objects and, as an example, we will compute the Euler characteristic of the connection quasicomplex. In addition to this we introduce geometric quasiendomorphisms and prove a generalisation of the Lefschetz fixed point theorem of Atiyah and Bott.}, language = {en} } @phdthesis{Walczak2019, author = {Walczak, Ralf}, title = {Molecular design of nitrogen-doped nanoporous noble carbon materials for gas adsorption}, doi = {10.25932/publishup-43524}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435241}, school = {Universit{\"a}t Potsdam}, pages = {II, 155}, year = {2019}, abstract = {In den modernen Gesellschaften f{\"u}hrt ein stetig steigender Energiebedarf zu dem zunehmenden Verbrauch fossiler Brennstoffe wie Kohle, {\"O}l, und Gas. Die Verbrennung dieser kohlenstoffbasierten Brennstoffe f{\"u}hrt unweigerlich zur Freisetzung von Treibhausgasen, vor allem von CO2. Die CO2 Aufnahme unmittelbar bei den Verbrennungsanlagen oder direkt aus der Luft, zusammen mit Regulierung von CO2 produzierenden Energiesektoren (z.B. K{\"u}hlanlagen), k{\"o}nnen den CO2 Ausstoß reduzieren. Allerdings f{\"u}hren insbesondere bei der CO2 Aufnahme die geringen CO2 Konzentrationen und die Aufnahme konkurrierender Gase zu niedrigen CO2 Kapazit{\"a}ten und Selektivit{\"a}ten. Das Zusammenspiel der Gastmolek{\"u}le mit por{\"o}sen Materialien ist dabei essentiell. Por{\"o}se Kohlenstoffmaterialien besitzen attraktive Eigenschaften, unter anderem elektrische Leitf{\"a}higkeit, einstellbare Porosit{\"a}t, als auch chemische und thermische Stabilit{\"a}t. Allerdings f{\"u}hrt die zu geringe Polarisierbarkeit dieser Materialien zu einer geringen Affinit{\"a}t zu polaren Molek{\"u}len (z.B. CO2, H2O, oder NH3). 
Diese Affinit{\"a}t kann durch den Einbau von Stickstoff erh{\"o}ht werden. Solche Materialien sind oft „edler" als reine Kohlenstoffe, dies bedeutet, dass sie eher oxidierend wirken, als selbst oxidiert zu werden. Die Problematik besteht darin, einen hohen und gleichm{\"a}ßig verteilten Stickstoffgehalt in das Kohlenstoffger{\"u}st einzubauen. Die Zielsetzung dieser Dissertation ist die Erforschung neuer Synthesewege f{\"u}r stickstoffdotierte edle Kohlenstoffmaterialien und die Entwicklung eines grundlegenden Verst{\"a}ndnisses f{\"u}r deren Anwendung in Gasadsorption und elektrochemischer Energiespeicherung. Es wurde eine templatfreie Synthese f{\"u}r stickstoffreiche, edle, und mikropor{\"o}se Kohlenstoffmaterialien durch direkte Kondensation eines stickstoffreichen organischen Molek{\"u}ls als Vorl{\"a}ufer erarbeitet. Dadurch konnten Materialien mit hohen Adsorptionskapazit{\"a}ten f{\"u}r H2O und CO2 bei niedrigen Konzentrationen und moderate CO2/N2 Selektivit{\"a}ten erzielt werden. Um die CO2/N2 Selektivit{\"a}ten zu verbessern, wurden mittels der Einstellung des Kondensationsgrades die molekulare Struktur und Porosit{\"a}t der Kohlenstoffmaterialien kontrolliert. Diese Materialien besitzen die Eigenschaften eines molekularen Siebs f{\"u}r CO2 {\"u}ber N2, das zu herausragenden CO2/N2 Selektivit{\"a}ten f{\"u}hrt. Der ultrahydrophile Charakter der Porenoberfl{\"a}chen und die kleinen Mikroporen dieser Kohlenstoffmaterialien erm{\"o}glichen grundlegende Untersuchungen f{\"u}r die Wechselwirkungen mit Molek{\"u}len die polarer sind als CO2, n{\"a}mlich H2O und NH3. Eine weitere Reihe stickstoffdotierter Kohlenstoffmaterialien wurde durch Kondensation eines konjugierten mikropor{\"o}sen Polymers synthetisiert und deren strukturelle Besonderheiten als Anodenmaterial f{\"u}r die Natriumionen Batterie untersucht. 
Diese Dissertation leistet einen Beitrag zur Erforschung stickstoffdotierter Kohlenstoffmaterialien und deren Wechselwirkungen mit verschiedenen Gastmolek{\"u}len.}, language = {en} } @phdthesis{Waha2012, author = {Waha, Katharina}, title = {Climate change impacts on agricultural vegetation in sub-Saharan Africa}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64717}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Agriculture is one of the most important human activities providing food and more agricultural goods for seven billion people around the world and is of special importance in sub-Saharan Africa. The majority of people depends on the agricultural sector for their livelihoods and will suffer from negative climate change impacts on agriculture until the middle and end of the 21st century, even more if weak governments, economic crises or violent conflicts endanger the countries' food security. The impact of temperature increases and changing precipitation patterns on agricultural vegetation motivated this thesis in the first place. Analyzing the potentials of reducing negative climate change impacts by adapting crop management to changing climate is a second objective of the thesis. As a precondition for simulating climate change impacts on agricultural crops with a global crop model first the timing of sowing in the tropics was improved and validated as this is an important factor determining the length and timing of the crops´ development phases, the occurrence of water stress and final crop yield. Crop yields are projected to decline in most regions which is evident from the results of this thesis, but the uncertainties that exist in climate projections and in the efficiency of adaptation options because of political, economical or institutional obstacles have to be considered. The effect of temperature increases and changing precipitation patterns on crop yields can be analyzed separately and varies in space across the continent. 
Southern Africa is clearly the region most susceptible to climate change, especially to precipitation changes. The Sahel north of 13° N and parts of Eastern Africa with short growing seasons below 120 days and limited wet season precipitation of less than 500 mm are also vulnerable to precipitation changes while in most other part of East and Central Africa, in contrast, the effect of temperature increase on crops overbalances the precipitation effect and is most pronounced in a band stretching from Angola to Ethiopia in the 2060s. The results of this thesis confirm the findings from previous studies on the magnitude of climate change impact on crops in sub-Saharan Africa but beyond that helps to understand the drivers of these changes and the potential of certain management strategies for adaptation in more detail. Crop yield changes depend on the initial growing conditions, on the magnitude of climate change, and on the crop, cropping system and adaptive capacity of African farmers which is only now evident from this comprehensive study for sub-Saharan Africa. Furthermore this study improves the representation of tropical cropping systems in a global crop model and considers the major food crops cultivated in sub-Saharan Africa and climate change impacts throughout the continent.}, language = {en} } @phdthesis{VasquezParra2007, author = {V{\´a}squez Parra, M{\´o}nica Fernanda}, title = {Mafic magmatism in the Eastern Cordillera and Putumayo Basin, Colombia : causes and consequences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13183}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {The Eastern Cordillera of Colombia is mainly composed of sedimentary rocks deposited since early Mesozoic times. Magmatic rocks are scarce. They are represented only by a few locally restricted occurrences of dykes and sills of mafic composition presumably emplaced in the Cretaceous and of volcanic rocks of Neogene age. 
This work is focused on the study of the Cretaceous magmatism with the intention to understand the processes causing the genesis of these rocks and their significance in the regional tectonic setting of the Northern Andes. The magmatic rocks cut the Cretaceous sedimentary succession of black shales and marlstones that crop out in both flanks of the Eastern Cordillera. The studied rocks were classified as gabbros (C{\´a}ceres, Pacho, Rodrigoque), tonalites (C{\´a}ceres, La Corona), diorites and syenodiorites (La Corona), pyroxene-hornblende gabbros (Pacho), and pyroxene-hornblendites (Pajarito). The gabbroic samples are mainly composed of plagioclase, clinopyroxene, and/or green to brown hornblende, whereas the tonalitic rocks are mainly composed of plagioclase and quartz. The samples are highly variable in crystal sizes from fine- to coarse-grained. Accessory minerals such as biotite, titanite and zircon are present. Some samples are characterized by moderate to strong alteration, and show the presence of epidote, actinolite and chlorite. Major and trace element compositions of the rocks as well as the rock-forming minerals show significant differences in the geochemical and petrological characteristics for the different localities, suggesting that this magmatism does not result from a single melting process. The wide compositional spectrum of trace elements in the intrusions is characteristic for different degrees of mantle melting and enrichment of incompatible elements. MORB- and OIB-like compositions suggest at least two different sources of magma with tholeiitic and alkaline affinity, respectively. Evidence of slab-derived fluids can be recognized in the western part of the basin reflected in higher Ba/Nb and Sr/P ratios and also in the Sr radiogenic isotope ratios, which is possible a consequence of metasomatism in the mantle due to processes related to the presence of a previously subducted slab. 
The trace element patterns evidence an extensional setting in the Cretaceous basin producing a continental rift, with continental crust being stretched until oceanic crust was generated in the last stages of this extension. Electron microprobe analyses (EMPA) of the major elements and synchrotron radiation micro-X-ray fluorescence (μ-SRXRF) analyses of the trace element composition of the early crystallized minerals of the intrusions (clinopyroxenes and amphiboles) reflect the same dual character that has been found in the bulk-rock analyses. Despite the observed alteration of the rocks, the mineral composition shows evidences for an enriched and a relative depleted magma source. Even the normalization of the trace element concentrations of clinopyroxenes and amphiboles to the whole rock nearly follows the pattern predicted by published partition coefficients, suggesting that the alteration did not change the original trace element compositions of the investigated minerals. Sr-Nd-Pb isotope data reveal a large isotopic variation but still suggest an initial origin of the magmas in the mantle. Samples have moderate to highly radiogenic compositions of 143Nd/144Nd and high 87Sr/86Sr ratios and follow a trend towards enriched mantle compositions, like the local South American Paleozoic crust. The melts experienced variable degrees of contamination by sediments, crust, and seawater. The age corrected Pb isotope ratios show two separated groups of samples. This suggests that the chemical composition of the mantle below the Northern Andes has been modified by the interaction with other components resulting in a heterogeneous combination of materials of diverse origins. Although previous K/Ar age dating have shown that the magmatism took place in the Cretaceous, the high error of the analyses and the altered nature of the investigated minerals did preclude reliable interpretations. In the present work 40Ar/39Ar dating was carried out. 
The results show a prolonged history of magmatism during the Cretaceous over more than 60 Ma, from ~136 to ~74 Ma (Hauterivian to Campanian). Pre-Cretaceous rifting phases occurred in the Triassic-Jurassic for the western part of the basin and in the Paleozoic for the eastern part. Those previous rifting phases are decisive mechanisms controlling the localization and composition of the Cretaceous magmatism. Therefore, it is the structural position and not the age of the intrusions which preconditions the kind of magmatism and the degree of melting. The divergences on ages are the consequence of the segmentation of the basin in several sub-basins which stretching, thermal evolution and subsidence rate evolved independently. The first hypothesis formulated at the beginning of this investigation was that the Cretaceous gabbroic intrusions identified in northern Ecuador could be correlated with the intrusions described in the Eastern Cordillera. The mafic occurrences should mark the location of the most subsiding places of the large Cretaceous basin in northern South America. For this reason, the gabbroic intrusions cutting the Cretaceous succession in the Putumayo Basin, southern Colombia, were investigated. The results of the studies were quite unexpected. The petrologic and geochemical character of the magmatic rocks indicates subduction-related magmatism. K/Ar dating of amphibole yields a Late Miocene to Pliocene age (6.1 ± 0.7 Ma) for the igneous event in the basin. Although there is no correlation between this magmatic event and the Cretaceous magmatic event, the data obtained has significant tectonic and economic implications. 
The emplacement of the Neogene gabbroic rocks coincides with the late Miocene/Pliocene Andean orogenic uplift as well as with a significant pulse of hydrocarbon generation and expulsion.}, language = {en} } @phdthesis{Vu2012, author = {Vu, Thi Thanh Van}, title = {Local government on the way to good governance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93943}, school = {Universit{\"a}t Potsdam}, pages = {vii, 254}, year = {2012}, abstract = {Bad governance causes economic, social, developmental and environmental problems in many developing countries. Developing countries have adopted a number of reforms that have assisted in achieving good governance. The success of governance reform depends on the starting point of each country - what institutional arrangements exist at the out-set and who the people implementing reforms within the existing institutional framework are. This dissertation focuses on how formal institutions (laws and regulations) and informal institutions (culture, habit and conception) impact on good governance. Three characteristics central to good governance - transparency, participation and accountability are studied in the research. A number of key findings were: Good governance in Hanoi and Berlin represent the two extremes of the scale, while governance in Berlin is almost at the top of the scale, governance in Hanoi is at the bottom. Good governance in Hanoi is still far from achieved. In Berlin, information about public policies, administrative services and public finance is available, reliable and understandable. People do not encounter any problems accessing public information. In Hanoi, however, public information is not easy to access. There are big differences between Hanoi and Berlin in the three forms of participation. While voting in Hanoi to elect local deputies is formal and forced, elections in Berlin are fair and free. 
The candidates in local elections in Berlin come from different parties, whereas the candidacy of local deputies in Hanoi is thoroughly controlled by the Fatherland Front. Even though the turnout of voters in local deputy elections is close to 90 percent in Hanoi, the legitimacy of both the elections and the process of representation is non-existent because the local deputy candidates are decided by the Communist Party. The involvement of people in solving local problems is encouraged by the government in Berlin. The different initiatives include citizenry budget, citizen activity, citizen initiatives, etc. Individual citizens are free to participate either individually or through an association. Lacking transparency and participation, the quality of public service in Hanoi is poor. Citizens seldom get their services on time as required by the regulations. Citizens who want to receive public services can bribe officials directly, use the power of relationships, or pay a third person - the mediator ("C{\`o}" - in Vietnamese). In contrast, public service delivery in Berlin follows the customer-orientated principle. The quality of service is high in relation to time and cost. Paying speed money, bribery and using relationships to gain preferential public service do not exist in Berlin. Using the examples of Berlin and Hanoi, it is clear to see how transparency, participation and accountability are interconnected and influence each other. Without a free and fair election as well as participation of non-governmental organisations, civil organisations, and the media in political decision-making and public actions, it is hard to hold the Hanoi local government accountable. The key differences in formal institutions (regulative and cognitive) between Berlin and Hanoi reflect the three main principles: rule of law vs. rule by law, pluralism vs. monopoly Party in politics and social market economy vs. market economy with socialist orientation. 
In Berlin the logic of appropriateness and codes of conduct are respect for laws, respect of individual freedom and ideas and awareness of community development. People in Berlin take for granted that public services are delivered to them fairly. Ideas such as using money or relationships to shorten public administrative procedures do not exist in the mind of either public officials or citizens. In Hanoi, under a weak formal framework of good governance, new values and norms (prosperity, achievement) generated in the economic transition interact with the habits of the centrally-planned economy (lying, dependence, passivity) and traditional values (hierarchy, harmony, family, collectivism) influence behaviours of those involved. In Hanoi "doing the right thing" such as compliance with law doesn't become "the way it is". The unintended consequence of the deliberate reform actions of the Party is the prevalence of corruption. The socialist orientation seems not to have been achieved as the gap between the rich and the poor has widened. Good governance is not achievable if citizens and officials are concerned only with their self-interest. State and society depend on each other. Theoretically to achieve good governance in Hanoi, institutions (formal and informal) able to create good citizens, officials and deputies should be generated. Good citizens are good by habit rather than by nature. The rule of law principle is necessary for the professional performance of local administrations and People's Councils. When the rule of law is applied consistently, the room for informal institutions to function will be reduced. Promoting good governance in Hanoi is dependent on the need and desire to change the government and people themselves. Good governance in Berlin can be seen to be the result of the efforts of the local government and citizens after a long period of development and continuous adjustment. 
Institutional transformation is always a long and complicated process because the change in formal regulations as well as in the way they are implemented may meet strong resistance from the established practice. This study has attempted to point out the weaknesses of the institutions of Hanoi and has identified factors affecting future development towards good governance. But it is not easy to determine how long it will take to change the institutional setting of Hanoi in order to achieve good governance.}, language = {en} } @phdthesis{Vu2022, author = {Vu, Nils Leif}, title = {A task-based parallel elliptic solver for numerical relativity with discontinuous Galerkin methods}, doi = {10.25932/publishup-56226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-562265}, school = {Universit{\"a}t Potsdam}, pages = {172}, year = {2022}, abstract = {Elliptic partial differential equations are ubiquitous in physics. In numerical relativity---the study of computational solutions to the Einstein field equations of general relativity---elliptic equations govern the initial data that seed every simulation of merging black holes and neutron stars. In the quest to produce detailed numerical simulations of these most cataclysmic astrophysical events in our Universe, numerical relativists resort to the vast computing power offered by current and future supercomputers. To leverage these computational resources, numerical codes for the time evolution of general-relativistic initial value problems are being developed with a renewed focus on parallelization and computational efficiency. Their capability to solve elliptic problems for accurate initial data must keep pace with the increasing detail of the simulations, but elliptic problems are traditionally hard to parallelize effectively. 
In this thesis, I develop new numerical methods to solve elliptic partial differential equations on computing clusters, with a focus on initial data for orbiting black holes and neutron stars. I develop a discontinuous Galerkin scheme for a wide range of elliptic equations, and a stack of task-based parallel algorithms for their iterative solution. The resulting multigrid-Schwarz preconditioned Newton-Krylov elliptic solver proves capable of parallelizing over 200 million degrees of freedom to at least a few thousand cores, and already solves initial data for a black hole binary about ten times faster than the numerical relativity code SpEC. I also demonstrate the applicability of the new elliptic solver across physical disciplines, simulating the thermal noise in thin mirror coatings of interferometric gravitational-wave detectors to unprecedented accuracy. The elliptic solver is implemented in the new open-source SpECTRE numerical relativity code, and set up to support simulations of astrophysical scenarios for the emerging era of gravitational-wave and multimessenger astronomy.}, language = {en} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Project for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS) which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. 
students achieved low scores in comparison with students in other countries (p. 1). The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78). They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. 
In particular, this dissertation carried out the following research tasks: - Studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam - Introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning - Carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam - Comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries - Studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies Hopefully, this dissertation will be a useful reference material for pre-service teachers at education universities to understand the nature of teaching and develop their teaching career.}, language = {en} } @phdthesis{Vranic2019, author = {Vranic, Marija}, title = {3D Structure of the biomarker hepcidin-25 in its native state}, doi = {10.25932/publishup-45929}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459295}, school = {Universit{\"a}t Potsdam}, pages = {xii, 135}, year = {2019}, abstract = {Hepcidin-25 (Hep-25) plays a crucial role in the control of iron homeostasis. Since the dysfunction of the hepcidin pathway leads to multiple diseases as a result of iron imbalance, hepcidin represents a potential target for the diagnosis and treatment of disorders of iron metabolism. Despite intense research in the last decade targeted at developing a selective immunoassay for iron disorder diagnosis and treatment and better understanding the ferroportin-hepcidin interaction, questions remain. The key to resolving these underlying questions is acquiring exact knowledge of the 3D structure of native Hep-25. 
Since it was determined that the N-terminus, which is responsible for the bioactivity of Hep-25, contains a small Cu(II)-binding site known as the ATCUN motif, it was assumed that the Hep-25-Cu(II) complex is the native, bioactive form of the hepcidin. This structure has thus far not been elucidated in detail. Owing to the lack of structural information on metal-bound Hep-25, little is known about its possible biological role in iron metabolism. Therefore, this work is focused on structurally characterizing the metal-bound Hep-25 by NMR spectroscopy and molecular dynamics simulations. For the present work, a protocol was developed to prepare and purify properly folded Hep-25 in high quantities. In order to overcome the low solubility of Hep-25 at neutral pH, we introduced the C-terminal DEDEDE solubility tag. The metal binding was investigated through a series of NMR spectroscopic experiments to identify the most affected amino acids that mediate metal coordination. Based on the obtained NMR data, a structural calculation was performed in order to generate a model structure of the Hep-25-Ni(II) complex. The DEDEDE tag was excluded from the structural calculation due to a lack of NMR restraints. The dynamic nature and fast exchange of some of the amide protons with solvent reduced the overall number of NMR restraints needed for a high-quality structure. The NMR data revealed that the 20 Cterminal Hep-25 amino acids experienced no significant conformational changes, compared to published results, as a result of a pH change from pH 3 to pH 7 and metal binding. A 3D model of the Hep-25-Ni(II) complex was constructed from NMR data recorded for the hexapeptideNi(II) complex and Hep-25-DEDEDE-Ni(II) complex in combination with the fixed conformation of 19 C-terminal amino acids. The NMR data of the Hep-25-DEDEDE-Ni(II) complex indicates that the ATCUN motif moves independently from the rest of the structure. 
The 3D model structure of the metal-bound Hep-25 allows for future works to elucidate hepcidin's interaction with its receptor ferroportin and should serve as a starting point for the development of antibodies with improved selectivity.}, language = {en} } @phdthesis{Voss2005, author = {Voß, Rebecca}, title = {Mesoporous organosilica materials with amine functions : surface characteristics and chirality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5287}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {In this work mesoporous organisilica materials are synthesized through the silica sol-gel process. For this a new class of precursors which are also surfactant are synthesized and self-assembled. This leads to a high surface area functionality which is analysized with copper (II) and water adsorption.}, subject = {Silicate}, language = {en} } @phdthesis{Vossenkuhl2015, author = {Vossenkuhl, Birgit}, title = {Transmission of MRSA along the meat supply chain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-85918}, school = {Universit{\"a}t Potsdam}, pages = {141}, year = {2015}, abstract = {Methicillin-resistente Staphylococcus aureus (MRSA) z{\"a}hlen zu den bedeutendsten antibiotikaresistenten Pathogenen, die vor allem in Krankenh{\"a}usern aber auch außerhalb von Einrichtungen des Gesundheitswesens weit verbreitet sind. Seit einigen Jahren ist eine neue Generation von MRSA auf dem Vormarsch, die vor allem Nutztierbest{\"a}nde als neue Nische besiedelt. Diese sogenannten Nutztier-assoziierten MRSA wurden wiederholt bei wirtschaftlich bedeutenden Nutztieren sowie daraus gewonnenem Fleisch nachgewiesen. Im Rahmen der vorliegenden Arbeit wurde ein methodischer Ansatz verfolgt, um die Hypothese einer m{\"o}glichen {\"U}bertragung von Nutztier-assoziierten MRSA entlang der Lebensmittelkette vom Tier auf dessen Fleisch zu best{\"a}tigen. 
Angepasst an die Unterschiede in den verf{\"u}gbaren Daten wurden daf{\"u}r zwei neue Konzepte erstellt. Zur Analyse der {\"U}bertragung von MRSA entlang der Schlachtkette wurde ein mathematisches Modell des Schweineschlachtprozesses entwickelt, welches dazu geeignet ist, den Verlauf der MRSA-Pr{\"a}valenz entlang der Schlachtkette zu quantifizieren sowie kritische Prozessschritte f{\"u}r eine MRSA-{\"U}bertragung zu identifizieren. Anhand von Pr{\"a}valenzdaten ist es dem Modell m{\"o}glich, die durchschnittlichen MRSA-Eliminations- und Kontaminationsraten jedes einzelnen Prozessschrittes zu sch{\"a}tzen, die anschließend in eine Monte-Carlo-Simulation einfließen. Im Ergebnis konnte gezeigt werden, dass es generell m{\"o}glich ist, die MRSA Pr{\"a}valenz im Laufe des Schlachtprozesses auf ein niedriges finales Niveau zwischen 0,15 bis 1,15\% zu reduzieren. Vor allem das Br{\"u}hen und Abfl{\"a}mmen der Schlachtk{\"o}rper wurden als kritische Prozesse im Hinblick auf eine MRSA-Dekontamination identifiziert. In Deutschland werden regelm{\"a}ßig MRSA-Pr{\"a}valenz und Typisierungsdaten auf allen Stufen der Lebensmittelkette verschiedener Nutztiere erfasst. Um die MRSA-Daten dieser Querschnittstudie hinsichtlich einer m{\"o}glichen {\"U}bertragung entlang der Kette zu analysieren, wurde ein neuer statistischer Ansatz entwickelt. Hierf{\"u}r wurde eine Chi-Quadrat-Statistik mit der Berechnung des Czekanowski-{\"A}hnlichkeitsindex kombiniert, um Unterschiede in der Verteilung stammspezifischer Eigenschaften zwischen MRSA aus dem Stall, von Karkassen nach der Schlachtung und aus Fleisch im Einzelhandel zu quantifizieren. Die Methode wurde am Beispiel der Putenfleischkette implementiert und zudem bei der Analyse der Kalbfleischkette angewendet. Die durchgehend hohen {\"A}hnlichkeitswerte zwischen den einzelnen Proben weisen auf eine m{\"o}gliche {\"U}bertragung von MRSA entlang der Lebensmittelkette hin. 
Die erarbeiteten Methoden sind nicht spezifisch bez{\"u}glich Prozessketten und Pathogenen. Sie bieten somit einen großen Anwendungsbereich und erweitern das Methodenspektrum zur Bewertung bakterieller {\"U}bertragungswege.}, language = {en} } @phdthesis{Vosloh2011, author = {Vosloh, Daniel}, title = {Subcellular compartmentation of primary carbon metabolism in mesophyll cells of Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55534}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Metabolismus in Pflanzenzellen ist stark kompartimentiert. Viele Stoffwechselwege haben Reaktionen in mehr als einem Kompartiment. Zum Beispiel wird w{\"a}hrend der Photosynthese in pflanzlichen Mesophyllzellen Kohlenstoff in Form von St{\"a}rke in den Chloroplasten synthetisiert, w{\"a}hrend es im Zytosol in Form von Sacharose gebildet und in der Vakuole gespeichert wird. Diese Reaktionen sind strikt reguliert um ein Gleichgewicht der Kohlenstoffpools der verschiedenen Kompartimente aufrecht zu erhalten und die Energieversorgung aller Teile der Zelle f{\"u}r anabolische Reaktionen sicher zu stellen. Ich wende eine Methode an, bei der die Zellen unter nicht-w{\"a}ssrigen Bedingungen fraktioniert werden und daher der metabolische Status der w{\"a}hrend der Ernte herrschte {\"u}ber den ganzen Zeitraum der Auftrennung beibehalten wird. Durch die Kombination von nichtw{\"a}ssriger Fraktionierung und verschiedener Massenspektrometrietechniken (Fl{\"u}ssigchromotagraphie- und Gaschromotagraphie basierende Massenspekrometrie) ist es m{\"o}glich die intrazellul{\"a}re Verteilung der meisten Intermediate des photosynthetischen Kohlenstoffstoffwechsels und der Produkte der nachgelagerten metabolischen Reaktionen zu bestimmen. Das Wissen {\"u}ber die in vivo Konzentrationen dieser Metabolite wurde genutzt um die {\"A}nderung der freien Gibbs Energie in vivo zu bestimmen. 
Mit Hilfe dessen kann bestimmt werden, welche Reaktion sich in einem Gleichgewichtszustand befinden und welche davon entfernt sind. Die Konzentration der Enzyme und der Km Werte wurden mit den Konzentrationen der Metabolite in vivo verglichen, um festzustellen, welche Enzyme substratlimitiert sind und somit sensitiv gegen{\"u}ber {\"A}nderungen der Substratkonzentration sind. Verschiedene Intermediate des Calvin-Benson Zyklus sind gleichzeitig Substrate f{\"u}r andere Stoffwechselwege, als da w{\"a}ren Dihyroxyaceton-phosphat (DHAP, Saccharosesynthese), Fructose 6-phosphat (Fru6P, St{\"a}rkesynthese), Erythrose 4-phosphat (E4P, Shikimat Stoffwechselweg) und Ribose 5-phosphat (R5P, Nukleotidbiosynthese). Die Enzyme, die diese Intermediate verstoffwechseln, liegen an den Abzweigungspunkten zu diesen Stoffwechselwegen. Diese sind Trisose phosphat isomerase (DHAP), Transketolase (E4P), Sedoheptulose-1,7 biphosphat aldolase (E4P) und Ribose-5-phosphat isomerase (R5P), welche nicht mit ihren Substraten ges{\"a}ttigt sind, da die jeweilige Substratkonzentration geringer als der zugeh{\"o}rige Km Wert ist. F{\"u}r metabolische Kontrolle bedeutet dies, dass diese Schritte am sensitivsten gegen{\"u}ber {\"A}nderungen der Substratkonzentrationen sind. Im Gegensatz dazu sind die regulierten irreversiblen Schritte von Fructose-1,6.biphosphatase und Sedoheptulose-1,7-biphosphatase relativ insensitiv gegen{\"u}ber {\"A}nderungen der Substratkonzentration. F{\"u}r den Stoffwechselweg der Saccharosesynthese konnte gezeigt werden, dass die zytosolische Aldolase eine geringer Bindeseitenkonzentration als Substratkonzentration (DHAP) aufweist, und dass die Konzentration von Saccharose-6-phosphat geringer als der Km Wert des synthetisierenden Enzyms Saccharose-phosphatase ist. Sowohl die Saccharose-phosphat-synthase, also auch die Saccharose-phosphatase sind in vivo weit von einem Gleichgewichtszustand entfernt. 
In Wildtyp Arabidopsis thaliana Columbia-0 Bl{\"a}ttern wurde der gesamte Pool von ADPGlc im Chloroplasten gefunden. Das Enzyme ADPGlc pyrophosphorylase ist im Chloroplasten lokalisiert und synthetisiert ADPGlc aus ATP und Glc1P. Dieses Verteilungsmuster spricht eindeutig gegen die Hypothese von Pozueta-Romero und Kollegen, dass ADPGlc im Zytosol durch ADP vermittelte Spaltung von Saccharose durch die Saccharose Synthase erzeugt wird. Basierend auf dieser Beobachtung und anderen ver{\"o}ffentlichten Ergebnissen wurde geschlußfolgert, dass der generell akzeptierte Stoffwechselweg der St{\"a}rkesynthese durch ADPGlc Produktion via ADPGlc pyrophosphorylase in den Chloroplasten korrekt ist, und die Hypothese des alternativen Stoffwechselweges unhaltbar ist. Innerhalb des Stoffwechselweges der Saccharosesynthsese wurde festgestellt, dass die Konzentration von ADPGlc geringer als der Km Wert des St{\"a}rkesynthase ist, was darauf hindeutet, dass das Enzym substratlimitiert ist. Eine generelle Beobachtung ist, dass viele Enzmye des Calvin-Benson Zyklus {\"a}hnliche Bindeseitenkonzentrationen wie Metabolitkonzentrationen aufweisen, wohingegen in den Synthesewegen von Saccharose und St{\"a}rke die Bindeseitenkonzentrationen der Enzyme viel geringer als die Metabolitkonzentrationen sind.}, language = {en} } @phdthesis{Voroshnin2023, author = {Voroshnin, Vladimir}, title = {Control over spin and electronic structure of MoS₂ monolayer via interactions with substrates}, doi = {10.25932/publishup-59070}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-590709}, school = {Universit{\"a}t Potsdam}, pages = {viii, 125}, year = {2023}, abstract = {The molybdenum disulfide (MoS2) monolayer is a semiconductor with a direct bandgap while it is a robust and affordable material. It is a candidate for applications in optoelectronics and field-effect transistors. 
MoS2 features a strong spin-orbit coupling which makes its spin structure promising for acquiring the Kane-Mele topological concept with corresponding applications in spintronics and valleytronics. From the optical point of view, the MoS2 monolayer features two valleys in the regions of K and K' points. These valleys are differentiated by opposite spins and a related valley-selective circular dichroism. In this study we aim to manipulate the MoS2 monolayer spin structure in the vicinity of the K and K' points to explore the possibility of getting control over the optical and electronic properties. We focus on two different substrates to demonstrate two distinct routes: a gold substrate to introduce a Rashba effect and a graphene/cobalt substrate to introduce a magnetic proximity effect in MoS2. The Rashba effect is proportional to the out-of-plane projection of the electric field gradient. Such a strong change of the electric field occurs at the surfaces of a high atomic number materials and effectively influence conduction electrons as an in-plane magnetic field. A molybdenum and a sulfur are relatively light atoms, thus, similar to many other 2D materials, intrinsic Rashba effect in MoS2 monolayer is vanishing small. However, proximity of a high atomic number substrate may enhance Rashba effect in a 2D material as it was demonstrated for graphene previously. Another way to modify the spin structure is to apply an external magnetic field of high magnitude (several Tesla), and cause a Zeeman splitting, the conduction electrons. However, a similar effect can be reached via magnetic proximity which allows us to reduce external magnetic fields significantly or even to zero. The graphene on cobalt interface is ferromagnetic and stable for MoS2 monolayer synthesis. Cobalt is not the strongest magnet; therefore, stronger magnets may lead to more significant results. 
Nowadays most experimental studies on the dichalcogenides (MoS2 included) are performed on encapsulated heterostructures that are produced by mechanical exfoliation. While mechanical exfoliation (or scotch-tape method) allows to produce a huge variety of structures, the shape and the size of the samples as well as distance between layers in heterostructures are impossible to control reproducibly. In our study we used molecular beam epitaxy (MBE) methods to synthesise both MoS2/Au(111) and MoS2/graphene/Co systems. We chose to use MBE, as it is a scalable and reproducible approach, so later industry may adapt it and take over. We used graphene/cobalt instead of just a cobalt substrate because direct contact of MoS2\ monolayer and a metallic substrate may lead to photoluminescence (PL) quenching in the metallic substrate. Graphene and hexagonal boron nitride monolayer are considered building blocks of a new generation of electronics also commonly used as encapsulating materials for PL studies. Moreover graphene is proved to be a suitable substrate for the MBE growth of transitional metal dichalcogenides (TMDCs). In chapter 1, we start with an introduction to TMDCs. Then we focus on MoS2 monolayer state of the art research in the fields of application scenario; synthesis approaches; electronic, spin, and optical properties; and interactions with magnetic fields and magnetic materials. We briefly touch the basics of magnetism in solids and move on to discuss various magnetic exchange interactions and magnetic proximity effect. Then we describe MoS2 optical properties in more detail. We start from basic exciton physics and its manifestation in the MoS2 monolayer. We consider optical selection rules in the MoS2 monolayer and such properties as chirality, spin-valley locking, and coexistence of bright and dark excitons. 
Chapter 2 contains an overview of the employed surface science methods: angle-integrated, angle-resolved, and spin-resolved photoemission; low energy electron diffraction and scanning tunneling microscopy. In chapter 3, we describe MoS2 monolayer synthesis details for two substrates: gold monocrystal with (111) surface and graphene on cobalt thin film with Co(111) surface orientation. The synthesis descriptions are followed by a detailed characterisation of the obtained structures: fingerprints of MoS2 monolayer formation; MoS2 monolayer symmetry and its relation to the substrate below; characterisation of MoS2 monolayer coverage, domain distribution, sizes and shapes, and moire structures. In chapter~4, we start our discussion with MoS2/Au(111) electronic and spin structure. Combining density functional theory computations (DFT) and spin-resolved photoemission studies, we demonstrate that the MoS2 monolayer band structure features an in-plane Rashba spin splitting. This confirms the possibility of MoS2 monolayer spin structure manipulation via a substrate. Then we investigate the influence of a magnetic proximity in the MoS2/graphene/Co system on the MoS2 monolayer spin structure. We focus our investigation on MoS2 high symmetry points: G and K. First, using spin-resolved measurements, we confirm that electronic states are spin-split at the G point via a magnetic proximity effect. Second, combining spin-resolved measurements and DFT computations for MoS2 monolayer in the K point region, we demonstrate the appearance of a small in-plane spin polarisation in the valence band top and predict a full in-plane spin polarisation for the conduction band bottom. We move forward discussing how these findings are related to the MoS2 monolayer optical properties, in particular the possibility of dark exciton observation. Additionally, we speculate on the control of the MoS2 valley energy via magnetic proximity from cobalt. 
As graphene is spatially buffering the MoS2 monolayer from the Co thin film, we speculate on the role of graphene in the magnetic proximity transfer by replacing graphene with vacuum and other 2D materials in our computations. We finish our discussion by investigating the K-doped MoS2/graphene/Co system and the influence of this doping on the electronic and spin structure as well as on the magnetic proximity effect. In summary, using a scalable MBE approach we synthesised MoS2/Au(111) and MoS2/graphene/Co systems. We found a Rashba effect taking place in MoS2/Au(111) which proves that the MoS2 monolayer in-plane spin structure can be modified. In MoS2/graphene/Co the in-plane magnetic proximity effect indeed takes place which rises the possibility of fine tuning the MoS2 optical properties via manipulation of the the substrate magnetisation.}, language = {en} } @phdthesis{Vorogushyn2008, author = {Vorogushyn, Sergiy}, title = {Analysis of flood hazard under consideration of dike breaches}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27646}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {River reaches protected by dikes exhibit high damage potential due to strong value accumulation in the hinterland areas. While providing an efficient protection against low magnitude flood events, dikes may fail under the load of extreme water levels and long flood durations. Hazard and risk assessments for river reaches protected by dikes have not adequately considered the fluvial inundation processes up to now. Particularly, the processes of dike failures and their influence on the hinterland inundation and flood wave propagation lack comprehensive consideration. This study focuses on the development and application of a new modelling system which allows a comprehensive flood hazard assessment along diked river reaches under consideration of dike failures. 
The proposed Inundation Hazard Assessment Model (IHAM) represents a hybrid probabilistic-deterministic model. It comprises three models interactively coupled at runtime. These are: (1) 1D unsteady hydrodynamic model of river channel and floodplain flow between dikes, (2) probabilistic dike breach model which determines possible dike breach locations, breach widths and breach outflow discharges, and (3) 2D raster-based diffusion wave storage cell model of the hinterland areas behind the dikes. Due to the unsteady nature of the 1D and 2D coupled models, the dependence between hydraulic load at various locations along the reach is explicitly considered. The probabilistic dike breach model describes dike failures due to three failure mechanisms: overtopping, piping and slope instability caused by the seepage flow through the dike core (micro-instability). The 2D storage cell model driven by the breach outflow boundary conditions computes an extended spectrum of flood intensity indicators such as water depth, flow velocity, impulse, inundation duration and rate of water rise. IHAM is embedded in a Monte Carlo simulation in order to account for the natural variability of the flood generation processes reflected in the form of input hydrographs and for the randomness of dike failures given by breach locations, times and widths. The model was developed and tested on a ca. 91 km heavily diked river reach on the German part of the Elbe River between gauges Torgau and Vockerode. The reach is characterised by low slope and fairly flat extended hinterland areas. The scenario calculations for the developed synthetic input hydrographs for the main river and tributary were carried out for floods with return periods of T = 100, 200, 500, 1000 a. Based on the modelling results, probabilistic dike hazard maps could be generated that indicate the failure probability of each discretised dike section for every scenario magnitude. 
In the disaggregated display mode, the dike hazard maps indicate the failure probabilities for each considered breach mechanism. Besides the binary inundation patterns that indicate the probability of raster cells being inundated, IHAM generates probabilistic flood hazard maps. These maps display spatial patterns of the considered flood intensity indicators and their associated return periods. Finally, scenarios of polder deployment for the extreme floods with T = 200, 500, 1000 were simulated with IHAM. The developed IHAM simulation system represents a new scientific tool for studying fluvial inundation dynamics under extreme conditions incorporating effects of technical flood protection measures. With its major outputs in form of novel probabilistic inundation and dike hazard maps, the IHAM system has a high practical value for decision support in flood management.}, language = {en} } @phdthesis{vonNordheim2018, author = {von Nordheim, Danny}, title = {Dielectric non-linearities of P(VDF-TrFE) single and multilayers for memory applications}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421778}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 109}, year = {2018}, abstract = {Poly(vinylidene fluoride-trifluoroethylene) (P(VDF-TrFE)) ferroelectric thin films of different molar ratio have been studied with regard to data memory applications. Therefore, films with thicknesses of 200 nm and less have been spin coated from solution. Observations gained from single layers have been extended to multilayer capacitors and three terminal transistor devices. Besides conventional hysteresis measurements, the measurement of dielectric non-linearities has been used as a main tool of characterisation. Being a very sensitive and non-destructive method, non-linearity measurements are well suited for polarisation readout and property studies. 
Samples have been excited using a high quality, single-frequency sinusoidal voltage with an amplitude significantly smaller than the coercive field of the samples. The response was then measured at the excitation frequency and its higher harmonics. Using the measurement results, the linear and non-linear dielectric permittivities ɛ₁, ɛ₂ and ɛ₃ have been determined. The permittivities have been used to derive the temperature-dependent polarisation behaviour as well as the polarisation state and the order of the phase transitions. The coercive field in VDF-TrFE copolymers is high if compared to their ceramic competitors. Therefore, the film thickness had to be reduced significantly. Considering a switching voltage of 5 V and a coercive field of 50 MV/m, the film thickness has to be 100 nm and below. If the thickness becomes substantially smaller than the other dimensions, surface and interface layer effects become more pronounced. For thicker films of P(VDF-TrFE) with a molar fraction of 56/44 a second-order phase transition without a thermal hysteresis for an ɛ₁(T) temperature cycle has been predicted and observed. This however, could not be confirmed by the measurements of thinner films. A shift of transition temperatures as well as a temperature independent, non-switchable polarisation and a thermal hysteresis for P(VDF-TrFE) 56/44 have been observed. The impact of static electric fields on the polarisation and the phase transition has therefore been studied and simulated, showing that all aforementioned phenomena including a linear temperature dependence of the polarisation might originate from intrinsic electric fields. In further experiments the knowledge gained from single layer capacitors has been extended to bilayer copolymer thin films of different molar composition. Bilayers have been deposited by succeeding cycles of spin coating from solution. 
Single layers and their bilayer combination have been studied individually in order to prove the layers stability. The individual layers have been found to be physically stable. But while the bilayers reproduced the main ɛ₁(T) properties of the single layers qualitatively, quantitative numbers could not be explained by a simple serial connection of capacitors. Furthermore, a linear behaviour of the polarisation throughout the measured temperature range has been observed. This was found to match the behaviour predicted considering a constant electric field. Retention time is an important quantity for memory applications. Hence, the retention behaviour of VDF-TrFE copolymer thin films has been determined using dielectric non-linearities. The polarisation loss in P(VDF-TrFE) poled samples has been found to be less than 20\% if recorded over several days. The loss increases significantly if the samples have been poled with lower amplitudes, causing an unsaturated polarisation. The main loss was attributed to injected charges. Additionally, measurements of dielectric non-linearities have been proven to be a sensitive and non-destructive tool to measure the retention behaviour. Finally, a ferroelectric field effect transistor using mainly organic materials (FerrOFET) has been successfully studied. DiNaphtho[2,3-b:2',3'-f]Thieno[3,2-b]Thiophene (DNTT) has proven to be a stable, suitable organic semiconductor to build up ferroelectric memory devices. Furthermore, an oxidised aluminium bottom electrode and additional dielectric layers, i.e. 
parylene C, have proven to reduce the leakage current and therefore enhance the performance significantly.}, language = {en} } @phdthesis{vonKaphengst2019, author = {von Kaphengst, Dragana}, title = {Project's management quality in development cooperation}, doi = {10.25932/publishup-43099}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430992}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 237}, year = {2019}, abstract = {In light of the debate on the consequences of competitive contracting out of traditionally public services, this research compares two mechanisms used to allocate funds in development cooperation—direct awarding and competitive contracting out—aiming to identify their potential advantages and disadvantages. The agency theory is applied within the framework of rational-choice institutionalism to study the institutional arrangements that surround two different money allocation mechanisms, identify the incentives they create for the behavior of individual actors in the field, and examine how these then transfer into measurable differences in managerial quality of development aid projects. In this work, project management quality is seen as an important determinant of the overall project success. For data-gathering purposes, the German development agency, the Gesellschaft f{\"u}r Internationale Zusammenarbeit (GIZ), is used due to its unique way of work. Whereas the majority of projects receive funds via direct-award mechanism, there is a commercial department, GIZ International Services (GIZ IS) that has to compete for project funds. The data concerning project management practices on the GIZ and GIZ IS projects was gathered via a web-based, self-administered survey of project team leaders. Principal component analysis was applied to reduce the dimensionality of the independent variable to total of five components of project management. 
Furthermore, multiple regression analysis identified the differences between the separate components on these two project types. Enriched by qualitative data gathered via interviews, this thesis offers insights into everyday managerial practices in development cooperation and identifies the advantages and disadvantages of the two allocation mechanisms. The thesis first reiterates the responsibility of donors and implementers for overall aid effectiveness. It shows that the mechanism of competitive contracting out leads to better oversight and control of implementers, fosters deeper cooperation between the implementers and beneficiaries, and has a potential to strengthen ownership of recipient countries. On the other hand, it shows that the evaluation quality does not tremendously benefit from the competitive allocation mechanism and that the quality of the component knowledge management and learning is better when direct-award mechanisms are used. This raises questions about the lacking possibilities of actors in the field to learn about past mistakes and incorporate the findings into the future interventions, which is one of the fundamental issues of aid effectiveness. Finally, the findings show immense deficiencies in regard to oversight and control of individual projects in German development cooperation.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Thomas}, title = {Model-driven engineering of self-adaptive software}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409755}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 357}, year = {2018}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls the underlying adaptable software by a feedback loop. State-of-the-art approaches prescribe the feedback loop in terms of numbers, how the activities (e.g., monitor, analyze, plan, and execute (MAPE)) and the knowledge are structured to a feedback loop, and the type of knowledge. 
Moreover, the feedback loop is usually hidden in the implementation or framework and therefore not visible in the architectural design. Additionally, an adaptation engine often employs runtime models that either represent the adaptable software or capture strategic knowledge such as reconfiguration strategies. State-of-the-art approaches do not systematically address the interplay of such runtime models, which would otherwise allow developers to freely design the entire feedback loop. This thesis presents ExecUtable RuntimE MegAmodels (EUREMA), an integrated model-driven engineering (MDE) solution that rigorously uses models for engineering feedback loops. EUREMA provides a domain-specific modeling language to specify and an interpreter to execute feedback loops. The language allows developers to freely design a feedback loop concerning the activities and runtime models (knowledge) as well as the number of feedback loops. It further supports structuring the feedback loops in the adaptation engine that follows a layered architectural style. Thus, EUREMA makes the feedback loops explicit in the design and enables developers to reason about design decisions. To address the interplay of runtime models, we propose the concept of a runtime megamodel, which is a runtime model that contains other runtime models as well as activities (e.g., MAPE) working on the contained models. This concept is the underlying principle of EUREMA. The resulting EUREMA (mega)models are kept alive at runtime and they are directly executed by the EUREMA interpreter to run the feedback loops. Interpretation provides the flexibility to dynamically adapt a feedback loop. In this context, EUREMA supports engineering self-adaptive software in which feedback loops run independently or in a coordinated fashion within the same layer as well as on top of each other in different layers of the adaptation engine. 
Moreover, we consider preliminary means to evolve self-adaptive software by providing a maintenance interface to the adaptation engine. This thesis discusses in detail EUREMA by applying it to different scenarios such as single, multiple, and stacked feedback loops for self-repairing and self-optimizing the mRUBiS application. Moreover, it investigates the design and expressiveness of EUREMA, reports on experiments with a running system (mRUBiS) and with alternative solutions, and assesses EUREMA with respect to quality attributes such as performance and scalability. The conducted evaluation provides evidence that EUREMA as an integrated and open MDE approach for engineering self-adaptive software seamlessly integrates the development and runtime environments using the same formalism to specify and execute feedback loops, supports the dynamic adaptation of feedback loops in layered architectures, and achieves an efficient execution of feedback loops by leveraging incrementality.}, language = {en} } @phdthesis{Vogel2018a, author = {Vogel, Stefanie}, title = {Sequence dependency of photon and electron induced DNA strand breaks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419669}, school = {Universit{\"a}t Potsdam}, pages = {xii, 117}, year = {2018}, abstract = {Deoxyribonucleic acid (DNA) is the carrier of human genetic information and is exposed to environmental influences such as the ultraviolet (UV) fraction of sunlight every day. The photostability of the DNA against UV light is astonishing. Even if the DNA bases have a strong absorption maximum at around 260 nm/4.77 eV, their quantum yield of photoproducts remains very low [1]. If the photon energies exceed the ionization energy (IE) of the nucleobases ($\sim$8-9 eV) [2], the DNA can be severely damaged. Photoexcitation and -ionization reactions occur, which can induce strand breaks in the DNA. 
The efficiency of the excitation and ionization induced strand breaks in the target DNA sequences is represented by cross sections. If Si as a substrate material is used in the VUV irradiation experiments, secondary electrons with an energy below 3.6 eV are generated from the substrate. These low energy electrons (LEE) are known to induce dissociative electron attachment (DEA) in DNA and with it DNA strand breakage very efficiently. LEEs play an important role in cancer radiation therapy, since they are generated secondarily along the radiation track of ionizing radiation. In the framework of this thesis, different single stranded DNA sequences were irradiated with 8.44 eV vacuum UV (VUV) light and cross sections for single strand breaks (SSB) were determined. Several sequences were also exposed to secondary LEEs, which additionally contributed to the SSBs. First, the cross sections for SSBs depending on the type of nucleobases were determined. Both types of DNA sequences, mono-nucleobase and mixed sequences showed very similar results upon VUV radiation. The additional influence of secondarily generated LEEs resulted in contrast in a clear trend for the SSB cross sections. In this, the polythymine sequence had the highest cross section for SSBs, which can be explained by strong anionic resonances in this energy range. Furthermore, SSB cross sections were determined as a function of sequence length. This resulted in an increase in the strand breaks to the same extent as the increase in the geometrical cross section. The longest DNA sequence (20 nucleotides) investigated in this series, however, showed smaller cross section values for SSBs, which can be explained by conformational changes in the DNA. Moreover, several DNA sequences that included the radiosensitizers 5-Bromouracil (5BrU) and 8-Bromoadenine (8BrA) were investigated and the corresponding SSB cross sections were determined. 
It was shown that 5BrU reacts very strongly to VUV radiation leading to high strand break yields, which showed in turn a strong sequence-dependency. 8BrA, on the other hand, showed no sensitization to the applied VUV radiation, since almost no increase in strand breakage yield was observed in comparison to non-modified DNA sequences. In order to be able to identify the mechanisms of radiation damage by photons, the IEs of certain DNA sequences were further explored using photoionization tandem mass spectrometry. By varying the DNA sequence, both the IEs depending on the type of nucleobase as well as on the DNA strand length could be identified and correlated to the SSB cross sections. The influence of the IE on the photoinduced reaction in the brominated DNA sequences could be excluded.}, language = {en} } @phdthesis{Vogel2013, author = {Vogel, Kristin}, title = {Applications of Bayesian networks in natural hazard assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69777}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Even though quite different in occurrence and consequences, from a modeling perspective many natural hazards share similar properties and challenges. Their complex nature as well as lacking knowledge about their driving forces and potential effects make their analysis demanding: uncertainty about the modeling framework, inaccurate or incomplete event observations and the intrinsic randomness of the natural phenomenon add up to different interacting layers of uncertainty, which require a careful handling. Nevertheless deterministic approaches are still widely used in natural hazard assessments, holding the risk of underestimating the hazard with disastrous effects. The all-round probabilistic framework of Bayesian networks constitutes an attractive alternative. 
In contrast to deterministic proceedings, it treats response variables as well as explanatory variables as random variables making no difference between input and output variables. Using a graphical representation Bayesian networks encode the dependency relations between the variables in a directed acyclic graph: variables are represented as nodes and (in-)dependencies between variables as (missing) edges between the nodes. The joint distribution of all variables can thus be described by decomposing it, according to the depicted independences, into a product of local conditional probability distributions, which are defined by the parameters of the Bayesian network. In the framework of this thesis the Bayesian network approach is applied to different natural hazard domains (i.e. seismic hazard, flood damage and landslide assessments). Learning the network structure and parameters from data, Bayesian networks reveal relevant dependency relations between the included variables and help to gain knowledge about the underlying processes. The problem of Bayesian network learning is cast in a Bayesian framework, considering the network structure and parameters as random variables themselves and searching for the most likely combination of both, which corresponds to the maximum a posteriori (MAP score) of their joint distribution given the observed data. Although well studied in theory the learning of Bayesian networks based on real-world data is usually not straightforward and requires an adoption of existing algorithms. Typically arising problems are the handling of continuous variables, incomplete observations and the interaction of both. Working with continuous distributions requires assumptions about the allowed families of distributions. To {``}let the data speak{''} and avoid wrong assumptions, continuous variables are instead discretized here, thus allowing for a completely data-driven and distribution-free learning. 
An extension of the MAP score, considering the discretization as random variable as well, is developed for an automatic multivariate discretization, that takes interactions between the variables into account. The discretization process is nested into the network learning and requires several iterations. Having to face incomplete observations on top, this may pose a computational burden. Iterative proceedings for missing value estimation become quickly infeasible. A more efficient albeit approximate method is used instead, estimating the missing values based only on the observations of variables directly interacting with the missing variable. Moreover natural hazard assessments often have a primary interest in a certain target variable. The discretization learned for this variable does not always have the required resolution for a good prediction performance. Finer resolutions for (conditional) continuous distributions are achieved with continuous approximations subsequent to the Bayesian network learning, using kernel density estimations or mixtures of truncated exponential functions. All our proceedings are completely data-driven. We thus avoid assumptions that require expert knowledge and instead provide domain independent solutions, that are applicable not only in other natural hazard assessments, but in a variety of domains struggling with uncertainties.}, language = {en} } @phdthesis{Vockenberg2006, author = {Vockenberg, Kerstin}, title = {Updating of representations in working memory}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-11767}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The limited capacity of working memory forces people to update its contents continuously. Two aspects of the updating process were investigated in the present experimental series. The first series concerned the question if it is possible to update several representations in parallel. 
Similar results were obtained for the updating of object features as well as for the updating of whole objects; participants were able to update representations in parallel. The second experimental series addressed the question if working memory representations which were replaced in an updating disappear directly or interfere with the new representations. Evidence for the existence of old representations was found under working memory conditions and under conditions exceeding working memory capacity. These results contradict the hypothesis that working memory contents are protected from proactive interference of long-term memory contents.}, subject = {Aktualisierung}, language = {en} } @phdthesis{Vlasov2015, author = {Vlasov, Vladimir}, title = {Synchronization of oscillatory networks in terms of global variables}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78182}, school = {Universit{\"a}t Potsdam}, pages = {82}, year = {2015}, abstract = {Synchronization of large ensembles of oscillators is an omnipresent phenomenon observed in different fields of science like physics, engineering, life sciences, etc. The most simple setup is that of globally coupled phase oscillators, where all the oscillators contribute to a global field which acts on all oscillators. This formulation of the problem was pioneered by Winfree and Kuramoto. Such a setup gives a possibility for the analysis of these systems in terms of global variables. In this work we describe nontrivial collective dynamics in oscillator populations coupled via mean fields in terms of global variables. We consider problems which cannot be directly reduced to standard Kuramoto and Winfree models. In the first part of the thesis we adopt a method introduced by Watanabe and Strogatz. The main idea is that the system of identical oscillators of particular type can be described by a low-dimensional system of global equations. 
This approach enables us to perform a complete analytical analysis for a special but vast set of initial conditions. Furthermore, we show how the approach can be expanded for some nonidentical systems. We apply the Watanabe-Strogatz approach to arrays of Josephson junctions and systems of identical phase oscillators with leader-type coupling. In the next parts of the thesis we consider the self-consistent mean-field theory method that can be applied to general nonidentical globally coupled systems of oscillators both with or without noise. For considered systems a regime, where the global field rotates uniformly, is the most important one. With the help of this approach such solutions of the self-consistency equation for an arbitrary distribution of frequencies and coupling parameters can be found analytically in the parametric form, both for noise-free and noisy cases. We apply this method to deterministic Kuramoto-type model with generic coupling and an ensemble of spatially distributed oscillators with leader-type coupling. Furthermore, with the proposed self-consistent approach we fully characterize rotating wave solutions of noisy Kuramoto-type model with generic coupling and an ensemble of noisy oscillators with bi-harmonic coupling. Whenever possible, a complete analysis of global dynamics is performed and compared with direct numerical simulations of large populations.}, language = {en} } @phdthesis{Vitagliano2024, author = {Vitagliano, Gerardo}, title = {Modeling the structure of tabular files for data preparation}, doi = {10.25932/publishup-62435}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624351}, school = {Universit{\"a}t Potsdam}, pages = {ii, 114}, year = {2024}, abstract = {To manage tabular data files and leverage their content in a given downstream task, practitioners often design and execute complex transformation pipelines to prepare them. 
The complexity of such pipelines stems from different factors, including the nature of the preparation tasks, often exploratory or ad-hoc to specific datasets; the large repertory of tools, algorithms, and frameworks that practitioners need to master; and the volume, variety, and velocity of the files to be prepared. Metadata plays a fundamental role in reducing this complexity: characterizing a file assists end users in the design of data preprocessing pipelines, and furthermore paves the way for suggestion, automation, and optimization of data preparation tasks. Previous research in the areas of data profiling, data integration, and data cleaning, has focused on extracting and characterizing metadata regarding the content of tabular data files, i.e., about the records and attributes of tables. Content metadata are useful for the latter stages of a preprocessing pipeline, e.g., error correction, duplicate detection, or value normalization, but they require a properly formed tabular input. Therefore, these metadata are not relevant for the early stages of a preparation pipeline, i.e., to correctly parse tables out of files. In this dissertation, we turn our focus to what we call the structure of a tabular data file, i.e., the set of characters within a file that do not represent data values but are required to parse and understand the content of the file. We provide three different approaches to represent file structure, an explicit representation based on context-free grammars; an implicit representation based on file-wise similarity; and a learned representation based on machine learning. In our first contribution, we use the grammar-based representation to characterize a set of over 3000 real-world csv files and identify multiple structural issues that let files deviate from the csv standard, e.g., by having inconsistent delimiters or containing multiple tables. 
We leverage our learnings about real-world files and propose Pollock, a benchmark to test how well systems parse csv files that have a non-standard structure, without any previous preparation. We report on our experiments on using Pollock to evaluate the performance of 16 real-world data management systems. Following, we characterize the structure of files implicitly, by defining a measure of structural similarity for file pairs. We design a novel algorithm to compute this measure, which is based on a graph representation of the files' content. We leverage this algorithm and propose Mondrian, a graphical system to assist users in identifying layout templates in a dataset, classes of files that have the same structure, and therefore can be prepared by applying the same preparation pipeline. Finally, we introduce MaGRiTTE, a novel architecture that uses self-supervised learning to automatically learn structural representations of files in the form of vectorial embeddings at three different levels: cell level, row level, and file level. We experiment with the application of structural embeddings for several tasks, namely dialect detection, row classification, and data preparation efforts estimation. Our experimental results show that structural metadata, either identified explicitly on parsing grammars, derived implicitly as file-wise similarity, or learned with the help of machine learning architectures, is fundamental to automate several tasks, to scale up preparation to large quantities of files, and to provide repeatable preparation pipelines.}, language = {en} }