@phdthesis{Andorf2011, author = {Andorf, Sandra}, title = {A systems biological approach towards the molecular basis of heterosis in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51173}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Heterosis is defined as the superiority in performance of heterozygous genotypes compared to their corresponding genetically different homozygous parents. This phenomenon is already known since the beginning of the last century and it has been widely used in plant breeding, but the underlying genetic and molecular mechanisms are not well understood. In this work, a systems biological approach based on molecular network structures is proposed to contribute to the understanding of heterosis. Hybrids are likely to contain additional regulatory possibilities compared to their homozygous parents and, therefore, they may be able to correctly respond to a higher number of environmental challenges, which leads to a higher adaptability and, thus, the heterosis phenomenon. In the network hypothesis for heterosis, presented in this work, more regulatory interactions are expected in the molecular networks of the hybrids compared to the homozygous parents. Partial correlations were used to assess this difference in the global interaction structure of regulatory networks between the hybrids and the homozygous genotypes. This network hypothesis for heterosis was tested on metabolite profiles as well as gene expression data of the two parental Arabidopsis thaliana accessions C24 and Col-0 and their reciprocal crosses. These plants are known to show a heterosis effect in their biomass phenotype. The hypothesis was confirmed for mid-parent and best-parent heterosis for either hybrid of our experimental metabolite as well as gene expression data. It was shown that this result is influenced by the used cutoffs during the analyses. Too strict filtering resulted in sets of metabolites and genes for which the network hypothesis for heterosis does not hold true for either hybrid regarding mid-parent as well as best-parent heterosis. In an over-representation analysis, the genes that show the largest heterosis effects according to our network hypothesis were compared to genes of heterotic quantitative trait loci (QTL) regions. Separately for either hybrid regarding mid-parent as well as best-parent heterosis, a significantly larger overlap between the resulting gene lists of the two different approaches towards biomass heterosis was detected than expected by chance. This suggests that each heterotic QTL region contains many genes influencing biomass heterosis in the early development of Arabidopsis thaliana. Furthermore, this integrative analysis led to a confinement and an increased confidence in the group of candidate genes for biomass heterosis in Arabidopsis thaliana identified by both approaches.}, language = {en} } @phdthesis{AndradeLinares2011, author = {Andrade Linares, Diana Roc{\´i}o}, title = {Characterization of tomato root-endophytic fungi and analysis of their effects on plant development, on fruit yield and quality and on interaction with the pathogen Verticillium dahliae}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51375}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Non-mycorrhizal fungal endophytes are able to colonize internally roots without causing visible disease symptoms establishing neutral or mutualistic associations with plants. These fungi known as non-clavicipitaceous endophytes have a broad host range of monocot and eudicot plants and are highly diverse. Some of them promote plant growth and confer increased abiotic-stress tolerance and disease resistance. According to such possible effects on host plants, it was aimed to isolate and to characterize native fungal root endophytes from tomato (Lycopersicon esculentum Mill.) and to analyze their effects on plant development, plant resistance and fruit yield and quality together with the model endophyte Piriformospora indica. Fifty one new fungal strains were isolated from desinfected tomato roots of four different crop sites in Colombia. These isolates were roughly characterized and fourteen potential endophytes were further analyzed concerning their taxonomy, their root colonization capacity and their impact on plant growth. Sequencing of the ITS region from the ribosomal RNA gene cluster and in-depth morphological characterisation revealed that they correspond to different phylogenetic groups among the phylum Ascomycota. Nine different morphotypes were described including six dark septate endophytes (DSE) that did not correspond to the Phialocephala group. Detailed confocal microscopy analysis showed various colonization patterns of the endophytes inside the roots ranging from epidermal penetration to hyphal growth through the cortex. Tomato pot experiments under glass house conditions showed that they differentially affect plant growth depending on colonization time and inoculum concentration. Three new isolates (two unknown fungal endophyte DSE48, DSE49 and one identified as Leptodontidium orchidicola) with neutral or positiv effects were selected and tested in several experiments for their influence on vegetative growth, fruit yield and quality and their ability to diminish the impact of the pathogen Verticillium dahliae on tomato plants. Although plant growth promotion by all three fungi was observed in young plants, vegetative growth parameters were not affected after 22 weeks of cultivation except a reproducible increase of root diameter by the endophyte DSE49. Additionally, L. orchidicola increased biomass and glucose content of tomato fruits, but only at an early date of harvest and at a certain level of root colonization. Concerning bioprotective effects, the endophytes DSE49 and L. orchidicola decreased significantly disease symptoms caused by the pathogen V. dahliae, but only at a low dosis of the pathogen. In order to analyze, if the model root endophytic fungus Piriformospora indica could be suitable for application in production systems, its impact on tomato was evaluated. Similarly to the new fungal isolates, significant differences for vegetative growth parameters were only observable in young plants and, but protection against V. dahliae could be seen in one experiment also at high dosage of the pathogen. As the DSE L. orchidicola, P. indica increased the number and biomass of marketable tomatoes only at the beginning of fruit setting, but this did not lead to a significant higher total yield. If the effects on growth are due to a better nutrition of the plant with mineral element was analyzed in barley in comparison to the arbuscular mycorrhizal fungus Glomus mosseae. While the mycorrhizal fungus increased nitrogen and phosphate uptake of the plant, no such effect was observed for P. indica. In summary this work shows that many different fungal endophytes can be also isolated from roots of crops and, that these isolates can have positive effects on early plant development. This does, however, not lead to an increase in total yield or in improvement of fruit quality of tomatoes under greenhouse conditions.}, language = {en} } @phdthesis{Avila2011, author = {Avila, Gast{\´o}n}, title = {Asymptotic staticity and tensor decompositions with fast decay conditions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54046}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Corvino, Corvino and Schoen, Chruściel and Delay have shown the existence of a large class of asymptotically flat vacuum initial data for Einstein's field equations which are static or stationary in a neighborhood of space-like infinity, yet quite general in the interior. The proof relies on some abstract, non-constructive arguments which makes it difficult to calculate such data numerically by using similar arguments. A quasilinear elliptic system of equations is presented of which we expect that it can be used to construct vacuum initial data which are asymptotically flat, time-reflection symmetric, and asymptotic to static data up to a prescribed order at space-like infinity. A perturbation argument is used to show the existence of solutions. It is valid when the order at which the solutions approach staticity is restricted to a certain range. Difficulties appear when trying to improve this result to show the existence of solutions that are asymptotically static at higher order. The problems arise from the lack of surjectivity of a certain operator. Some tensor decompositions in asymptotically flat manifolds exhibit some of the difficulties encountered above. The Helmholtz decomposition, which plays a role in the preparation of initial data for the Maxwell equations, is discussed as a model problem. A method to circumvent the difficulties that arise when fast decay rates are required is discussed. This is done in a way that opens the possibility to perform numerical computations. The insights from the analysis of the Helmholtz decomposition are applied to the York decomposition, which is related to that part of the quasilinear system which gives rise to the difficulties. For this decomposition analogous results are obtained. It turns out, however, that in this case the presence of symmetries of the underlying metric leads to certain complications. The question, whether the results obtained so far can be used again to show by a perturbation argument the existence of vacuum initial data which approach static solutions at infinity at any given order, thus remains open. The answer requires further analysis and perhaps new methods.}, language = {en} } @phdthesis{BarbosaPfannes2011, author = {Barbosa Pfannes, Eva Katharina}, title = {Probing the regulatory mechanisms of the actomyosin system in motile cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57812}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Actin-based directional motility is important for embryonic development, wound healing, immune responses, and development of tissues. Actin and myosin are essential players in this process that can be subdivided into protrusion, adhesion, and traction. Protrusion is the forward movement of the membrane at the leading edge of the cell. Adhesion is required to enable movement along a substrate, and traction finally leads to the forward movement of the entire cell body, including its organelles. While actin polymerization is the main driving force in cell protrusions, myosin motors lead to the contraction of the cell body. The goal of this work was to study the regulatory mechanisms of the motile machinery by selecting a representative key player for each stage of the signaling process: the regulation of Arp2/3 activity by WASP (actin system), the role of cGMP in myosin II assembly (myosin system), and the influence of phosphoinositide signaling (upstream receptor pathway). The model organism chosen for this work was the social ameba Dictyostelium discoideum, due to the well-established knowledge of its cytoskeletal machinery, the easy handling, and the high motility of its vegetative and starvation developed cells. First, I focused on the dynamics of the actin cytoskeleton by modulating the activity of one of its key players, the Arp2/3 complex. This was achieved using the carbazole derivative Wiskostatin, an inhibitor of the Arp2/3 activator WASP. Cells treated with Wiskostatin adopted a round shape, with no of few pseudopodia. With the help of a microfluidic cell squeezer device, I could show that Wiskostatin treated cells display a reduced mechanical stability, comparable to cells treated with the actin disrupting agent Latrunculin A. Furthermore, the WASP inhibited cells adhere stronger to a surface and show a reduced motility and chemotactic performance. However, the overall F-actin content in the cells was not changed. Confocal microscopy and TIRF microscopy imaging showed that the cells maintained an intact actin cortex. Localized dynamic patches of increased actin polymerization were observed that, however, did not lead to membrane deformation. This indicated that the mechanisms of actin-driven force generation were impaired in Wiskostatin treated cells. It is concluded that in these cells, an altered architecture of the cortical network leads to a reduced overall stiffness of the cell, which is insufficient to support the force generation required for membrane deformation and pseudopod formation. Second, the role of cGMP in myosin II dynamics was investigated. Cyclic GMP is known to regulate the association of myosin II with the cytoskeleton. In Dictyostelium, intracellular cGMP levels increase when cells are exposed to chemoattractants, but also in response to osmotic stress. To study the influence of cyclic GMP on actin and myosin II dynamics, I used the laser-induced photoactivation of a DMACM-caged-Br-cGMP to locally release cGMP inside the cell. My results show that cGMP directly activates the myosin II machinery, but is also able to induce an actin response independently of cAMP receptor activation and signaling. The actin response was observed in both vegetative and developed cells. Possible explanations include cGMP-induced actin polymerization through VASP (vasodilator-stimulated phosphoprotein) or through binding of cGMP to cyclic nucleotide-dependent kinases. Finally, I investigated the role of phosphoinositide signaling using the Polyphosphoinositide-Binding Peptide (PBP10) that binds preferentially to PIP2. Phosphoinositides can recruit actin-binding proteins to defined subcellular sites and alter their activity. Neutrophils, as well as developed Dictyostelium cells produce PIP3 in the plasma membrane at their leading edge in response to an external chemotactic gradient. Although not essential for chemotaxis, phosphoinositides are proposed to act as an internal compass in the cell. When treated with the peptide PBP10, cells became round, with fewer or no pseudopods. PH-CRAC translocation to the membrane still occurs, even at low cAMP stimuli, but cell motility (random and directional) was reduced. My data revealed that the decrease in the pool of available PIP2 in the cell is sufficient to impair cell motility, but enough PIP2 remains so that PIP3 is formed in response to chemoattractant stimuli. My data thus highlights how sensitive cell motility and morphology are to changes in the phosphoinositide signaling. In summary, I have analyzed representative regulatory mechanisms that govern key parts of the motile machinery and characterized their impact on cellular properties including mechanical stability, adhesion and chemotaxis.}, language = {en} } @phdthesis{Bergner2011, author = {Bergner, Andr{\´e}}, title = {Synchronization in complex systems with multiple time scales}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53407}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the present work synchronization phenomena in complex dynamical systems exhibiting multiple time scales have been analyzed. Multiple time scales can be active in different manners. Three different systems have been analyzed with different methods from data analysis. The first system studied is a large heterogenous network of bursting neurons, that is a system with two predominant time scales, the fast firing of action potentials (spikes) and the burst of repetitive spikes followed by a quiescent phase. This system has been integrated numerically and analyzed with methods based on recurrence in phase space. An interesting result are the different transitions to synchrony found in the two distinct time scales. Moreover, an anomalous synchronization effect can be observed in the fast time scale, i.e. there is range of the coupling strength where desynchronization occurs. The second system analyzed, numerically as well as experimentally, is a pair of coupled CO₂ lasers in a chaotic bursting regime. This system is interesting due to its similarity with epidemic models. We explain the bursts by different time scales generated from unstable periodic orbits embedded in the chaotic attractor and perform a synchronization analysis of these different orbits utilizing the continuous wavelet transform. We find a diverse route to synchrony of these different observed time scales. The last system studied is a small network motif of limit cycle oscillators. Precisely, we have studied a hub motif, which serves as elementary building block for scale-free networks, a type of network found in many real world applications. These hubs are of special importance for communication and information transfer in complex networks. Here, a detailed study on the mechanism of synchronization in oscillatory networks with a broad frequency distribution has been carried out. In particular, we find a remote synchronization of nodes in the network which are not directly coupled. We also explain the responsible mechanism and its limitations and constraints. Further we derive an analytic expression for it and show that information transmission in pure phase oscillators, such as the Kuramoto type, is limited. In addition to the numerical and analytic analysis an experiment consisting of electrical circuits has been designed. The obtained results confirm the former findings.}, language = {en} } @phdthesis{Bierbaum2011, author = {Bierbaum, Veronika}, title = {Chemomechanical coupling and motor cycles of the molecular motor myosin V}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53614}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the living cell, the organization of the complex internal structure relies to a large extent on molecular motors. Molecular motors are proteins that are able to convert chemical energy from the hydrolysis of adenosine triphosphate (ATP) into mechanical work. Being about 10 to 100 nanometers in size, the molecules act on a length scale, for which thermal collisions have a considerable impact onto their motion. In this way, they constitute paradigmatic examples of thermodynamic machines out of equilibrium. This study develops a theoretical description for the energy conversion by the molecular motor myosin V, using many different aspects of theoretical physics. Myosin V has been studied extensively in both bulk and single molecule experiments. Its stepping velocity has been characterized as a function of external control parameters such as nucleotide concentration and applied forces. In addition, numerous kinetic rates involved in the enzymatic reaction of the molecule have been determined. For forces that exceed the stall force of the motor, myosin V exhibits a 'ratcheting' behaviour: For loads in the direction of forward stepping, the velocity depends on the concentration of ATP, while for backward loads there is no such influence. Based on the chemical states of the motor, we construct a general network theory that incorporates experimental observations about the stepping behaviour of myosin V. The motor's motion is captured through the network description supplemented by a Markov process to describe the motor dynamics. This approach has the advantage of directly addressing the chemical kinetics of the molecule, and treating the mechanical and chemical processes on equal grounds. We utilize constraints arising from nonequilibrium thermodynamics to determine motor parameters and demonstrate that the motor behaviour is governed by several chemomechanical motor cycles. In addition, we investigate the functional dependence of stepping rates on force by deducing the motor's response to external loads via an appropriate Fokker-Planck equation. For substall forces, the dominant pathway of the motor network is profoundly different from the one for superstall forces, which leads to a stepping behaviour that is in agreement with the experimental observations. The extension of our analysis to Markov processes with absorbing boundaries allows for the calculation of the motor's dwell time distributions. These reveal aspects of the coordination of the motor's heads and contain direct information about the backsteps of the motor. Our theory provides a unified description for the myosin V motor as studied in single motor experiments.}, language = {en} } @phdthesis{Boeche2011, author = {Boeche, Corrado}, title = {Chemical gradients in the Milky Way from unsupervised chemical abundances measurements of the RAVE spectroscopic data set}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52478}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The present thesis was born and evolved within the RAdial Velocity Experiment (RAVE) with the goal of measuring chemical abundances from the RAVE spectra and exploit them to investigate the chemical gradients along the plane of the Galaxy to provide constraints on possible Galactic formation scenarios. RAVE is a large spectroscopic survey which aims to observe spectroscopically ~10^6 stars by the end of 2012 and measures their radial velocities, atmospheric parameters and chemical abundances. The project makes use of the UK Schmidt telescope at Australian Astronomical Observatory (AAO) in Siding Spring, Australia, equipped with the multiobject spectrograph 6dF. To date, RAVE collected and measured more than 450,000 spectra. The precision of the chemical abundance estimations depends on the reliability of the atomic and atmosphere parameters adopted (in particular the oscillator strengths of the absorption lines and the effective temperature, gravity, and metallicity of the stars measured). Therefore we first identified 604 absorption lines in the RAVE wavelength range and refined their oscillator strengths with an inverse spectral analysis. Then, we improved the RAVE stellar parameters by modifying the RAVE pipeline and the spectral library the pipeline rely on. The modifications removed some systematic errors in stellar parameters discovered during this work. To obtain chemical abundances, we developed two different processing pipelines. Both of them perform chemical abundances measurements by assuming stellar atmospheres in Local Thermodynamic Equilibrium (LTE). The first one determines elements abundances from equivalent widths of absorption lines. Since this pipeline showed poor sensibility on abundances relative to iron, it has been superseded. The second one exploits the chi^2 minimization technique between observed and model spectra. Thanks to its precision, it has been adopted for the creation of the RAVE chemical catalogue. This pipeline provides abundances with uncertains of about ~0.2dex for spectra with signal-to-noise ratio S/N>40 and ~0.3dex for spectra with 20>S/N>40. For this work, the pipeline measured chemical abundances up to 7 elements for 217,358 RAVE stars. With these data we investigated the chemical gradients along the Galactic radius of the Milky Way. We found that stars with low vertical velocities |W| (which stay close to the Galactic plane) show an iron abundance gradient in agreement with previous works (~-0.07\$ dex kpc^-1) whereas stars with larger |W| which are able to reach larger heights above the Galactic plane, show progressively flatter gradients. The gradients of the other elements follow the same trend. This suggests that an efficient radial mixing acts in the Galaxy or that the thick disk formed from homogeneous interstellar matter. In particular, we found hundreds of stars which can be kinetically classified as thick disk stars exhibiting a chemical composition typical of the thin disk. A few stars of this kind have already been detected by other authors, and their origin is still not clear. One possibility is that they are thin disk stars kinematically heated, and then underwent an efficient radial mixing process which blurred (and so flattened) the gradient. Alternatively they may be a transition population" which represents an evolutionary bridge between thin and thick disk. Our analysis shows that the two explanations are not mutually exclusive. Future follow-up high resolution spectroscopic observations will clarify their role in the Galactic disk evolution.}, language = {en} } @phdthesis{Boehme2011, author = {B{\"o}hme, Dimo}, title = {EU-Russia energy relations: What chance for solutions? : A focus on the natural gas sector}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-120-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50210}, school = {Universit{\"a}t Potsdam}, pages = {xix, 322}, year = {2011}, abstract = {Public debate about energy relations between the EU and Russia is distorted. These distortions present considerable obstacles to the development of true partnership. At the core of the conflict is a struggle for resource rents between energy producing, energy consuming and transit countries. Supposed secondary aspects, however, are also of great importance. They comprise of geopolitics, market access, economic development and state sovereignty. The European Union, having engaged in energy market liberalisation, faces a widening gap between declining domestic resources and continuously growing energy demand. Diverse interests inside the EU prevent the definition of a coherent and respected energy policy. Russia, for its part, is no longer willing to subsidise its neighbouring economies by cheap energy exports. The Russian government engages in assertive policies pursuing Russian interests. In so far, it opts for a different globalisation approach, refusing the role of mere energy exporter. In view of the intensifying struggle for global resources, Russia, with its large energy potential, appears to be a very favourable option for European energy supplies, if not the best one. However, several outcomes of the strategic game between the two partners can be imagined. Engaging in non-cooperative strategies will in the end leave all stakeholders worse-off. The European Union should therefore concentrate on securing its partnership with Russia instead of damaging it. Stable cooperation would need the acceptance that the partner may pursue his own goals, which might be different from one's own interests. The question is, how can a sustainable compromise be found? This thesis finds that a mix of continued dialogue, a tit for tat approach bolstered by an international institutional framework and increased integration efforts appears as a preferable solution.}, language = {en} } @phdthesis{CastroPrieto2011, author = {Castro Prieto, Aines del Carmen}, title = {Immunogenetics of free-ranging felids on Namibian farmlands}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55505}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Genetic variation is crucial for the long-term survival of the species as it provides the potential for adaptive responses to environmental changes such as emerging diseases. The Major Histocompatibility Complex (MHC) is a gene family that plays a central role in the vertebrate's immune system by triggering the adaptive immune response after exposure to pathogens. MHC genes have become highly suitable molecular markers of adaptive significance. They synthesize two primary cell surface molecules namely MHC class I and class II that recognize short fragments of proteins derived respectively from intracellular (e.g. viruses) and extracellular (e.g. bacteria, protozoa, arthropods) origins and present them to immune cells. High levels of MHC polymorphism frequently observed in natural populations are interpreted as an adaptation to detect and present a wide array of rapidly evolving pathogens. This variation appears to be largely maintained by positive selection driven mainly by pathogenic selective pressures. For my doctoral research I focused on MHC I and II variation in free-ranging cheetahs (Acinonyx jubatus) and leopards (Panthera pardus) on Namibian farmlands. Both felid species are sympatric thus subject to similar pathogenic pressures but differ in their evolutionary and demographic histories. The main aims were to investigate 1) the extent and patterns of MHC variation at the population level in both felids, 2) the association between levels of MHC variation and disease resistance in free-ranging cheetahs, and 3) the role of selection at different time scales in shaping MHC variation in both felids. Cheetahs and leopards represent the largest free-ranging carnivores in Namibia. They concentrate in unprotected areas on privately owned farmlands where domestic and other wild animals also occur and the risk of pathogen transmission is increased. Thus, knowledge on adaptive genetic variation involved in disease resistance may be pertinent to both felid species' conservation. The cheetah has been used as a classic example in conservation genetics textbooks due to overall low levels of genetic variation. Reduced variation at MHC genes has been associated with high susceptibility to infectious diseases in cheetahs. However, increased disease susceptibility has only been observed in captive cheetahs whereas recent studies in free-ranging Namibian cheetahs revealed a good health status. This raised the question whether the diversity at MHC I and II genes in free-ranging cheetahs is higher than previously reported. In this study, a total of 10 MHC I alleles and four MHC II alleles were observed in 149 individuals throughout Namibia. All alleles but one likely belong to functional MHC genes as their expression was confirmed. The observed alleles belong to four MHC I and three MHC II genes in the species as revealed by phylogenetic analyses. Signatures of historical positive selection acting on specific sites that interact directly with pathogen-derived proteins were detected in both MHC classes. Furthermore, a high genetic differentiation at MHC I was observed between Namibian cheetahs from east-central and north-central regions known to differ substantially in exposure to feline-specific viral pathogens. This suggests that the patterns of MHC I variation in the current population mirrors different pathogenic selective pressure imposed by viruses. Cheetahs showed low levels of MHC diversity compared with other mammalian species including felids, but this does not seem to influence the current immunocompetence of free-ranging cheetahs in Namibia and contradicts the previous conclusion that the cheetah is a paradigm species of disease susceptibility. However, it cannot be ruled out that the low MHC variation might limit a prosperous immunocompetence in the case of an emerging disease scenario because none of the remaining alleles might be able to recognize a novel pathogen. In contrast to cheetahs, leopards occur in most parts of Africa being perhaps the most abundant big cat in the continent. Leopards seem to have escaped from large-scale declines due to epizootics in the past in contrast to some free-ranging large carnivore populations in Africa that have been afflicted by epizootics. Currently, no information about the MHC sequence variation and constitution in African leopards exists. In this study, I characterized genetic variation at MHC I and MHC II genes in free-ranging leopards from Namibia. A total of six MHC I and six MHC II sequences were detected in 25 individuals from the east-central region. The maximum number of sequences observed per individual suggests that they likely correspond to at least three MHC I and three MHC II genes. Hallmarks of MHC evolution were confirmed such as historical positive selection, recombination and trans-species polymorphism. The low MHC variation detected in Namibian leopards is not conclusive and further research is required to assess the extent of MHC variation in different areas of its geographic range. Results from this thesis will contribute to better understanding the evolutionary significance of MHC and conservation implications in free-ranging felids. Translocation of wildlife is an increasingly used management tool for conservation purposes that should be conducted carefully as it may affect the ability of the translocated animals to cope with different pathogenic selective pressures.}, language = {en} } @phdthesis{Devers2011, author = {Devers, Emanuel}, title = {Phosphate homeostasis and novel microRNAs are involved in the regulation of the arbuscular mycorrhizal symbiosis in Medicago truncatula}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55572}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die arbuskul{\"a}re Mykorrhiza ist die wahrscheinlich {\"a}lteste Form der Wurzelsymbiosen zwischen Pflanzen und Pilzen und hat sich vor 420 Millionen Jahren entwickelt. In dieser Symbiose, die zwischen nahezu allen Landpflanzen und Pilzen des Reiches Glomeromycota ausgebildet wird, versorgt der Pilz die Pflanze mit N{\"a}hrstoffen, wobei die verbesserte Versorgung mit Phosphat f{\"u}r die Pflanze sicher den gr{\"o}ßten Vorteil darstellt. Im Gegenzug erh{\"a}lt der Pilz Zucker, welche die Pflanze aus der Photosynthese bereitstellt. Zu hohe Phosphatkonzentrationen im Boden oder D{\"u}nger f{\"u}hren allerdings zu einer Verringerung in der Auspr{\"a}gung der arbuskul{\"a}ren Mykorrhiza. Diese Unterdr{\"u}ckung der Symbiose wird nicht durch eine lokale Reaktion der Wurzeln ausgel{\"o}st, sondern in erster Linie durch einen hohen Phosphatgehalt im Pflanzenspross. Somit handelt es sich also um eine systemische, also dem Gesamtsystem „Pflanze" betreffende Antwort. Die molekularen Mechanismen dieser Anpassung sind noch wenig bekannt und sind vor allem f{\"u}r die Agrarwirtschaft von besonderem Interesse. Eine Mikro-RNA (miRNA) des bereits bekannten Phosphathom{\"o}ostasesignalwegs (PHR1-miRNA399-PHO2 Signalweg) akkumuliert verst{\"a}rkt in mykorrhizierten Wurzeln. Das deutet daraufhin, dass dieser Signalweg und diese miRNA eine wichtige Rolle in der Regulation der arbuskul{\"a}ren Mykorrhiza spielen. Ziel dieser Studie war es neue Einblicke in die molekularen Mechanismen, die zur Unterdr{\"u}ckung der arbuskul{\"a}ren Mykorrhiza bei hohen Phosphatkonzentrationen f{\"u}hren, zu gewinnen. Dabei sollte der Einfluss von PHO2, sowie von miRNAs in dieser Symbiose genauer untersucht werden. Ein funktionelles Ortholog von PHO2, MtPho2, wurde in der Pflanze Medicago truncatula identifiziert. MtPho2-Mutanten, welche nicht mehr in der Lage waren ein funktionales PHO2 Protein zu exprimieren, zeigten schnellere Kolonisierung durch den AM-Pilz. Jedoch wurde auch in den mtpho2-Mutanten die Symbiose durch hohe Phosphatkonzentrationen unterdr{\"u}ckt. Dies bedeutet, dass PHO2 und somit der PHR1-miRNA399-PHO2 Signalweg eine wichtige Funktion w{\"a}hrend der fortschreitenden Kolonisierung der Wurzel durch den Pilz hat, aber und weitere Mechanismen in der Unterd{\"u}ckung der Symbiose bei hohen Phosphatkonzentrationen beteiligt sein m{\"u}ssen. Die Analyse von Transkriptionsprofilen von Spross- und Wurzeln mittels Microarrays zeigte, dass die Unterdr{\"u}ckung der AM Symbiose durch hohe Phosphatkonzentrationen m{\"o}glicherweise auf eine Unterdr{\"u}ckung der Expression einer Reihe symbiosespezifischer Gene im Spross der Pflanze beruht. Um die Rolle weiterer miRNA in der AM Symbiose zu untersuchen, wurden mittels einer Hochdurchsatz-Sequenzierung 243 neue und 181 aus anderen Pflanzen bekannte miRNAs in M. truncatula entdeckt. Zwei dieser miRNAs, miR5229 und miR160f*, sind ausschließlich w{\"a}hrend der arbuskul{\"a}ren Mykorrhiza zu finden und weitere miRNAs werden w{\"a}hrend dieser Symbiose verst{\"a}rkt gebildet. Interessanterweise f{\"u}hren einige dieser miRNAs zum Abbau von Transkripten, die eine wichtige Funktion in der arbuskul{\"a}ren Mykorrhiza und Wurzelkn{\"o}llchensymbiose besitzen. Die Ergebnisse dieser Studie liefern eine neue Grundlage f{\"u}r die Untersuchung von regulatorischen Netzwerken, die zur zellul{\"a}ren Umprogrammierung w{\"a}hrend der Interaktion zwischen Pflanzen und arbuskul{\"a}ren Mykorrhiza-Pilzen bei verschiedenen Phosphatbedingungen f{\"u}hren.}, language = {en} } @phdthesis{Federico2011, author = {Federico, Stefania}, title = {Synthetic peptides derived from decorin as building blocks for biomaterials based on supramolecular interactions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59661}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this work, the development of a new molecular building block, based on synthetic peptides derived from decorin, is presented. These peptides represent a promising basis for the design of polymer-based biomaterials that mimic the ECM on a molecular level and exploit specific biological recognition for technical applications. Multiple sequence alignments of the internal repeats of decorin that formed the inner and outer surface of the arch-shaped protein were used to develop consensus sequences. These sequences contained conserved sequence motifs that are likely to be related to structural and functional features of the protein. Peptides representative for the consensus sequences were synthesized by microwave-assisted solid phase peptide synthesis and purified by RP-HPLC, with purities higher than 95 mol\%. After confirming the desired masses by MALDI-TOF-MS, the primary structure of each peptide was investigated by 1H and 2D NMR, from which a full assignment of the chemical shifts was obtained. The characterization of the peptides conformation in solution was performed by CD spectroscopy, which demonstrated that using TFE, the peptides from the outer surface of decorin show a high propensity to fold into helical structures as observed in the original protein. To the contrary, the peptides from the inner surface did not show propensity to form stable secondary structure. The investigation of the binding capability of the peptides to Collagen I was performed by surface plasmon resonance analyses, from which all but one of the peptides representing the inner surface of decorin showed binding affinity to collagen with values of dissociation constant between 2•10-7 M and 2.3•10-4 M. On the other hand, the peptides representative for the outer surface of decorin did not show any significant interaction to collagen. This information was then used to develop experimental demonstration for the binding capabilities of the peptides from the inner surface of decorin to collagen even when used in more complicated situations close to possible appications. With this purpose, the peptide (LRELHLNNN) which showed the highest binding affinity to collagen (2•10-7 M) was functionalized with an N-terminal triple bond in order to obtain a peptide dimer via copper(I)-catalyzed cycloaddition reaction with 4,4'-diazidostilbene-2,2'-disulfonic acid. Rheological measurements showed that the presence of the peptide dimer was able to enhance the elastic modulus (G') of a collagen gel from ~ 600 Pa (collagen alone) to ~ 2700 Pa (collagen and peptide dimer). Moreover, it was shown that the mechanical properties of a collagen gel can be tailored by using different molar ratios of peptide dimer respect to collagen. The same peptide, functionalized with the triple bond, was used to obtain a peptide-dye conjugate by coupling it with N-(5'-azidopentanoyl)-5-aminofluorescein. An aqueous solution (5 vol\% methanol) of the peptide dye conjugate was injected into a collagen and a hyaluronic acid (HA) gel and images of fluorescence detection showed that the diffusion of the peptide was slower in the collagen gel compared to the HA gel. The third experimental demonstration was gained using the peptide (LSELRLHNN) which showed the lower binding affinity (2.3•10-4 M) to collagen. This peptide was grafted to hyaluronic acid via EDC-chemistry, with a degree of functionalization of 7 ± 2 mol\% as calculated by 1H-NMR. The grafting was further confirmed by FTIR and TGA measurements, which showed that the onset of decomposition for the HA-g-peptide decreased by 10 °C compared to the native HA. Rheological measurements showed that the elastic modulus of a system based on collagen and HA-g-peptide increased by almost two order of magnitude (G' = 200 Pa) compared to a system based on collagen and HA (G' = 0.9 Pa). Overall, this study showed that the synthetic peptides, which were identified from decorin, can be applied as potential building blocks for biomimetic materials that function via biological recognition.}, language = {en} } @phdthesis{Fellinger2011, author = {Fellinger, Tim-Patrick}, title = {Hydrothermal and ionothermal carbon structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57825}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The needs for sustainable energy generation, but also a sustainable chemistry display the basic motivation of the current thesis. By different single investigated cases, which are all related to the element carbon, the work can be devided into two major topics. At first, the sustainable synthesis of "useful" carbon materials employing the process of hydrothermal carbonisation (HC) is described. In the second part, the synthesis of heteroatom - containing carbon materials for electrochemical and fuel cell applications employing ionic liquid precursors is presented. On base of a thorough review of the literature on hydrothermolysis and hydrothermal carbonisation of sugars in addition to the chemistry of hydroxymethylfurfural, mechanistic considerations of the formation of hydrothermal carbon are proposed. On the base of these reaction schemes, the mineral borax, is introduced as an additive for the hydrothermal carbonisation of glucose. It was found to be a highly active catalyst, resulting in decreased reaction times and increased carbon yields. The chemical impact of borax, in the following is exploited for the modification of the micro- and nanostructure of hydrothermal carbon. From the borax - mediated aggregation of those primary species, widely applicable, low density, pure hydrothermal carbon aerogels with high porosities and specific surface areas are produced. To conclude the first section of the thesis, a short series of experiments is carried out, for the purpose of demonstrating the applicability of the HC model to "real" biowaste i.e. watermelon waste as feedstock for the production of useful materials. In part two cyano - containing ionic liquids are employed as precursors for the synthesis of high - performance, heteroatom - containing carbon materials. By varying the ionic liquid precursor and the carbonisation conditions, it was possible to design highly active non - metal electrocatalyst for the reduction of oxygen. In the direct reduction of oxygen to water (like used in polymer electrolyte fuel cells), compared to commercial platinum catalysts, astonishing activities are observed. In another example the selective and very cost efficient electrochemical synthesis of hydrogen peroxide is presented. In a last example the synthesis of graphitic boron carbon nitrides from the ionic liquid 1 - Ethyl - 3 - methylimidazolium - tetracyanoborate is investigated in detail. Due to the employment of unreactive salts as a new tool to generate high surface area these materials were first time shown to be another class of non - precious metal oxygen reduction electrocatalyst.}, language = {en} } @phdthesis{FloresSuarez2011, author = {Flores Su{\´a}rez, Rosaura}, title = {Three-dimensional polarization probing in polymer ferroelectrics, polymer-dispersed liquid crystals, and polymer ferroelectrets}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60173}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {A key non-destructive technique for analysis, optimization and developing of new functional materials such as sensors, transducers, electro-optical and memory devices is presented. The Thermal-Pulse Tomography (TPT) provides high-resolution three-dimensional images of electric field and polarization distribution in a material. This thermal technique use a pulsed heating by means of focused laser light which is absorbed by opaque electrodes. The diffusion of the heat causes changes in the sample geometry, generating a short-circuit current or change in surface potential, which contains information about the spatial distribution of electric dipoles or space charges. Afterwards, a reconstruction of the internal electric field and polarization distribution in the material is possible via Scale Transformation or Regularization methods. In this way, the TPT was used for the first time to image the inhomogeneous ferroelectric switching in polymer ferroelectric films (candidates to memory devices). The results shows the typical pinning of electric dipoles in the ferroelectric polymer under study and support the previous hypotheses of a ferroelectric reversal at a grain level via nucleation and growth. In order to obtain more information about the impact of the lateral and depth resolution of the thermal techniques, the TPT and its counterpart called Focused Laser Intensity Modulation Method (FLIMM) were implemented in ferroelectric films with grid-shaped electrodes. The results from both techniques, after the data analysis with different regularization and scale methods, are in total agreement. It was also revealed a possible overestimated lateral resolution of the FLIMM and highlights the TPT method as the most efficient and reliable thermal technique. After an improvement in the optics, the Thermal-Pulse Tomography method was implemented in polymer-dispersed liquid crystals (PDLCs) films, which are used in electro-optical applications. The results indicated a possible electrostatic interaction between the COH group in the liquid crystals and the fluorinate atoms of the used ferroelectric matrix. The geometrical parameters of the LC droplets were partially reproduced as they were compared with Scanning Electron Microscopy (SEM) images. For further applications, it is suggested the use of a non-strong-ferroelectric polymer matrix. In an effort to develop new polymerferroelectrets and for optimizing their properties, new multilayer systems were inspected. The results of the TPT method showed the non-uniformity of the internal electric-field distribution in the shaped-macrodipoles and thus suggested the instability of the sample. Further investigation on multilayers ferroelectrets was suggested and the implementation of less conductive polymers layers too.}, language = {en} } @phdthesis{Gebser2011, author = {Gebser, Martin}, title = {Proof theory and algorithms for answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55425}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Answer Set Programming (ASP) is an emerging paradigm for declarative programming, in which a computational problem is specified by a logic program such that particular models, called answer sets, match solutions. ASP faces a growing range of applications, demanding for high-performance tools able to solve complex problems. ASP integrates ideas from a variety of neighboring fields. In particular, automated techniques to search for answer sets are inspired by Boolean Satisfiability (SAT) solving approaches. While the latter have firm proof-theoretic foundations, ASP lacks formal frameworks for characterizing and comparing solving methods. Furthermore, sophisticated search patterns of modern SAT solvers, successfully applied in areas like, e.g., model checking and verification, are not yet established in ASP solving. We address these deficiencies by, for one, providing proof-theoretic frameworks that allow for characterizing, comparing, and analyzing approaches to answer set computation. For another, we devise modern ASP solving algorithms that integrate and extend state-of-the-art techniques for Boolean constraint solving. We thus contribute to the understanding of existing ASP solving approaches and their interconnections as well as to their enhancement by incorporating sophisticated search patterns. The central idea of our approach is to identify atomic as well as composite constituents of a propositional logic program with Boolean variables. This enables us to describe fundamental inference steps, and to selectively combine them in proof-theoretic characterizations of various ASP solving methods. In particular, we show that different concepts of case analyses applied by existing ASP solvers implicate mutual exponential separations regarding their best-case complexities. We also develop a generic proof-theoretic framework amenable to language extensions, and we point out that exponential separations can likewise be obtained due to case analyses on them. We further exploit fundamental inference steps to derive Boolean constraints characterizing answer sets. They enable the conception of ASP solving algorithms including search patterns of modern SAT solvers, while also allowing for direct technology transfers between the areas of ASP and SAT solving. Beyond the search for one answer set of a logic program, we address the enumeration of answer sets and their projections to a subvocabulary, respectively. The algorithms we develop enable repetition-free enumeration in polynomial space without being intrusive, i.e., they do not necessitate any modifications of computations before an answer set is found. Our approach to ASP solving is implemented in clasp, a state-of-the-art Boolean constraint solver that has successfully participated in recent solver competitions. Although we do here not address the implementation techniques of clasp or all of its features, we present the principles of its success in the context of ASP solving.}, language = {en} } @phdthesis{Gendt2011, author = {Gendt, Anja}, title = {Eye movements under the control of working memory : the challenge of a reading-span task}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69224}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {During reading oculomotor processes guide the eyes over the text. The visual information recorded is accessed, evaluated and processed. Only by retrieving the meaning of a word from the long-term memory, as well as through the connection and storage of the information about each individual word, is it possible to access the semantic meaning of a sentence. Therefore memory, and here in particular working memory, plays a pivotal role in the basic processes of reading. The following dissertation investigates to what extent different demands on memory and memory capacity have an effect on eye movement behavior while reading. The frequently used paradigm of the reading span task, in which test subjects read and evaluate individual sentences, was used for the experimental review of the research questions. The results speak for the fact that working memory processes have a direct effect on various eye movement measurements. Thus a high working memory load, for example, reduced the perceptual span while reading. The lower the individual working memory capacity of the reader was, the stronger was the influence of the working memory load on the processing of the sentence.}, language = {en} } @phdthesis{Giesewetter2011, author = {Giesewetter, Stefan}, title = {Resolute readings of later Wittgenstein and the challenge of avoiding hierarchies in philosophy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57021}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {This dissertation addresses the question: How did later Wittgenstein aim to achieve his goal of putting forward a way of dissolving philosophical problems which centered on asking ourselves what we mean by our words - yet which did not entail any claims about the essence of language and meaning? This question is discussed with reference to "resolute" readings of Wittgenstein. I discuss the readings of James Conant, Oskari Kuusela, and Martin Gustafsson. I follow Oskari Kuusela's claim that in order to fully appreciate how later Wittgenstein meant to achieve his goal, we need to clearly see how he aimed to do away with hierarchies in philosophy: Not only is the dissolution of philosophical problems via the method of clarifying the grammar of expressions to be taken as independent from any theses about what meaning must be - but furthermore, it is to be taken as independent from the dissolution of any particular problem via this method. As Kuusela stresses, this also holds for the problems involving rule-following and meaning: the clarification of the grammar of "rule" and "meaning" has no foundational status - it is nothing on which the method of clarifying the grammar of expressions as such were meant to in any way rely on. The lead question of this dissertation then is: What does it mean to come to see that the method of dissolving philosophical problems by asking "How is this word actually used?" does not in any way rely on the results of our having investigated the grammar of the particular concepts "rule" and "meaning"? What is the relation of such results - results such as "To follow a rule, [...], to obey an order, [...] are customs (uses, institutions)" or "The meaning of a word is its use in the language" - to this method? From this vantage point, I concern myself with two aspects of the readings of Gustafsson and Kuusela. In Gustafsson, I concern myself with his idea that the dissolution of philosophical problems in general "relies on" the very agreement which - during the dissolution of the rule-following problem - comes out as a presupposition for our talk of "meaning" in terms of rules. In Kuusela, I concern myself with his idea that Wittgenstein, in adopting a way of philosophical clarification which investigates the actual use of expressions, is following the model of "meaning as use" - which model he had previously introduced in order to perspicuously present an aspect of the actual use of the word "meaning". This dissertation aims to show how these two aspects of Gustafsson's and Kuusela's readings still fail to live up to the vision of Wittgenstein as a philosopher who aimed to do away with any hierarchies in philosophy. I base this conclusion on a detailed analysis of which of the occasions where Wittgenstein invokes the notions of "use" and "application" (as also "agreement") have to do with the dissolution of a specific problem only, and which have to do with the dissolution of philosophical problems in general. I discuss Wittgenstein's remarks on rule-following, showing how in the dissolution of the rule-following paradox, notions such as "use", "application", and "practice" figure on two distinct logical levels. I then discuss an example of what happens when this distinction is not duly heeded: Gordon Baker and Peter Hacker's idea that the rule-following remarks have a special significance for his project of dissolving philosophical problems as such. I furnish an argument to the effect that their idea that the clarification of the rules of grammar of the particular expression "following a rule" could answer a question about rules of grammar in general rests on a conflation of the two logical levels on which "use" occurs in the rule-following remarks, and that it leads into a regress. I then show that Gustafsson's view - despite its decisive advance over Baker and Hacker - contains a version of that same idea, and that it likewise leads into a regress. Finally, I show that Kuusela's idea of a special significance of the model "meaning as use" for the whole of the method of stating rules for the use of words is open to a regress argument of a similar kind as that he himself advances against Baker and Hacker. I conclude that in order to avoid such a regress, we need to reject the idea that the grammatical remark "The meaning of a word is its use in the language" - because of the occurrence of "use" in it - stood in any special relation to the method of dissolving philosophical problems by describing the use of words. Rather, we need to take this method as independent from this outcome of the investigation of the use of the particular word "meaning".}, language = {en} } @phdthesis{Giorgi2011, author = {Giorgi, Federico Manuel}, title = {Expression-based reverse engineering of plant transcriptional networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56760}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Regulation of gene transcription plays a major role in mediating cellular responses and physiological behavior in all known organisms. The finding that similar genes are often regulated in a similar manner (co-regulated or "co-expressed") has directed several "guilt-by-association" approaches in order to reverse-engineer the cellular transcriptional networks using gene expression data as a compass. This kind of studies has been considerably assisted in the recent years by the development of high-throughput transcript measurement platforms, specifically gene microarrays and next-generation sequencing. In this thesis, I describe several approaches for improving the extraction and interpretation of the information contained in microarray based gene expression data, through four steps: (1) microarray platform design, (2) microarray data normalization, (3) gene network reverse engineering based on expression data and (4) experimental validation of expression-based guilt-by-association inferences. In the first part test case is shown aimed at the generation of a microarray for Thellungiella salsuginea, a salt and drought resistant close relative to the model plant Arabidopsis thaliana; the transcripts of this organism are generated on the combination of publicly available ESTs and newly generated ad-hoc next-generation sequencing data. Since the design of a microarray platform requires the availability of highly reliable and non-redundant transcript models, these issues are addressed consecutively, proposing several different technical solutions. In the second part I describe how inter-array correlation artifacts are generated by the common microarray normalization methods RMA and GCRMA, together with the technical and mathematical characteristics underlying the problem. A solution is proposed in the form of a novel normalization method, called tRMA. The third part of the thesis deals with the field of expression-based gene network reverse engineering. It is shown how different centrality measures in reverse engineered gene networks can be used to distinguish specific classes of genes, in particular essential genes in Arabidopsis thaliana, and how the use of conditional correlation can add a layer of understanding over the information flow processes underlying transcript regulation. Furthermore, several network reverse engineering approaches are compared, with a particular focus on the LASSO, a linear regression derivative rarely applied before in global gene network reconstruction, despite its theoretical advantages in robustness and interpretability over more standard methods. The performance of LASSO is assessed through several in silico analyses dealing with the reliability of the inferred gene networks. In the final part, LASSO and other reverse engineering methods are used to experimentally identify novel genes involved in two independent scenarios: the seed coat mucilage pathway in Arabidopsis thaliana and the hypoxic tuber development in Solanum tuberosum. In both cases an interesting method complementarity is shown, which strongly suggests a general use of hybrid approaches for transcript expression-based inferences. In conclusion, this work has helped to improve our understanding of gene transcription regulation through a better interpretation of high-throughput expression data. Part of the network reverse engineering methods described in this thesis have been included in a tool (CorTo) for gene network reverse engineering and annotated visualization from custom transcription datasets.}, language = {en} } @phdthesis{Graeff2011, author = {Gr{\"a}ff, Thomas}, title = {Soil moisture dynamics and soil moisture controlled runoff processes at different spatial scales : from observation to modelling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54470}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Soil moisture is a key state variable that controls runoff formation, infiltration and partitioning of radiation into latent and sensible heat. However, the experimental characterisation of near surface soil moisture patterns and their controls on runoff formation remains a challenge. This subject was one aspect of the BMBF-funded OPAQUE project (operational discharge and flooding predictions in head catchments). As part of that project the focus of this dissertation is on: (1) testing the methodology and feasibility of the Spatial TDR technology in producing soil moisture profiles along TDR probes, including an inversion technique of the recorded signal in heterogeneous field soils, (2) the analysis of spatial variability and temporal dynamics of soil moisture at the field scale including field experiments and hydrological modelling, (3) the application of models of different complexity for understanding soil moisture dynamics and its importance for runoff generation as well as for improving the prediction of runoff volumes. To fulfil objective 1, several laboratory experiments were conducted to understand the influence of probe rod geometry and heterogeneities in the sampling volume under different wetness conditions. This includes a detailed analysis on how these error sources affect retrieval of soil moisture profiles in soils. Concerning objective 2 a sampling strategy of two TDR clusters installed in the head water of the Wilde Weißeritz catchment (Eastern Ore Mountains, Germany) was used to investigate how well "the catchment state" can be characterised by means of distributed soil moisture data observed at the field scale. A grassland site and a forested site both located on gentle slopes were instrumented with two Spatial TDR clusters that consist of up to 39 TDR probes. Process understanding was gained by modelling the interaction of evapotranspiration and soil moisture with the hydrological process model CATFLOW. A field scale irrigation experiment was carried out to investigate near subsurface processes at the hillslope scale. The interactions of soil moisture and runoff formation were analysed using discharge data from three nested catchments: the Becherbach with a size of 2 km², the Rehefeld catchment (17 km²) and the superordinate Ammelsdorf catchment (49 km²). Statistical analyses including observations of pre-event runoff, soil moisture and different rainfall characteristics were employed to predict stream flow volume. On the different scales a strong correlation between the average soil moisture and the runoff coefficients of rainfall-runoff events could be found, which almost explains equivalent variability as the pre-event runoff. Furthermore, there was a strong correlation between surface soil moisture and subsurface wetness with a hysteretic behaviour between runoff soil moisture. To fulfil objective 3 these findings were used in a generalised linear model (GLM) analysis which combines state variables describing the catchments antecedent wetness and variables describing the meteorological forcing in order to predict event runoff coefficients. GLM results were compared to simulations with the catchment model WaSiM ETH. Hereby were the model results of the GLMs always better than the simulations with WaSiM ETH. The GLM analysis indicated that the proposed sampling strategy of clustering TDR probes in typical functional units is a promising technique to explore soil moisture controls on runoff generation and can be an important link between the scales. Long term monitoring of such sites could yield valuable information for flood warning and forecasting by identifying critical soil moisture conditions for the former and providing a better representation of the initial moisture conditions for the latter.}, language = {en} } @phdthesis{Haase2011, author = {Haase, Martin F.}, title = {Modification of nanoparticle surfaces for emulsion stabilization and encapsulation of active molecules for anti-corrosive coatings}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55413}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Within this work, three physicochemical methods for the hydrophobization of initially hydrophilic solid particles are investigated. The modified particles are then used for the stabilization of oil-in-water (o/w) emulsions. For all introduced methods electrostatic interactions between strongly or weakly charged groups in the system are es-sential. (i) Short chain alkylammonium bromides (C4 - C12) adsorb on oppositely charged solid particles. Macroscopic contact angle measurements of water droplets under air and hexane on flat silica surfaces in dependency of the surface charge density and alkylchain-length allow the calculation of the surface energy and give insights into the emulsification properties of solid particles modified with alkyltrimethylammonium bromides. The measure-ments show an increase of the contact angle with increasing surface charge density, due to the enhanced adsorp-tion of the oppositely charged alkylammonium bromides. Contact angles are higher for longer alkylchain lengths. The surface energy calculations show that in particular the surface-hexane or surface-air interfacial en-ergy is being lowered upon alkylammonium adsorption, while a significant increase of the surface-water interfa-cial energy occurs only at long alkyl chain lengths and high surface charge densities. (ii) The thickness and the charge density of an adsorbed weak polyelectrolyte layer (e.g. PMAA, PAH) influence the wettability of nanoparticles (e.g. alumina, silica, see Scheme 1(b)). Furthermore, the isoelectric point and the pH range of colloidal stability of particle-polyelectrolyte composites depend on the thickness of the weak polye-lectrolyte layer. Silica nanoparticles with adsorbed PAH and alumina nanoparticles with adsorbed PMAA be-come interfacially active and thus able to stabilize o/w emulsions when the degree of dissociation of the polye-lectrolyte layer is below 80 \%. The average droplet size after emulsification of dodecane in water depends on the thickness and the degree of dissociation of the adsorbed PE-layer. The visualization of the particle-stabilized o/w emulsions by cryogenic SEM shows that for colloidally stable alumina-PMAA composites the oil-water interface is covered with a closely packed monolayer of particles, while for the colloidally unstable case closely packed aggregated particles deposit on the interface. (iii) By emulsifying a mixture of the corrosion inhibitor 8-hydroxyquinoline (8-HQ) and styrene with silica nanoparticles a highly stable o/w emulsion can be obtained in a narrow pH window. The amphoteric character of 8-HQ enables a pH dependent electrostatic interaction with silica nanoparticles, which can render them interfa-cially active. Depending on the concentration and the degree of dissociation of 8-HQ the adsorption onto silica results from electrostatic or aromatic interactions between 8-HQ and the particle-surface. At intermediate amounts of adsorbed 8-HQ the oil wettability of the particles becomes sufficient for stabilizing o/w emulsions. Cryogenic SEM visualization shows that the particles arrange then in a closely packed shell consisting of partly of aggregated domains on the droplet interface. For further increasing amounts of adsorbed 8-HQ the oil wet-tability is reduced again and the particles ability to stabilize emulsions decreases. By the addition of hexadecane to the oil phase the size of the droplets can be reduced down to 200 nm by in-creasing the silica mass fraction. Subsequent polymerization produces corrosion inhibitor filled (20 wt-\%) poly-styrene-silica composite particles. The measurement of the release of 8-hydroxyquinoline shows a rapid increase of 8-hydroxyquinoline in a stirred aqueous solution indicating the release of the total content in less than 5 min-utes. The method is extended for the encapsulation of other organic corrosion inhibitors. The silica-polymer-inhibitor composite particles are then dispersed in a water based alkyd emulsion, and the dispersion is used to coat flat aluminium substrates. After drying and cross-linking the polmer-film Confocal Laser Scanning Micros-copy is employed revealing a homogeneous distribution of the particles in the film. Electrochemical Impedance Spectroscopy in aqueous electrolyte solutions shows that films with aggregated particle domains degrade with time and don't provide long-term corrosion protection of the substrate. However, films with highly dispersed particles have high barrier properties for corrosive species. The comparison of films containing silica-polystyrene composite particles with and without 8-hydroxyquinoline shows higher electrochemical impedances when the inhibitor is present in the film. By applying the Scanning Vibrating Electrode Technique the localized corrosion rate in the fractured area of scratched polymer films containing the silica-polymer-inhibitor composite particles is studied. Electrochemical corrosion cannot be suppressed but the rate is lowered when inhibitor filled composite particles are present in the film. By depositing six polyelectrolyte layers on particle stabilized emulsion droplets their surface morphology changes significantly as shown by SEM visualization. When the oil wettability of the outer polyelectrolyte layer increases, the polyelectrolyte coated droplets can act as emulsion stabilizers themselves by attaching onto bigger oil droplets in a closely packed arrangement. In the presence of 3 mM LaCl3 8-HQ hydrophobized silica particles aggregate strongly on the oil-water inter-face. The application of an ultrasonic field can remove two dimensional shell-compartments from the droplet surface, which are then found in the aqueous bulk phase. Their size ranges up to 1/4th of the spherical particle shell.}, language = {en} } @phdthesis{Hanisch2011, author = {Hanisch, Florian}, title = {Variational problems on supermanifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59757}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this thesis, we discuss the formulation of variational problems on supermanifolds. Supermanifolds incorporate bosonic as well as fermionic degrees of freedom. Fermionic fields take values in the odd part of an appropriate Grassmann algebra and are thus showing an anticommutative behaviour. However, a systematic treatment of these Grassmann parameters requires a description of spaces as functors, e.g. from the category of Grassmann algberas into the category of sets (or topological spaces, manifolds). After an introduction to the general ideas of this approach, we use it to give a description of the resulting supermanifolds of fields/maps. We show that each map is uniquely characterized by a family of differential operators of appropriate order. Moreover, we demonstrate that each of this maps is uniquely characterized by its component fields, i.e. by the coefficients in a Taylor expansion w.r.t. the odd coordinates. In general, the component fields are only locally defined. We present a way how to circumvent this limitation. In fact, by enlarging the supermanifold in question, we show that it is possible to work with globally defined components. We eventually use this formalism to study variational problems. More precisely, we study a super version of the geodesic and a generalization of harmonic maps to supermanifolds. Equations of motion are derived from an energy functional and we show how to decompose them into components. Finally, in special cases, we can prove the existence of critical points by reducing the problem to equations from ordinary geometric analysis. After solving these component equations, it is possible to show that their solutions give rise to critical points in the functor spaces of fields.}, language = {en} } @phdthesis{Hoffmann2011, author = {Hoffmann, Anne}, title = {Comparative aerosol studies based on multi-wavelength Raman LIDAR at Ny-{\AA}lesund, Spitsbergen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52426}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The Arctic is a particularly sensitive area with respect to climate change due to the high surface albedo of snow and ice and the extreme radiative conditions. Clouds and aerosols as parts of the Arctic atmosphere play an important role in the radiation budget, which is, as yet, poorly quantified and understood. The LIDAR (Light Detection And Ranging) measurements presented in this PhD thesis contribute with continuous altitude resolved aerosol profiles to the understanding of occurrence and characteristics of aerosol layers above Ny-{\AA}lesund, Spitsbergen. The attention was turned to the analysis of periods with high aerosol load. As the Arctic spring troposphere exhibits maximum aerosol optical depths (AODs) each year, March and April of both the years 2007 and 2009 were analyzed. Furthermore, stratospheric aerosol layers of volcanic origin were analyzed for several months, subsequently to the eruptions of the Kasatochi and Sarychev volcanoes in summer 2008 and 2009, respectively. The Koldewey Aerosol Raman LIDAR (KARL) is an instrument for the active remote sensing of atmospheric parameters using pulsed laser radiation. It is operated at the AWIPEV research base and was fundamentally upgraded within the framework of this PhD project. It is now equipped with a new telescope mirror and new detection optics, which facilitate atmospheric profiling from 450m above sea level up to the mid-stratosphere. KARL provides highly resolved profiles of the scattering characteristics of aerosol and cloud particles (backscattering, extinction and depolarization) as well as water vapor profiles within the lower troposphere. Combination of KARL data with data from other instruments on site, namely radiosondes, sun photometer, Micro Pulse LIDAR, and tethersonde system, resulted in a comprehensive data set of scattering phenomena in the Arctic atmosphere. The two spring periods March and April 2007 and 2009 were at first analyzed based on meteorological parameters, like local temperature and relative humidity profiles as well as large scale pressure patterns and air mass origin regions. Here, it was not possible to find a clear correlation between enhanced AOD and air mass origin. However, in a comparison of two cloud free periods in March 2007 and April 2009, large AOD values in 2009 coincided with air mass transport through the central Arctic. This suggests the occurrence of aerosol transformation processes during the aerosol transport to Ny-{\AA}lesund. Measurements on 4 April 2009 revealed maximum AOD values of up to 0.12 and aerosol size distributions changing with altitude. This and other performed case studies suggest the differentiation between three aerosol event types and their origin: Vertically limited aerosol layers in dry air, highly variable hygroscopic boundary layer aerosols and enhanced aerosol load across wide portions of the troposphere. For the spring period 2007, the available KARL data were statistically analyzed using a characterization scheme, which is based on optical characteristics of the scattering particles. The scheme was validated using several case studies. Volcanic eruptions in the northern hemisphere in August 2008 and June 2009 arose the opportunity to analyze volcanic aerosol layers within the stratosphere. The rate of stratospheric AOD change was similar within both years with maximum values above 0.1 about three to five weeks after the respective eruption. In both years, the stratospheric AOD persisted at higher rates than usual until the measurements were stopped in late September due to technical reasons. In 2008, up to three aerosol layers were detected, the layer structure in 2009 was characterized by up to six distinct and thin layers which smeared out to one broad layer after about two months. The lowermost aerosol layer was continuously detected at the tropopause altitude. Three case studies were performed, all revealed rather large indices of refraction of m = (1.53-1.55) - 0.02i, suggesting the presence of an absorbing carbonaceous component. The particle radius, derived with inversion calculations, was also similar in both years with values ranging from 0.16 to 0.19 μm. However, in 2009, a second mode in the size distribution was detected at about 0.5 μm. The long term measurements with the Koldewey Aerosol Raman LIDAR in Ny-{\AA}lesund provide the opportunity to study Arctic aerosols in the troposphere and the stratosphere not only in case studies but on longer time scales. In this PhD thesis, both, tropospheric aerosols in the Arctic spring and stratospheric aerosols following volcanic eruptions have been described qualitatively and quantitatively. Case studies and comparative studies with data of other instruments on site allowed for the analysis of microphysical aerosol characteristics and their temporal evolution.}, language = {en} } @phdthesis{Husemann2011, author = {Husemann, Bernd}, title = {The AGN-host galaxy connection : new insights from the extended ionised gas}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55556}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Active Galactic Nuclei (AGN) are powered by gas accretion onto supermassive Black Holes (BH). The luminosity of AGN can exceed the integrated luminosity of their host galaxies by orders of magnitude, which are then classified as Quasi-Stellar Objects (QSOs). Some mechanisms are needed to trigger the nuclear activity in galaxies and to feed the nuclei with gas. Among several possibilities, such as gravitational interactions, bar instabilities, and smooth gas accretion from the environment, the dominant process has yet to be identified. Feedback from AGN may be important an important ingredient of the evolution of galaxies. However, the details of this coupling between AGN and their host galaxies remain unclear. In this work we aim to investigate the connection between the AGN and their host galaxies by studying the properties of the extendend ionised gas around AGN. Our study is based on observations of ~50 luminous, low-redshift (z<0.3) QSOs using the novel technique of integral field spectroscopy that combines imaging and spectroscopy. After spatially separating the emission of AGN-ionised gas from HII regions, ionised solely by recently formed massive stars, we demonstrate that the specific star formation rates in several disc-dominated AGN hosts are consistent with those of normal star forming galaxies, while others display no detectable star formation activity. Whether the star formation has been actively suppressed in those particular host galaxies by the AGN, or their gas content is intrinsically low, remains an open question. By studying the kinematics of the ionised gas, we find evidence for non-gravitational motions and outflows on kpc scales only in a few objects. The gas kinematics in the majority of objects however indicate a gravitational origin. It suggests that the importance of AGN feedback may have been overrated in theoretical works, at least at low redshifts. The [OIII] line is the strongest optical emission line for AGN-ionised gas, which can be extended over several kpc scales, usually called the Narrow-Line Region (NLR). We perform a systematic investigation of the NLR size and determine a NLR size-luminosity relation that is consistent with the scenario of a constant ionisation parameter throughout the NLR. We show that previous narrow-band imaging with the Hubble Space Telescope underestimated the NLR size by a factor of >2 and that the continuum AGN luminosity is better correlated with the NLR size than the [OIII] luminosity. These affects may account for the different NLR size-luminosity relations reported in previous studies. On the other hand, we do not detect extended NLRs around all QSOs, and demonstrate that the detection of extended NLRs goes along with radio emission. We employ emission line ratios as a diagnostic for the abundance of heavy elements in the gas, i.e. its metallicity, and find that the radial metallicity gradients are always flatter than in inactive disc-dominated galaxies. This can be interpreted as evidence for radial gas flows from the outskirts of these galaxies to the nucleus. Recent or ongoing galaxy interactions are likely responsible for this effect and may turn out to be a common prerequisite for QSO activity. The metallicity of bulge-dominated hosts are systematically lower than their disc-dominated counterparts, which we interpret as evidence for minor mergers, supported by our detailed study of the bulge-dominated host of the luminous QSO HE 1029-1401, or smooth gas accretion from the environment. In this line another new discovery is that HE 2158-0107 at z=0.218 is the most metal poor luminous QSO ever observed. Together with a large (30kpc) extended structure of low metallicity ionised gas, we propose smooth cold gas accretion as the most likely scenario. Theoretical studies suggested that this process is much more important at earlier epochs of the universe, so that HE 2158-0107 might be an ideal laboratory to study this mechanism of galaxy and BH growth at low redshift more detailed in the furture.}, language = {en} } @phdthesis{Ivakov2011, author = {Ivakov, Alexander}, title = {Metabolic interactions in leaf development in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59730}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Das Wachstum und {\"U}berleben von Pflanzen basiert auf der Photosynthese in den Bl{\"a}ttern. Diese beinhaltet die Aufnahme von Kohlenstoffdioxid aus der Atmosph{\"a}re und das simultane Einfangen von Lichtenergie zur Bildung organischer Molek{\"u}le. Diese werden nach dem Eintritt in den Metabolismus in viele andere Komponenten umgewandelt, welche die Grundlage f{\"u}r die Zunahme der Biomasse bilden. Bl{\"a}tter sind Organe, die auf die Fixierung von Kohlenstoffdioxid spezialisiert sind. Die Funktionen der Bl{\"a}tter beinhalten vor allem die Optimierung und Feinregulierung vieler Prozesse, um eine effektive Nutzung von Ressourcen und eine maximale Photosynthese zu gew{\"a}hrleisten. Es ist bekannt, dass sich die Morphologie der Bl{\"a}tter den Wachstumsbedingungen der Pflanze anpasst und eine wichtige Rolle bei der Optimierung der Photosynthese spielt. Trotzdem ist die Regulation dieser Art der Anpassung bisher nicht verstanden. Die allgemeine Zielsetzung dieser vorliegenden Arbeit ist das Verst{\"a}ndnis wie das Wachstum und die Morphologie der Bl{\"a}tter im Modellorganismus Arabidopsis thaliana reguliert werden. Besondere Aufmerksamkeit wurde hierbei der M{\"o}glichkeit geschenkt, dass es interne metabolische Signale in der Pflanze geben k{\"o}nnte, die das Wachstum und die Entwicklung von Bl{\"a}ttern beeinflussen. Um diese Fragestellung zu untersuchen, muss das Wachstum und die Entwicklung von Bl{\"a}ttern oberhalb des Levels des einzelnen Organs und im Kontext der gesamten Pflanze betrachtet werden, weil Bl{\"a}tter nicht eigenst{\"a}ndig wachsen, sondern von Ressourcen und regulatorischen Einfl{\"u}ssen der ganzen Pflanze abh{\"a}ngig sind. Aufgrund der Komplexit{\"a}t dieser Fragestellung wurden drei komplement{\"a}re Ans{\"a}tze durchgef{\"u}hrt. Im ersten und spezifischsten Ansatz wurde untersucht ob eine flussabw{\"a}rts liegende Komponente des Zucker-Signalwegs, Trehalose-6-Phosphat (Tre-6-P), das Blattwachstum und die Blattentwicklung beinflussen kann. Um diese Frage zu beantworten wurden transgene Arabidopsis-Linien mit einem gest{\"o}rten Gehalt von Tre-6-P durch die Expression von bakteriellen Proteinen die in dem metabolismus von trehalose beteiligt sind. Die Pflanzen-Linien wurden unter Standard-Bendingungen in Erde angebaut und ihr Metabolismus und ihre Blattmorphologie untersucht. Diese Experimente f{\"u}hrten auch zu einem unerwarteten Projekt hinsichtlich einer m{\"o}glichen Rolle von Tre-6-P in der Regulation der Stomata. In einem zweiten, allgemeineren Ansatz wurde untersucht, ob {\"A}nderungen im Zucker-Gehalt der Pflanzen die Morphogenese der Bl{\"a}tter als Antwort auf Licht beeinflussen. Dazu wurden eine Reihe von Mutanten, die im Zentralmetabolismus beeintr{\"a}chtigt sind, in derselben Lichtbedingung angezogen und bez{\"u}glich ihrer Blattmorphologie analysiert. In einem dritten noch allgemeineren Ansatz wurde die nat{\"u}rliche Variation von morphologischen Auspr{\"a}gungen der Bl{\"a}tter und Rosette anhand von wilden Arabidopsis {\"O}kotypen untersucht, um zu verstehen wie sich die Blattmorphologie auf die Blattfunktion und das gesamte Pflanzenwachstum auswirkt und wie unterschiedliche Eigenschaften miteinander verkn{\"u}pft sind. Das Verh{\"a}ltnis der Blattanzahl zum Gesamtwachstum der Pflanze und Blattgr{\"o}ße wurde gesondert weiter untersucht durch eine Normalisierung der Blattanzahl auf das Frischgewicht der Rosette, um den Parameter „leafing Intensity" abzusch{\"a}tzen. Leafing Intensity integrierte Blattanzahl, Blattgr{\"o}ße und gesamtes Rosettenwachstum in einer Reihe von Kompromiss-Interaktionen, die in einem Wachstumsvorteil resultieren, wenn Pflanzen weniger, aber gr{\"o}ßere Bl{\"a}tter pro Einheit Biomasse ausbilden. Dies f{\"u}hrte zu einem theoretischen Ansatz in dem ein einfaches allometrisch mathematisches Modell konstruiert wurde, um Blattanzahl, Blattgr{\"o}ße und Pflanzenwachstum im Kontext der gesamten Pflanze Arabidopsis zu verkn{\"u}pfen.}, language = {en} } @phdthesis{Junginger2011, author = {Junginger, Annett}, title = {East African climate variability on different time scales : the Suguta Valley in the African-Asian Monsoon Domain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56834}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Motivation | Societal and economic needs of East Africa rely entirely on the availability of water, which is governed by the regular onset and retreat of the rainy seasons. Fluctuations in the amounts of rainfall has tremendous impact causing widespread famine, disease outbreaks and human migrations. Efforts towards high resolution forecasting of seasonal precipitation and hydrological systems are therefore needed, which requires high frequency short to long-term analyses of available climate data that I am going to present in this doctoral thesis by three different studies. 15,000 years - Suguta Valley | The main study of this thesis concentrated on the understanding of humidity changes within the last African Humid Period (AHP, 14.8-5.5 ka BP). The nature and causes of intensity variations of the West-African (WAM) and Indian Summer monsoons (ISM) during the AHP, especially their exact influence on regional climate relative to each other, is currently intensely debated. Here, I present a high-resolution multiproxy lake-level record spanning the AHP from the remote Suguta Valley in the northern Kenya Rift, located between the WAM and ISM domains. The presently desiccated valley was during the AHP filled by a 300 m deep and 2200 km2 large palaeo-lake due to an increase in precipitation of only 26\%. The record explains the synchronous onset of large lakes in the East African Rift System (EARS) with the longitudinal shift of the Congo Air Boundary (CAB) over the East African and Ethiopian Plateaus, as the direct consequence of an enhanced atmospheric pressure gradient between East-Africa and India due to a precessional-forced northern hemisphere insolation maximum. Pronounced, and abrupt lake level fluctuations during the generally wet AHP are explained by small-scale solar irradiation changes weakening this pressure gradient atmospheric moisture availability preventing the CAB from reaching the study area. Instead, the termination of the AHP occurred, in a non-linear manner due to a change towards an equatorial insolation maximum ca. 6.5 ka ago extending the AHP over Ethiopia and West-Africa. 200 years - Lake Naivasha | The second part of the thesis focused on the analysis of a 200 year-old sediment core from Lake Naivasha in the Central Kenya Rift, one of the very few present freshwater lakes in East Africa. The results revealed and confirmed, that the appliance of proxy records for palaeo-climate reconstruction for the last 100 years within a time of increasing industrialisation and therefore human impact to the proxy-record containing sites are broadly limited. Since the middle of the 20th century, intense anthropogenic activity around Lake Naivasha has led to cultural eutrophication, which has overprinted the influence of natural climate variation to the lake usually inferred from proxy records such as diatoms, transfer-functions, geochemical and sedimentological analysis as used in this study. The results clarify the need for proxy records from remote unsettled areas to contribute with pristine data sets to current debates about anthropologic induced global warming since the past 100 years. 14 years - East African Rift | In order to avoid human influenced data sets and validate spatial and temporal heterogeneities of proxy-records from East Africa, the third part of the thesis therefore concentrated on the most recent past 14 years (1996-2010) detecting climate variability by using remotely sensed rainfall data. The advancement in the spatial coverage and temporal resolutions of rainfall data allow a better understanding of influencing climate mechanisms and help to better interpret proxy-records from the EARS in order to reconstruct past climate conditions. The study focuses on the dynamics of intraseasonal rainfall distribution within catchments of eleven lake basins in the EARS that are often used for palaeo-climate studies. We discovered that rainfall in adjacent basins exhibits high complexities in the magnitudes of intraseasonal variability, biennial to triennial precipitation patterns and even are not necessarily correlated often showing opposite trends. The variability among the watersheds is driven by the complex interaction of topography, in particular the shape, length and elevation of the catchment and its relative location to the East African Rift System and predominant influence of the ITCZ or CAB, whose locations and intensities are dependent on the strength of low pressure cells over India, SST variations in the Atlantic, Pacific or Indian Ocean, QBO phases and the 11-year solar cycle. Among all seasons we observed, January-September is the season of highest and most complex rainfall variability, especially for the East African Plateau basins, most likely due to the irregular penetration and sensitivity of the CAB.}, language = {en} } @phdthesis{Jurish2011, author = {Jurish, Bryan}, title = {Finite-state canonicalization techniques for historical German}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55789}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {This work addresses issues in the automatic preprocessing of historical German input text for use by conventional natural language processing techniques. Conventional techniques cannot adequately account for historical input text due to conventional tools' reliance on a fixed application-specific lexicon keyed by contemporary orthographic surface form on the one hand, and the lack of consistent orthographic conventions in historical input text on the other. Historical spelling variation is treated here as an error-correction problem or "canonicalization" task: an attempt to automatically assign each (historical) input word a unique extant canonical cognate, thus allowing direct application-specific processing (tagging, parsing, etc.) of the returned canonical forms without need for any additional application-specific modifications. In the course of the work, various methods for automatic canonicalization are investigated and empirically evaluated, including conflation by phonetic identity, conflation by lemma instantiation heuristics, canonicalization by weighted finite-state rewrite cascade, and token-wise disambiguation by a dynamic Hidden Markov Model.}, language = {en} } @phdthesis{Kalbitz2011, author = {Kalbitz, Ren{\´e}}, title = {Stability of polarization in organic ferroelectric metal-insulator-semiconductor structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57276}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Organic thin film transistors (TFT) are an attractive option for low cost electronic applications and may be used for active matrix displays and for RFID applications. To extend the range of applications there is a need to develop and optimise the performance of non-volatile memory devices that are compatible with the solution-processing fabrication procedures used in plastic electronics. A possible candidate is an organic TFT incorporating the ferroelectric co-polymer poly(vinylidenefluoride-trifluoroethylene)(P(VDF-TrFE)) as the gate insulator. Dielectric measurements have been carried out on all-organic metal-insulator-semiconductor structures with the ferroelectric polymer poly(vinylidenefluoride-trifluoroethylene) (P(VDF-TrFE)) as the gate insu-lator. The capacitance spectra of MIS devices, were measured under different biases, showing the effect of charge accumulation and depletion on the Maxwell-Wagner peak. The position and height of this peak clearly indicates the lack of stable depletion behavior and the decrease of mobility when increasing the depletion zone width, i.e. upon moving into the P3HT bulk. The lack of stable depletion was further investigated with capacitance-voltage (C-V) measurements. When the structure was driven into depletion, C-V plots showed a positive flat-band voltage shift, arising from the change in polarization state of the ferroelectric insulator. When biased into accumulation, the polarization was reversed. It is shown that the two polarization states are stable i.e. no depolarization occurs below the coercive field. However, negative charge trapped at the semiconductor-insulator interface during the depletion cycle masks the negative shift in flat-band voltage expected during the sweep to accumulation voltages. The measured output characteristics of the studied ferroelectric-field-effect transistors confirmed the results of the C-V plots. Furthermore, the results indicated a trapping of electrons at the positively charged surfaces of the ferroelectrically polarized P(VDF-TrFE) crystallites near the insulator/semiconductor in-terface during the first poling cycles. The study of the MIS structure by means of thermally stimulated current (TSC) revealed further evidence for the stability of the polarization under depletion voltages. It was shown, that the lack of stable depletion behavior is caused by the compensation of the orientational polarization by fixed electrons at the interface and not by the depolarization of the insulator, as proposed in several publications. The above results suggest a performance improvement of non-volatile memory devices by the optimization of the interface.}, language = {en} } @phdthesis{Kartal2011, author = {Kartal, {\"O}nder}, title = {The role of interfacial and 'entropic' enzymes in transitory starch degradation : a mathematical modeling approach}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53947}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Plants and some unicellular algae store carbon in the form of transitory starch on a diurnal basis. The turnover of this glucose polymer is tightly regulated and timely synthesis as well as mobilization is essential to provide energy for heterotrophic growth. Especially for starch degradation, novel enzymes and mechanisms have been proposed recently. However, the catalytic properties of these enzymes and their coordination with metabolic regulation are still to be discovered. This thesis develops theoretical methods in order to interpret and analyze enzymes and their role in starch degradation. In the first part, a novel description of interfacial enzyme catalysis is proposed. Since the initial steps of starch degradation involve reactions at the starch-stroma interface it is necessary to have a framework which allows the derivation of interfacial enzyme rate laws. A cornerstone of the method is the introduction of the available area function - a concept from surface physics - to describe the adsorption step in the catalytic cycle. The method is applied to derive rate laws for two hydrolases, the Beta-amylase (BAM3) and the Isoamylase (DBE/ISA3), as well as to the Glucan, water dikinase (GWD) and a Phosphoglucan phosphatase (DSP/SEX4). The second part uses the interfacial rate laws to formulate a kinetic model of starch degradation. It aims at reproducing the stimulatory effect of reversible phosphorylation by GWD and DSP on the breakdown of the granule. The model can describe the dynamics of interfacial properties during degradation and suggests that interfacial amylopectin side-chains undergo spontaneous helix-coil transitions. Reversible phosphorylation has a synergistic effect on glucan release especially in the early phase dropping off during degradation. Based on the model, the hypothesis is formulated that interfacial phosphorylation is important for the rapid switch from starch synthesis to starch degradation. The third part takes a broader perspective on carbohydrate-active enzymes (CAZymes) but is motivated by the organization of the downstream pathway of starch breakdown. This comprises Alpha-1,4-glucanotransferases (DPE1 and DPE2) and Alpha-glucan-phosphorylases (Pho or PHS) both in the stroma and in the cytosol. CAZymes accept many different substrates and catalyze numerous reactions and therefore cannot be characterized in classical enzymological terms. A concise characterization is provided by conceptually linking statistical thermodynamics and polymer biochemistry. Each reactant is interpreted as an energy level, transitions between which are constrained by the enzymatic mechanisms. Combinations of in vitro assays of polymer-active CAZymes essential for carbon metabolism in plants confirmed the dominance of entropic gradients. The principle of entropy maximization provides a generalization of the equilibrium constant. Stochastic simulations confirm the results and suggest that randomization of metabolites in the cytosolic pool of soluble heteroglycans (SHG) may contribute to a robust integration of fluctuating carbon fluxes coming from chloroplasts.}, language = {en} } @phdthesis{Kellermann2011, author = {Kellermann, Thorsten}, title = {Accurate numerical relativity simulations of non-vacuumspace-times in two dimensions and applications to critical collapse}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59578}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {This Thesis puts its focus on the physics of neutron stars and its description with methods of numerical relativity. In the first step, a new numerical framework the Whisky2D code will be developed, which solves the relativistic equations of hydrodynamics in axisymmetry. Therefore we consider an improved formulation of the conserved form of these equations. The second part will use the new code to investigate the critical behaviour of two colliding neutron stars. Considering the analogy to phase transitions in statistical physics, we will investigate the evolution of the entropy of the neutron stars during the whole process. A better understanding of the evolution of thermodynamical quantities, like the entropy in critical process, should provide deeper understanding of thermodynamics in relativity. More specifically, we have written the Whisky2D code, which solves the general-relativistic hydrodynamics equations in a flux-conservative form and in cylindrical coordinates. This of course brings in 1/r singular terms, where r is the radial cylindrical coordinate, which must be dealt with appropriately. In the above-referenced works, the flux operator is expanded and the 1/r terms, not containing derivatives, are moved to the right-hand-side of the equation (the source term), so that the left hand side assumes a form identical to the one of the three-dimensional (3D) Cartesian formulation. We call this the standard formulation. Another possibility is not to split the flux operator and to redefine the conserved variables, via a multiplication by r. We call this the new formulation. The new equations are solved with the same methods as in the Cartesian case. From a mathematical point of view, one would not expect differences between the two ways of writing the differential operator, but, of course, a difference is present at the numerical level. Our tests show that the new formulation yields results with a global truncation error which is one or more orders of magnitude smaller than those of alternative and commonly used formulations. The second part of the Thesis uses the new code for investigations of critical phenomena in general relativity. In particular, we consider the head-on-collision of two neutron stars in a region of the parameter space where two final states a new stable neutron star or a black hole, lay close to each other. In 1993, Choptuik considered one-parameter families of solutions, S[P], of the Einstein-Klein-Gordon equations for a massless scalar field in spherical symmetry, such that for every P > P⋆, S[P] contains a black hole and for every P < P⋆, S[P] is a solution not containing singularities. He studied numerically the behavior of S[P] as P → P⋆ and found that the critical solution, S[P⋆], is universal, in the sense that it is approached by all nearly-critical solutions regardless of the particular family of initial data considered. All these phenomena have the common property that, as P approaches P⋆, S[P] approaches a universal solution S[P⋆] and that all the physical quantities of S[P] depend only on |P - P⋆|. The first study of critical phenomena concerning the head-on collision of NSs was carried out by Jin and Suen in 2007. In particular, they considered a series of families of equal-mass NSs, modeled with an ideal-gas EOS, boosted towards each other and varied the mass of the stars, their separation, velocity and the polytropic index in the EOS. In this way they could observe a critical phenomenon of type I near the threshold of black-hole formation, with the putative solution being a nonlinearly oscillating star. In a successive work, they performed similar simulations but considering the head-on collision of Gaussian distributions of matter. Also in this case they found the appearance of type-I critical behaviour, but also performed a perturbative analysis of the initial distributions of matter and of the merged object. Because of the considerable difference found in the eigenfrequencies in the two cases, they concluded that the critical solution does not represent a system near equilibrium and in particular not a perturbed Tolmann-Oppenheimer-Volkoff (TOV) solution. In this Thesis we study the dynamics of the head-on collision of two equal-mass NSs using a setup which is as similar as possible to the one considered above. While we confirm that the merged object exhibits a type-I critical behaviour, we also argue against the conclusion that the critical solution cannot be described in terms of equilibrium solution. Indeed, we show that, in analogy with what is found in, the critical solution is effectively a perturbed unstable solution of the TOV equations. Our analysis also considers fine-structure of the scaling relation of type-I critical phenomena and we show that it exhibits oscillations in a similar way to the one studied in the context of scalar-field critical collapse.}, language = {en} } @phdthesis{Kittner2011, author = {Kittner, Madeleine}, title = {Folding and aggregation of amyloid peptides}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53570}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Aggregation of the Amyloid β (Aβ) peptide to amyloid fibrils is associated with the outbreak of Alzheimer's disease. Early aggregation intermediates in form of soluble oligomers are of special interest as they are believed to be the major toxic components in the process. These oligomers are of disordered and transient nature. Therefore, their detailed molecular structure is difficult to access experimentally and often remains unknown. In the present work extensive, fully atomistic replica exchange molecular dynamics simulations were performed to study the preaggregated, monomer states and early aggregation intermediates (dimers, trimers) of Aβ(25-35) and Aβ(10-35)-NH2 in aqueous solution. The folding and aggregation of Aβ(25-35) were studied at neutral pH and 293 K. Aβ(25-35) monomers mainly adopt β-hairpin conformations characterized by a β-turn formed by residues G29 and A30, and a β-sheet between residues N27-K28 and I31-I32 in equilibrium with coiled conformations. The β-hairpin conformations served as initial configurations to model spontaneous aggregation of Aβ(25-35). As expected, within the Aβ(25-35) dimer and trimer ensembles many different poorly populated conformations appear. Nevertheless, we were able to distinguish between disordered and fibril-like oligomers. Whereas disordered oligomers are rather compact with few intermolecular hydrogen bonds (HBs), fibril-like oligomers are characterized by the formation of large intermolecular β-sheets. In most of the fibril-like dimers and trimers individual peptides are fully extended forming in- or out-of-register antiparallel β-sheets. A small amount of fibril-like trimers contained V-shaped peptides forming parallel β-sheets. The dimensions of extended and V-shaped oligomers correspond well to the diameters of two distinct morphologies found for Aβ(25-35) fibrils. The transition from disordered to fibril-like Aβ(25-35) dimers is unfavorable but driven by energy. The lower energy of fibril-like dimers arises from favorable intermolecular HBs and other electrostatic interactions which compete with a loss in entropy. Approximately 25 \% of the entropic cost correspond to configurational entropy. The rest relates to solvent entropy, presumably caused by hydrophobic and electrostatic effects. In contrast to the transition towards fibril-like dimers the first step of aggregation is driven by entropy. Here, we compared structural and thermodynamic properties of the individual monomer, dimer and trimer ensembles to gain qualitative information about the aggregation process. The β-hairpin conformation observed for monomers is successively dissolved in dimer and trimer ensembles while instead intermolecular β-sheets are formed. As expected upon aggregation the configurational entropy decreases. Additionally, the solvent accessible surface area (SASA), especially the hydrophobic SASA, decreases yielding a favorable solvation free energy which overcompensates the loss in configurational entropy. In summary, the hydrophobic effect, possibly combined with electrostatic effects, yields an increase in solvent entropy which is believed to be one major driving force towards aggregation. Spontaneous folding of the Aβ(10-35)-NH2 monomer was modeled using two force fields, GROMOS96 43a1 and OPLS/AA, and compared to primary NMR data collected at pH 5.6 and 283 K taken from the literature. Unexpectedly, the two force fields yielded significantly different main conformations. Comparison between experimental and calculated nuclear Overhauser effect (NOE) distances is not sufficient to distinguish between the different force fields. Additionally, the comparison with scalar coupling constants suggest that the chosen protonation in both simulations corresponds to a pH lower than in the experiment. Based on this analysis we were unable to determine which force field yields a better description of this system. Dimerization of Aβ(10-35)-NH2 was studied at neutral pH and 300 K. Dimer conformations arrange in many distinct, poorly populated and rather complex alignments or interlocking patterns which are rather stabilized by side chain interactions than by specific intermolecular hydrogen bonds. Similar to Aβ(25-35) dimers, transition towards β-sheet-rich, fibril-like Aβ(10-35) dimers is driven by energy competing with a loss in entropy. Here, transition is mediated by favorable peptide-solvent and solvent-solvent interactions mainly arising from electrostatic interactions.}, language = {en} } @phdthesis{Klinkusch2011, author = {Klinkusch, Stefan}, title = {Simulations of laser-induced correlated many-electron dynamics in molecular systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55445}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this thesis, simulations of laser-driven many-electron dynamics in molecules are presented, i.e., the interaction between molecules and an electromagnetic field is demonstrated. When a laser field is applied to a molecular system, a population of higher electronic states takes place as well as other processes, e.g. photoionization, which is described by an appropriate model. Also, a finite lifetime of an excited state can be described by such a model. In the second part, a method is postulated that is capable of describing electron correlation in a time-dependent scheme. This is done by introducing a single-electron entropy that is at least temporarily minimized in a further step.}, language = {en} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quanti-tative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, the hierarchical service requests are in the main focus, because they are the origin of every service provisioning process. Similar to physics, these service requests are a tuple of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore the hierarchical modeling. Through reducing the model complexity of the models by decomposing the system in different hierarchical views, the distinction between operational and control states and the calculation of the performance values on the assumption of the steady state, FMC-QE has a scalable applica-bility on complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchi-cal bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are de-scribed as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely Colored Time Augmented Petri Nets. From the structures and pa-rameters of the performance model, a hierarchical set of equations is derived. The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of the performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal) and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical). This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in a broader research in the development of FMC-QE. While this work is concentrated on the theoretical background, description and definition of the methodology as well as the extension and validation of the applicability, other topics are in the development of an FMC-QE modeling and evaluation tool and the usage of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service based environments. This thesis contains a state-of-the-art, the description of FMC-QE as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview on existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given. Also other hierarchical quantitative modeling frameworks will be considered. The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, like the modeling of a service based search portal, a service based SAP NetWeaver application and the Axis2 Web service framework will be provided. Finally, conclusions are given by a summary of contributions and an outlook on future work in chapter 6. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Procee-dings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @phdthesis{Kopetzki2011, author = {Kopetzki, Daniel}, title = {Exploring hydrothermal reactions : from prebiotic synthesis to green chemistry}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52581}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this thesis chemical reactions under hydrothermal conditions were explored, whereby emphasis was put on green chemistry. Water at high temperature and pressure acts as a benign solvent. Motivation to work under hydrothermal conditions was well-founded in the tunability of physicochemical properties with temperature, e.g. of dielectric constant, density or ion product, which often resulted in surprising reactivity. Another cornerstone was the implementation of the principles of green chemistry. Besides the use of water as solvent, this included the employment of a sustainable feedstock and the sensible use of resources by minimizing waste and harmful intermediates and additives. To evaluate the feasibility of hydrothermal conditions for chemical synthesis, exemplary reactions were performed. These were carried out in a continuous flow reactor, allowing for precise control of reaction conditions and kinetics measurements. In most experiments a temperature of 200 °C in combination with a pressure of 100 bar was chosen. In some cases the temperature was even raised to 300 °C. Water in this subcritical range can also be found in nature at hydrothermal vents on the ocean floor. On the primitive earth, environments with such conditions were however present in larger numbers. Therefore we tested whether biologically important carbohydrates could be formed at high temperature from the simple, probably prebiotic precursor formaldehyde. Indeed, this formose reaction could be carried out successfully, although the yield was lower compared to the counterpart reaction under ambient conditions. However, striking differences regarding selectivity and necessary catalysts were observed. At moderate temperatures bases and catalytically active cations like Ca2+ are necessary and the main products are hexoses and pentoses, which accumulate due to their higher stability. In contrast, in high-temperature water no catalyst was necessary but a slightly alkaline solution was sufficient. Hexoses were only formed in negligible amounts, whereas pentoses and the shorter carbohydrates accounted for the major fraction. Amongst the pentoses there was some preference for the formation of ribose. Even deoxy sugars could be detected in traces. The observation that catalysts can be avoided was successfully transferred to another reaction. In a green chemistry approach platform chemicals must be produced from sustainable resources. Carbohydrates can for instance be employed as a basis. They can be transformed to levulinic acid and formic acid, which can both react via a transfer hydrogenation to the green solvent and biofuel gamma-valerolactone. This second reaction usually requires catalysis by Ru or Pd, which are neither sustainable nor low-priced. Under hydrothermal conditions these heavy metals could be avoided and replaced by cheap salts, taking advantage of the temperature dependence of the acid dissociation constant. Simple sulfate was recognized as a temperature switchable base. With this additive high yield could be achieved by simultaneous prevention of waste. In contrast to conventional bases, which create salt upon neutralization, a temperature switchable base becomes neutral again when cooled down and thus can be reused. This adds another sustainable feature to the high atom economy of the presented hydrothermal synthesis. In a last study complex decomposition pathways of biomass were investigated. Gas chromatography in conjunction with mass spectroscopy has proven to be a powerful tool for the identification of unknowns. It was observed that several acids were formed when carbohydrates were treated with bases at high temperature. This procedure was also applied to digest wood. Afterwards it was possible to fermentate the solution and a good yield of methane was obtained. This has to be regarded in the light of the fact that wood practically cannot be used as a feedstock in a biogas factory. Thus the hydrothermal pretreatment is an efficient means to employ such materials as well. Also the reaction network of the hydrothermal decomposition of glycine was investigated using isotope-labeled compounds as comparison for the unambiguous identification of unknowns. This refined analysis allowed the identification of several new molecules and pathways, not yet described in literature. In summary several advantages could be taken from synthesis in high-temperature water. Many catalysts, absolutely necessary under ambient conditions, could either be completely avoided or replaced by cheap, sustainable alternatives. In this respect water is not only a green solvent, but helps to prevent waste and preserves resources.}, language = {en} } @phdthesis{Krause2011, author = {Krause, Jette}, title = {An expert-based Bayesian investigation of greenhouse gas emission reduction options for German passenger vehicles until 2030}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57671}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The present thesis introduces an iterative expert-based Bayesian approach for assessing greenhouse gas (GHG) emissions from the 2030 German new vehicle fleet and quantifying the impacts of their main drivers. A first set of expert interviews has been carried out in order to identify technologies which may help to lower car GHG emissions and to quantify their emission reduction potentials. Moreover, experts were asked for their probability assessments that the different technologies will be widely adopted, as well as for important prerequisites that could foster or hamper their adoption. Drawing on the results of these expert interviews, a Bayesian Belief Network has been built which explicitly models three vehicle types: Internal Combustion Engine Vehicles (which include mild and full Hybrid Electric Vehicles), Plug-In Hybrid Electric Vehicles, and Battery Electric Vehicles. The conditional dependencies of twelve central variables within the BBN - battery energy, fuel and electricity consumption, relative costs, and sales shares of the vehicle types - have been quantified by experts from German car manufacturers in a second series of interviews. For each of the seven second-round interviews, an expert's individually specified BBN results. The BBN have been run for different hypothetical 2030 scenarios which differ, e.g., in regard to battery development, regulation, and fuel and electricity GHG intensities. The present thesis delivers results both in regard to the subject of the investigation and in regard to its method. On the subject level, it has been found that the different experts expect 2030 German new car fleet emission to be at 50 to 65\% of 2008 new fleet emissions under the baseline scenario. They can be further reduced to 40 to 50\% of the emissions of the 2008 fleet though a combination of a higher share of renewables in the electricity mix, a larger share of biofuels in the fuel mix, and a stricter regulation of car CO\$_2\$ emissions in the European Union. Technically, 2030 German new car fleet GHG emissions can be reduced to a minimum of 18 to 44\% of 2008 emissions, a development which can not be triggered by any combination of measures modeled in the BBN alone but needs further commitment. Out of a wealth of existing BBN, few have been specified by individual experts through elicitation, and to my knowledge, none of them has been employed for analyzing perspectives for the future. On the level of methods, this work shows that expert-based BBN are a valuable tool for making experts' expectations for the future explicit and amenable to the analysis of different hypothetical scenarios. BBN can also be employed for quantifying the impacts of main drivers. They have been demonstrated to be a valuable tool for iterative stakeholder-based science approaches.}, language = {en} } @phdthesis{Kristen2011, author = {Kristen, Juliane Ute}, title = {Amphiphilic BAB-triblock copolymers bearing fluorocarbon groups : synthesis and self-organization in aqueous media}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-61782}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this work new fluorinated and non-fluorinated mono- and bifunctional trithiocarbonates of the structure Z-C(=S)-S-R and Z-C(=S)-S-R-S-C(=S)-Z were synthesized for the use as chain transfer agents (CTAs) in the RAFT-process. All newly synthesized CTAs were tested for their efficiency to moderate the free radical polymerization process by polymerizing styrene (M3). Besides characterization of the homopolymers by GPC measurements, end- group analysis of the synthesized block copolymers via 1H-, 19F-NMR, and in some cases also UV-vis spectroscopy, were performed attaching suitable fluorinated moieties to the Z- and/or R-groups of the CTAs. Symmetric triblock copolymers of type BAB and non-symmetric fluorine end- capped polymers were accessible using the RAFT process in just two or one polymerization step. In particular, the RAFT-process enabled the controlled polymerization of hydrophilic monomers such as N-isopropylacrylamide (NIPAM) (M1) as well as N-acryloylpyrrolidine (NAP) (M2) for the A-blocks and of the hydrophobic monomers styrene (M3), 2-fluorostyrene (M4), 3-fluorostyrene (M5), 4-fluorostyrene (M6) and 2,3,4,5,6-pentafluorostyrene (M7) for the B-blocks. The properties of the BAB-triblock copolymers were investigated in dilute, concentrated and highly concentrated aqueous solutions using DLS, turbidimetry, 1H- and 19F-NMR, rheology, determination of the CMC, foam height- and surface tension measurements and microscopy. Furthermore, their ability to stabilize emulsions and microemulsions and the wetting behaviour of their aqueous solutions on different substrates was investigated. The behaviour of the fluorine end-functionalized polymers to form micelles was studied applying DLS measurements in diluted organic solution. All investigated BAB-triblock copolymers were able to form micelles and show surface activity at room temperature in dilute aqueous solution. The aqueous solutions displayed moderate foam formation. With different types and concentrations of oils, the formation of emulsions could be detected using a light microscope. A boosting effect in microemulsions could not be found adding BAB-triblock copolymers. At elevated polymer concentrations, the formation of hydrogels was proved applying rheology measurements.}, language = {en} } @phdthesis{Kubo2011, author = {Kubo, Shiori}, title = {Nanostructured carbohydrate-derived carbonaceous materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53157}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Nanoporous carbon materials are widely used in industry as adsorbents or catalyst supports, whilst becoming increasingly critical to the developing fields of energy storage / generation or separation technologies. In this thesis, the combined use of carbohydrate hydrothermal carbonisation (HTC) and templating strategies is demonstrated as an efficient route to nanostructured carbonaceous materials. HTC is an aqueous-phase, low-temperature (e.g. 130 - 200 °C) carbonisation, which proceeds via dehydration / poly-condensation of carbon precursors (e.g. carbohydrates and their derivatives), allowing facile access to highly functional carbonaceous materials. Whilst possessing utile, modifiable surface functional groups (e.g. -OH and -C=O-containing moieties), materials synthesised via HTC typically present limited accessible surface area or pore volume. Therefore, this thesis focuses on the development of fabrication routes to HTC materials which present enhanced textural properties and well-defined porosity. In the first discussed synthesis, a combined hard templating / HTC route was investigated using a range of sacrificial inorganic templates (e.g. mesoporous silica beads and macroporous alumina membranes (AAO)). Via pore impregnation of mesoporous silica beads with a biomass-derived carbon source (e.g. 2-furaldehyde) and subsequent HTC at 180 oC, an inorganic / carbonaceous hybrid material was produced. Removal of the template component by acid etching revealed the replication of the silica into mesoporous carbonaceous spheres (particle size ~ 5 μm), representing the inverse morphological structure of the original inorganic body. Surface analysis (e.g. FTIR) indicated a material decorated with hydrophilic (oxygenated) functional groups. Further thermal treatment at increasingly elevated temperatures (e.g. at 350, 550, 750 oC) under inert atmosphere allowed manipulation of functionalities from polar hydrophilic to increasingly non-polar / hydrophobic structural motifs (e.g. extension of the aromatic / pseudo-graphitic nature), thus demonstrating a process capable of simultaneous control of nanostructure and surface / bulk chemistry. As an extension of this approach, carbonaceous tubular nanostructures with controlled surface functionality were synthesised by the nanocasting of uniform, linear macropores of an AAO template (~ 200 nm). In this example, material porosity could be controlled, showing increasingly microporous tube wall features as post carbonisation temperature increased. Additionally, by taking advantage of modifiable surface groups, the introduction of useful polymeric moieties (i.e. grafting of thermoresponsive poly(N-isopropylacrylamide)) was also demonstrated, potentially enabling application of these interesting tubular structures in the fields of biotechnology (e.g. enzyme immobilization) and medicine (e.g. as drug micro-containers). Complimentary to these hard templating routes, a combined HTC / soft templating route for the direct synthesis of ordered porous carbonaceous materials was also developed. After selection of structural directing agents and optimisation of synthesis composition, the F127 triblock copolymer (i.e. ethylene oxide (EO)106 propylene oxide (PO)70 ethylene oxide (EO)106) / D-Fructose system was extensively studied. D-Fructose was found to be a useful carbon precursor as the HTC process could be performed at 130 oC, thus allowing access to stable micellular phase. Thermolytic template removal from the synthesised ordered copolymer / carbon composite yielded functional cuboctahedron single crystalline-like particles (~ 5 μm) with well ordered pore structure of a near perfect cubic Im3m symmetry. N2 sorption analysis revealed a predominantly microporous carbonaceous material (i.e. Type I isotherm, SBET = 257 m2g-1, 79 \% microporosity) possessing a pore size of ca. 0.9 nm. The addition of a simple pore swelling additive (e.g. trimethylbenzene (TMB)) to this system was found to direct pore size into the mesopore size domain (i.e. Type IV isotherm, SBET = 116 m2g-1, 60 \% mesoporosity) generating pore size of ca. 4 nm. It is proposed that in both cases as HTC proceeds to generate a polyfuran-like network, the organised block copolymer micellular phase is essentially "templated", either via hydrogen bonding between hydrophilic poly(EO) moiety and the carbohydrate or via hydrophobic interaction between hydrophobic poly(PO) moiety and forming polyfuran-like network, whilst the additive TMB presumably interact with poly(PO) moieties, thus swelling the hydrophobic region expanding the micelle template size further into the mesopore range.}, language = {en} } @phdthesis{Lauterbach2011, author = {Lauterbach, Stefan}, title = {Lateglacial to Holocene climatic and environmental changes in Europe : multi-proxy studies on lake sediments along a transect from northern Italy to northeastern Poland}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58157}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Sediment records of three European lakes were investigated in order to reconstruct the regional climate development during the Lateglacial and Holocene, to investigate the response of local ecosystems to climatic fluctuations and human impact and to relate regional peculiarities of past climate development to climatic changes on a larger spatial scale. The Lake Hańcza (NE Poland) sediment record was studied with a focus on reconstructing the early Holocene climate development and identifying possible differences to Western Europe. Following the initial Holocene climatic improvement, a further climatic improvement occurred between 10 000 and 9000 cal. a BP. Apparently, relatively cold and dry climate conditions persisted in NE Poland during the first ca. 1500 years of the Holocene, most likely due to a specific regional atmospheric circulation pattern. Prevailing anticyclonic circulation linked to a high-pressure cell above the remaining Scandinavian Ice Sheet (SIS) might have blocked the eastward propagation of warm and moist Westerlies and thus attenuated the early Holocene climatic amelioration in this region until the final decay of the SIS, a pattern different from climate development in Western Europe. The Lateglacial sediment record of Lake Mondsee (Upper Austria) was investigated in order to study the regional climate development and the environmental response to rapid climatic fluctuations. While the temperature rise and environmental response at the onset of the Holocene took place quasi-synchronously, major leads and lags in proxy responses characterize the onset of the Lateglacial Interstadial. In particular, the spread of coniferous woodlands and the reduction of detrital flux lagged the initial Lateglacial warming by ca. 500-750 years. Major cooling at the onset of the Younger Dryas took place synchronously with a change in vegetation, while the increase of detrital matter flux was delayed by about 150-300 years. Complex proxy responses are also detected for short-term Lateglacial climatic fluctuations. In summary, periods of abrupt climatic changes are characterized by complex and temporally variable proxy responses, mainly controlled by ecosystem inertia and the environmental preconditions. A second study on the Lake Mondsee sediment record focused on two small-scale climate deteriorations around 8200 and 9100 cal. a BP, which have been triggered by freshwater discharges to the North Atlantic, causing a shutdown of the Atlantic meridional overturning circulation (MOC). Combining microscopic varve counting and AMS 14C dating yielded a precise duration estimate (ca. 150 years) and absolute dating of the 8.2 ka cold event, both being in good agreement with results from other palaeoclimate records. Moreover, a sudden temperature overshoot after the 8.2 ka cold event was identified, also seen in other proxy records around the North Atlantic. This was most likely caused by enhanced resumption of the MOC, which also initiated substantial shifts of oceanic and atmospheric front systems. Although there is also evidence from other proxy records for pronounced recovery of the MOC and atmospheric circulation changes after the 9.1 ka cold event, no temperature overshoot is seen in the Lake Mondsee record, indicating the complex behaviour of the global climate system. The Holocene sediment record of Lake Iseo (northern Italy) was studied to shed light on regional earthquake activity and the influence of climate variability and anthropogenic impact on catchment erosion and detrital flux into the lake. Frequent small-scale detrital layers within the sediments reflect allochthonous sediment supply by extreme surface runoff events. During the early to mid-Holocene, increased detrital flux coincides with periods of cold and wet climate conditions, thus apparently being mainly controlled by climate variability. In contrast, intervals of high detrital flux during the late Holocene partly also correlate with phases of increased human impact, reflecting the complex influences on catchment erosion processes. Five large-scale event layers within the sediments, which are composed of mass-wasting deposits and turbidites, are supposed to have been triggered by strong local earthquakes. While the uppermost of these event layers is assigned to a documented adjacent earthquake in AD 1222, the four other layers are supposed to be related to previously undocumented prehistorical earthquakes.}, language = {en} } @phdthesis{Menzel2011, author = {Menzel, Michael}, title = {Model-driven security in service-oriented architectures : leveraging security patterns to transform high-level security requirements to technical policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59058}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Service-oriented Architectures (SOA) facilitate the provision and orchestration of business services to enable a faster adoption to changing business demands. Web Services provide a technical foundation to implement this paradigm on the basis of XML-messaging. However, the enhanced flexibility of message-based systems comes along with new threats and risks. To face these issues, a variety of security mechanisms and approaches is supported by the Web Service specifications. The usage of these security mechanisms and protocols is configured by stating security requirements in security policies. However, security policy languages for SOA are complex and difficult to create due to the expressiveness of these languages. To facilitate and simplify the creation of security policies, this thesis presents a model-driven approach that enables the generation of complex security policies on the basis of simple security intentions. SOA architects can specify these intentions in system design models and are not required to deal with complex technical security concepts. The approach introduced in this thesis enables the enhancement of any system design modelling languages - for example FMC or BPMN - with security modelling elements. The syntax, semantics, and notion of these elements is defined by our security modelling language SecureSOA. The metamodel of this language provides extension points to enable the integration into system design modelling languages. In particular, this thesis demonstrates the enhancement of FMC block diagrams with SecureSOA. To enable the model-driven generation of security policies, a domain-independent policy model is introduced in this thesis. This model provides an abstraction layer for security policies. Mappings are used to perform the transformation from our model to security policy languages. However, expert knowledge is required to generate instances of this model on the basis of simple security intentions. Appropriate security mechanisms, protocols and options must be chosen and combined to fulfil these security intentions. In this thesis, a formalised system of security patterns is used to represent this knowledge and to enable an automated transformation process. Moreover, a domain-specific language is introduced to state security patterns in an accessible way. On the basis of this language, a system of security configuration patterns is provided to transform security intentions related to data protection and identity management. The formal semantics of the security pattern language enable the verification of the transformation process introduced in this thesis and prove the correctness of the pattern application. Finally, our SOA Security LAB is presented that demonstrates the application of our model-driven approach to facilitate a dynamic creation, configuration, and execution of secure Web Service-based composed applications.}, language = {en} } @phdthesis{Mutwil2011, author = {Mutwil, Marek}, title = {Integrative transcriptomic approaches to analyzing plant co-expression networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50752}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {It is well documented that transcriptionally coordinated genes tend to be functionally related, and that such relationships may be conserved across different species, and even kingdoms. (Ihmels et al., 2004). Such relationships was initially utilized to reveal functional gene modules in yeast and mammals (Ihmels et al., 2004), and to explore orthologous gene functions between different species and kingdoms (Stuart et al., 2003; Bergmann et al., 2004). Model organisms, such as Arabidopsis, are readily used in basic research due to resource availability and relative speed of data acquisition. A major goal is to transfer the acquired knowledge from these model organisms to species that are of greater importance to our society. However, due to large gene families in plants, the identification of functional equivalents of well characterized Arabidopsis genes in other plants is a non-trivial task, which often returns erroneous or inconclusive results. In this thesis, concepts of utilizing co-expression networks to help infer (i) gene function, (ii) organization of biological processes and (iii) knowledge transfer between species are introduced. An often overlooked fact by bioinformaticians is that a bioinformatic method is as useful as its accessibility. Therefore, majority of the work presented in this thesis was directed on developing freely available, user-friendly web-tools accessible for any biologist.}, language = {en} } @phdthesis{Moesta2011, author = {M{\"o}sta, Philipp}, title = {Novel aspects of the dynamics of binary black-hole mergers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59820}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The inspiral and merger of two black holes is among the most exciting and extreme events in our universe. Being one of the loudest sources of gravitational waves, they provide a unique dynamical probe of strong-field general relativity and a fertile ground for the observation of fundamental physics. While the detection of gravitational waves alone will allow us to observe our universe through an entirely new window, combining the information obtained from both gravitational wave and electro-magnetic observations will allow us to gain even greater insight in some of the most exciting astrophysical phenomena. In addition, binary black-hole mergers serve as an intriguing tool to study the geometry of space-time itself. In this dissertation we study the merger process of binary black-holes in a variety of conditions. Our results show that asymmetries in the curvature distribution on the common apparent horizon are correlated to the linear momentum acquired by the merger remnant. We propose useful tools for the analysis of black holes in the dynamical and isolated horizon frameworks and shed light on how the final merger of apparent horizons proceeds after a common horizon has already formed. We connect mathematical theorems with data obtained from numerical simulations and provide a first glimpse on the behavior of these surfaces in situations not accessible to analytical tools. We study electro-magnetic counterparts of super-massive binary black-hole mergers with fully 3D general relativistic simulations of binary black-holes immersed both in a uniform magnetic field in vacuum and in a tenuous plasma. We find that while a direct detection of merger signatures with current electro-magnetic telescopes is unlikely, secondary emission, either by altering the accretion rate of the circumbinary disk or by synchrotron radiation from accelerated charges, may be detectable. We propose a novel approach to measure the electro-magnetic radiation in these simulations and find a non-collimated emission that dominates over the collimated one appearing in the form of dual jets associated with each of the black holes. Finally, we provide an optimized gravitational wave detection pipeline using phenomenological waveforms for signals from compact binary coalescence and show that by including spin effects in the waveform templates, the detection efficiency is drastically improved as well as the bias on recovered source parameters reduced. On the whole, this disseration provides evidence that a multi-messenger approach to binary black-hole merger observations provides an exciting prospect to understand these sources and, ultimately, our universe.}, language = {en} } @phdthesis{Naaf2011, author = {Naaf, Tobias}, title = {Floristic homogenization and impoverishment : herb layer changes over two decades in deciduous forest patches of the Weser-Elbe region (NW Germany)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52446}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Human-induced alterations of the environment are causing biotic changes worldwide, including the extinction of species and a mixing of once disparate floras and faunas. One type of biological communities that is expected to be particularly affected by environmental alterations are herb layer plant communities of fragmented forests such as those in the west European lowlands. However, our knowledge about current changes in species diversity and composition in these communities is limited due to a lack of adequate long-term studies. In this thesis, I resurveyed the herb layer communities of ancient forest patches in the Weser-Elbe region (NW Germany) after two decades using 175 semi-permanent plots. The general objectives were (i) to quantify changes in plant species diversity considering also between-community (β) and functional diversity, (ii) to determine shifts in species composition in terms of species' niche breadth and functional traits and (iii) to find indications on the most likely environmental drivers for the observed changes. These objectives were pursued with four independent research papers (Chapters 1-4) whose results were brought together in a General Discussion. Alpha diversity (species richness) increased by almost four species on average, whereas β diversity tended to decrease (Chapter 1). The latter is interpreted as a beginning floristic homogenization. The observed changes were primarily the result of a spread of native habitat generalists that are able to tolerate broad pH and moisture ranges. The changes in α and β diversity were only significant when species abundances were neglected (Chapters 1 and 2), demonstrating that the diversity changes resulted mainly from gains and losses of low-abundance species. This study is one of the first studies in temperate Europe that demonstrates floristic homogenization of forest plant communities at a larger than local scale. The diversity changes found at the taxonomic level did not result in similar changes at the functional level (Chapter 2). The likely reason is that these communities are functionally "buffered". Single communities involve most of the functional diversity of the regional pool, i.e., they are already functionally rich, while they are functionally redundant among each other, i.e., they are already homogeneous. Independent of taxonomic homogenization, the abundance of 30 species decreased significantly (Chapter 4). These species included 12 ancient forest species (i.e., species closely tied to forest patches with a habitat continuity > 200 years) and seven species listed on the Red List of endangered plant species in NW Germany. If these decreases continue over the next decades, local extinctions may result. This biotic impoverishment would seriously conflict with regional conservation goals. Community assembly mechanisms changed at the local level particularly at sites that experienced disturbance by forest management activities between the sampling periods (Chapter 3). Disturbance altered community assembly mechanisms in two ways: (i) it relaxed environmental filters and allowed the coexistence of different reproduction strategies, as reflected by a higher diversity of reproductive traits at the time of the resurvey, and (ii) it enhanced light availability and tightened competitive filters. These limited the functional diversity with respect to canopy height and selected for taller species. Thirty-one winner and 30 loser species, which had significantly increased or decreased in abundance, respectively, were characterized by various functional traits and ecological performances to find indications on the most likely environmental drivers for the observed floristic changes (Chapter 4). Winner species had higher seed longevity, flowered later in the season and had more often an oceanic distribution compared to loser species. Loser species tended to have a higher specific leaf area, to be more susceptible to deer browsing and to have a performance optimum at higher soil pH values compared to winner species. Multiple logistic regression analyses indicated that disturbances due to forest management interventions were the primary cause of the species shifts. As one of the first European resurvey studies, this study provides indications that an enhanced browsing pressure due to increased deer densities and increasingly warmer winters are important drivers. The study failed to demonstrate that eutrophication and acidification due to atmospheric deposition substantially drive herb layer changes. The restriction of the sample to the most base-rich sites in the region is discussed as a likely reason. Furthermore, the decline of several ancient forest species is discussed as an indication that the forest patches are still paying off their "extinction debt", i.e., exhibit a delayed response to forest fragmentation.}, language = {en} } @phdthesis{Olaka2011, author = {Olaka, Lydia Atieno}, title = {Hydrology across scales : sensitivity of East African lakes to climate changes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55029}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The lakes of the East African Rift System (EARS) have been intensively studied to better understand the influence of climate change on hydrological systems. The exceptional sensitivity of these rift lakes, however, is both a challenge and an opportunity when trying to reconstruct past climate changes from changes in the hydrological budget of lake basins on timescales 100 to 104 years. On one hand, differences in basin geometrics (shape, area, volume, depth), catchment rainfall distributions and varying erosion-deposition rates complicate regional interpretation of paleoclimate information from lacustrine sediment proxies. On the other hand, the sensitivity of rift lakes often provides paleoclimate records of excellent quality characterized by a high signal-to-noise ratio. This study aims at better understanding of the climate-proxy generating process in rift lakes by parameterizing the geomorphological and hydroclimatic conditions of a particular site providing a step towards the establishment of regional calibrations of transfer functions for climate reconstructions. The knowledge of the sensitivity of a lake basin to climate change furthermore is crucial for a better assessment of the probability of catastrophic changes in the future, which bear risks for landscapes, ecosystems, and organisms of all sorts, including humans. Part 1 of this thesis explores the effect of the morphology and the effective moisture of a lake catchment. The availability of digital elevation models (DEM) and gridded climate data sets facilitates the comparison of the morphological and hydroclimatic conditions of rift lakes. I used the hypsometric integral (HI) calculated from Shuttle Radar Topography Mission (SRTM) data to describe the morphology of ten lake basins in Kenya and Ethiopia. The aridity index (AI) describing the difference in the precipitation/evaporation balance within a catchment was used to compare the hydroclimatic of these basins. Correlating HI and AI with published Holocene lake-level variations revealed that lakes responding sensitively to relatively moderate climate change are typically graben shaped and characterized by a HI between 0.23-0.30, and relatively humid conditions with AI >1. These amplifier lakes, a term first introduced but not fully parameterized by Alayne Street-Perrott in the early 80s, are unexceptionally located in the crest of the Kenyan and Ethiopian domes. The non-amplifier lakes in the EARS either have lower HI 0.13-0.22 and higher AI (>1) or higher HI (0.31-0.37) and low AI (<1), reflecting pan-shaped morphologies with more arid hydroclimatic conditions. Part 2 of this work addresses the third important factor to be considered when using lake-level and proxy records to unravel past climate changes in the EARS: interbasin connectivity and groundwater flow through faulted and porous subsurface lithologies in a rift setting. First, I have compiled the available hydrogeological data including lithology, resistivity and water-well data for the adjacent Naivasha and Elmenteita-Nakuru basins in the Central Kenya Rift. Using this subsurface information and established records of lake-level decline at the last wet-dry climate transitions, i.e., the termination of the African Humid Period (AHP, 15 to 5 kyr BP), I used a linear decay model to estimate typical groundwater flow between the two basins. The results suggest a delayed response of the groundwater levels of ca. 5 kyrs if no recharge of groundwater occurs during the wet-dry transition, whereas the lag is 2-2.7 kyrs only using the modern recharge of ca. 0.52 m/yr. The estimated total groundwater flow from higher Lake Naivasha (1,880 m a.s.l. during the AHP) to Nakuru-Elmenteita (1,770 m) was 40 cubic kilometers. The unexpectedly large volume, more than half of the volume of the paleo-Lake Naivasha during the Early Holocene, emphasizes the importance of groundwater in hydrological modeling of paleo-lakes in rifts. Moreover, the subsurface connectivity of rift lakes also causes a significant lag time to the system introducing a nonlinear component to the system that has to be considered while interpreting paleo-lake records. Part 3 of this thesis investigated the modern intraseasonal precipitation variability within eleven lake basins discussed in the first section of the study excluding Lake Victoria and including Lake Tana. Remotely sensed rainfall estimates (RFE) from FEWS NET for 1996-2010, are used for the, March April May (MAM) July August September (JAS), October November (ON) and December January February (DJF). The seasonal precipitation are averaged and correlated with the prevailing regional and local climatic mechanisms. Results show high variability with Biennial to Triennial precipitation patterns. The spatial distribution of precipitation in JAS are linked to the onset and strength of the Congo Air Boundary (CAB) and Indian Summer Monsoon (ISM) dynamics. while in ON they are related to the strength of Positive ENSO and IOD phases This study describes the influence of graben morphologies, extreme climate constrasts within catchments and basins connectivity through faults and porous lithologies on rift lakes. Hence, it shows the importance of a careful characterization of a rift lake by these parameters prior to concluding from lake-level and proxy records to climate changes. Furthermore, this study highlights the exceptional sensitivity of rift lakes to relatively moderate climate change and its consequences for water availability to the biosphere including humans.}, language = {en} } @phdthesis{Popovic2011, author = {Popovic, Jelena}, title = {Novel lithium iron phosphate materials for lithium-ion batteries}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54591}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Conventional energy sources are diminishing and non-renewable, take million years to form and cause environmental degradation. In the 21st century, we have to aim at achieving sustainable, environmentally friendly and cheap energy supply by employing renewable energy technologies associated with portable energy storage devices. Lithium-ion batteries can repeatedly generate clean energy from stored materials and convert reversely electric into chemical energy. The performance of lithium-ion batteries depends intimately on the properties of their materials. Presently used battery electrodes are expensive to be produced; they offer limited energy storage possibility and are unsafe to be used in larger dimensions restraining the diversity of application, especially in hybrid electric vehicles (HEVs) and electric vehicles (EVs). This thesis presents a major progress in the development of LiFePO4 as a cathode material for lithium-ion batteries. Using simple procedure, a completely novel morphology has been synthesized (mesocrystals of LiFePO4) and excellent electrochemical behavior was recorded (nanostructured LiFePO4). The newly developed reactions for synthesis of LiFePO4 are single-step processes and are taking place in an autoclave at significantly lower temperature (200 deg. C) compared to the conventional solid-state method (multi-step and up to 800 deg. C). The use of inexpensive environmentally benign precursors offers a green manufacturing approach for a large scale production. These newly developed experimental procedures can also be extended to other phospho-olivine materials, such as LiCoPO4 and LiMnPO4. The material with the best electrochemical behavior (nanostructured LiFePO4 with carbon coating) was able to delive a stable 94\% of the theoretically known capacity.}, language = {en} } @phdthesis{Pourteau2011, author = {Pourteau, Amaury}, title = {Closure of the Neotethys Ocean in Anatolia : structural, petrologic and geochronologic insights from low-grade high-pressure metasediments, Afyon Zone}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57803}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The complete consumption of the oceanic domain of a tectonic plate by subduction into the upper mantle results in continent subduction, although continental crust is typically of lower density than the upper mantle. Thus, the sites of former oceanic domains (named suture zones) are generally decorated with stratigraphic sequences deposited along continental passive margins that were metamorphosed under low-grade, high-pressure conditions, i.e., low temperature/depth ratios (< 15°C/km) with respect to geothermal gradients in tectonically stable regions. Throughout the Mesozoic and Cenozoic (i.e., since ca. 250 Ma), the Mediterranean realm was shaped by the closure of the Tethyan Ocean, which likely consisted in numerous oceanic domains and microcontinents. However, the exact number and position of Tethyan oceans and continents (i.e., the Tethyan palaeogeography) remains debated. This is particularly the case of Western and Central Anatolia, where a continental fragment was accreted to the southern composite margin of the Eurasia sometime between the Late Cretaceous and the early Cenozoic. The most frontal part of this microcontinent experienced subduction-related metamorphism around 85-80 Ma, and collision-related metamorphism affected more external parts around 35 Ma. This unsually-long period between subduction- and collision-related metamorphisms (ca. 50 Ma) in units ascribed to the same continental edge constitutes a crucial issue to address in order to unravel how Anatolia was assembled. The Afyon Zone is a tectono-sedimentary unit exposed south and structurally below the front high-pressure belt. It is composed of a Mesozoic sedimentary sequence deposited on top of a Precambrian to Palaeozoic continental substratum, which can be traced from Northwestern to southern Central Anatolia, along a possible Tethyan suture. Whereas the Afyon Zone was defined as a low-pressure metamorphic unit, high-pressure minerals (mainly Fe-Mg-carpholite in metasediments) were recently reported from its central part. These findings shattered previous conceptions on the tectono-metamorphic evolution of the Afyon Zone in particular, and of the entire region in general, and shed light on the necessity to revise the regional extent of subduction-related metamorphism by re-inspecting the petrology of poorly-studied metasediments. In this purpose, I re-evaluated the metamorphic evolution of the entire Afyon Zone starting from field observations. Low-grade, high-pressure mineral assemblages (Fe-Mg-carpholite and glaucophane) are reported throughout the unit. Well-preserved carpholite-chloritoid assemblages are useful to improve our understanding of mineral relations and transitions in the FeO-MgO-Al2O3-SiO2-H2O system during rocks' travel down to depth (prograde metamorphism). Inspection of petrographic textures, minute variations in mineral composition and Mg-Fe distribution among carpholite-chloritoid assemblages documents multistage mineral growth, accompanied by a progressive enrichment in Mg, and strong element partitioning. Using an updated database of mineral thermodynamic properties, I modelled the pressure and temperature conditions that are consistent with textural and chemical observations. Carpholite-bearing assemblages in the Afyon Zone account for a temperature increase from 280 to 380°C between 0.9 and 1.1 GPa (equivalent to a depth of 30-35 km). In order to further constrain regional geodynamics, first radiometric ages were determined in close association with pressure-temperature estimates for the Afyon Zone, as well as two other tectono-sedimentary units from the same continental passive margin (the {\"O}ren and Kurudere-Nebiler Units from SW Anatolia). For age determination, I employed 40Ar-39Ar geochronology on white mica in carpholite-bearing rocks. For thermobarometry, a multi-equilibrium approach was used based on quartz-chlorite-mica and quartz-chlorite-chloritoid associations formed at the expense of carpholite-bearing assemblages, i.e., during the exhumation from the subduction zone. This combination allows deciphering the significance of the calculated radiometric ages in terms of metamorphic conditions. Results show that the Afyon Zone and the {\"O}ren Unit represent a latest Cretaceous high-pressure metamorphic belt, and the Kurudere-Nebiler Unit was affected by subduction-related metamorphism around 45 Ma and cooled down after collision-related metamorphism around 26 Ma. The results provided in the present thesis and from the literature allow better understanding continental amalgamation in Western Anatolia. It is shown that at least two distinct oceanic branches, whereas only one was previously considered, have closed during continuous north-dipping subduction between 92 and 45 Ma. Between 85-80 and 70-65 Ma, a narrow continental domain (including the Afyon Zone) was buried into a subduction zone within the northern oceanic strand. Parts of the subducted continent crust were exhumed while the upper oceanic plate was transported southwards. Subduction of underlying lithosphere persisted, leading to the closure of the southern oceanic branch and to subduct the front of a second continental domain (including the Kurudere-Nebiler Unit). This followed by a continental collisional stage characterized by the cease of subduction, crustal thicknening and the detachment of the subducting oceanic slab from the accreted continent lithosphere. The present study supports that in the late Mesozoic the East Mediterranean realm had a complex tectonic configuration similar to present Southeast Asia or the Caribbean, with multiple, coexisting oceanic basins, microcontinents and subduction zones.}, language = {en} } @phdthesis{Reusser2011, author = {Reusser, Dominik Edwin}, title = {Combining smart model diagnostics and effective data collection for snow catchments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52574}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Complete protection against flood risks by structural measures is impossible. Therefore flood prediction is important for flood risk management. Good explanatory power of flood models requires a meaningful representation of bio-physical processes. Therefore great interest exists to improve the process representation. Progress in hydrological process understanding is achieved through a learning cycle including critical assessment of an existing model for a given catchment as a first step. The assessment will highlight deficiencies of the model, from which useful additional data requirements are derived, giving a guideline for new measurements. These new measurements may in turn lead to improved process concepts. The improved process concepts are finally summarized in an updated hydrological model. In this thesis I demonstrate such a learning cycle, focusing on the advancement of model evaluation methods and more cost effective measurements. For a successful model evaluation, I propose that three questions should be answered: 1) when is a model reproducing observations in a satisfactory way? 2) If model results deviate, of what nature is the difference? And 3) what are most likely the relevant model components affecting these differences? To answer the first two questions, I developed a new method to assess the temporal dynamics of model performance (or TIGER - TIme series of Grouped Errors). This method is powerful in highlighting recurrent patterns of insufficient model behaviour for long simulation periods. I answered the third question with the analysis of the temporal dynamics of parameter sensitivity (TEDPAS). For calculating TEDPAS, an efficient method for sensitivity analysis is necessary. I used such an efficient method called Fourier Amplitude Sensitivity Test, which has a smart sampling scheme. Combining the two methods TIGER and TEDPAS provided a powerful tool for model assessment. With WaSiM-ETH applied to the Weisseritz catchment as a case study, I found insufficient process descriptions for the snow dynamics and for the recession during dry periods in late summer and fall. Focusing on snow dynamics, reasons for poor model performance can either be a poor representation of snow processes in the model, or poor data on snow cover, or both. To obtain an improved data set on snow cover, time series of snow height and temperatures were collected with a cost efficient method based on temperature measurements on multiple levels at each location. An algorithm was developed to simultaneously estimate snow height and cold content from these measurements. Both, snow height and cold content are relevant quantities for spring flood forecasting. Spatial variability was observed at the local and the catchment scale with an adjusted sampling design. At the local scale, samples were collected on two perpendicular transects of 60 m length and analysed with geostatistical methods. The range determined from fitted theoretical variograms was within the range of the sampling design for 80\% of the plots. No patterns were found, that would explain the random variability and spatial correlation at the local scale. At the watershed scale, locations of the extensive field campaign were selected according to a stratified sample design to capture the combined effects of elevation, aspect and land use. The snow height is mainly affected by the plot elevation. The expected influence of aspect and land use was not observed. To better understand the deficiencies of the snow module in WaSiM-ETH, the same approach, a simple degree day model was checked for its capability to reproduce the data. The degree day model was capable to explain the temporal variability for plots with a continuous snow pack over the entire snow season, if parameters were estimated for single plots. However, processes described in the simple model are not sufficient to represent multiple accumulation-melt-cycles, as observed for the lower catchment. Thus, the combined spatio-temporal variability at the watershed scale is not captured by the model. Further tests on improved concepts for the representation of snow dynamics at the Weißeritz are required. From the data I suggest to include at least rain on snow and redistribution by wind as additional processes to better describe spatio-temporal variability. Alternatively an energy balance snow model could be tested. Overall, the proposed learning cycle is a useful framework for targeted model improvement. The advanced model diagnostics is valuable to identify model deficiencies and to guide field measurements. The additional data collected throughout this work helps to get a deepened understanding of the processes in the Weisseritz catchment.}, language = {en} } @phdthesis{Riedel2011, author = {Riedel, Katja}, title = {Elucidation of the epithelial sodium channel as a salt taste receptor candidate and search for novel salt taste receptor candidates}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58764}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Salty taste has evolved to maintain electrolyte homeostasis, serving as a detector for salt containing food. In rodents, salty taste involves at least two transduction mechanisms. One is sensitive to the drug amiloride and specific for Na+, involving epithelial sodium channel (ENaC). A second rodent transduction pathway, which is triggered by various cations, is amiloride insensitive and not almost understood to date. Studies in primates showed amiloride-sensitive as well as amiloride-insensitive gustatory responses to NaCl, implying a role of both salt taste transduction pathways in humans. However, sensory studies in humans point to largely amiloride-insensitive sodium taste perception. An involvement of ENaC in human sodium taste perception was not shown, so far. In this study, ENaC subunit protein and mRNA could be localized to human taste bud cells (TBC). Thus, basolateral αβγ-ENaC ion channels are likely in TBC of circumvallate papillae, possibly mediating basolateral sodium entry. Similarly, basolateral βγ-ENaC might play a role in fungiform TBC. Strikingly, δ-ENaC subunit was confined to taste bud pores of both papillae, likely mediating gustatory sodium entry in TBC, either apical or paracellular via tight junctions. However, regional separation of δ-ENaC and βγ-ENaC in fungiform and circumvallate TBC indicate the presence of unknown interaction partner necessary to assemble into functional ion channels. However, screening of a macaque taste tissue cDNA library did neither reveal polypeptides assembling into a functional cation channel by interaction with δ-ENaC or βγ-ENaC nor ENaC independent salt taste receptor candidates. Thus, ENaC subunits are likely involved in human taste transduction, while exact composition and identity of an amiloride (in)sensitive salt taste receptors remain unclear. Localization of δ-ENaC in human taste pores strongly suggests a role in human taste transduction. In contrast, δ-ENaC is classified as pseudogene Scnn1d in mouse. However, no experimental detected sequences are annotated, while evidences for parts of Scnn1d derived mRNAs exist. In order to elucidate if Scnn1d is possibly involved in rodent salt taste perception, Scnn1d was evaluated in this study to clarify if Scnn1d is a gene or a transcribed pseudogene in mice. Comparative mapping of human SCNN1D to mouse chromosome 4 revealed complete Scnn1d sequence as well as its pseudogenization by Mus specific endogenous retroviruses. Moreover, tissue specific transcription of unitary Scnn1d pseudogene was found in mouse vallate papillae, kidney and testis and led to identification of nine Scnn1d transcripts. In vitro translation experiments showed that Scnn1d transcripts are coding competent for short polypeptides, possibly present in vivo. However, no sodium channel like function or sodium channel modulating activity was evident for Scnn1d transcripts and/or derived polypeptides. Thus, an involvement of mouse δ-ENaC in sodium taste transduction is unlikely and points to species specific differences in salt taste transduction mechanisms.}, language = {en} } @phdthesis{Risse2011, author = {Risse, Sarah}, title = {Processing in the perceptual span : investigations with the n+2-boundary paradigm}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60414}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Cognitive psychology is traditionally interested in the interaction of perception, cognition, and behavioral control. Investigating eye movements in reading constitutes a field of research in which the processes and interactions of these subsystems can be studied in a well-defined environment. Thereby, the following questions are pursued: How much information is visually perceived during a fixation, how is processing achieved and temporally coordinated from visual letter encoding to final sentence comprehension, and how do such processes reflect on behavior such as the control of the eyes' movements during reading. Various theoretical models have been proposed to account for the specific eye-movement behavior in reading (for a review see Reichle, Rayner, \& Pollatsek, 2003). Some models are based on the idea of shifting attention serially from one word to the next within the sentence whereas others propose distributed attention allocating processing resources to more than one word at a time. As attention is assumed to drive word recognition processes one major difference between these models is that word processing must either occur in strict serial order, or that word processing is achieved in parallel. In spite of this crucial difference in the time course of word processing, both model classes perform well on explaining many of the benchmark effects in reading. In fact, there seems to be not much empirical evidence that challenges the models to a point at which their basic assumptions could be falsified. One issue often perceived as being decisive in the debate on serial and parallel word processing is how not-yet-fixated words to the right of fixation affect eye movements. Specifically, evidence is discussed as to what spatial extent such parafoveal words are previewed and how this influences current and subsequent word processing. Four experiments investigated parafoveal processing close to the spatial limits of the perceptual span. The present work aims to go beyond mere existence proofs of previewing words at such spatial distances. Introducing a manipulation that dissociates the sources of long-range preview effects, benefits and costs of parafoveal processing can be investigated in a single analysis and the differing impact is tracked across a three-word target region. In addition, the same manipulation evaluates the role of oculomotor error as the cause of non-local distributed effects. In this respect, the results contribute to a better understanding of the time course of word processing inside the perceptual span and attention allocation during reading.}, language = {en} } @phdthesis{Robinson2011, author = {Robinson, Alexander}, title = {Modeling the Greenland Ice Sheet response to climate change in the past and future}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50430}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The Greenland Ice Sheet (GIS) contains enough water volume to raise global sea level by over 7 meters. It is a relic of past glacial climates that could be strongly affected by a warming world. Several studies have been performed to investigate the sensitivity of the ice sheet to changes in climate, but large uncertainties in its long-term response still exist. In this thesis, a new approach has been developed and applied to modeling the GIS response to climate change. The advantages compared to previous approaches are (i) that it can be applied over a wide range of climatic scenarios (both in the deep past and the future), (ii) that it includes the relevant feedback processes between the climate and the ice sheet and (iii) that it is highly computationally efficient, allowing simulations over very long timescales. The new regional energy-moisture balance model (REMBO) has been developed to model the climate and surface mass balance over Greenland and it represents an improvement compared to conventional approaches in modeling present-day conditions. Furthermore, the evolution of the GIS has been simulated over the last glacial cycle using an ensemble of model versions. The model performance has been validated against field observations of the present-day climate and surface mass balance, as well as paleo information from ice cores. The GIS contribution to sea level rise during the last interglacial is estimated to be between 0.5-4.1 m, consistent with previous estimates. The ensemble of model versions has been constrained to those that are consistent with the data, and a range of valid parameter values has been defined, allowing quantification of the uncertainty and sensitivity of the modeling approach. Using the constrained model ensemble, the sensitivity of the GIS to long-term climate change was investigated. It was found that the GIS exhibits hysteresis behavior (i.e., it is multi-stable under certain conditions), and that a temperature threshold exists above which the ice sheet transitions to an essentially ice-free state. The threshold in the global temperature is estimated to be in the range of 1.3-2.3°C above preindustrial conditions, significantly lower than previously believed. The timescale of total melt scales non-linearly with the overshoot above the temperature threshold, such that a 2°C anomaly causes the ice sheet to melt in ca. 50,000 years, but an anomaly of 6°C will melt the ice sheet in less than 4,000 years. The meltback of the ice sheet was found to become irreversible after a fraction of the ice sheet is already lost - but this level of irreversibility also depends on the temperature anomaly.}, language = {en} } @phdthesis{Samereier2011, author = {Samereier, Matthias}, title = {Functional analyses of microtubule and centrosome-associated proteins in Dictyostelium discoideum}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52835}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Understanding the role of microtubule-associated proteins is the key to understand the complex mechanisms regulating microtubule dynamics. This study employs the model system Dictyostelium discoideum to elucidate the role of the microtubule-associated protein TACC (Transforming acidic coiled-coil) in promoting microtubule growth and stability. Dictyostelium TACC was localized at the centrosome throughout the entire cell cycle. The protein was also detected at microtubule plus ends, however, unexpectedly only during interphase but not during mitosis. The same cell cycle-dependent localization pattern was observed for CP224, the Dictyostelium XMAP215 homologue. These ubiquitous MAPs have been found to interact with TACC proteins directly and are known to act as microtubule polymerases and nucleators. This work shows for the first time in vivo that both a TACC and XMAP215 family protein can differentially localize to microtubule plus ends during interphase and mitosis. RNAi knockdown mutants revealed that TACC promotes microtubule growth during interphase and is essential for proper formation of astral microtubules in mitosis. In many organisms, impaired microtubule stability upon TACC depletion was explained by the failure to efficiently recruit the TACC-binding XMAP215 protein to centrosomes or spindle poles. By contrast, fluorescence recovery after photobleaching (FRAP) analyses conducted in this study demonstrate that in Dictyostelium recruitment of CP224 to centrosomes or spindle poles is not perturbed in the absence of TACC. Instead, CP224 could no longer be detected at the tips of microtubules in TACC mutant cells. This finding demonstrates for the first time in vivo that a TACC protein is essential for the association of an XMAP215 protein with microtubule plus ends. The GFP-TACC strains generated in this work also turned out to be a valuable tool to study the unusual microtubule dynamics in Dictyostelium. Here, microtubules exhibit a high degree of lateral bending movements but, in contrast most other organisms, they do not obviously undergo any growth or shrinkage events during interphase. Despite of that they are affected by microtubuledepolymerizing drugs such as thiabendazole or nocodazol which are thought to act solely on dynamic microtubules. Employing 5D-fluorescence live cell microscopy and FRAP analyses this study suggests Dictyostelium microtubules to be dynamic only in the periphery, while they are stable at the centrosome. In the recent years, the identification of yet unknown components of the Dictyostelium centrosome has made tremendous progress. A proteomic approach previously conducted by our group disclosed several uncharacterized candidate proteins, which remained to be verified as genuine centrosomal components. The second part of this study focuses on the investigation of three such candidate proteins, Cenp68, CP103 and the putative spindle assembly checkpoint protein Mad1. While a GFP-CP103 fusion protein could clearly be localized to isolated centrosomes that are free of microtubules, Cenp68 and Mad1 were found to associate with the centromeres and kinetochores, respectively. The investigation of Cenp68 included the generation of a polyclonal anti-Cenp68 antibody, the screening for interacting proteins and the generation of knockout mutants which, however, did not display any obvious phenotype. Yet, Cenp68 has turned out as a very useful marker to study centromere dynamics during the entire cell cycle. During mitosis, GFP-Mad1 localization strongly resembled the behavior of other Mad1 proteins, suggesting the existence of a yet uncharacterized spindle assembly checkpoint in Dictyostelium.}, language = {en} } @phdthesis{Schiefele2011, author = {Schiefele, J{\"u}rgen}, title = {Casimir-Polder interaction in second quantization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54171}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The Casimir-Polder interaction between a single neutral atom and a nearby surface, arising from the (quantum and thermal) fluctuations of the electromagnetic field, is a cornerstone of cavity quantum electrodynamics (cQED), and theoretically well established. Recently, Bose-Einstein condensates (BECs) of ultracold atoms have been used to test the predictions of cQED. The purpose of the present thesis is to upgrade single-atom cQED with the many-body theory needed to describe trapped atomic BECs. Tools and methods are developed in a second-quantized picture that treats atom and photon fields on the same footing. We formulate a diagrammatic expansion using correlation functions for both the electromagnetic field and the atomic system. The formalism is applied to investigate, for BECs trapped near surfaces, dispersion interactions of the van der Waals-Casimir-Polder type, and the Bosonic stimulation in spontaneous decay of excited atomic states. We also discuss a phononic Casimir effect, which arises from the quantum fluctuations in an interacting BEC.}, language = {en} } @phdthesis{Schulze2011, author = {Schulze, Andreas}, title = {Demographics of supermassive black holes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54464}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Supermassive black holes are a fundamental component of the universe in general and of galaxies in particular. Almost every massive galaxy harbours a supermassive black hole (SMBH) in its center. Furthermore, there is a close connection between the growth of the SMBH and the evolution of its host galaxy, manifested in the relationship between the mass of the black hole and various properties of the galaxy's spheroid component, like its stellar velocity dispersion, luminosity or mass. Understanding this relationship and the growth of SMBHs is essential for our picture of galaxy formation and evolution. In this thesis, I make several contributions to improve our knowledge on the census of SMBHs and on the coevolution of black holes and galaxies. The first route I follow on this road is to obtain a complete census of the black hole population and its properties. Here, I focus particularly on active black holes, observable as Active Galactic Nuclei (AGN) or quasars. These are found in large surveys of the sky. In this thesis, I use one of these surveys, the Hamburg/ESO survey (HES), to study the AGN population in the local volume (z~0). The demographics of AGN are traditionally represented by the AGN luminosity function, the distribution function of AGN at a given luminosity. I determined the local (z<0.3) optical luminosity function of so-called type 1 AGN, based on the broad band B_J magnitudes and AGN broad Halpha emission line luminosities, free of contamination from the host galaxy. I combined this result with fainter data from the Sloan Digital Sky Survey (SDSS) and constructed the best current optical AGN luminosity function at z~0. The comparison of the luminosity function with higher redshifts supports the current notion of 'AGN downsizing', i.e. the space density of the most luminous AGN peaks at higher redshifts and the space density of less luminous AGN peaks at lower redshifts. However, the AGN luminosity function does not reveal the full picture of active black hole demographics. This requires knowledge of the physical quantities, foremost the black hole mass and the accretion rate of the black hole, and the respective distribution functions, the active black hole mass function and the Eddington ratio distribution function. I developed a method for an unbiased estimate of these two distribution functions, employing a maximum likelihood technique and fully account for the selection function. I used this method to determine the active black hole mass function and the Eddington ratio distribution function for the local universe from the HES. I found a wide intrinsic distribution of black hole accretion rates and black hole masses. The comparison of the local active black hole mass function with the local total black hole mass function reveals evidence for 'AGN downsizing', in the sense that in the local universe the most massive black holes are in a less active stage then lower mass black holes. The second route I follow is a study of redshift evolution in the black hole-galaxy relations. While theoretical models can in general explain the existence of these relations, their redshift evolution puts strong constraints on these models. Observational studies on the black hole-galaxy relations naturally suffer from selection effects. These can potentially bias the conclusions inferred from the observations, if they are not taken into account. I investigated the issue of selection effects on type 1 AGN samples in detail and discuss various sources of bias, e.g. an AGN luminosity bias, an active fraction bias and an AGN evolution bias. If the selection function of the observational sample and the underlying distribution functions are known, it is possible to correct for this bias. I present a fitting method to obtain an unbiased estimate of the intrinsic black hole-galaxy relations from samples that are affected by selection effects. Third, I try to improve our census of dormant black holes and the determination of their masses. One of the most important techniques to determine the black hole mass in quiescent galaxies is via stellar dynamical modeling. This method employs photometric and kinematic observations of the galaxy and infers the gravitational potential from the stellar orbits. This method can reveal the presence of the black hole and give its mass, if the sphere of the black hole's gravitational influence is spatially resolved. However, usually the presence of a dark matter halo is ignored in the dynamical modeling, potentially causing a bias on the determined black hole mass. I ran dynamical models for a sample of 12 galaxies, including a dark matter halo. For galaxies for which the black hole's sphere of influence is not well resolved, I found that the black hole mass is systematically underestimated when the dark matter halo is ignored, while there is almost no effect for galaxies with well resolved sphere of influence.}, language = {en} }