@phdthesis{Kuehnast2010, author = {Kuehnast, Milena}, title = {Processing negative imperatives in Bulgarian : evidence from normal, aphasic and child language}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45826}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The incremental nature of sentence processing raises questions about the way the information of incoming functional elements is accessed and subsequently employed in building the syntactic structure which sustains interpretation processes. The present work approaches these questions by investigating the negative particle ne used for sentential negation in Bulgarian and its impact on the overt realisation and the interpretation of imperative inflexion, bound aspectual morphemes and clitic pronouns in child, adult and aphasic language. In contrast to other Slavic languages, Bulgarian negative imperatives (NI) are grammatical only with imperfective verbs. We argue that NI are instantiations of overt aspectual coercion induced by the presence of negation as a temporally sensitive sentential operator. The scope relation between imperative mood, negation, and aspect yields the configuration of the imperfective present which in Bulgarian has to be overtly expressed and prompts the imperfective marking of the predicate. The regular and transparent application of the imperfectivising mechanism relates to the organisation of the TAM categories in Bulgarian which not only promotes the representation of fine perspective shifts but also provides for their distinct morphological expression. Using an elicitation task with NI, we investigated the way 3- and 4-year-old children represent negation in deontic contexts as reflected in their use of aspectually appropriate predicates. Our findings suggest that children are sensitive to the imperfectivity requirement in NI from early on. The imperfectivisation strategies reveal some differences from the target morphological realisation. 
The relatively low production of target imperfectivised prefixed verbs cannot be explained with morphological processing deficits, but rather indicates that up to the age of five children experience difficulties to apply a progressive view point to accomplishments. Two self-paced reading studies present evidence that neurologically unimpaired Bulgarian speakers profit from the syntactic and prosodic properties of negation during online sentence comprehension. The imperfectivity requirement negation imposes on the predicate speeds up lexical access to imperfective verbs. Similarly, clitic pronouns are more accessible after negation due to the phono-syntactic properties of clitic clusters. As the experimental stimuli do not provide external discourse referents, personal pronouns are parsed as object agreement markers. Without subsequent resolution, personal pronouns appear to be less resource demanding than reflexive clitics. This finding is indicative of the syntax-driven co-reference establishment processes triggered through the lexical specification of reflexive clitics. The results obtained from Bulgarian Broca's aphasics show that they exhibit processing patterns similar to those of the control group. Notwithstanding their slow processing speed, the agrammatic group showed no impairment of negation as reflected by their sensitivity to the aspectual requirements of NI, and to the prosodic constraints on clitic placement. The aphasics were able to parse the structural dependency between mood, negation and aspect as functional categories and to represent it morphologically. The prolonged reaction times (RT) elicited by prefixed verbs indicate increasing processing costs due to the semantic integration of prefixes as perfectivity markers into an overall imperfective construal. This inference is supported by the slower RT to reflexive clitics, which undergo a structurally triggered resolution. 
Evaluated against cross-linguistic findings, the obtained result strongly suggests that aphasic performance with pronouns depends on the interpretation efforts associated with co-reference establishment and varies due to availability of discourse referents. The investigation of normal and agrammatic processing of Bulgarian NI presents support for the hypothesis that the comprehension deficits in Broca's aphasia result from a slowed-down implementation of syntactic operations. The protracted structure building consumes processing resources and causes temporal mismatches with other processes sustaining sentence comprehension. The investigation of the way Bulgarian children and aphasic speakers process NI reveals that both groups are highly sensitive to the imperfective constraint on the aspectual construal imposed by the presence of negation. The imperfective interpretation requires access to morphologically complex verb forms which contain aspectual morphemes with conflicting semantic information - perfective prefixes and imperfective suffixes. Across modalities, both populations exhibit difficulties in processing prefixed imperfectivised verbs which as predicates of negative imperative sentences reflect the inner perspective the speaker and the addressee need to take towards a potentially bounded situation description.}, language = {en} } @phdthesis{Trompelt2010, author = {Trompelt, Helena}, title = {Production of regular and non-regular verbs : evidence for a lexical entry complexity account}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-061-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42120}, school = {Universit{\"a}t Potsdam}, pages = {x, 168}, year = {2010}, abstract = {The incredible productivity and creativity of language depends on two fundamental resources: a mental lexicon and a mental grammar. 
Rules of grammar enable us to produce and understand complex phrases we have not encountered before and at the same time constrain the computation of complex expressions. The concepts of the mental lexicon and mental grammar have been thoroughly tested by comparing the use of regular versus non-regular word forms. Regular verbs (e.g. walk-walked) are computed using a suffixation rule in a neural system for grammatical processing; non-regular verbs (run-ran) are retrieved from associative memory. The role of regularity has only been explored for the past tense, where regularity is overtly visible. To explore the representation and encoding of regularity as well as the inflectional processes involved in the production of regular and non-regular verbs, this dissertation investigated three groups of German verbs: regular, irregular and hybrid verbs. Hybrid verbs in German have completely regular conjugation in the present tense and irregular conjugation in the past tense. Articulation latencies were measured while participants named pictures of actions, producing the 3rd person singular of regular, hybrid, and irregular verbs in present and past tense. Studying the production of German verbs in past and present tense, this dissertation explored the complexity of lexical entries as a decisive factor in the production of verbs.}, language = {en} } @phdthesis{Gentsch2010, author = {Gentsch, Rafael}, title = {Complex bioactive fiber systems by means of electrospinning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-44900}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Nanofibrous mats are interesting scaffold materials for biomedical applications like tissue engineering due to their interconnectivity and their size dimension which mimics the native cell environment. Electrospinning provides a simple route to access such fiber meshes. This thesis addresses the structural and functional control of electrospun fiber mats. 
In the first section, it is shown that fiber meshes with bimodal size distribution could be obtained in a single-step process by electrospinning. A standard single syringe set-up was used to spin concentrated poly(ε-caprolactone) (PCL) and poly(lactic-co-glycolic acid) (PLGA) solutions in chloroform and meshes with bimodal-sized fiber distribution could be directly obtained by reducing the spinning rate at elevated humidity. Scanning electron microscopy (SEM) and mercury porosity of the meshes suggested a suitable pore size distribution for effective cell infiltration. The bimodal fiber meshes together with unimodal fiber meshes were evaluated for cellular infiltration. While the micrometer fibers in the mixed meshes generate an open pore structure, the submicrometer fibers support cell adhesion and facilitate cell bridging on the large pores. This was revealed by initial cell penetration studies, showing superior ingrowth of epithelial cells into the bimodal meshes compared to a mesh composed of unimodal 1.5 μm fibers. The bimodal fiber meshes together with electrospun nano- and microfiber meshes were further used for the inorganic/organic hybrid fabrication of PCL with calcium carbonate or calcium phosphate, two biorelevant minerals. Such composite structures are attractive for the potential improvement of properties such as stiffness or bioactivity. It was possible to encapsulate nano and mixed sized plasma-treated PCL meshes to areas > 1 mm2 with calcium carbonate using three different mineralization methods including the use of poly(acrylic acid). The additive seemed to be useful in stabilizing amorphous calcium carbonate to effectively fill the space between the electrospun fibers resulting in composite structures. Micro-, nano- and mixed sized fiber meshes were successfully coated within hours by fiber directed crystallization of calcium phosphate using a ten-times concentrated simulated body fluid. 
It was shown that nanofibers accelerated the calcium phosphate crystallization, as compared to microfibers. In addition, crystallizations performed at static conditions led to hydroxyapatite formations whereas in dynamic conditions brushite coexisted. In the second section, nanofiber functionalization strategies are investigated. First, a one-step process was introduced where a peptide-polymer-conjugate (PLLA-b-CGGRGDS) was co-spun with PLGA in such a way that the peptide is enriched on the surface. It was shown that by adding methanol to the chloroform/blend solution, a dramatic increase of the peptide concentration at the fiber surface could be achieved as determined by X-ray photoelectron spectroscopy (XPS). Peptide accessibility was demonstrated via a contact angle comparison of pure PLGA and RGD-functionalized fiber meshes. In addition, the electrostatic attraction between a RGD-functionalized fiber and a silica bead at pH ~ 4 confirmed the accessibility of the peptide. The bioactivity of these RGD-functionalized fiber meshes was demonstrated using blends containing 18 wt\% bioconjugate. These meshes promoted adhesion behavior of fibroblast compared to pure PLGA meshes. In a second functionalization approach, a modular strategy was investigated. In a single step, reactive fiber meshes were fabricated and then functionalized with bioactive molecules. While the electrospinning of the pure reactive polymer poly(pentafluorophenyl methacrylate) (PPFPMA) was feasible, the inherent brittleness of PPFPMA required to spin a PCL blend. Blends and pure PPFPMA showed a two-step functionalization kinetics. An initial fast reaction of the pentafluorophenyl esters with aminoethanol as a model substance was followed by a slow conversion upon further hydrophilization. This was analysed by UV/Vis-spectroscopy of the pentafluorophenol release upon nucleophilic substitution with the amines. The conversion was confirmed by increased hydrophilicity of the resulting meshes. 
The PCL/PPFPMA fiber meshes were then used for functionalization with more complex molecules such as saccharides. Aminofunctionalized D-Mannose or D-Galactose was reacted with the active pentafluorophenyl esters as followed by UV/Vis spectroscopy and XPS. The functionality was shown to be bioactive using macrophage cell culture. The meshes functionalized with D-Mannose specifically stimulated the cytokine production of macrophages when lipopolysaccharides were added. This was in contrast to D-Galactose- or aminoethanol-functionalized and unfunctionalized PCL/PPFPMA fiber mats.}, language = {en} } @phdthesis{Esther2010, author = {Esther, Alexandra}, title = {Investigating mechanisms maintaining plant species diversity in fire prone Mediterranean-type vegetation using spatially-explicit simulation models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-44632}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Fire prone Mediterranean-type vegetation systems like those in the Mediterranean Basin and South-Western Australia are global hot spots for plant species diversity. To ensure management programs act to maintain these highly diverse plant communities, it is necessary to get a profound understanding of the crucial mechanisms of coexistence. In the current literature several mechanisms are discussed. The objective of my thesis is to systematically explore the importance of potential mechanisms for maintaining multi-species, fire prone vegetation by modelling. The model I developed is spatially-explicit, stochastic, rule- and individual-based. It is parameterised on data of population dynamics collected over 18 years in the Mediterranean-type shrublands of Eneabba, Western Australia. From 156 woody species of the area seven plant traits have been identified to be relevant for this study: regeneration mode, annual maximum seed production, seed size, maximum crown diameter, drought tolerance, dispersal mode and seed bank type. 
Trait sets are used for the definition of plant functional types (PFTs). The PFT dynamics are simulated annually by iterating life history processes. In the first part of my thesis I investigate the importance of trade-offs for the maintenance of high diversity in multi-species systems with 288 virtual PFTs. Simulation results show that the trade-off concept can be helpful to identify non-viable combinations of plant traits. However, the Shannon Diversity Index of modelled communities can be high despite the presence of 'supertypes'. I conclude that trade-offs between two traits are less important to explain multi-species coexistence and high diversity than it is predicted by more conceptual models. Several studies show that seed immigration from the regional seed pool is essential for maintaining local species diversity. However, systematic studies on the seed rain composition to multi-species communities are missing. The results of the simulation experiments, as presented in part two of this thesis, show clearly that without seed immigration the local species community found in Eneabba drifts towards a state with few coexisting PFTs. With increasing immigration rates the number of simulated coexisting PFTs and Shannon diversity quickly approaches values as also observed in the field. Including the regional seed input in the model is suited to explain more aggregated measures of the local plant community structure such as species richness and diversity. Hence, the seed rain composition should be implemented in future studies. In the third part of my thesis I test the sensitivity of Eneabba PFTs to four different climate change scenarios, considering their impact on both local and regional processes. The results show that climate change clearly has the potential to alter the number of dispersed seeds for most of the Eneabba PFTs and therefore the source of the 'immigrants' at the community level. 
A classification tree analysis shows that, in general, the response to climate change was PFT-specific. In the Eneabba sand plains sensitivity of a PFT to climate change depends on its specific trait combination and on the scenario of environmental change i.e. development of the amount of rainfall and the fire frequency. This result emphasizes that PFT-specific responses and regional process seed immigration should not be ignored in studies dealing with the impact of climate change on future species distribution. The results of the three chapters are finally analysed in a general discussion. The model is discussed and improvements and suggestions are made for future research. My work leads to the following conclusions: i) It is necessary to support modelling with empirical work to explain coexistence in species-rich plant communities. ii) The chosen modelling approach allows considering the complexity of coexistence and improves the understanding of coexistence mechanisms. iii) Field research based assumptions in terms of environmental conditions and plant life histories can relativise the importance of more hypothetic coexistence theories in species-rich systems. In consequence, trade-offs can play a lower role than predicted by conceptual models. iv) Seed immigration is a key process for local coexistence. Its alteration because of climate change should be considered for prognosis of coexistence. 
Field studies should be carried out to get data on seed rain composition.}, language = {en} } @phdthesis{Abed2010, author = {Abed, Jamil}, title = {An iterative approach to operators on manifolds with singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-44757}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {We establish elements of a new approach to ellipticity and parametrices within operator algebras on manifolds with higher singularities, only based on some general axiomatic requirements on parameter-dependent operators in suitable scales of spaces. The idea is to model an iterative process with new generations of parameter-dependent operator theories, together with new scales of spaces that satisfy analogous requirements as the original ones, now on a corresponding higher level. The ``full'' calculus involves two separate theories, one near the tip of the corner and another one at the conical exit to infinity. However, concerning the conical exit to infinity, we establish here a new concrete calculus of edge-degenerate operators which can be iterated to higher singularities.}, language = {en} } @phdthesis{Wilke2010, author = {Wilke, Franziska Daniela Helena}, title = {Quantifying crystalline exhumation in the Himalaya}, doi = {10.25932/publishup-4119}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-43138}, school = {Universit{\"a}t Potsdam}, pages = {IV, 99}, year = {2010}, abstract = {In 1915, Alfred Wegener published his hypotheses of plate tectonics that revolutionised the world for geologists. Since then, many scientists have studied the evolution of continents and especially the geologic structure of orogens: the most visible consequence of tectonic processes. 
Although the morphology and landscape evolution of mountain belts can be observed due to surface processes, the driving force and dynamics at lithosphere scale are less well understood despite the fact that rocks from deeper levels of orogenic belts are in places exposed at the surface. In this thesis, such formerly deeply-buried (ultra-) high-pressure rocks, in particular eclogite facies series, have been studied in order to reveal details about the formation and exhumation conditions and rates and thus provide insights into the geodynamics of the most spectacular orogenic belt in the world: the Himalaya. The specific area investigated was the Kaghan Valley in Pakistan (NW Himalaya). Following closure of the Tethyan Ocean by ca. 55-50 Ma, the northward subduction of the leading edge of India beneath the Eurasian Plate and subsequent collision initiated a long-lived process of intracrustal thrusting that continues today. The continental crust of India - granitic basement, Paleozoic and Mesozoic cover series and Permo-Triassic dykes, sills and lavas - has been buried partly to mantle depths. Today, these rocks crop out as eclogites, amphibolites and gneisses within the Higher Himalayan Crystalline between low-grade metamorphosed rocks (600-640°C/ ca. 5 kbar) of the Lesser Himalaya and Tethyan sediments. Beside tectonically driven exhumation mechanisms the channel flow model, that describes a denudation focused ductile extrusion of low viscosity material developed in the middle to lower crust beneath the Tibetan Plateau, has been postulated. To get insights into the lithospheric and crustal processes that have initiated and driven the exhumation of this (ultra-) high-pressure rocks, mineralogical, petrological and isotope-geochemical investigations have been performed. 
They provide insights into 1) the depths and temperatures to which these rocks were buried, 2) the pressures and temperatures the rocks have experienced during their exhumation, 3) the timing of these processes 4) and the velocity with which these rocks have been brought back to the surface. In detail, through microscopical studies, the identification of key minerals, microprobe analyses, standard geothermobarometry and modelling using an effective bulk rock composition it has been shown that published exhumation paths are incomplete. In particular, the eclogites of the northern Kaghan Valley were buried to depths of 140-100 km (36-30 kbar) at 790-640°C. Subsequently, cooling during decompression (exhumation) towards 40-35 km (17-10 kbar) and 630-580°C has been superseded by a phase of reheating to about 720-650°C at roughly the same depth before final exhumation has taken place. In the southern-most part of the study area, amphibolite facies assemblages with formation conditions similar to the deduced reheating phase indicate a juxtaposition of both areas after the eclogite facies stage and thus a stacking of Indian Plate units. Radiometric dating of zircon, titanite and rutile by U-Pb and amphibole and micas by Ar-Ar reveal peak pressure conditions at 47-48 Ma. With a maximum exhumation rate of 14 cm/a these rocks reached the crust-mantle boundary at 40-35 km within 1 Ma. Subsequent exhumation (46-41 Ma, 40-35 km) decelerated to ca. 1 mm/a at the base of the continental crust but rose again to about 2 mm/a in the period of 41-31 Ma, equivalent to 35-20 km. Apatite fission track (AFT) and (U-Th)/He ages from eclogites, amphibolites, micaschists and gneisses yielded moderate Oligocene to Miocene cooling rates of about 10°C/Ma in the high altitude northern parts of the Kaghan Valley using the mineral-pair method. AFT ages are of 24.5±3.8 to 15.6±2.1 Ma whereas apatite (U-Th)/He analyses yielded ages between 21.0±0.6 and 5.3±0.2 Ma. 
The southern-most part of the Valley is dominated by younger late Miocene to Pliocene apatite fission track ages of 7.6±2.1 and 4.0±0.5 Ma that support earlier tectonic and petrological findings of a juxtaposition and stack of Indian Plate units. As this nappe is tectonically lowermost, a later distinct exhumation and uplift driven by thrusting along the Main Boundary Thrust is inferred. A multi-stage exhumation path is evident from petrological, isotope-geochemical and low temperature thermochronology investigations. Buoyancy driven exhumation caused an initial rapid exhumation: exhumation as fast as recent normal plate movements (ca. 10 cm/a). As the exhuming units reached the crust-mantle boundary the process slowed down due to changes in buoyancy. Most likely, this exhumation pause has initiated the reheating event that is petrologically evident (e.g. glaucophane rimmed by hornblende, ilmenite overgrowth of rutile). Late stage processes involved widespread thrusting and folding with accompanied regional greenschist facies metamorphism, whereby contemporaneous thrusting on the Batal Thrust (seen by some authors as equivalent to the MCT) and back sliding of the Kohistan Arc along the inverse reactivated Main Mantle Thrust caused final exposure of these rocks. Similar circumstances have been seen at Tso Morari, Ladakh, India, 200 km further east where comparable rock assemblages occur. In conclusion, as exhumation was already done well before the initiation of the monsoonal system, climate dependent effects (erosion) appear negligible in comparison to far-field tectonic effects.}, language = {en} } @phdthesis{Huber2010, author = {Huber, Veronika Emilie Charlotte}, title = {Climate impact on phytoplankton blooms in shallow lakes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42346}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Lake ecosystems across the globe have responded to climate warming of recent decades. 
However, correctly attributing observed changes to altered climatic conditions is complicated by multiple anthropogenic influences on lakes. This thesis contributes to a better understanding of climate impacts on freshwater phytoplankton, which forms the basis of the food chain and decisively influences water quality. The analyses were, for the most part, based on a long-term data set of physical, chemical and biological variables of a shallow, polymictic lake in north-eastern Germany (M{\"u}ggelsee), which was subject to a simultaneous change in climate and trophic state during the past three decades. Data analysis included constructing a dynamic simulation model, implementing a genetic algorithm to parameterize models, and applying statistical techniques of classification tree and time-series analysis. Model results indicated that climatic factors and trophic state interactively determine the timing of the phytoplankton spring bloom (phenology) in shallow lakes. Under equally mild spring conditions, the phytoplankton spring bloom collapsed earlier under high than under low nutrient availability, due to a switch from a bottom-up driven to a top-down driven collapse. A novel approach to model phenology proved useful to assess the timings of population peaks in an artificially forced zooplankton-phytoplankton system. Mimicking climate warming by lengthening the growing period advanced algal blooms and consequently also peaks in zooplankton abundance. Investigating the reasons for the contrasting development of cyanobacteria during two recent summer heat wave events revealed that anomalously hot weather did not always, as often hypothesized, promote cyanobacteria in the nutrient-rich lake studied. The seasonal timing and duration of heat waves determined whether critical thresholds of thermal stratification, decisive for cyanobacterial bloom formation, were crossed. 
In addition, the temporal patterns of heat wave events influenced the summer abundance of some zooplankton species, which as predators may serve as a buffer by suppressing phytoplankton bloom formation. This thesis adds to the growing body of evidence that lake ecosystems have strongly responded to climatic changes of recent decades. It reaches beyond many previous studies of climate impacts on lakes by focusing on underlying mechanisms and explicitly considering multiple environmental changes. Key findings show that climate impacts are more severe in nutrient-rich than in nutrient-poor lakes. Hence, to develop lake management plans for the future, limnologists need to seek a comprehensive, mechanistic understanding of overlapping effects of the multi-faceted human footprint on aquatic ecosystems.}, language = {en} } @phdthesis{Chirvasa2010, author = {Chirvasa, Mihaela}, title = {Finite difference methods for 1st Order in time, 2nd order in space, hyperbolic systems used in numerical relativity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42135}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis is concerned with the development of numerical methods using finite difference techniques for the discretization of initial value problems (IVPs) and initial boundary value problems (IBVPs) of certain hyperbolic systems which are first order in time and second order in space. This type of system appears in some formulations of Einstein equations, such as ADM, BSSN, NOR, and the generalized harmonic formulation. For IVP, the stability method proposed in [14] is extended from second and fourth order centered schemes, to 2n-order accuracy, including also the case when some first order derivatives are approximated with off-centered finite difference operators (FDO) and dissipation is added to the right-hand sides of the equations. 
For the model problem of the wave equation, special attention is paid to the analysis of Courant limits and numerical speeds. Although off-centered FDOs have larger truncation errors than centered FDOs, it is shown that in certain situations, off-centering by just one point can be beneficial for the overall accuracy of the numerical scheme. The wave equation is also analyzed in respect to its initial boundary value problem. All three types of boundaries - outflow, inflow and completely inflow that can appear in this case, are investigated. Using the ghost-point method, 2n-accurate (n = 1, 4) numerical prescriptions are prescribed for each type of boundary. The inflow boundary is also approached using the SAT-SBP method. In the end of the thesis, a 1-D variant of BSSN formulation is derived and some of its IBVPs are considered. The boundary procedures, based on the ghost-point method, are intended to preserve the interior 2n-accuracy. Numerical tests show that this is the case if sufficient dissipation is added to the rhs of the equations.}, language = {en} } @phdthesis{Borchert2010, author = {Borchert, Manuela}, title = {Interactions between aqueous fluids and silicate melts : equilibration, partitioning and complexation of trace elements}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42088}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The origin and evolution of granites has been widely studied because granitoid rocks constitute a major portion of the Earth's crust. The formation of granitic magma is, besides temperature mainly triggered by the water content of these rocks. The presence of water in magmas plays an important role due to the ability of aqueous fluids to change the chemical composition of the magma. The exsolution of aqueous fluids from melts is closely linked to a fractionation of elements between the two phases. 
Then, aqueous fluids migrate to shallower parts of the Earth's crust because of its lower density compared to that of melts and adjacent rocks. This process separates fluids and melts, and furthermore, during the ascent, aqueous fluids can react with the adjacent rocks and alter their chemical signature. This is particularly important during the formation of magmatic-hydrothermal ore deposits or in the late stages of the evolution of magmatic complexes. For a deeper insight into these processes, it is essential to improve our knowledge on element behavior in such systems. In particular, trace elements are used for these studies and petrogenetic interpretations because, unlike major elements, they are not essential for the stability of the phases involved and often reflect magmatic processes with less ambiguity. However, for the majority of important trace elements, the dependence of the geochemical behavior on temperature, pressure, and in particular on the composition of the system is only incompletely or not at all experimentally studied. Former studies often focus on the determination of fluid-melt partition coefficients (Df/m=cfluid/cmelt) of economically interesting elements, e.g., Mo, Sn, Cu, and there are some partitioning data available for elements that are also commonly used for petrological interpretations. At present, no systematic experimental data on trace element behavior in fluid-melt systems as a function of pressure, temperature, and chemical composition are available. Additionally, almost all existing data are based on the analysis of quenched phases. This results in substantial uncertainties, particularly for the quenched aqueous fluid because trace element concentrations may change upon cooling. 
The objective of this PhD thesis consisted in the study of fluid-melt partition coefficients between aqueous solutions and granitic melts for different trace elements (Rb, Sr, Ba, La, Y, and Yb) as a function of temperature, pressure, salinity of the fluid, composition of the melt, and experimental and analytical approach. The latter included the refinement of an existing method to measure trace element concentrations in fluids equilibrated with silicate melts directly at elevated pressures and temperatures using a hydrothermal diamond-anvil cell and synchrotron radiation X-ray fluorescence microanalysis. The application of this in-situ method makes it possible to avoid the main source of error in data from quench experiments, i.e., trace element concentration in the fluid. A comparison of the in-situ results to data of conventional quench experiments allows a critical evaluation of quench data from this study and literature data. In detail, starting materials consisted of a suite of trace element doped haplogranitic glasses with ASI varying between 0.8 and 1.4 and H2O or a chloridic solution with m NaCl/KCl=1 and different salinities (1.16 to 3.56 m (NaCl+KCl)). Experiments were performed at 750 to 950°C and 0.2 or 0.5 GPa using conventional quench devices (externally and internally heated pressure vessels) with different quench rates, and at 750°C and 0.2 to 1.4 GPa with in-situ analysis of the trace element concentration in the fluids. The fluid-melt partitioning data of all studied trace elements show 1. a preference for the melt (Df/m < 1) at all studied conditions, 2. one to two orders of magnitude higher Df/m using chloridic solutions compared to experiments with H2O, 3. a clear dependence on the melt composition for fluid-melt partitioning of Sr, Ba, La, Y, and Yb in experiments using chloridic solutions, 4. quench rate-related differences of fluid-melt partition coefficients of Rb and Sr, and 5. 
distinctly higher fluid-melt partitioning data obtained from in-situ experiments than from comparable quench runs, particularly in the case of H2O as starting solution. The data point to a preference of all studied trace elements for the melt even at fairly high salinities, which contrasts with other experimental studies, but is supported by data from studies of natural co-genetically trapped fluid and melt inclusions. The in-situ measurements of trace element concentrations in the fluid verify that aqueous fluids will change their composition upon cooling, which is in particular important for Cl-free systems. The distinct differences of the in-situ results to quench data of this study as well as to data from the literature signify the importance of a careful fluid sampling and analysis. Therefore, the direct measurement of trace element contents in fluids equilibrated with silicate melts at elevated PT conditions represents an important development to obtain more reliable fluid-melt partition coefficients. For further improvement, both the aqueous fluid and the silicate melt need to be analyzed in-situ because partitioning data that are based on the direct measurement of the trace element content in the fluid and analysis of a quenched melt are still not completely free of quench effects. At present, all available data on element complexation in aqueous fluids in equilibrium with silicate melts at high PT are indirectly derived from partitioning data, which involves in these experiments assumptions on the species present in the fluid. However, the activities of chemical components in these partitioning experiments are not well constrained, which is required for the definition of exchange equilibria between melt and fluid species. For example, the melt-dependent variation of partition coefficients observed for Sr imply that this element can not only be complexed by Cl- as suggested previously. 
The data indicate a more complicated complexation of Sr in the aqueous fluid. To verify this hypothesis, the in-situ setup was also used to determine strontium complexation in fluids equilibrated with silicate melts at desired PT conditions by the application of X-ray absorption near edge structure (XANES) spectroscopy. First results show a strong effect of both fluid and melt composition on the resulting XANES spectra, which indicates different complexation environments for Sr.}, language = {en} } @phdthesis{Werth2010, author = {Werth, Susanna}, title = {Calibration of the global hydrological model WGHM with water mass variations from GRACE gravity data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41738}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Since the start-up of the GRACE (Gravity Recovery And Climate Experiment) mission in 2002 time dependent global maps of the Earth's gravity field are available to study geophysical and climatologically-driven mass redistributions on the Earth's surface. In particular, GRACE observations of total water storage changes (TWSV) provide a comprehensive data set for analysing the water cycle on large scales. Therefore they are invaluable for validation and calibration of large-scale hydrological models as the WaterGAP Global Hydrology Model (WGHM) which simulates the continental water cycle including its most important components, such as soil, snow, canopy, surface- and groundwater. Hitherto, WGHM exhibits significant differences to GRACE, especially for the seasonal amplitude of TWSV. The need for a validation of hydrological models is further highlighted by large differences between several global models, e.g. WGHM, the Global Land Data Assimilation System (GLDAS) and the Land Dynamics model (LaD). For this purpose, GRACE links geodetic and hydrological research aspects. 
This link demands the development of adequate data integration methods on both sides, forming the main objectives of this work. They include the derivation of accurate GRACE-based water storage changes, the development of strategies to integrate GRACE data into a global hydrological model as well as a calibration method, followed by the re-calibration of WGHM in order to analyse process and model responses. To achieve these aims, GRACE filter tools for the derivation of regionally averaged TWSV were evaluated for specific river basins. Here, a decorrelation filter using GRACE orbits for its design is most efficient among the tested methods. Consistency in data and equal spatial resolution between observed and simulated TWSV were realised by the inclusion of all most important hydrological processes and an equal filtering of both data sets. Appropriate calibration parameters were derived by a WGHM sensitivity analysis against TWSV. Finally, a multi-objective calibration framework was developed to constrain model predictions by both river discharge and GRACE TWSV, realised with a respective evolutionary method, the ε-Non-dominated-Sorting-Genetic-Algorithm-II (ε-NSGAII). Model calibration was done for the 28 largest river basins worldwide and for most of them improved simulation results were achieved with regard to both objectives. From the multi-objective approach more reliable and consistent simulations of TWSV within the continental water cycle were gained and possible model structure errors or mis-modelled processes for specific river basins detected. For tropical regions as such, the seasonal amplitude of water mass variations has increased. The findings lead to an improved understanding of hydrological processes and their representation in the global model. Finally, the robustness of the results is analysed with respect to GRACE and runoff measurement errors. 
As a main conclusion obtained from the results, not only soil water and snow storage but also groundwater and surface water storage have to be included in the comparison of the modelled and GRACE-derived total water budget data. Regarding model calibration, the regionally varying distribution of parameter sensitivity suggests tuning only parameters of important processes within each region. Furthermore, observations of single storage components beside runoff are necessary to improve signal amplitudes and timing of simulated TWSV as well as to evaluate them with higher accuracy. The results of this work highlight the valuable nature of GRACE data when merged into large-scale hydrological modelling and depict methods to improve large-scale hydrological models.}, language = {en} } @phdthesis{Ganesan2010, author = {Ganesan, Lakshmi Meena}, title = {Coupling of the electrical, mechanical and optical response in polymer/liquid-crystal composites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41572}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Micrometer-sized liquid-crystal (LC) droplets embedded in a polymer matrix may enable optical switching in the composite film through the alignment of the LC director along an external electric field. When a ferroelectric material is used as host polymer, the electric field generated by the piezoelectric effect can orient the director of the LC under an applied mechanical stress, making these materials interesting candidates for piezo-optical devices. In this work, polymer-dispersed liquid crystals (PDLCs) are prepared from poly(vinylidene fluoride-trifluoroethylene) (P(VDF-TrFE)) and a nematic liquid crystal (LC). The anchoring effect is studied by means of dielectric relaxation spectroscopy. Two dispersion regions are observed in the dielectric spectra of the pure P(VDF-TrFE) film. They are related to the glass transition and to a charge-carrier relaxation, respectively. 
In PDLC films containing 10 and 60 wt\% LC, an additional, bias-field-dependent relaxation peak is found that can be attributed to the motion of LC molecules. Due to the anchoring effect of the LC molecules, this relaxation process is slowed down considerably, when compared with the related process in the pure LC. The electro-optical and piezo-optical behavior of PDLC films containing 10 and 60 wt\% LCs is investigated. In addition to the refractive-index mismatch between the polymer matrix and the LC molecules, the interaction between the polymer dipoles and the LC molecules at the droplet interface influences the light-scattering behavior of the PDLC films. For the first time, it was shown that the electric field generated by the application of a mechanical stress may lead to changes in the transmittance of a PDLC film. Such a piezo-optical PDLC material may be useful e.g. in sensing and visualization applications. Compared to a non-polar matrix polymer, the polar matrix polymer exhibits a strong interaction with the LC molecules at the polymer/LC interface which affects the electro-optical effect of the PDLC films and prevents a larger increase in optical transmission.}, language = {en} } @phdthesis{Ishebabi2010, author = {Ishebabi, Harold}, title = {Architecture synthesis for adaptive multiprocessor systems on chip}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41316}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis presents methods for automated synthesis of flexible chip multiprocessor systems from parallel programs targeted at FPGAs to exploit both task-level parallelism and architecture customization. Automated synthesis is necessitated by the complexity of the design space. 
A detailed description of the design space is provided in order to determine which parameters should be modeled to facilitate automated synthesis by optimizing a cost function, the emphasis being placed on inclusive modeling of parameters from application, architectural and physical subspaces, as well as their joint coverage in order to avoid pre-constraining the design space. Given a parallel program and an IP library, the automated synthesis problem is to simultaneously (i) select processors, (ii) map and schedule tasks to them, and (iii) select one or several networks for inter-task communications such that design constraints and optimization objectives are met. The research objective in this thesis is to find a suitable model for automated synthesis, and to evaluate methods of using the model for architectural optimizations. Our contributions are a holistic approach for the design of such systems, corresponding models to facilitate automated synthesis, evaluation of optimization methods using state of the art integer linear and answer set programming, as well as the development of synthesis heuristics to solve runtime challenges.}, language = {en} } @phdthesis{Childs2010, author = {Childs, Liam H.}, title = {Bioinformatics approaches to analysing RNA mediated regulation of gene expression}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41284}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The genome can be considered the blueprint for an organism. Composed of DNA, it harbours all organism-specific instructions for the synthesis of all structural components and their associated functions. The role of carriers of actual molecular structure and functions was believed to be exclusively assumed by proteins encoded in particular segments of the genome, the genes. 
In the process of converting the information stored in genes into functional proteins, RNA - a third major molecule class - was discovered early on to act as a messenger by copying the genomic information and relaying it to the protein-synthesizing machinery. Furthermore, RNA molecules were identified to assist in the assembly of amino acids into native proteins. For a long time, these - rather passive - roles were thought to be the sole purpose of RNA. However, in recent years, new discoveries have led to a radical revision of this view. First, RNA molecules with catalytic functions - thought to be the exclusive domain of proteins - were discovered. Then, scientists realized that much more of the genomic sequence is transcribed into RNA molecules than there are proteins in cells, begging the question what the function of all these molecules is. Furthermore, very short and altogether new types of RNA molecules seemingly playing a critical role in orchestrating cellular processes were discovered. Thus, RNA has become a central research topic in molecular biology, even to the extent that some researchers dub cells as "RNA machines". This thesis aims to contribute towards our understanding of RNA-related phenomena by applying Bioinformatics means. First, we performed a genome-wide screen to identify sites at which the chemical composition of DNA (the genotype) critically influences phenotypic traits (the phenotype) of the model plant Arabidopsis thaliana. Whole genome hybridisation arrays were used and an informatics strategy developed, to identify polymorphic sites from hybridisation to genomic DNA. Following this approach, not only were genotype-phenotype associations discovered across the entire Arabidopsis genome, but also regions not currently known to encode proteins, thus representing candidate sites for novel RNA functional molecules. By statistically associating them with phenotypic traits, clues as to their particular functions were obtained. 
Furthermore, these candidate regions were subjected to a novel RNA-function classification prediction method developed as part of this thesis. While determining the chemical structure (the sequence) of candidate RNA molecules is relatively straightforward, the elucidation of its structure-function relationship is much more challenging. Towards this end, we devised and implemented a novel algorithmic approach to predict the structural and, thereby, functional class of RNA molecules. In this algorithm, the concept of treating RNA molecule structures as graphs was introduced. We demonstrate that this abstraction of the actual structure leads to meaningful results that may greatly assist in the characterization of novel RNA molecules. Furthermore, by using graph-theoretic properties as descriptors of structure, we identified particular structural features of RNA molecules that may determine their function, thus providing new insights into the structure-function relationships of RNA. The method (termed Grapple) has been made available to the scientific community as a web-based service. RNA has taken centre stage in molecular biology research and novel discoveries can be expected to further solidify the central role of RNA in the origin and support of life on earth. As illustrated by this thesis, Bioinformatics methods will continue to play an essential role in these discoveries.}, language = {en} } @phdthesis{Yin2010, author = {Yin, Fan}, title = {Mathematic approaches for the calibration of the CHAMP satellite magnetic field measurements}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41201}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {CHAMP (CHAllenging Minisatellite Payload) is a German small satellite mission to study the earth's gravity field, magnetic field and upper atmosphere. Thanks to the good condition of the satellite so far, the planned 5-year mission is extended to year 2009. 
The satellite continuously provides a large quantity of measurement data for the purpose of Earth study. The measurements of the magnetic field are undertaken by two Fluxgate Magnetometers (vector magnetometer) and one Overhauser Magnetometer (scalar magnetometer) flown on CHAMP. In order to ensure the quality of the data during the whole mission, the calibration of the magnetometers has to be performed routinely in orbit. The scalar magnetometer serves as the magnetic reference and its readings are compared with the readings of the vector magnetometer. The readings of the vector magnetometer are corrected by the parameters that are derived from this comparison, which is called the scalar calibration. In the routine processing, these calibration parameters are updated every 15 days by means of scalar calibration. There are also magnetic effects coming from the satellite which disturb the measurements. Most of them have been characterized during tests before launch. Among them are the remanent magnetization of the spacecraft and fields generated by currents. They are all considered to be constant over the mission life. The 8 years of operation experience allow us to investigate the long-term behaviors of the magnetometers and the satellite systems. According to the investigation, it was found that for example the scale factors of the FGM show obvious long-term changes which can be described by logarithmic functions. The other parameters (offsets and angles between the three components) can be considered constant. If these continuous parameters are applied for the FGM data processing, the disagreement between the OVM and the FGM readings is limited to \pm1nT over the whole mission. This demonstrates that the magnetometers on CHAMP exhibit a very good stability. However, the daily correction of the parameter Z component offset of the FGM improves the agreement between the magnetometers markedly. The Z component offset plays a very important role for the data quality. 
It exhibits a linear relationship with the standard deviation of the disagreement between the OVM and the FGM readings. After Z offset correction, the errors are limited to \pm0.5nT (equivalent to a standard deviation of 0.2nT). We improved the corrections of the spacecraft field which are not taken into account in the routine processing. Such disturbance fields, e.g. from the power supply system of the satellite, show some systematic errors in the FGM data and are misinterpreted in 9-parameter calibration, which brings false local time related variation of the calibration parameters. These corrections are made by applying a mathematical model to the measured currents. This non-linear model is derived from an inversion technique. If the disturbance fields of the satellite body are fully corrected, the standard deviation of scalar error \triangle B remains about 0.1nT. Additionally, in order to keep the OVM readings a reliable standard, the imperfect coefficients of the torquer current correction for the OVM are redetermined by solving a minimization problem. The temporal variation of the spacecraft remanent field is investigated. It was found that the average magnetic moment of the magneto-torquers reflects well the moment of the satellite. This allows for a continuous correction of the spacecraft field. The reasons for the possible unknown systematic error are discussed in this thesis. Particularly, both temperature uncertainties and time errors have influence on the FGM data. Based on the results of this thesis the data processing of future magnetic missions can be designed in an improved way. In particular, the upcoming ESA mission Swarm can take advantage of our findings and provide all the auxiliary measurements needed for a proper recovery of the ambient magnetic field.}, language = {en} }