@phdthesis{Hutter2014, author = {Hutter, Anne}, title = {Unveiling the epoch of reionization by simulations and high-redshift galaxies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-76998}, school = {Universit{\"a}t Potsdam}, pages = {vi, 155}, year = {2014}, abstract = {The Epoch of Reionization marks, after recombination, the second major change in the ionization state of the universe: the transition from a neutral to an ionized state. It starts with the appearance of the first stars and galaxies; a fraction of the high-energy photons emitted from galaxies permeates the intergalactic medium (IGM) and gradually ionizes the hydrogen, until the IGM is completely ionized at z~6 (Fan et al., 2006). While the progress of reionization is driven by galaxy evolution, it changes the ionization and thermal state of the IGM substantially and affects subsequent structure and galaxy formation by various feedback mechanisms. Understanding this interaction between reionization and galaxy formation is further impeded by limited knowledge of high-redshift galactic properties such as the dust distribution and the escape fraction of ionizing photons. Lyman Alpha Emitters (LAEs) represent a sample of high-redshift galaxies that is sensitive to all these galactic properties and to the effects of reionization. In this thesis we aim to understand the progress of reionization by performing cosmological simulations, which allow us to investigate the limits of constraining reionization with high-redshift galaxies such as LAEs, and to examine how galactic properties and the ionization state of the IGM affect the visibility and observed quantities of LAEs and Lyman Break galaxies (LBGs). In the first part of this thesis we focus on performing radiative transfer calculations to simulate reionization. We have developed a mapping-sphere scheme which, starting from spherically averaged temperature and density fields, uses our 1D radiative transfer code to compute the effect of each source on the IGM temperature and ionization (HII, HeII, HeIII) profiles, which are subsequently mapped onto a grid. Furthermore, we have updated the 3D Monte Carlo radiative transfer code pCRASH, enabling detailed reionization simulations that take individual source characteristics into account. In the second part of this thesis we perform a reionization simulation by post-processing a smoothed-particle hydrodynamics (SPH) simulation (GADGET-2) with 3D radiative transfer (pCRASH), where the ionizing sources are modelled according to the characteristics of the stellar populations in the hydrodynamical simulation. Following the ionization fractions of hydrogen (HI) and helium (HeII, HeIII) and the temperature in our simulation, we find that reionization starts at z~11 and ends at z~6, and that high-density regions near sources are ionized earlier than low-density regions far from sources. In the third part of this thesis we couple the cosmological SPH simulation and the radiative transfer simulations with a physically motivated, self-consistent model for LAEs, in order to understand the effects of the ionization state of the IGM, the escape fraction of ionizing photons from galaxies, and dust in the interstellar medium (ISM) on the visibility of LAEs. 
Comparison of our model's results with the LAE Lyman Alpha (Lya) and UV luminosity functions at z~6.6 reveals a three-dimensional degeneracy between the ionization state of the IGM, the ionizing photon escape fraction and the ISM dust distribution, which implies that LAEs act not only as tracers of reionization but also of the ionizing photon escape fraction and of the ISM dust distribution. This degeneracy is not broken even when we compare the simulated with the observed clustering of LAEs at z~6.6. However, our results show that reionization has the largest impact on the amplitude of the LAE angular correlation functions, and its imprints are clearly distinguishable from those of properties on galactic scales. These results show that reionization cannot be constrained tightly by exclusively using LAE observations. Further observational constraints, e.g. tomographies of the redshifted hydrogen 21cm line, are required. In addition, we use our LAE model to probe the question of when a galaxy is visible as a LAE or a LBG. Within our model, galaxies above a critical stellar mass can produce enough luminosity to be visible as a LBG and/or a LAE. Finding an increasing duty cycle of LBGs with Lya emission as the UV magnitude or stellar mass of the galaxy rises, our model reveals that the brightest (and most massive) LBGs most often show Lya emission. Predicting the Lya equivalent width (Lya EW) distribution and the fraction of LBGs showing Lya emission at z~6.6, we reproduce the observational trend of the Lya EWs with UV magnitude. However, the Lya EWs of the UV-brightest LBGs exceed observations and can only be reconciled by accounting for an increased Lya attenuation of massive galaxies, which implies that the observed Lya-brightest LAEs do not necessarily coincide with the UV-brightest galaxies. Our analysis of the dependencies of LAE observables on the properties of the galactic and intergalactic medium, and of the LAE-LBG connection, enhances our understanding of the nature of LAEs.}, language = {en} } @phdthesis{Sen2014, author = {Sen, Ali Tolga}, title = {Inversion of seismic source parameters for weak mining-induced and natural earthquakes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71914}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The purpose of this thesis is to develop an automated inversion scheme to derive point and finite source parameters for weak earthquakes, here meaning earthquakes with magnitudes at or below the lower magnitude threshold of standard source inversion routines. The adopted inversion approaches rely entirely on existing inversion software, with the methodological work mostly targeting the development and tuning of optimized inversion flows. The resulting inversion scheme is tested on very different datasets, which allows the source inversion problem to be discussed at different scales. In the first application, dealing with mining-induced seismicity, the source parameter determination is addressed at a local scale, with source-sensor distances of less than 3 km. In this context, weak seismicity corresponds to events below magnitude MW 2.0, which are rarely targeted by automated source inversion routines. The second application considers a regional dataset, namely the aftershock sequence of the 2010 Maule earthquake (Chile), using broadband stations at regional distances, below 300 km. In this case, the magnitudes of the target aftershocks range down to MW 4.0. 
This dataset is here considered a weak-seismicity case, since such moderate seismicity is generally investigated only by moment tensor inversion routines, with no attempt to resolve source duration or finite source parameters. In this work, automated multi-step inversion schemes are applied to both datasets with the aim of resolving point source parameters, using both double couple (DC) and full moment tensor (MT) models, as well as source duration and finite source parameters. A major result of the analysis of weaker events is the increased size of the resulting moment tensor catalogues, whose interpretation may become non-trivial. For this reason, a novel focal mechanism clustering approach is used to automatically classify focal mechanisms, allowing the investigation of the most relevant and repetitive rupture features. The inversion of the mining-induced seismicity dataset reveals the repetitive occurrence of similar rupture processes, where the source geometry is controlled by the shape of the mined panel. Moreover, moment tensor solutions indicate a significant contribution of tensile processes. The second application also highlights some characteristic geometrical features of the fault planes, which show a general consistency with the orientation of the slab. The additional inversion for source duration allowed us to verify, for moment-normalized earthquakes in subduction zones, the empirical correlation between decreasing rupture duration and increasing source depth, which had so far only been observed for larger events.}, language = {en} } @phdthesis{Jueppner2014, author = {J{\"u}ppner, Jessica}, title = {Characterization of metabolomic dynamics in synchronized Chlamydomonas reinhardtii cell cultures and the impact of TOR inhibition on cell cycle, proliferation and growth}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-76923}, school = {Universit{\"a}t Potsdam}, pages = {VI, 153}, year = {2014}, abstract = {The adaptation of cell growth and proliferation to environmental changes is essential for the survival of biological systems. The evolutionarily conserved Ser/Thr protein kinase "Target of Rapamycin" (TOR) has emerged as a major signaling node that integrates the sensing of numerous growth signals into the coordinated regulation of cellular metabolism and growth. Although the TOR signaling pathway has been widely studied in heterotrophic organisms, research on TOR in photosynthetic eukaryotes has been hampered by the reported resistance of land plants to rapamycin. Thus, the finding that Chlamydomonas reinhardtii is sensitive to rapamycin establishes this unicellular green alga as a useful model system to investigate TOR signaling in photosynthetic eukaryotes. The observation that rapamycin does not fully arrest Chlamydomonas growth, in contrast to observations made in other organisms, prompted us to investigate the regulatory function of TOR in Chlamydomonas in the context of the cell cycle. Therefore, a growth system that allowed synchronous growth under largely unperturbed cultivation in a fermenter was set up, and the synchronized cells were characterized in detail. In a highly resolved kinetic study, the synchronized cells were analyzed for changes in cytological parameters, such as cell number and size distribution, and for their starch content. Furthermore, we applied mass spectrometric analysis to profile primary and lipid metabolism. 
This system was then used to analyze the response dynamics of the Chlamydomonas metabolome and lipidome to TOR inhibition by rapamycin. The results show that TOR inhibition reduces cell growth, delays cell division and daughter cell release, and results in a 50\% reduction in cell number at the end of the cell cycle. Consistent with the growth phenotype, we observed strong changes in carbon and nitrogen partitioning in the direction of rapid conversion into carbon and nitrogen stores, through an accumulation of starch, triacylglycerol and arginine. Interestingly, it seems that the conversion of carbon into triacylglycerol occurred faster than that into starch after TOR inhibition, which may indicate a more dominant role of TOR in the regulation of TAG biosynthesis than in that of starch. This study shows clearly, for the first time, a complex picture of dynamic metabolic and lipidomic changes during the cell cycle of Chlamydomonas reinhardtii, and furthermore reveals a complex regulation and adjustment of metabolite pools and lipid composition in response to TOR inhibition.}, language = {en} } @phdthesis{Meissner2014, author = {Meissner, Sven}, title = {Implications of Microcystin Production in Microcystis aeruginosa PCC 7806}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75199}, school = {Universit{\"a}t Potsdam}, pages = {VII, 141}, year = {2014}, abstract = {Cyanobacteria produce about 40 percent of the world's primary biomass, but also a variety of often toxic peptides such as microcystin. Mass developments, so-called blooms, can pose a real threat to the drinking water supply in many parts of the world. This study aimed at characterizing the biological function of microcystin production in Microcystis aeruginosa, one of the most common bloom-forming cyanobacteria. First, the effect of elevated light intensity on microcystin production and its binding to cellular proteins was studied. To this end, conventional microcystin quantification techniques were combined with protein-biochemical methods. RubisCO, the key enzyme for primary carbon fixation, was a major microcystin interaction partner. High light exposure strongly stimulated microcystin-protein interactions. Up to 60 percent of the total cellular microcystin was detected bound to proteins, i.e. inaccessible to standard quantification procedures. Underestimation of total microcystin contents when neglecting the protein fraction was also demonstrated in field samples. Finally, an immunofluorescence-based method was developed to identify microcystin-producing cyanobacteria in mixed populations. The high-light-induced microcystin interaction with proteins suggested an impact of the secondary metabolite on the primary metabolism of Microcystis, e.g. by modulating the activity of enzymes. To address this question, a comprehensive GC/MS-based approach was conducted to compare the accumulation of metabolites in the wild type of Microcystis aeruginosa PCC 7806 and the microcystin-deficient ΔmcyB mutant. Of all 501 detected non-redundant metabolites, 85 (17 percent) accumulated to significantly different levels in the two genotypes upon high light exposure. Accumulation of compatible solutes in the ΔmcyB mutant suggests a role of microcystin in fine-tuning the metabolic flow to prevent stress related to excess light, high oxygen concentration and carbon limitation. Co-analysis of the widely used model cyanobacterium Synechocystis PCC 6803 revealed profound metabolic differences between species of cyanobacteria. 
Whereas Microcystis channeled more resources towards carbohydrate synthesis, Synechocystis invested more in amino acids. These findings were supported by electron microscopy of high-light-treated cells and the quantification of storage compounds. While Microcystis accumulated mainly glycogen, to about 8.5 percent of its fresh weight within three hours, Synechocystis produced higher amounts of cyanophycin. The results showed that the characterization of species-specific metabolic features deserves more attention with regard to the biotechnological use of cyanobacteria.}, language = {en} } @phdthesis{Schmidt2014, author = {Schmidt, Lukas}, title = {Aerosols and boundary layer structure over Arctic sea ice based on airborne lidar and dropsonde measurements}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75076}, school = {Universit{\"a}t Potsdam}, pages = {vii, 98, xiii}, year = {2014}, abstract = {The atmosphere over the Arctic Ocean is strongly influenced by the distribution of sea ice and open water. Leads in the sea ice produce strong convective fluxes of sensible and latent heat and release aerosol particles into the atmosphere. They increase the occurrence of clouds and modify the structure and characteristics of the atmospheric boundary layer (ABL), thereby influencing the Arctic climate. In the course of this study, aircraft measurements were performed over the western Arctic Ocean as part of the campaign PAMARCMIP 2012 of the Alfred Wegener Institute for Polar and Marine Research (AWI). Backscatter from aerosols and clouds within the lower troposphere and the ABL was measured with the nadir-pointing Airborne Mobile Aerosol Lidar (AMALi), and dropsondes were launched to obtain profiles of meteorological variables. Furthermore, in situ measurements of aerosol properties, meteorological variables and turbulence were part of the campaign. The measurements covered a broad range of atmospheric and sea ice conditions. In this thesis, properties of the ABL over Arctic sea ice, with a focus on the influence of open leads, are studied based on the data from the PAMARCMIP campaign. The height of the ABL is determined by different methods that are applied to dropsonde and AMALi backscatter profiles. ABL heights are compared for different flights representing different conditions of the atmosphere and of sea ice and open water influence. The different criteria applied for the ABL height agree with each other to widely varying degrees, depending on the characteristics of the ABL and its history. It is shown that ABL height determination from lidar backscatter by methods commonly used under mid-latitude conditions is applicable to the Arctic ABL only under certain conditions. Aerosols or clouds within the ABL are needed as a tracer for ABL height detection from backscatter. Hence an aerosol source close to the surface is necessary; such a source is typically found under the influence of open water and therefore convective conditions. However, it is not always possible to distinguish residual layers from the actual ABL. Stable boundary layers are generally difficult to detect. To illustrate the complexity of the Arctic ABL and the processes therein, four case studies are analyzed, each of which represents a snapshot of the interplay between the atmosphere and the underlying sea ice or water surface. Influences of leads and open water on the aerosols and clouds within the ABL are identified and discussed. 
Leads are observed to cause the formation of fog and cloud layers within the ABL through the release of humidity. Furthermore, they decrease the stability and increase the height of the ABL and consequently facilitate the entrainment of air and aerosol layers from the free troposphere.}, language = {en} } @phdthesis{Sarkar2014, author = {Sarkar, Saswati}, title = {Holocene variations in the strength of the Indian Monsoon system}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-74905}, school = {Universit{\"a}t Potsdam}, pages = {ix, 114}, year = {2014}, abstract = {The monsoon is an important component of the Earth's climate system. It has played a vital role in the development and sustenance of the largely agro-based economy of India. A better understanding of past variations in the Indian Summer Monsoon (ISM) is necessary to assess its nature under global warming scenarios. However, our knowledge of the spatiotemporal patterns of past ISM strength, as inferred from proxy records, is limited by the lack of high-resolution paleo-hydrological records from the core monsoon domain. In this thesis I aim to improve our understanding of Holocene ISM variability in the core 'monsoon zone' (CMZ) of India. To achieve this goal, I tried to understand the modern, and thereafter to reconstruct the Holocene, monsoonal hydrology by studying surface sediments and a high-resolution sedimentary record from the saline-alkaline Lonar crater lake, central India. My approach relies on analyzing stable carbon and hydrogen isotope ratios of sedimentary lipid biomarkers to track past hydrological changes. In order to evaluate the relationship between the modern ecosystem and the hydrology of the lake, I studied the distribution of lipid biomarkers in the modern ecosystem and compared it to that in lake surface sediments. The major plants of the dry deciduous mixed forest type produced a greater amount of leaf wax n-alkanes and a greater fraction of n-C31 and n-C33 alkanes relative to n-C27 and n-C29. Relatively high average chain length (ACL) values (29.6-32.8) for these plants seem common for vegetation from an arid and warm climate. Additionally, I found that human influence and the subsequent nutrient supply result in increased lake primary productivity, leading to an unusually high concentration of tetrahymanol, a biomarker for salinity and water column stratification, in the nearshore sediments. Due to this inhomogeneous deposition of tetrahymanol in modern sediments, I hypothesize that lake-level fluctuations may affect aquatic lipid biomarker distributions in lacustrine sediments, in addition to source changes. I reconstructed centennial-scale hydrological variability associated with changes in the intensity of the ISM based on a record of leaf wax and aquatic biomarkers and their stable carbon (δ13C) and hydrogen (δD) isotopic composition from a 10 m long sediment core from the lake. I identified three main periods of distinct hydrology over the Holocene in central India. The period between 10.1 and 6 cal. ka BP was likely the wettest during the Holocene. Lower ACL index values (29.4 to 28.6) of leaf wax n-alkanes and their negative δ13C values (-34.8 per mille to -27.8 per mille) indicated the dominance of woody C3 vegetation in the catchment, and negative δDwax (average for leaf wax n-alkanes) values (-171 per mille to -147 per mille) argue for a wet period due to an intensified monsoon. After 6 cal. 
ka BP, a gradual shift to less negative δ13C values (particularly for the grass-derived n-C31) and the appearance of the triterpene lipid tetrahymanol, generally considered a marker for salinity and water column stratification, marked the onset of drier conditions. At 5.1 cal. ka BP, an increasing flux of leaf wax n-alkanes along with the highest flux of tetrahymanol indicated proximity of the lakeshore to the center due to a major lake-level decrease. Rapid fluctuations in the abundance of both terrestrial and aquatic biomarkers between 4.8 and 4 cal. ka BP indicated an unstable lake ecosystem, culminating in a transition to arid conditions. A pronounced shift to less negative δ13C values, in particular for n-C31 (-25.2 per mille to -22.8 per mille), over this period indicated a change of the dominant vegetation to C4 grasses. Along with a 40 per mille increase in leaf wax n-alkane δD values, which likely resulted from less rainfall and/or higher plant evapotranspiration, I interpret this period to reflect the driest conditions in the region during the last 10.1 ka. This transition led to protracted late Holocene arid conditions and the establishment of a permanently saline lake, as supported by the high abundance of tetrahymanol. A late Holocene peak of cyanobacterial biomarker input at 1.3 cal. ka BP might represent an event of lake eutrophication, possibly due to human impact and the onset of cattle/livestock farming in the catchment. The most intriguing feature of the mid-Holocene driest period was the high-amplitude, rapid fluctuations in δDwax values, probably due to a change in the moisture source and/or precipitation seasonality. I hypothesize that orbitally induced weakening of the summer solar insolation and the associated reorganization of the general atmospheric circulation were responsible for an unstable hydroclimate in the mid-Holocene in the CMZ. My findings shed light on the sequence of changes during mean-state changes of the monsoonal system once an insolation-driven threshold has been passed, and show that small changes in solar insolation can be associated with major environmental changes and large fluctuations in moisture source, a scenario that may be relevant with respect to future changes in the ISM system.}, language = {en} } @phdthesis{Strauss2014, author = {Strauß, Jens}, title = {Organic carbon in ice-rich permafrost}, doi = {10.25932/publishup-7523}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75236}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 107, 102}, year = {2014}, abstract = {Permafrost, defined as ground that is frozen for at least two consecutive years, is a distinct feature of the terrestrial unglaciated Arctic. It covers approximately one quarter of the land area of the Northern Hemisphere (23,000,000 km²). Arctic landscapes, especially those underlain by permafrost, are threatened by climate warming and may degrade in different ways, including active layer deepening, thermal erosion, and the development of rapid thaw features. In Siberian and Alaskan late Pleistocene ice-rich Yedoma permafrost, rapid and deep thaw processes (called thermokarst) can mobilize deep organic carbon (below 3 m depth) by surface subsidence due to the loss of ground ice. Increased permafrost thaw could cause a feedback loop of global significance if the stored frozen organic carbon is reintroduced into the active carbon cycle as greenhouse gases, accelerating warming and inducing further permafrost thaw and carbon release. 
To address this concern, the major objective of the thesis was to enhance the understanding of the origin of Yedoma as well as to assess the associated organic carbon pool size and carbon quality (concerning degradability). The key research questions were: - How did Yedoma deposits accumulate? - How much organic carbon is stored in the Yedoma region? - What is the susceptibility of the Yedoma region's carbon to future decomposition? To address these three research questions, an interdisciplinary approach was applied, including detailed field studies and sampling in Siberia and Alaska as well as methods of sedimentology, organic biogeochemistry, remote sensing, statistical analyses, and computational modeling. To provide a pan-Arctic context, this thesis additionally includes results both from a newly compiled northern circumpolar carbon database and from a model assessment of carbon fluxes in a warming Arctic. The Yedoma samples show a homogeneous grain-size composition. All samples were poorly sorted with a multi-modal grain-size distribution, indicating various (re-) transport processes. This contradicts the popular pure loess deposition hypothesis for the origin of Yedoma permafrost. The absence of large-scale grinding processes via glaciers and ice sheets in the northeast Siberian lowlands, processes which are necessary to create loess as a material source, suggests a polygenetic origin of the Yedoma deposits. Based on the largest available data set of the key parameters, including organic carbon content, bulk density, ground ice content, and deposit volume (thickness and coverage) from Siberian and Alaskan study sites, this thesis further shows that deep frozen organic carbon in the Yedoma region consists of two distinct major reservoirs, Yedoma deposits and thermokarst deposits (formed in thaw-lake basins). Yedoma deposits contain ~80 Gt and thermokarst deposits ~130 Gt organic carbon, or a total of ~210 Gt. Depending on the approach used for calculating uncertainty, the range for the total Yedoma region carbon store is ±75 \% and ±20 \% for conservative single and multiple bootstrapping calculations, respectively. Although these findings reduce the Yedoma region carbon pool by nearly a factor of two compared to previous estimates, this frozen organic carbon is still capable of inducing a permafrost carbon feedback to climate warming. The complete northern circumpolar permafrost region contains between 1100 and 1500 Gt organic carbon, of which ~60 \% is perennially frozen and decoupled from the short-term carbon cycle. When this carbon is thawed and reintroduced into the active carbon cycle, the quality of the organic matter becomes relevant. Furthermore, investigations of the organic matter quality of Yedoma and thermokarst deposits showed that their organic matter exhibits no depth-dependent quality trend. This is evidence that after freezing, the ancient organic matter is preserved in a state of constant quality. The applied alkane- and fatty-acid-based biomarker proxies, including the carbon-preference and the higher-land-plant-fatty-acid indices, show a broad range of organic matter quality and thus no significantly different quality of the organic matter stored in thermokarst deposits compared to Yedoma deposits. This lack of quality differences shows that the biodegradability of the organic matter depends on different decomposition trajectories and the previous decomposition/incorporation history. 
Finally, the fate of the organic matter has been assessed by implementing deep carbon pools and thermokarst processes in a permafrost carbon model. Under various warming scenarios for the northern circumpolar permafrost region, model results show a carbon release from permafrost regions of up to ~140 Gt and ~310 Gt by the years 2100 and 2300, respectively. The additional warming caused by the carbon release from newly thawed permafrost contributes 0.03 to 0.14°C by the year 2100. The model simulations predict that a further increase by the 23rd century will add 0.4°C to global mean surface air temperatures. In conclusion, Yedoma deposit formation during the late Pleistocene was dominated by water-related (alluvial/fluvial/lacustrine) as well as aeolian processes under periglacial conditions. The circum-Arctic permafrost region, including the Yedoma region, contains a substantial amount of currently frozen organic carbon. The carbon of the Yedoma region is well preserved and therefore available for decomposition after thaw. The absence of a quality-depth trend shows that permafrost preserves the quality of ancient organic matter. When the organic matter is mobilized by deep degradation processes, the northern permafrost region may add up to 0.4°C to global warming by the year 2300.}, language = {en} } @phdthesis{Goldshteyn2014, author = {Goldshteyn, Jewgenij}, title = {Frequency-resolved ultrafast dynamics of phonon polariton wavepackets in the ferroelectric crystals LiNbO₃ and LiTaO₃}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71623}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {During this work I built a four-wave mixing setup for the time-resolved femtosecond spectroscopy of Raman-active lattice modes. This setup enables the study of the selective excitation of phonon polaritons. These quasi-particles arise from the coupling of electromagnetic waves and transverse optical lattice modes, the so-called phonons. The phonon polaritons were investigated in the optically non-linear, ferroelectric crystals LiNbO₃ and LiTaO₃. The direct observation of the frequency shift of the scattered narrow-bandwidth probe pulses proves the role of the Raman interaction in the probing and excitation of phonon polaritons. I compare this experimental method with measurements that use ultra-short laser pulses, where the frequency shift remains obscured by the relatively broad bandwidth of these laser pulses. In an experiment with narrow-bandwidth probe pulses, the Stokes and anti-Stokes intensities are spectrally separated. They are assigned to the corresponding counter-propagating wavepackets of phonon polaritons. Thus, the dynamics of these wavepackets were studied separately. Based on these findings, I develop the mathematical description of the so-called homodyne detection of light for the case of light scattering from counter-propagating phonon polaritons. Further, I modified the broad bandwidth of the ultra-short pump pulses using bandpass filters to generate two pump pulses with non-overlapping spectra. This enables the frequency-selective excitation of polariton modes in the sample, which allowed me to observe even very weak polariton modes in LiNbO₃ or LiTaO₃ that belong to the higher branches of the dispersion relation of phonon polaritons. The experimentally determined dispersion relation of the phonon polaritons could therefore be extended and compared to theoretical models. 
In addition, I determined the frequency-dependent damping of phonon polaritons.}, language = {en} } @phdthesis{Mallonn2014, author = {Mallonn, Matthias}, title = {Ground-based transmission spectroscopy of three inflated Hot Jupiter exoplanets}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-74403}, school = {Universit{\"a}t Potsdam}, pages = {ix, 115}, year = {2014}, abstract = {The characterization of exoplanets is a young and rapidly expanding field in astronomy. It includes a method called transmission spectroscopy that searches for planetary spectral fingerprints in the light received from the host star during the event of a transit. This technique allows for conclusions on the atmospheric composition at the terminator region, the boundary between the day and night side of the planet. Although observationally very challenging, first attempts in the community have been successful in detecting several absorption features in the optical wavelength range. These are, for example, a Rayleigh-scattering slope and absorption by sodium and potassium. However, other objects show a featureless spectrum, indicative of a cloud or haze layer of condensates masking the probed atmospheric layers. In this work, we performed transmission spectroscopy by spectrophotometry of three Hot Jupiter exoplanets. When we began the work on this thesis, optical transmission spectra were available for only two exoplanets. Our main goal was to enlarge the current sample of probed objects to learn by comparative exoplanetology whether certain absorption features are common. We selected the targets HAT-P-12b, HAT-P-19b and HAT-P-32b, for which the detection of atmospheric signatures is feasible with current ground-based instrumentation. In addition, we monitored the host stars of all three objects photometrically to correct for influences of stellar activity if necessary. The obtained measurements of the three objects all favor featureless spectra. A variety of atmospheric compositions can explain the lack of a wavelength-dependent absorption. But the broad trend of featureless spectra in planets over a wide range of temperatures, found in this work and in similar studies recently published in the literature, favors an explanation based on the presence of condensates, even at very low concentrations, in the atmospheres of these close-in gas giants. This result points towards the general conclusion that the capability of transmission spectroscopy to determine the atmospheric composition is limited, at least for measurements at low spectral resolution. In addition, we refined the transit parameters and ephemerides of HAT-P-12b and HAT-P-19b. Our monitoring campaigns allowed for the detection of the stellar rotation period of HAT-P-19 and a refined age estimate. For HAT-P-12 and HAT-P-32, we derived upper limits on their potential variability. The calculated upper limits of systematic effects of starspots on the derived transmission spectra were found to be negligible for all three targets. 
Finally, we discussed the observational challenges in the characterization of exoplanet atmospheres and the importance of correlated noise in the measurements, and formulated suggestions on how to improve the robustness of results in future work.}, language = {en} } @phdthesis{Yeldesbay2014, author = {Yeldesbay, Azamat}, title = {Complex regimes of synchronization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73348}, school = {Universit{\"a}t Potsdam}, pages = {ii, 60}, year = {2014}, abstract = {Synchronization is a fundamental phenomenon in nature. It can be considered a general property of self-sustained oscillators to adjust their rhythm in the presence of an interaction. In this work we investigate complex regimes of synchronization phenomena by means of theoretical analysis, numerical modeling, as well as practical analysis of experimental data. As the subject of our investigation we consider the chimera state, in which spontaneous symmetry-breaking splits an initially homogeneous lattice of oscillators into two parts with different dynamics. The chimera state as a new synchronization phenomenon was first found in systems of non-locally coupled oscillators, and has attracted a lot of attention in the last decade. However, recent studies indicate that this state is also possible in globally coupled systems. In the first part of this work, we show under which conditions the chimera-like state appears in a system of globally coupled identical oscillators with intrinsic delayed feedback. The results of the research explain how initially monostable oscillators become effectively bistable in the presence of the coupling and create a mean field that sustains the coexistence of synchronized and desynchronized states. We also discuss other examples where the chimera-like state appears due to the frequency dependence of the phase shift in the bistable system. In the second part, we investigate this topic further by modeling the influence of an external periodic force on an oscillator with intrinsic delayed feedback. We performed a stability analysis of the synchronized state and constructed Arnold tongues. The results explain the formation of the chimera-like state and the hysteretic behavior of the synchronization area. We also consider two sets of oscillator parameters with symmetric and asymmetric Arnold tongues, which correspond to the mono- and bistable regimes of the oscillator. In the third part, we present the results of work done in collaboration with our colleagues from the Psychology Department of the University of Potsdam. The project aimed to study the effect of the cardiac rhythm on human perception of time using synchronization analysis. For our part, we performed a statistical analysis of the data obtained from an experiment on a free time-interval reproduction task. We examined how one's heartbeat influences time perception and searched for possible phase synchronization between heartbeat cycles and time reproduction responses. The findings support the prediction that cardiac cycles can serve as input signals and are used for the reproduction of time intervals in the range of several seconds.}, language = {en} } @phdthesis{Feld2014, author = {Feld, Christian}, title = {Crustal structure of the Eratosthenes Seamount, Cyprus and S. 
Turkey from an amphibian wide-angle seismic profile}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73479}, school = {Universit{\"a}t Potsdam}, pages = {xi, 131}, year = {2014}, abstract = {In March 2010, the project CoCoCo (incipient COntinent-COntinent COllision) recorded a 650 km long amphibian N-S wide-angle seismic profile, extending from the Eratosthenes Seamount (ESM) across Cyprus and southern Turkey to the Anatolian plateau. The aim of the project is to reveal the impact of the transition from subduction to continent-continent collision of the African plate with the Cyprus-Anatolian plate. A visual quality check, frequency analysis and filtering were applied to the seismic data and revealed good data quality. Subsequent first-break picking, finite-difference ray tracing and inversion of the offshore wide-angle data lead to a first-arrival tomographic model. This model reveals (1) P-wave velocities lower than 6.5 km/s in the crust, (2) a variable crustal thickness of about 28 - 37 km and (3) an upper crustal reflection at 5 km depth beneath the ESM. Two land shots in Turkey, also recorded on Cyprus, airgun shots south of Cyprus, and geological and previous seismic investigations provide the information needed to derive a layered velocity model beneath the Anatolian plateau and for the ophiolite complex on Cyprus. The analysis of the reflections provides evidence for a north-dipping plate subducting beneath Cyprus. The main features of this layered velocity model are (1) an upper and lower crust with large lateral changes in velocity structure and thickness, (2) a Moho depth of about 38 - 45 km beneath the Anatolian plateau, (3) a shallow north-dipping subducting plate below Cyprus with an increasing dip and (4) a typical ophiolite sequence on Cyprus with a total thickness of about 12 km. The offshore-onshore seismic data complete and improve the information about the velocity structure beneath Cyprus and the deeper part of the offshore tomographic model. Thus, the wide-angle seismic data provide detailed insights into the 2-D geometry and velocity structures of the uplifted and overriding Cyprus-Anatolian plate. Subsequent gravity modelling confirms and extends the crustal P-wave velocity model. The deeper part of the subducting plate is constrained by the gravity data and has a dip angle of ~ 28°. Finally, an integrated analysis of the geophysical and geological information allows a comprehensive interpretation of the crustal structure related to the collision process.}, language = {en} } @phdthesis{Schulz2014, author = {Schulz, Anneli}, title = {Search for gamma-ray emission from bow shocks of runaway stars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73905}, school = {Universit{\"a}t Potsdam}, pages = {123}, year = {2014}, abstract = {The mystery of the origin of cosmic rays has been tackled for more than a hundred years and is still not solved. Cosmic rays are detected with energies spanning more than 10 orders of magnitude and reaching up to ~10²¹ eV, far higher than any man-made accelerator can reach. Different theories on the astrophysical objects and processes creating such highly energetic particles have been proposed. A very prominent explanation for a process producing highly energetic particles is shock acceleration. The observation of high-energy gamma rays from supernova remnants, some of them revealing a shell-like structure, is clear evidence that particles are accelerated to ultrarelativistic energies in the shocks of these objects. 
The environments of supernova remnants are complex and challenge detailed modelling of the processes leading to high-energy gamma-ray emission. The study of shock acceleration at bow shocks, created by the supersonic movement of individual stars through the interstellar medium, offers a unique possibility to determine the physical properties of shocks in a less complex environment. The shocked medium is heated by the stellar and the shock-excited radiation, leading to thermal infrared emission. Twenty-eight bow shocks have been discovered through their infrared emission. Nonthermal radiation at radio and X-ray wavelengths has been detected from two bow shocks, pointing to the existence of relativistic particles in these systems. Theoretical models of the emission processes predict high-energy and very-high-energy emission at a flux level within reach of current instruments. This work presents the search for gamma-ray emission from bow shocks of runaway stars in the energy regime from 100 MeV to ~100 TeV. The search is performed with the Large Area Telescope (LAT) on board the Fermi satellite and the H.E.S.S. telescopes located in the Khomas Highland in Namibia. The Fermi-LAT was launched in 2008 and has been continuously scanning the sky since then. It detects photons with energies from 20 MeV to over 300 GeV and has an unprecedented sensitivity. The all-sky coverage allows us to study all 28 bow shocks of runaway stars listed in the E-BOSS catalogue of infrared bow shocks. No significant emission was detected from any of the objects, although it is predicted by several theoretical models describing the non-thermal emission of bow shocks of runaway stars. The H.E.S.S. experiment is the most sensitive system of imaging atmospheric Cherenkov telescopes. It detects photons from several tens of GeV to ~100 TeV. Seven of the bow shocks have been observed with H.E.S.S., and the data analysis is presented in this thesis. The analyses of the very-high-energy data did not reveal significant emission from any of the sources either. This work presents the first systematic search for gamma-ray emission from bow shocks of runaway stars. For the first time, Fermi-LAT data were specifically analysed to reveal emission from bow shocks of runaway stars. In the TeV regime, no searches for emission from these objects have been published so far; the study presented here is the first in this energy regime. The level of the gamma-ray emission from bow shocks of runaway stars is constrained by the calculated upper limits over six orders of magnitude in energy. The upper limits calculated for the bow shocks of runaway stars in the course of this work constrain several models. For the best candidate, ζ Ophiuchi, the upper limits in the Fermi-LAT energy range are lower than the predictions by a factor of ~5. This challenges the assumptions made in this model and gives valuable input for further modelling approaches. The analyses were performed with the software packages provided by the H.E.S.S. and Fermi collaborations. The development of a unified analysis framework for gamma-ray data, namely GammaLib/ctools, is rapidly progressing within the CTA consortium. Recent implementations and cross-checks with current software frameworks are presented in the Appendix.}, language = {en} } @phdthesis{Lindauer2014, author = {Lindauer, T. 
Marius}, title = {Algorithm selection, scheduling and configuration of Boolean constraint solvers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71260}, school = {Universit{\"a}t Potsdam}, pages = {ii, 130}, year = {2014}, abstract = {Boolean constraint solving technology has made tremendous progress over the last decade, leading to industrial-strength solvers, for example, in the areas of answer set programming (ASP), the constraint satisfaction problem (CSP), propositional satisfiability (SAT) and satisfiability of quantified Boolean formulas (QBF). However, in all these areas there exist multiple solving strategies that work well on different applications; no strategy dominates all other strategies. Therefore, no individual solver shows robust state-of-the-art performance in all kinds of applications. Additionally, the question arises of how to choose a well-performing solving strategy for a given application; this is a challenging question even for solver and domain experts. One way to address this issue is the use of portfolio solvers, that is, a set of different solvers or solver configurations. We present three new automatic portfolio methods: (i) automatic construction of parallel portfolio solvers (ACPP) via algorithm configuration, (ii) solving the \$NP\$-hard problem of finding effective algorithm schedules with Answer Set Programming (aspeed), and (iii) a flexible algorithm selection framework (claspfolio2) allowing for a fair comparison of different selection approaches. All three methods show improved performance and robustness in comparison to individual solvers on heterogeneous instance sets from many different applications. Since parallel solvers are important for effectively solving hard problems on parallel computation systems (e.g., multi-core processors), we extend all three approaches to be effectively applicable in parallel settings. We conducted extensive experimental studies on different instance sets from ASP, CSP, MaxSAT, Operations Research (OR), SAT and QBF that indicate an improvement in the state of the art in solving heterogeneous instance sets. Last but not least, from our experimental studies we deduce practical advice regarding the question of when to apply which of our methods.}, language = {en} } @phdthesis{Loersch2014, author = {Loersch, Christian}, title = {Business start-ups and the effect of coaching programs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72605}, school = {Universit{\"a}t Potsdam}, pages = {XII, 282}, year = {2014}, abstract = {Entrepreneurship is known to be a main driver of economic growth. Hence, governments have an interest in supporting and promoting entrepreneurial activities. Start-up subsidies, which have been analyzed extensively, aim only at mitigating the lack of financial capital. However, some entrepreneurs also lack human, social, and managerial capital. One way to address these shortcomings is by subsidizing coaching programs for entrepreneurs. However, theoretical and empirical evidence about business coaching and programs subsidizing coaching is scarce. This dissertation gives an extensive overview of coaching and is the first empirical study for Germany analyzing the effects of coaching programs on their participants. In the theoretical part of the dissertation, the process of a business start-up is described, and it is discussed how, and in which stage of the company's development, coaching can influence entrepreneurial success. 
The concept of coaching is compared to other non-monetary types of support such as training, mentoring, consulting, and counseling. Furthermore, national and international support programs are described. Most programs have either no or small positive effects. However, there is little quantitative evidence in the international literature. In the empirical part of the dissertation, the effectiveness of coaching is examined by evaluating two German coaching programs, which support entrepreneurs via publicly subsidized coaching sessions. One of the programs aims at entrepreneurs who were employed before becoming self-employed, whereas the other program is targeted at formerly unemployed entrepreneurs. The analysis is based on the evaluation of a quantitative and a qualitative dataset. The qualitative data were gathered through intensive one-on-one interviews with coaches and entrepreneurs. These data give detailed insight into the coaching topics, duration, process, and effectiveness, and into the thoughts of coaches and entrepreneurs. The quantitative data include information about 2,936 German-based entrepreneurs. Using propensity score matching, the success of participants in the two coaching programs is compared with that of adequate groups of non-participants. In contrast to many other studies, personality traits are also observed and controlled for in the matching process. The results show that only the program for formerly unemployed entrepreneurs has small positive effects. Participants have a higher survival probability in self-employment and a higher probability of hiring employees than matched non-participants. In contrast, the program for formerly employed individuals has negative effects. Compared to individuals who did not participate in the coaching program, participants have a lower probability of staying in self-employment, lower earned net income, fewer employees and lower life satisfaction. There are several reasons for these differing results of the two programs. First, formerly unemployed individuals have more basic coaching needs than formerly employed individuals. Coaches can satisfy these basic coaching needs, whereas formerly employed individuals have more complex business problems, which are not easily solved by a coaching intervention. Second, the analysis reveals that formerly employed individuals are very successful in general. It is easier to increase the success of formerly unemployed individuals, as they have a lower base level of success than formerly employed individuals. An effect heterogeneity analysis shows that coaching effectiveness differs by region. Coaching for previously unemployed entrepreneurs is especially useful in regions with bad labor market conditions. In summary, in line with the previous literature, it is found that coaching has little effect on the success of entrepreneurs. The previous employment status, the characteristics of the entrepreneur and the regional labor market conditions play a crucial role in the effectiveness of coaching. In conclusion, coaching needs to be well tailored to the individual and applied thoroughly. 
Therefore, governments should design and provide coaching programs only after due consideration.}, language = {en} } @phdthesis{Munack2014, author = {Munack, Henry}, title = {From phantom blocks to denudational noise}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72629}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 172}, year = {2014}, abstract = {Knowing the rates and mechanisms of the geomorphic processes that shape the Earth's surface is crucial to understanding landscape evolution. Modern methods for estimating denudation rates enable us to quantitatively express and compare processes of landscape downwearing that can be traced through time and space—from the seemingly intact, though intensely shattered, phantom blocks of the catastrophically fragmented basal facies of giant rockslides up to denudational noise in orogen-wide data sets averaging over several millennia. This great variety of spatiotemporal scales is both the boon and the bane of studies of geomorphic process rates. Indeed, processes of landscape downwearing can be traced far back in time, helping us to understand the Earth's evolution. Yet this benefit may turn into a drawback due to scaling issues if these rates are to be compared across different observation timescales. This thesis investigates the mechanisms, patterns and rates of landscape downwearing across the Himalaya-Tibet orogen. Accounting for the spatiotemporal variability of denudation processes, this thesis addresses landscape downwearing on three distinctly different spatial scales, starting off at the local scale of individual hillslopes where considerable amounts of debris are generated from rock instantaneously: Rocksliding in active mountains is a major impetus of landscape downwearing. Study I provides a systematic overview of the internal sedimentology of giant rockslide deposits and thus meets the challenge of distinguishing them from macroscopically and microscopically similar glacial deposits, tectonic fault-zone breccias, and impact breccias. This distinction is important to avoid erroneous or misleading deduction of paleoclimatic or tectonic implications. -> Grain-size analysis shows that rockslide-derived micro-breccias closely resemble those from meteorite impacts or tectonic faults. -> Frictionite may occur more frequently than previously assumed. -> M{\"o}ssbauer-spectroscopy-derived results indicate basal rock melting in the absence of water, involving short-term temperatures of >1500°C. Zooming out, Study II tracks the fate of these sediments, using the example of the upper Indus River, NW India. There we use river sand samples from the Indus and its tributaries to estimate basin-averaged denudation rates along a ~320-km reach across the Tibetan Plateau margin, to answer the question of whether incision into the western Tibetan Plateau margin is currently active or not. -> We find an upstream decay of about one order of magnitude—from 110 to 10 mm kyr^-1—in cosmogenic Be-10-derived basin-wide denudation rates across the morphological knickpoint that marks the transition from the Transhimalayan ranges to the Tibetan Plateau. This trend is corroborated by independent bulk petrographic and heavy mineral analysis of the same samples. -> From the observation that tributary-derived basin-wide denudation rates do not increase markedly until ~150-200 km downstream of the topographic plateau margin, we conclude that incision into the Tibetan Plateau is inactive. 
-> Comparing our postglacial Be-10-derived denudation rates to long-term (>10^6 yr) estimates from low-temperature thermochronometry, ranging from 100 to 750 mm kyr^-1, points to an order-of-magnitude decay of rates of landscape downwearing towards the present. We infer that denudation rates must have been higher in the Quaternary, probably promoted by the interplay of glacial and interglacial stages. Our investigation of regional denudation patterns in the upper Indus is, finally, an integral part of Study III, which synthesizes denudation of the Himalaya-Tibet orogen. In order to identify general and time-invariant predictors for Be-10-derived denudation rates, we analyze tectonic, climatic and topographic metrics from an inventory of 297 drainage basins from various parts of the orogen. Aiming to gain insight into the full response distributions of denudation rate to tectonic, climatic and topographic candidate predictors, we apply quantile regression instead of ordinary least squares regression, which has been the standard analysis tool in previous studies that looked for denudation rate predictors. -> We use principal component analysis to reduce our set of 26 candidate predictors, ending up with just three of these: Aridity Index, topographic steepness index, and precipitation of the coldest quarter of the year. -> The topographic steepness index proves to perform best in additive quantile regression. Our consequent prediction of denudation rates on the basin scale involves prediction errors that remain between 5 and 10 mm kyr^-1. -> We conclude that topographic metrics such as river-channel steepness and slope gradient—being representative on the timescales that our cosmogenic Be-10-derived denudation rates integrate over—generally appear to be better suited as predictors than climatic and tectonic metrics based on decadal records.}, language = {en} } @phdthesis{Albrecht2014, author = {Albrecht, Steve}, title = {Generation, recombination and extraction of charges in polymer}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72285}, school = {Universit{\"a}t Potsdam}, pages = {144}, year = {2014}, abstract = {A dramatic efficiency improvement of bulk heterojunction solar cells based on electron-donating conjugated polymers in combination with soluble fullerene derivatives has been achieved over the past years. Certified and reported power conversion efficiencies now reach over 9\% for single junctions and exceed the 10\% benchmark for tandem solar cells. This trend brightens the vision of organic photovoltaics becoming competitive with inorganic solar cells, including the realization of low-cost and large-area organic photovoltaics. For the best-performing organic material systems, the yield of charge generation can be very high. However, a detailed understanding of the free charge carrier generation mechanisms at the donor-acceptor interface, and of the energy losses associated with them, needs to be established. Moreover, organic solar cells are limited by the competition between charge extraction and free charge recombination, accounting for further efficiency losses. A conclusive picture and the development of precise methodologies for investigating the fundamental processes in organic solar cells are crucial for future material design, efficiency optimization, and the implementation of organic solar cells into commercial products. 
In order to advance the development of organic photovoltaics, my thesis focuses on the comprehensive understanding of charge generation, recombination and extraction in organic bulk heterojunction solar cells, summarized in 6 chapters on the cumulative basis of 7 individual publications. The general motivation guiding this work was the realization of an efficient hybrid inorganic/organic tandem solar cell with sub-cells made from amorphous hydrogenated silicon and organic bulk heterojunctions. To realize this project aim, the focus was directed to the low band-gap copolymer PCPDTBT and its derivatives, resulting in the examination of the charge carrier dynamics in PCPDTBT:PC70BM blends in relation to the blend morphology. The phase separation in this blend can be controlled by the processing additive diiodooctane, enhancing domain purity and size. The quantitative investigation of free charge formation was realized by utilizing and improving the time-delayed collection field technique. Interestingly, a pronounced field dependence of the free carrier generation is found for all blends, with the field dependence being stronger without the additive. Also, the bimolecular recombination coefficient for both blends is rather high and increases with decreasing internal field, which we suggest is caused by a negative field dependence of the mobility. The additive speeds up charge extraction, which is rationalized by the threefold increase in mobility. By attaching fluorine within the electron-deficient subunit of PCPDTBT, a new polymer, F-PCPDTBT, is designed. This new material is characterized by a stronger tendency to aggregate as compared to non-fluorinated PCPDTBT. Our measurements show that for F-PCPDTBT:PCBM blends the charge carrier generation becomes more efficient and the field dependence of free charge carrier generation is weakened. The stronger tendency to aggregate induced by the fluorination also leads to larger polymer-rich domains, accompanied by a threefold reduction of the non-geminate recombination coefficient under open-circuit conditions. The size of the polymer domains correlates well with the field dependence of charge generation and the Langevin reduction factor, which highlights the importance of domain size and domain purity for efficient charge carrier generation. In total, fluorination of PCPDTBT causes the PCE to increase from 3.6 to 6.1\% due to an enhanced fill factor, short-circuit current and open-circuit voltage. Further optimization of the blend ratio, active layer thickness, and polymer molecular weight resulted in 6.6\% efficiency for F-PCPDTBT:PC70BM solar cells. Interestingly, the double-fluorinated version 2F-PCPDTBT exhibited a poorer fill factor despite a further reduction of geminate and non-geminate recombination losses. To further analyze this finding, a new technique is developed that measures the effective extraction mobility under charge carrier densities and electrical fields comparable to solar cell operating conditions. This method is based on the bias-enhanced charge extraction technique. With the knowledge of the carrier density under different electrical field and illumination conditions, a conclusive picture of the changes in charge carrier dynamics leading to differences in the fill factor upon fluorination of PCPDTBT is attained. The more efficient charge generation and reduced recombination upon fluorination are counterbalanced by a decreased extraction mobility. 
Thus, the highest fill factor of 60\% and efficiency of 6.6\% are reached for F-PCPDTBT blends, while 2F-PCPDTBT blends have only moderate fill factors of 54\%, caused by the lower effective extraction mobility, limiting the efficiency to 6.5\%. To understand the details of the charge generation mechanism and the related losses, we evaluated the yield and field dependence of free charge generation using the time-delayed collection field technique in combination with sensitive measurements of the external quantum efficiency and absorption coefficients for a variety of blends. Importantly, both the yield and the field dependence of free charge generation are found to be unaffected by the excitation energy, including direct charge transfer (CT) excitation below the optical band gap. To access the non-detectable absorption at energies of the relaxed charge transfer emission, the absorption was reconstructed from the CT emission, induced via the recombination of thermalized charges in electroluminescence. For a variety of blends, the quantum yield at energies of charge transfer emission was identical to that for excitations with energies well above the optical band gap. Thus, the generation proceeds via the split-up of thermalized charge transfer states in working solar cells. Further measurements were conducted on blends with fine-tuned energy levels and similar blend morphologies by using different fullerene derivatives. A direct correlation between the efficiency of free carrier generation and the energy difference of the relaxed charge transfer state relative to the energy of the charge-separated state is found. These findings provide new guidelines for future material design, as new high-efficiency materials require a minimum energetic offset between the charge transfer and the charge-separated state while keeping the HOMO level (and LUMO level) difference between donor and acceptor as small as possible.}, language = {en} } @phdthesis{Mauri2014, author = {Mauri, Marco}, title = {A model for sigma factor competition in bacterial cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72098}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2014}, abstract = {Bacteria respond to changing environmental conditions by switching the global pattern of expressed genes. In response to specific environmental stresses the cell activates several stress-specific molecules such as sigma factors. These reversibly bind the RNA polymerase to form the so-called holoenzyme and direct it towards the appropriate stress response genes. In exponentially growing E. coli cells, the majority of the transcriptional activity is carried out by the housekeeping sigma factor, while stress responses are often under the control of alternative sigma factors. Different sigma factors compete for binding to a limited pool of RNA polymerase (RNAP) core enzymes, providing a mechanism for cross-talk between genes or gene classes via the sharing of expression machinery. To quantitatively analyze the contribution of sigma factor competition to global changes in gene expression, we develop a thermodynamic model that describes binding between sigma factors and core RNAP at equilibrium, transcription, non-specific binding to DNA, and the modulation of the availability of the molecular components. Association of the housekeeping sigma factor with RNAP is generally favored by its abundance and higher binding affinity to the core. 
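To illustrate the equilibrium competition underlying this picture, consider a minimal mass-action sketch in which each sigma factor sequesters core enzyme according to a dissociation constant; the copy numbers and Kd values below are hypothetical placeholders, not quantities taken from the thesis:

```python
# Minimal sketch of sigma factors competing for a limited pool of core RNAP.
# Mass action at equilibrium: holoenzyme_j = S_tot_j * E_free / (K_j + E_free),
# with E_free fixed by conservation of the total core enzyme.
# All numbers are hypothetical placeholders (copies per cell / Kd in copies).
from scipy.optimize import brentq

E_total = 2000.0                          # total core RNAP
sigmas = {"sigma70": (700.0, 1.0),        # (total copies, Kd): housekeeping, abundant and tight
          "sigma38": (300.0, 4.0),        # alternative sigma factors: fewer copies, weaker binding
          "sigma32": (100.0, 10.0)}

def excess_core(E_free):
    """Core enzyme implied by a trial free-core value minus the real total."""
    bound = sum(S_tot * E_free / (Kd + E_free) for S_tot, Kd in sigmas.values())
    return E_free + bound - E_total

# E_free is the unique root of the monotone conservation equation.
E_free = brentq(excess_core, 0.0, E_total)
for name, (S_tot, Kd) in sigmas.items():
    holo = S_tot * E_free / (Kd + E_free)
    print(f"{name}: holoenzyme = {holo:.0f} ({100 * holo / E_total:.1f}% of core pool)")
```

With such placeholder numbers the abundant, tightly binding housekeeping factor captures most of the core pool, illustrating how abundance and affinity jointly bias the competition.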
In order to promote transcription by alternative sigma subunits, the bacterial cell modulates the transcriptional efficiency in a reversible manner through several strategies, such as anti-sigma factors, 6S RNA and generally any kind of transcriptional regulators (e.g. activators or inhibitors). By shifting the outcome of the sigma factor competition for the core, these modulators bias the transcriptional program of the cell. The model is validated by comparison with in vitro competition experiments, with which excellent agreement is found. We observe that transcription is affected via the modulation of the concentrations of the different types of holoenzymes, so saturated promoters are only weakly affected by sigma factor competition. However, in the case of overlapping promoters or promoters recognized by two types of sigma factors, we find that even saturated promoters are strongly affected. Active transcription effectively lowers the affinity between the sigma factor driving it and the core RNAP, resulting in complex cross-talk effects and raising the question of how the affinities measured in vitro relate to the situation in the cell. We also estimate that sigma factor competition is not strongly affected by non-specific binding of core RNAPs, sigma factors, and holoenzymes to DNA. Finally, we analyze the role of increased core RNAP availability upon the shut-down of ribosomal RNA transcription during the stringent response. We find that passive up-regulation of alternative sigma-dependent transcription is not only possible, but also displays hypersensitivity based on the sigma factor competition. Our theoretical analysis thus provides support for a significant role of passive control during this global switch of the gene expression program and gives new insights into RNAP partitioning in the cell.}, language = {en} } @phdthesis{Widdrat2014, author = {Widdrat, Marc}, title = {Formation and alteration of magnetite nanoparticles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72239}, school = {Universit{\"a}t Potsdam}, pages = {113}, year = {2014}, abstract = {Magnetite is an iron oxide, which is ubiquitous in rocks and is usually deposited as small nanoparticulate matter among other rock material. It differs from most other iron oxides in that it contains both divalent and trivalent iron. Consequently, it has a special crystal structure and unique magnetic properties. These properties are used for paleoclimatic reconstructions, where naturally occurring magnetite helps in understanding former geological ages. Furthermore, magnetic properties are used in bio- and nanotechnological applications - synthetic magnetite serves as a contrast agent in MRI, is exploited in biosensing and hyperthermia, or is used in storage media. Magnetic properties are strongly size-dependent, and achieving size control under preferably mild synthesis conditions is of interest in order to obtain particles with the required properties. By using a custom-made setup, it was possible to synthesize stable single-domain magnetite nanoparticles with the co-precipitation method. Furthermore, it was shown that magnetite formation is temperature-dependent, resulting in larger particles at higher temperatures. However, the mechanistic details are incompletely understood. Formation of magnetite from solution was shown to occur from nanoparticulate matter rather than from solvated ions. The theoretical framework of such processes has only started to be described, partly due to the lack of kinetic or thermodynamic data. 
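Where temperature-dependent rate data of this kind are available, the activation energy follows from a linear fit of ln k against 1/T, as in the Arrhenius analysis described next; a minimal sketch with invented rate data (the thesis reports 28.4 kJ mol^-1 from its own measurements):

```python
# Arrhenius analysis: ln k = ln A - Ea/(R*T), so a linear fit of ln k vs 1/T
# yields the activation energy Ea from the slope. Data below are hypothetical.
import numpy as np

R = 8.314                                   # gas constant, J mol^-1 K^-1
T = np.array([278.0, 298.0, 318.0, 338.0])  # synthesis temperatures, K (placeholder)
k = np.array([0.8, 1.6, 2.9, 4.9])          # apparent growth rate constants (placeholder)

slope, intercept = np.polyfit(1.0 / T, np.log(k), 1)
Ea = -slope * R                             # J mol^-1
print(f"Activation energy: {Ea / 1000:.1f} kJ mol^-1")
```

An activation energy in the few-tens-of-kJ mol^-1 range, as obtained below, is commonly read as indicating a transport-limited rather than surface-reaction-limited growth step.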
Synthesis of magnetite nanoparticles at different temperatures was performed, and the Arrhenius plot was used to determine an activation energy for crystal growth of 28.4 kJ mol^-1, which led to the conclusion that nanoparticle diffusion is the rate-determining step. Furthermore, a study of the alteration of magnetite particles of different sizes as a function of their storage conditions is presented. The magnetic properties depend not only on particle size but also on the structure of the oxide, because magnetite oxidizes to maghemite under environmental conditions. The dynamics of this process have not been well described. Smaller nanoparticles are shown to oxidize more rapidly than larger ones, and the lower the storage temperature, the lower the measured oxidation. In addition, the magnetic properties of the altered particles are not decreased dramatically, suggesting that this alteration will not impact the use of such nanoparticles as medical carriers. Finally, the effect of biological additives on magnetite formation was investigated. Magnetotactic bacteria are able to synthesize and align magnetite nanoparticles of well-defined size and morphology due to the involvement of special proteins with specific binding properties. Based on this model of morphology control, phage display experiments were performed to determine peptide sequences that preferentially bind to (111) magnetite faces. The aim was to control the shape of magnetite nanoparticles during formation. Magnetotactic bacteria are also able to control the intracellular redox potential with proteins called magnetochromes. MamP is such a protein, and its oxidizing nature was studied in vitro via biomimetic magnetite formation experiments based on ferrous ions. Magnetite and further trivalent iron oxides were found. This work helps to understand basic mechanisms of magnetite formation and gives insight into non-classical crystal growth. In addition, it is shown that the alteration of magnetite nanoparticles is mainly based on oxidation to maghemite and does not significantly influence the magnetic properties. Finally, the biomimetic experiments help to understand the role of MamP within the bacteria, and a first step was taken towards morphology control in magnetite formation via co-precipitation.}, language = {en} } @phdthesis{Tinnefeld2014, author = {Tinnefeld, Christian}, title = {Building a columnar database on shared main memory-based storage}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72063}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2014}, abstract = {In the field of disk-based parallel database management systems, there exists a great variety of solutions based on a shared-storage or a shared-nothing architecture. In contrast, main memory-based parallel database management systems are dominated solely by the shared-nothing approach, as it preserves the in-memory performance advantage by processing data locally on each server. We argue that this unilateral development is going to cease due to the combination of the following three trends: a) Modern network technology features remote direct memory access (RDMA) and narrows the performance gap between accessing main memory inside a server and accessing that of a remote server to, and even below, a single order of magnitude. b) Modern storage systems scale gracefully, are elastic, and provide high availability. c) A modern storage system such as Stanford's RAMCloud even keeps all data resident in main memory. 
Exploiting these characteristics in the context of a main-memory parallel database management system is desirable. The advent of RDMA-enabled network technology makes the creation of a parallel main memory DBMS based on a shared-storage approach feasible. This thesis describes building a columnar database on shared main memory-based storage. The thesis discusses the resulting architecture (Part I) and the implications for query processing (Part II), and presents an evaluation of the resulting solution in terms of performance, high availability, and elasticity (Part III). In our architecture, we use Stanford's RAMCloud as shared storage, and the self-designed and developed in-memory AnalyticsDB as the relational query processor on top. AnalyticsDB encapsulates data access and operator execution via an interface which allows seamless switching between local and remote main memory, while RAMCloud provides not only storage capacity but also processing power. Combining both aspects allows pushing down the execution of database operators into the storage system. We describe how the columnar data processed by AnalyticsDB is mapped to RAMCloud's key-value data model and how the performance advantages of columnar data storage can be preserved. The combination of fast network technology and the possibility to execute database operators in the storage system opens the discussion of site selection. We construct a system model that allows the estimation of operator execution costs in terms of network transfer, data processed in memory, and wall time. This can be used for database operators that work on one relation at a time - such as a scan or materialize operation - to discuss the site selection problem (data pull vs. operator push). Since a database query translates to the execution of several database operators, it is possible that the optimal site selection varies per operator. For the execution of a database operator that works on two (or more) relations at a time, such as a join, the system model is enriched by additional factors such as the chosen algorithm (e.g. Grace vs. Distributed Block Nested Loop Join vs. Cyclo-Join), the data partitioning of the respective relations and their overlap, as well as the allowed resource allocation. We present an evaluation on a cluster with 60 nodes where all nodes are connected via RDMA-enabled network equipment. We show that query processing performance is about 2.4x slower if everything is done via the data pull operator execution strategy (i.e. RAMCloud is being used only for data access) and about 27\% slower if operator execution is also supported inside RAMCloud (in comparison to operating only on main memory inside a server without any network communication at all). The fast crash-recovery feature of RAMCloud can be leveraged to provide high availability, e.g. a server crash during query execution only delays the query response for about one second. Our solution is elastic in that it can adapt to changing workloads a) within seconds, b) without interruption of the ongoing query processing, and c) without manual intervention.}, language = {en} } @phdthesis{Negri2014, author = {Negri, Michael}, title = {How coaches influence referee decisions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72247}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2014}, abstract = {The work elaborates on the question of whether coaches in non-professional soccer can influence referee decisions. 
Modeled from a principal-agent perspective, the managing referee boards can be seen as the principal. They aim at facilitating a fair competition in accordance with the existing rules and regulations. In doing so, the referees are assigned as impartial agents on the pitch. The coaches take over a non-legitimate, principal-like role, trying to influence the referees even though they do not have the formal right to do so. Separate questionnaires were set up for referees and coaches. The coach questionnaire aimed at identifying the extent and the forms of influencing attempts by coaches. The referee questionnaire elaborated on the questions of whether referees take notice of possible influencing attempts and how they react to them. The results were related to official match data in order to identify significant influences on personal sanctions (yellow cards, second yellow cards, red cards) and on the match result. It is found that there is a slight effect on referee decisions. However, this effect is rather disadvantageous for the influencing coach, and there is no evidence for an impact on the match result itself.}, language = {en} } @phdthesis{Sorce2014, author = {Sorce, Jenny}, title = {From Spitzer mid-infrared observations and measurements of peculiar velocities to constrained simulations of the local universe}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72486}, school = {Universit{\"a}t Potsdam}, pages = {xx, 303}, year = {2014}, abstract = {Galaxies are observational probes for studying the Large Scale Structure. Their gravitational motions are tracers of the total matter density and therefore of the Large Scale Structure. Besides, studies of structure formation and galaxy evolution rely on numerical cosmological simulations. Still, only one universe, observable from a given position in time and space, is available for comparison with simulations. The related cosmic variance affects our ability to interpret the results. Simulations constrained by observational data are a perfect remedy to this problem. Achieving such simulations is the goal of the projects Cosmic flows and CLUES. Cosmic flows builds catalogs of accurate distance measurements to map deviations from the expansion. These measurements are mainly obtained with the galaxy luminosity-rotation rate correlation. We present the calibration of that relation in the mid-infrared with observational data from the Spitzer Space Telescope. The resulting accurate distance estimates will be included in the third catalog of the project. In the meantime, two catalogs, reaching up to 30 and 150 Mpc/h, have been released. We report improvements and applications of the CLUES method to these two catalogs. The technique is based on the constrained realization algorithm. The cosmic displacement field is computed with the Zel'dovich approximation. The latter is then reversed to relocate reconstructed three-dimensional constraints to their precursors' positions in the initial field. The size of the second catalog (8000 galaxies within 150 Mpc/h) highlighted the importance of minimizing the observational biases. By carrying out tests on mock catalogs, built from cosmological simulations, a method to minimize observational bias can be derived. Finally, for the first time, cosmological simulations are constrained solely by peculiar velocities. The process is successful as the resulting simulations resemble the Local Universe. 
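The displacement step can be made concrete with a small numpy sketch of the Zel'dovich approximation on a periodic grid; the grid size, units and toy density field are illustrative, and the growth factor is absorbed into the normalization, so this is a schematic of the operation rather than the actual reconstruction pipeline:

```python
# Schematic Zel'dovich displacement on a periodic grid.
# In Fourier space psi_k = i k delta_k / k^2, so that div(psi) = -delta
# to linear order; particles then move as x = q + D * psi(q).
import numpy as np

def zeldovich_displacement(delta, boxsize):
    """delta: 3D overdensity grid; returns the 3-component displacement field."""
    n = delta.shape[0]
    k1 = 2.0 * np.pi * np.fft.fftfreq(n, d=boxsize / n)
    kx, ky, kz = np.meshgrid(k1, k1, k1, indexing="ij")
    k2 = kx**2 + ky**2 + kz**2
    k2[0, 0, 0] = 1.0                      # avoid division by zero for the k=0 mode
    delta_k = np.fft.fftn(delta)
    psi = [np.real(np.fft.ifftn(1j * k * delta_k / k2)) for k in (kx, ky, kz)]
    return np.stack(psi)

rng = np.random.default_rng(0)
delta = rng.normal(size=(32, 32, 32))      # toy density field, not a real reconstruction
psi = zeldovich_displacement(delta, boxsize=150.0)
print(psi.shape)                           # (3, 32, 32, 32)
```

Reversing this mapping (q = x - D psi) is what relocates present-day constraints to their precursor positions in the initial field, as described above.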
The major attractors and voids are simulated at positions within a few megaparsecs of the observed positions, thus reaching the limit imposed by linear theory.}, language = {en} } @phdthesis{Mayer2014, author = {Mayer, Michael}, title = {Pulsar wind nebulae at high energies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71504}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2014}, abstract = {Pulsar wind nebulae (PWNe) are the most abundant TeV gamma-ray emitters in the Milky Way. The radiative emission of these objects is powered by fast-rotating pulsars, which deposit part of their rotational energy into winds of relativistic particles. This thesis presents an in-depth study of the detected population of PWNe at high energies. To outline general trends regarding their evolutionary behaviour, a time-dependent model is introduced and compared to the available data. In particular, this work presents two exceptional PWNe which protrude from the rest of the population, namely the Crab Nebula and N 157B. Both objects are driven by pulsars with extremely high rotational energy loss rates. Accordingly, they are often referred to as energetic twins. Modelling the non-thermal multi-wavelength emission of N 157B gives access to specific properties of this object, like the magnetic field inside the nebula. Comparing the derived parameters to those of the Crab Nebula reveals large intrinsic differences between the two PWNe. Possible origins of these differences are discussed in the context of the resembling pulsars. Compared to the TeV gamma-ray regime, the number of detected PWNe is much smaller in the MeV-GeV gamma-ray range. In the latter range, the Crab Nebula stands out through the recent detection of gamma-ray flares. In general, the measured flux enhancements on short time scales of days to weeks were not expected in the theoretical understanding of PWNe. In this thesis, the variability of the Crab Nebula is analysed using data from the Fermi Large Area Telescope (Fermi-LAT). For the presented analysis, a new gamma-ray reconstruction method is used, providing a higher sensitivity and a lower energy threshold compared to previous analyses. The derived gamma-ray light curve of the Crab Nebula is investigated for flares and periodicity. The detected flares are analysed regarding their energy spectra, and their variety and commonalities are discussed. In addition, a dedicated analysis of the flare which occurred in March 2013 is performed. The derived short-term variability time scale is roughly 6 h, implying that a small region inside the Crab Nebula is responsible for the enigmatic flares. The most promising theories explaining the origins of the flux eruptions and gamma-ray variability are discussed in detail. In the technical part of this work, a new analysis framework is presented. The introduced software, called gammalib/ctools, is currently being developed for the future CTA observatory. The analysis framework is extensively tested using data from the H.E.S.S. experiment. To conduct proper data analysis in the likelihood framework of gammalib/ctools, a model describing the distribution of background events in H.E.S.S. data is presented. The software provides the infrastructure to combine data from several instruments in one analysis. To study the gamma-ray emitting PWN population, data from Fermi-LAT and H.E.S.S. are combined in the likelihood framework of gammalib/ctools. 
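The joint-likelihood idea behind combining instruments can be sketched generically: each instrument contributes a binned Poisson log-likelihood, and their sum is minimized over shared spectral parameters. The following is a deliberately simplified illustration with invented counts and exposures, not the gammalib/ctools API:

```python
# Generic joint binned Poisson likelihood over two instruments sharing one
# power-law spectrum dN/dE = N0 * (E/E0)**(-gamma). All numbers are toy values.
import numpy as np
from scipy.optimize import minimize

E0 = 1.0                                       # reference energy, TeV
instruments = [                                # (bin energies [TeV], exposure, observed counts)
    (np.array([0.01, 0.03, 0.1]), 0.2, np.array([1900, 240, 32])),   # "LAT-like"
    (np.array([0.5, 1.0, 5.0]), 40.0, np.array([160, 45, 2])),       # "IACT-like"
]

def neg_log_like(params):
    log_n0, gamma = params
    nll = 0.0
    for energies, exposure, counts in instruments:
        model = np.exp(log_n0) * (energies / E0) ** (-gamma) * exposure
        # Poisson log-likelihood per bin, dropping the constant ln(n!) term
        nll -= np.sum(counts * np.log(model) - model)
    return nll

fit = minimize(neg_log_like, x0=[0.0, 2.0], method="Nelder-Mead")
print("best-fit log N0 and gamma:", fit.x)
```

gammalib/ctools applies the same principle with full instrument response functions; the sketch merely shows why a shared likelihood lets one instrument constrain the energy range where the other loses sensitivity.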
In particular, the spectral peak, which usually lies in the overlapping energy range of these two instruments, is determined with the presented analysis framework. The derived measurements are compared to the predictions of the time-dependent model. The combined analysis supports the conclusion of a diverse population of gamma-ray emitting PWNe.}, language = {en} } @phdthesis{Sayago2014, author = {Sayago, Jhosnella}, title = {Late Paleozoic basin analysis of the Loppa High and Finnmark Platform in the Norwegian Barents Sea : integration of seismic attributes and seismic sequence stratigraphy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72576}, school = {Universit{\"a}t Potsdam}, pages = {viii, 109}, year = {2014}, abstract = {The subsurface upper Palaeozoic sedimentary successions of the Loppa High half-graben and the Finnmark platform in the Norwegian Barents Sea (southwest Barents Sea) were investigated using 2D/3D seismic datasets combined with well and core data. These sedimentary successions represent a case of mixed siliciclastic-carbonate depositional systems, which formed during the earliest phase of the Atlantic rifting between Greenland and Norway. During the Carboniferous and Permian the southwest part of the Barents Sea was located along the northern margin of Pangaea, which experienced a northward drift at a speed of ~2-3 mm per year. This gradual shift in paleolatitudinal position is reflected by changes in regional climatic conditions: from warm-humid in the early Carboniferous, changing to warm-arid in the middle to late Carboniferous, and finally to colder conditions in the late Permian. Such changes in paleolatitude and climate resulted in major changes in the style of sedimentation, including variations in the type of carbonate factories. The upper Palaeozoic sedimentary succession is composed of four major depositional units, comprising chronologically the Billefjorden Group, dominated by siliciclastic deposition in extensional, tectonically controlled wedges; the Gipsdalen Group, dominated by warm-water carbonates, stacked buildups and evaporites; the Bjarmeland Group, characterized by cool-water carbonates as well as by the presence of buildup networks; and the Tempelfjorden Group, characterized by fine-grained sedimentation dominated by biological silica production. In the Loppa High, the integration of a core study with multi-attribute seismic facies classification allowed highlighting the main sedimentary unconformities and mapping the spatial extent of a buried paleokarst terrain. This geological feature is interpreted to have formed during a protracted episode of subaerial exposure between the late Palaeozoic and middle Triassic. Based on seismic sequence stratigraphic analysis, the palaeogeography of the Loppa High basin was furthermore reconstructed in time and space, and a new and more detailed tectono-sedimentary model for this area was proposed. In the Finnmark platform area, a detailed core analysis of two main exploration wells, combined with key 2D seismic sections located along the main depositional profile, allowed the evaluation of depositional scenarios for the two main lithostratigraphic units: the {\O}rn Formation (Gipsdalen Group) and the Isbj{\o}rn Formation (Bjarmeland Group). 
During the mid-Sakmarian, two major changes were observed between the two formations: (1) a variation in the type of carbonate factories, which is interpreted to be depth-controlled, and (2) a change in platform morphology, which evolved from a distally steepened ramp to a homoclinal ramp. The results of this study may help support future reservoir characterization of the upper Palaeozoic units in the Barents Sea, particularly in the Loppa High half-graben and the Finnmark platform area.}, language = {en} } @phdthesis{Nguyen2014, author = {Nguyen, Van Manh}, title = {Large-scale floodplain sediment dynamics in the Mekong Delta : present state and future prospects}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72512}, school = {Universit{\"a}t Potsdam}, pages = {ix, 95}, year = {2014}, abstract = {The Mekong Delta (MD) sustains the livelihood and food security of millions of people in Vietnam and Cambodia. It is known as the "rice bowl" of South East Asia and has one of the world's most productive fisheries. Sediment dynamics play a major role in the high productivity of agriculture and fishery in the delta. However, the MD is threatened by climate change, sea level rise and unsustainable development activities in the Mekong Basin. Despite its importance and the expected threats, the understanding of the present and future sediment dynamics in the MD is very limited, a consequence of its large extent, the intricate system of rivers, channels and floodplains, and the scarcity of observations. This thesis therefore aimed at (1) quantifying the suspended sediment dynamics and the associated sediment-nutrient deposition in the floodplains of the MD, and (2) assessing the impacts of likely future boundary changes on the sediment dynamics in the MD. The applied methodology combines field experiments and numerical simulation to quantify and predict the sediment dynamics in the entire delta in a spatially explicit manner. The experimental part consists of a comprehensive procedure to monitor the quantity and spatial variability of sediment and associated nutrient deposition for large and complex river floodplains, including an uncertainty analysis. The measurement campaign deployed 450 sediment mat traps in 19 floodplains over the MD for a complete flood season. The data also support the quantification of nutrient deposition in floodplains, based on laboratory analysis of the nutrient fractions of the trapped sediment. The main findings are that the grain size distribution and nutrient fractions of the suspended sediment are homogeneous over the Vietnamese floodplains, but that the sediment deposition within and between ring-dike floodplains shows very high spatial variability due to a high level of human interference. The experimental findings provide the essential data for the setup and calibration of a large-scale sediment transport model for the MD. For the simulation studies a large-scale hydrodynamic model was developed in order to quantify large-scale floodplain sediment dynamics. The complex river-channel-floodplain system of the MD is described by a quasi-2D model linking a hydrodynamic and a cohesive sediment transport model. The floodplains are described as quasi-2D representations linked to rivers and channels modeled in 1D by using control structures. The model setup, based on the experimental findings, ignored erosion and re-suspension processes due to the very high degree of human interference during the flood season. 
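For orientation, the deposition term in cohesive sediment transport models is commonly of the Krone type, where settling occurs only while the bed shear stress stays below a critical value; the sketch below uses illustrative parameter values, not the calibrated values of this thesis:

```python
# Krone-type deposition of cohesive sediment: D = w_s*C*(1 - tau_b/tau_cr)
# for tau_b < tau_cr, else no deposition. In a quiescent, ponded compartment
# (tau_b ~ 0) the water column then clears exponentially: dC/dt = -w_s*C/h.
# All parameter values are illustrative only.
import numpy as np

w_s = 5e-4            # settling velocity of flocs [m/s]
tau_cr = 0.1          # critical bed shear stress for deposition [Pa]
h, C0 = 2.0, 0.05     # water depth [m], initial concentration [kg/m^3] (= 50 mg/l)

def deposition_flux(conc, tau_b):
    """Instantaneous deposition flux [kg/m^2/s]."""
    return w_s * conc * max(0.0, 1.0 - tau_b / tau_cr)

print(f"initial flux: {deposition_flux(C0, tau_b=0.01):.2e} kg/m^2/s")
t = np.linspace(0.0, 2 * 86400, 500)             # two days of ponding
deposited = h * C0 * (1.0 - np.exp(-w_s * t / h))
print(f"{deposited[-1]:.2f} kg/m^2 per filling event")
```

Since each refill can deposit at most h*C0 of sediment, repeated water exchanges through the sluice gates, rather than settling alone, control the seasonal deposition totals quantified below.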
A two-stage calibration with six objective functions was developed in order to calibrate both the hydrodynamic and the sediment transport module. The objective functions include hydraulic and sediment transport parameters in the main rivers, channels and floodplains. The model results show, for the first time, the spatio-temporal distribution of sediment and associated nutrient deposition rates in the whole MD. The patterns of sediment transport and deposition are quantified for the different sub-systems. The main factors influencing the spatial sediment dynamics are the network of rivers, channels and dike rings, sluice gate operations, the magnitude of the floods, and tidal influences. The superposition of these factors leads to a high spatial variability of sediment transport and deposition, in particular in the Vietnamese floodplains. Depending on the flood magnitude, annual sediment loads reaching the coast vary from 48\% to 60\% of the sediment load at Kratie, the upper boundary of the MD. Deposited sediment varies from 19\% to 23\% of the annual load at Kratie in the Cambodian floodplains, and from 1\% to 6\% in the compartmented and diked floodplains in Vietnam. Annually deposited nutrients (N, P, K) associated with the sediment deposition provide on average more than 50\% of the mineral fertilizers typically applied for rice crops in non-flooded ring-dike compartments in Vietnam. This large-scale quantification provides a basis for estimating the benefits of the annual Mekong floods for agriculture and fishery, for assessing the impacts of future changes on the delta system, and for further studies on coastal deposition/erosion. For the estimation of future prospects, a sensitivity-based approach is applied to assess the response of floodplain hydraulics and sediment dynamics to changes in the delta boundaries, including hydropower development, climate change in the Mekong River Basin, and effective sea level rise. The developed sediment model is used to simulate the mean sediment transport and sediment deposition in the whole delta system for the baseline (2000-2010) and future (2050-2060) periods. For each driver we derive a plausible range of future changes and discretize it into five levels, resulting in altogether 216 possible factor combinations. Our results thus cover all plausible future pathways of sediment dynamics in the delta based on current knowledge. The uncertainty in the range of the resulting impacts can be decreased as more information on these drivers becomes available. Our results indicate that hydropower development dominates the changes in the sediment dynamics of the Mekong Delta, while sea level rise has the smallest effect. The floodplains of the Vietnamese Mekong Delta are much more sensitive to the changes than the other subsystems of the delta. In terms of the median changes of the three combined drivers, the inundation extent is predicted to increase slightly, but the overall floodplain sedimentation would be reduced by approximately 40\%, while the sediment load to the sea would diminish to half of the current rate. These findings provide new and valuable information on the possible impacts of future development on the delta, and indicate the most vulnerable areas. 
Thus, the presented results are a significant contribution to the ongoing international discussion on hydropower development in the Mekong Basin and its impact on the Mekong Delta.}, language = {en} } @phdthesis{Kruegel2014, author = {Kr{\"u}gel, Andr{\´e}}, title = {Eye movement control during reading : factors and principles of computing the word center for saccade planning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72599}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Reading is a complex cognitive task based on the analysis of visual stimuli. Due to the physiology of the eye, only a small number of letters around the fixation position can be extracted with high visual acuity, while the visibility of words and letters outside this so-called foveal region quickly drops with increasing eccentricity. As a consequence, saccadic eye movements are needed to repeatedly shift the fovea to new words for visual word identification during reading. Moreover, even within a foveated word, fixation positions near the word center are superior to other fixation positions for efficient word recognition (O'Regan, 1981; Brysbaert, Vitu, and Schroyens, 1996). Thus, most reading theories assume that readers aim specifically at word centers during reading (for a review see Reichle, Rayner, \& Pollatsek, 2003). However, saccades' landing positions within words during reading are in fact systematically modulated by the distance of the launch site from the word center (McConkie, Kerr, Reddix, \& Zola, 1988). In general, it is largely unknown how readers identify the center of upcoming target words, and there is no computational model of the sensorimotor translation of the decision for a target word into spatial word center coordinates. Here we present a series of three studies which aim at advancing the current knowledge about the computation of saccade target coordinates during saccade planning in reading. Based on large corpus analyses, we first identified word skipping as a further factor beyond the launch-site distance with a likewise systematic and surprisingly large effect on within-word landing positions. Most importantly, we found that the end points of saccades after skipped words are shifted two or more letters to the left compared to one-step saccades (i.e., from word N to word N+1) with equal launch-site distances. We then present evidence from a single-saccade experiment suggesting that the word-skipping effect results from highly automatic low-level perceptual processes, which are essentially based on the localization of blank spaces between words. Finally, in the third part, we present a Bayesian model of the computation of the word center from primary sensory measurements of inter-word spaces. We demonstrate that the model simultaneously accounts for launch-site and saccade-type contingent modulations of within-word landing positions in reading. Our results show that the spatial saccade target during reading is the result of complex estimations of the word center based on incomplete sensory information, which also leads to specific systematic deviations of saccades' landing positions from the word center. 
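The flavor of such a Bayesian estimate can be conveyed by a toy Gaussian cue-combination sketch: a noisy, eccentricity-dependent measurement of the word center is fused with a prior over saccade amplitudes, and the posterior mean is the saccade target. All parameters below are invented for illustration and do not reproduce the model's actual parameterization:

```python
# Toy Bayesian estimate of a word's center from a noisy boundary measurement.
# Measurement noise grows with retinal eccentricity, so distant targets rely
# more on the prior, producing launch-site dependent landing-position shifts.
# All parameters are illustrative, not fitted values.
import numpy as np

def saccade_amplitude(left_edge, right_edge, launch_site,
                      noise_per_letter=0.3, prior_mean=7.0, prior_sd=2.0):
    """Positions in letter units; launch_site lies left of the word.
    Returns the posterior-mean amplitude toward the estimated word center."""
    true_center = 0.5 * (left_edge + right_edge)
    ecc = true_center - launch_site                # eccentricity of the target
    sd_meas = noise_per_letter * ecc               # noise grows with eccentricity
    measured = true_center + np.random.normal(0.0, sd_meas)
    # Gaussian fusion: precision-weighted average of measurement and prior
    w = (1 / sd_meas**2) / (1 / sd_meas**2 + 1 / prior_sd**2)
    return w * (measured - launch_site) + (1 - w) * prior_mean

np.random.seed(1)
amps = [np.mean([saccade_amplitude(4, 10, launch_site=ls) for _ in range(5000)])
        for ls in (0.0, -4.0)]
print(amps)   # the farther launch site yields amplitudes pulled toward the prior
```

With increasing launch distance the estimate is increasingly dominated by the prior, qualitatively reproducing the systematic launch-site dependent landing-position shifts described above.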
Our results have important implications for current reading models and experimental reading research.}, language = {en} } @phdthesis{Steinhaus2014, author = {Steinhaus, Sebastian Peter}, title = {Constructing quantum spacetime}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72558}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Despite the remarkable progress made in the past century, which has revolutionized our understanding of the universe, there are numerous open questions left in theoretical physics. Particularly important is the fact that the theories describing the fundamental interactions of nature are incompatible. Einstein's theory of general relativity describes gravity as a dynamical spacetime, which is curved by matter and whose curvature determines the motion of matter. On the other hand we have quantum field theory, in the form of the standard model of particle physics, where particles interact via the remaining interactions - the electromagnetic, weak and strong interaction - on a flat, static spacetime without gravity. A theory of quantum gravity is hoped to cure this incompatibility by heuristically replacing classical spacetime by 'quantum spacetime'. Several approaches exist that attempt to define such a theory, with differing underlying premises and ideas, and it is not clear which is to be preferred. Yet a minimal requirement is compatibility with the classical theory they attempt to generalize. Interestingly, many of these models rely on discrete structures in their definition or postulate discreteness of spacetime to be fundamental. Besides the direct advantages discretisations provide, e.g. permitting numerical simulations, they come with serious caveats requiring thorough investigation: in general, discretisations break the fundamental diffeomorphism symmetry of gravity and are generically not unique. Both complicate establishing the connection to the classical continuum theory. The main focus of this thesis lies in the investigation of this relation for spin foam models. This is done on different levels of the discretisation / triangulation, ranging from a few simplices up to the continuum limit. In the regime of very few simplices we confirm and deepen the connection of spin foam models to discrete gravity. Moreover, we discuss dynamical principles, e.g. diffeomorphism invariance in the discrete, to fix the ambiguities of the models. In order to satisfy these conditions, the discrete models have to be improved in a renormalisation procedure, which also allows us to study their continuum dynamics. Applied to simplified spin foam models, we uncover a rich, non-trivial fixed-point structure, which we summarize in a phase diagram. Inspired by these methods, we propose a method to consistently construct the continuum theory, which comes with a unique vacuum state.}, language = {en} } @phdthesis{GuzmanPerez2014, author = {Guzman-Perez, Valentina}, title = {Effect of benzylglucosinolate on signaling pathways associated with type 2 diabetes prevention}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72351}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Type 2 diabetes (T2D) is a health problem throughout the world. In 2010, there were nearly 230 million individuals with diabetes worldwide, and it is estimated that in the economically advanced countries the number of cases will increase by about 50\% in the next twenty years. Insulin resistance is one of the major features of T2D and is also a risk factor for metabolic and cardiovascular complications. 
Epidemiological and animal studies have shown that the consumption of vegetables and fruits can delay or prevent the development of the disease, although the underlying mechanisms of these effects are still unclear. Brassica species such as broccoli (Brassica oleracea var. italica) and nasturtium (Tropaeolum majus) possess a high content of bioactive phytochemicals, e.g. nitrogen-sulfur compounds (glucosinolates and isothiocyanates) and polyphenols, largely associated with the prevention of cancer. Isothiocyanates (ITCs) display their anti-carcinogenic potential by inducing detoxifying phase II enzymes and increasing glutathione (GSH) levels in tissues. In T2D, an increase in gluconeogenesis and triglyceride synthesis and a reduction in fatty acid oxidation, accompanied by the presence of reactive oxygen species (ROS), are observed; altogether the result of an inappropriate response to insulin. Forkhead box O (FOXO) transcription factors play a crucial role in the regulation of insulin effects on gene expression and metabolism, and alterations in FOXO function could contribute to metabolic disorders in diabetes. In this study, using stably transfected human osteosarcoma cells (U-2 OS) with constitutive expression of FOXO1 protein labeled with GFP (green fluorescent protein) and human hepatoma (HepG2) cell cultures, we evaluated the ability of benzylisothiocyanate (BITC), derived from the benzylglucosinolate extracted from nasturtium, to modulate (i) the insulin-signaling pathway, (ii) the intracellular localization of FOXO1, and (iii) the expression of proteins involved in glucose metabolism, ROS detoxification, cell cycle arrest and DNA repair. BITC promoted oxidative stress and in response induced FOXO1 translocation from the cytoplasm into the nucleus, antagonizing the insulin effect. The BITC stimulus was able to down-regulate gluconeogenic enzymes, which can be considered an anti-diabetic effect; to promote resistance to oxidative stress, expressed by the up-regulation of manganese superoxide dismutase (MnSOD) and detoxification enzymes; to modulate autophagy by induction of BECLIN1 and down-regulation of the mammalian target of rapamycin complex 1 (mTORC1) pathway; and to promote cell cycle arrest and DNA damage repair by up-regulation of the cyclin-dependent kinase inhibitor p21CIP and the growth arrest and DNA damage-inducible protein GADD45. Except for nuclear factor (erythroid-derived 2)-like 2 (NRF2) and its influence on the gene expression of detoxification enzymes, all the observed effects were independent of FOXO1, protein kinase B (AKT/PKB) and the NAD-dependent deacetylase sirtuin-1 (SIRT1). The current study provides evidence that besides their anticarcinogenic potential, isothiocyanates might have a role in T2D prevention. The BITC stimulus mimics the fasting state, in which insulin signaling is not triggered and FOXO proteins remain in the nucleus modulating the expression of their target genes, with the advantage of a down-regulation of gluconeogenesis instead of its increase. 
These effects suggest that BITC might be considered a promising substance in the prevention or treatment of T2D; the factors behind its modulatory effects therefore need further investigation.}, language = {en} } @phdthesis{Burdack2014, author = {Burdack, Doreen}, title = {Water management policies and their impact on irrigated crop production in the Murray-Darling Basin, Australia}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-306-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72245}, school = {Universit{\"a}t Potsdam}, pages = {307}, year = {2014}, abstract = {The economic impact analysis contained in this book shows how irrigation farming is particularly susceptible when certain water management policies are applied in the Australian Murray-Darling Basin, one of the world's largest river basins and Australia's most fertile region. By comparing different pricing and non-pricing water management policies with the help of the Water Integrated Market Model, it is found that the impact of water-demand-reducing policies is most severe on crops that need to be intensively irrigated and are at the same time less water-productive. A combination of increasingly frequent and severe droughts and the application of policies that decrease agricultural water demand in the same region will create a situation in which the highly water-dependent crops rice and cotton cannot be cultivated at all.}, language = {en} } @phdthesis{Ziese2014, author = {Ziese, Ramona}, title = {Geometric electroelasticity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72504}, school = {Universit{\"a}t Potsdam}, pages = {vi, 113}, year = {2014}, abstract = {In this work a differential geometric formulation of the theory of electroelasticity is developed which also includes thermal and magnetic influences. We study the motion of bodies consisting of an elastic material that are deformed by the influence of mechanical forces, heat and an external electromagnetic field. To this end physical balance laws (conservation of mass, balance of momentum, angular momentum and energy) are established. These provide an equation that describes the motion of the body during the deformation. Here the body and the surrounding space are modeled as Riemannian manifolds, and we allow the body to have a lower dimension than the surrounding space. In this way one is not (as usual) restricted to the description of the deformation of three-dimensional bodies in three-dimensional space, but one can also describe the deformation of membranes and deformations in a curved space. Moreover, we formulate so-called constitutive relations that encode the properties of the material used. Balance of energy, as a scalar law, can easily be formulated on a Riemannian manifold. The remaining balance laws are then obtained by demanding that balance of energy be invariant under the action of arbitrary diffeomorphisms of the surrounding space. This generalizes a result by Marsden and Hughes that pertains to bodies that have the same dimension as the surrounding space and does not allow the presence of electromagnetic fields. Usually, in works on electroelasticity, the entropy inequality is used to decide which otherwise allowed deformations are physically admissible and which are not. It is also employed to derive restrictions on the possible forms of the constitutive relations describing the material. 
Unfortunately, opinions on the physically correct statement of the entropy inequality diverge when electromagnetic fields are present. Moreover, it is unclear how to formulate the entropy inequality in the case of a membrane that is subjected to an electromagnetic field. Thus, we show that one can replace the use of the entropy inequality by the demand that, for a given process, balance of energy be invariant under the action of arbitrary diffeomorphisms of the surrounding space and under linear rescalings of the temperature. On the one hand, this demand also yields the desired restrictions on the form of the constitutive relations; on the other hand, it needs much weaker assumptions than the arguments in the physics literature that employ the entropy inequality. Again, our result generalizes a theorem of Marsden and Hughes; this time, our result is, like theirs, only valid for bodies that have the same dimension as the surrounding space.}, language = {en} } @phdthesis{Balzer2014, author = {Balzer, Arnim}, title = {Crab flare observations with H.E.S.S. phase II}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72545}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The H.E.S.S. array is a third-generation Imaging Atmospheric Cherenkov Telescope (IACT) array. It is located in the Khomas Highland in Namibia and measures very high energy (VHE) gamma-rays. In Phase I, the array started data taking in 2004 with its four identical 13 m telescopes. Since then, H.E.S.S. has emerged as the most successful IACT experiment to date. Among the almost 150 sources of VHE gamma-ray radiation found so far, even the oldest detection, the Crab Nebula, keeps surprising the scientific community with unexplained phenomena such as the recently discovered very energetic flares of high energy gamma-ray radiation. During its most recent flare, which was detected by the Fermi satellite in March 2013, the Crab Nebula was simultaneously observed with the H.E.S.S. array for six nights. The results of these observations are discussed in detail in the course of this work. During the nights of the flare, the new 24 m × 32 m H.E.S.S. II telescope was still being commissioned, but it participated in the data taking for one night. To be able to reconstruct and analyze the data of the H.E.S.S. Phase II array, the algorithms and software used by the H.E.S.S. Phase I array had to be adapted. The most prominent advanced shower reconstruction technique developed by de Naurois and Rolland, the template-based model analysis, compares real shower images taken by the Cherenkov telescope cameras with shower templates obtained using a semi-analytical model. To find the best-fitting image, and therefore the relevant parameters that describe the air shower best, a pixel-wise log-likelihood fit is performed. The adaptation of this advanced shower reconstruction technique to the heterogeneous H.E.S.S. Phase II array for stereo events (i.e. air showers seen by at least two telescopes of any kind), its performance on Monte Carlo simulations, as well as its application to real data will be described.}, language = {en} } @phdthesis{Bamberg2014, author = {Bamberg, Marlene}, title = {Planetary mapping tools applied to floor-fractured craters on Mars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72104}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Planetary research is often user-based and requires considerable skill, time, and effort. 
Unfortunately, self-defined boundary conditions, definitions, and rules are often not documented or are hard to comprehend due to the complexity of the research. This makes comparisons to other studies, or an extension of the existing research, complicated. Comparisons are often distorted because results rely on different, ill-defined, or even unknown boundary conditions. The purpose of this research is to develop a standardized analysis method for planetary surfaces which is adaptable to several research topics. The method provides a consistent quality of results, which includes achieving reliable and comparable results and reducing the time and effort of conducting such studies. The standardized analysis method is provided by automated analysis tools that focus on statistical parameters. Specific key parameters and boundary conditions are defined for the tool application. The analysis relies on a database in which all key parameters are stored. These databases can be easily updated and adapted to various research questions. This increases the flexibility, reproducibility, and comparability of the research. However, the quality of the database and the reliability of the definitions directly influence the results. To ensure a high quality of results, the rules and definitions need to be well defined and based on previously conducted case studies. The tools then produce parameters which are obtained by defined geostatistical techniques (measurements, calculations, classifications). The idea of an automated statistical analysis is tested to demonstrate the benefits, but also the potential problems, of this method. In this study, I adapt automated tools for floor-fractured craters (FFCs) on Mars. These impact craters show a variety of surface features, occur in different Martian environments, and have different fracturing origins. They provide a complex morphological and geological field of application. 433 FFCs are classified by the analysis tools according to their fracturing process. Spatial data, environmental context, and crater interior data are analyzed to distinguish between the processes involved in floor fracturing. Related geologic processes, such as glacial and fluvial activity, are too similar to be separately classified by the automated tools; glacial and fluvial fracturing processes are therefore merged for the classification. The automated tools provide probability values for each origin model, and a classification is accepted only if the origin probability exceeds 50 \%. This analysis method shows that 15 \% of the FFCs are fractured by intrusive volcanism, 20 \% by tectonic activity, and 43 \% by water- and ice-related processes. In total, 75 \% of the FFCs are classified to an origin type; the unclassified remainder can be explained by a combination of origin models, by superposition or erosion of key parameters, or by an unknown fracturing model. Those features have to be analyzed manually in detail. Another possibility would be the improvement of the key parameters and rules for the classification. This research shows that it is possible to conduct an automated statistical analysis of morphologic and geologic features based on analysis tools. 
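The classification step can be pictured as a scoring scheme over the stored key parameters. The sketch below is a purely hypothetical illustration of such a rule-based probabilistic classifier; the feature names, weights and example crater record are invented, and only the above-50 \% acceptance rule is taken from the text:

```python
# Hypothetical sketch of a rule-based origin classifier for floor-fractured
# craters: each origin model scores the crater's key parameters, scores are
# normalized to probabilities, and a class is accepted only above 0.5.
# Feature names and weights are invented for illustration.

ORIGIN_RULES = {
    "intrusive_volcanism": {"near_volcanic_region": 2.0, "domed_floor": 1.5},
    "tectonic_activity":   {"near_graben_system": 2.0, "linear_fractures": 1.5},
    "water_ice":           {"fluvial_features": 1.5, "high_latitude": 1.0,
                            "polygonal_fractures": 1.0},
}

def classify(crater):
    """crater: dict mapping feature name -> bool (from the key-parameter database)."""
    scores = {origin: sum(w for feat, w in rules.items() if crater.get(feat))
              for origin, rules in ORIGIN_RULES.items()}
    total = sum(scores.values())
    if total == 0.0:
        return "unclassified", scores
    probs = {origin: s / total for origin, s in scores.items()}
    best = max(probs, key=probs.get)
    # acceptance rule from the text: origin probability must exceed 50%
    return (best if probs[best] > 0.5 else "unclassified"), probs

example = {"near_graben_system": True, "linear_fractures": True,
           "high_latitude": True}                  # invented crater record
print(classify(example))  # ('tectonic_activity', ...) with p = 3.5/4.5 ~ 0.78
```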
Analysis tools provide additional information to the user and are therefore considered assistance systems.}, language = {en} } @phdthesis{Takouna2014, author = {Takouna, Ibrahim}, title = {Energy-efficient and performance-aware virtual machine management for cloud data centers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72399}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Virtualized cloud data centers provide resources on demand, enable agile resource provisioning, and host heterogeneous applications with different resource requirements. Such data centers consume enormous amounts of energy, which increases operating costs, the heat inside the centers, and carbon dioxide emissions. The rise in energy consumption can result from ineffective resource management that causes inefficient resource utilization. This dissertation presents detailed models and novel techniques for virtualized resource management in cloud data centers. The presented techniques take into account service level agreements (SLAs) and the heterogeneity of workloads with respect to memory access demand and the communication patterns of web and HPC (high performance computing) applications. To evaluate the presented techniques, we use simulations and real workload traces of web and HPC applications, and we compare our techniques with other state-of-the-art techniques by means of several performance metrics. The main contributions of this dissertation are the following: A proactive resource provisioning technique based on robust optimization. This technique increases the hosts' ability to provision more VMs while minimizing unneeded energy consumption; in addition, it mitigates undesired changes in the power state of the server. The presented technique uses an interval-based prediction algorithm to implement the robust optimization, taking uncertain demands into account. An adaptive, interval-based technique for predicting workloads with high fluctuations on short time scales. The interval-based prediction is implemented in a standard deviation variant and a median absolute deviation variant; the interval changes are based on an adaptive confidence window in order to cope with the workload fluctuations. A robust VM consolidation for efficient energy and performance management, which makes it possible to minimize the mutual dependency between energy and performance. Our technique reduces the number of VM migrations compared to recently proposed techniques and thereby also reduces the energy consumed by the network. In addition, it reduces SLA violations and the number of power state changes. A generic model of the data center network to simulate delayed communication and its impact on VM performance and network energy; in addition, a generic model of the server memory bus is presented. 
The memory-bus model also includes latency and energy-consumption models for different memory frequencies, which allows simulating memory delays and their impact on VM performance and memory energy consumption. Communication-aware and energy-efficient consolidation for parallel applications, which enables the dynamic discovery of communication patterns and the rescheduling of VMs via migration based on the discovered patterns; a new technique for discovering dynamic patterns is implemented that relies on signal processing of the VMs' network utilization, instead of using information from the hosts' virtual switching or from VM initiation. The results show that our method reduces average network utilization, saves energy by reducing the number of active switches, and provides better VM performance compared with CPU-based placement. Memory-aware VM consolidation for independent VMs, which exploits the diversity of the VMs' memory access to balance the load on the hosts' memory buses; the presented technique, memory-bus load balancing (MLB), reactively redistributes VMs with respect to their memory-bus utilization and uses VM migration to improve the performance of the overall system, and it is combined with dynamic voltage and frequency scaling of the memory to achieve greater energy savings.}, language = {en} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Association for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS), which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. students achieved low scores in comparison with students in other countries (p. 1). The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies, conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78).
They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. In particular, this dissertation carried out the following research tasks: - Studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam - Introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning - Carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam - Comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries - Studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies This dissertation will hopefully serve as a useful reference for pre-service teachers at universities of education, helping them to understand the nature of teaching and to develop their teaching careers.}, language = {en} } @phdthesis{Lotkowska2014, author = {Lotkowska, Magda Ewa}, title = {Functional analysis of MYB112 transcription factor in the model plant Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72131}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Transcription factors (TFs) are ubiquitous gene expression regulators and play essential roles in almost all biological processes. This Ph.D. project is primarily focused on the functional characterisation of MYB112 - a member of the R2R3-MYB TF family from the model plant Arabidopsis thaliana. This gene was selected due to its increased expression during senescence, based on previous qRT-PCR expression profiling of 1880 TFs in Arabidopsis leaves at three developmental stages (15 mm leaf, 30 mm leaf and 20\% yellowing leaf). MYB112 promoter-GUS fusion lines were generated to further investigate the expression pattern of MYB112. Employing transgenic approaches in combination with metabolomics and transcriptomics, we demonstrate that MYB112 exerts a major role in the regulation of plant flavonoid metabolism. We report enhanced and impaired anthocyanin accumulation in MYB112 overexpressors and MYB112-deficient mutants, respectively. Expression profiling reveals that MYB112 acts as a positive regulator of the transcription factor PAP1, leading to increased anthocyanin biosynthesis, and as a negative regulator of MYB12 and MYB111, which both control flavonol biosynthesis. We also identify MYB112 early-responsive genes using a combination of several approaches, including gene expression profiling (Affymetrix ATH1 microarrays and qRT-PCR) and transactivation assays in leaf mesophyll cell protoplasts. We show that MYB112 binds to an 8-bp DNA fragment containing the core sequence (A/T/G)(A/C)CC(A/T)(A/G/T)(A/C)(T/C).
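Since the core sequence above is given as a degenerate 8-bp motif, it maps directly onto a character-class regular expression; the following sketch scans a made-up promoter fragment for matches (the sequence is invented, and only the motif itself is taken from the text above).

    # Scan a DNA sequence for the 8-bp MYB112 core binding motif.
    import re

    MYB112_CORE = re.compile(r"[ATG][AC]CC[AT][AGT][AC][TC]")

    promoter = "GGTACCTATCGAACCAGATTTGCA"  # hypothetical promoter fragment
    for match in MYB112_CORE.finditer(promoter):
        print(match.start(), match.group())  # -> 11 AACCAGAT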
By electrophoretic mobility shift assay (EMSA) and chromatin immunoprecipitation coupled to qPCR (ChIP-qPCR), we demonstrate that MYB112 binds in vitro and in vivo to the MYB7 and MYB32 promoters, revealing them as direct downstream target genes. MYB TFs were previously reported to play an important role in controlling flavonoid biosynthesis in plants. Many factors acting upstream of the anthocyanin biosynthesis pathway show enhanced expression levels during nitrogen limitation or at elevated sucrose content. In addition to the mentioned conditions, other environmental parameters, including salinity or high light stress, may trigger anthocyanin accumulation. In contrast to several other MYB TFs affecting anthocyanin biosynthesis pathway genes, MYB112 expression is not controlled by nitrogen limitation or carbon excess, but rather is stimulated by salinity and high light stress. Thus, MYB112 constitutes a previously uncharacterised regulatory factor that modifies anthocyanin accumulation under conditions of abiotic stress.}, language = {en} } @phdthesis{Trost2014, author = {Trost, Gerda}, title = {Poly(A) Polymerase 1 (PAPS1) influences organ size and pathogen response in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72345}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Polyadenylation of pre-mRNAs is critical for efficient nuclear export, stability, and translation of the mature mRNAs, and thus for gene expression. The bulk of pre-mRNAs are processed by canonical nuclear poly(A) polymerase (PAPS). Both vertebrate and higher-plant genomes encode more than one isoform of this enzyme, and these are coexpressed in different tissues. However, in neither case is it known whether the isoforms fulfill different functions or polyadenylate distinct subsets of pre-mRNAs. This thesis shows that the three canonical nuclear PAPS isoforms in Arabidopsis are functionally specialized owing to their evolutionarily divergent C-terminal domains. A moderate loss-of-function mutant in PAPS1 leads to an increase in floral organ size, whereas leaf size is reduced. A strong loss-of-function mutation causes a male gametophytic defect, whereas a weak allele leads to reduced leaf growth. By contrast, plants lacking both PAPS2 and PAPS4 function are viable with wild-type leaf growth. Polyadenylation of SMALL AUXIN UP RNA (SAUR) mRNAs depends specifically on PAPS1 function. The resulting reduction in SAUR activity in paps1 mutants contributes to their reduced leaf growth, providing a causal link between polyadenylation of specific pre-mRNAs by a particular PAPS isoform and plant growth. Additionally, the opposite effects of PAPS1 on leaf and flower growth reflect the different identities of these organs. The overgrowth of paps1 mutant petals is due to increased recruitment of founder cells into early organ primordia, whereas the reduced leaf size is due to an ectopic pathogen response. This constitutive immune response leads to increased resistance to the biotrophic oomycete Hyaloperonospora arabidopsidis and reflects activation of the salicylic acid-independent signalling pathway downstream of ENHANCED DISEASE SUSCEPTIBILITY1 (EDS1)/PHYTOALEXIN DEFICIENT4 (PAD4). Immune responses are accompanied by intracellular redox changes. Consistent with this, the redox status of the chloroplast is altered in paps1-1 mutants. The molecular effects of the paps1-1 mutation were analysed using an RNA-sequencing approach that distinguishes between long- and short-tailed mRNAs.
The results shown here suggest the existence of an additional layer of regulation in plant, and possibly vertebrate, gene expression, whereby the relative activities of canonical nuclear PAPS isoforms control the length of de novo synthesized poly(A) tails and hence the expression of specific subsets of mRNAs.}, language = {en} } @phdthesis{Arnold2014, author = {Arnold, Anne}, title = {Modeling photosynthesis and related metabolic processes : from detailed examination to consideration of the metabolic context}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72277}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Mathematical modeling of biological systems is a powerful tool to systematically investigate the functions of biological processes and their relationship with the environment. To obtain accurate and biologically interpretable predictions, a modeling framework has to be devised whose assumptions best approximate the examined scenario and which copes with the trade-off in the complexity of the underlying mathematical description: attention to detail versus high coverage. Correspondingly, the system can be examined in detail on a smaller scale or in a simplified manner on a larger scale. In this thesis, the role of photosynthesis and its related biochemical processes in the context of plant metabolism was dissected by employing modeling approaches ranging from kinetic to stoichiometric models. The Calvin-Benson cycle, as the primary pathway of carbon fixation in C3 plants, is the initial step for producing the starch and sucrose necessary for plant growth. Based on an integrative analysis for model ranking applied to the largest compendium of (kinetic) models for the Calvin-Benson cycle, those suitable for the development of metabolic engineering strategies were identified. Driven by the question of why starch rather than sucrose is the predominant transitory carbon storage in higher plants, the metabolic costs for their synthesis were examined. The incorporation of the maintenance costs for the involved enzymes provided model-based support for the preference of starch as transitory carbon storage, exploiting only the stoichiometry of the synthesis pathways. Many photosynthetic organisms have to cope with processes which compete with carbon fixation, such as photorespiration, whose impact on plant metabolism is still controversial. A systematic model-oriented review provided a detailed assessment of the role of this pathway in inhibiting the rate of carbon fixation, bridging carbon and nitrogen metabolism, shaping the C1 metabolism, and influencing redox signal transduction. The demand for understanding photosynthesis in its metabolic context calls for the examination of the related processes of primary carbon metabolism. To this end, the Arabidopsis core model was assembled via a bottom-up approach. This large-scale model can be used to simulate photoautotrophic biomass production, as an indicator for plant growth, under so-called optimal, carbon-limiting, and nitrogen-limiting growth conditions. Finally, the introduced model was employed to investigate the effects of the environment, in particular of nitrogen, carbon, and energy sources, on the metabolic behavior. This resulted in a purely stoichiometry-based explanation for the experimental evidence of the preferred simultaneous acquisition of nitrogen in both forms, as nitrate and ammonium, for optimal growth in various plant species.
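As a generic illustration of how such stoichiometric growth simulations work (this is not the Arabidopsis core model itself, only the underlying principle on an invented three-reaction network), biomass production can be posed as a linear program: maximize the biomass flux subject to steady state, S v = 0, and bounds that encode nutrient limitation.

    # Toy flux-balance calculation: maximize biomass flux under an uptake bound.
    import numpy as np
    from scipy.optimize import linprog

    # Reactions: v1: uptake -> A, v2: A -> B, v3: B -> biomass
    S = np.array([[1.0, -1.0, 0.0],   # steady-state balance of metabolite A
                  [0.0, 1.0, -1.0]])  # steady-state balance of metabolite B
    c = [0.0, 0.0, -1.0]              # minimize -v3, i.e. maximize biomass flux
    bounds = [(0, 10), (0, None), (0, None)]  # nutrient uptake capped at 10

    res = linprog(c, A_eq=S, b_eq=np.zeros(2), bounds=bounds)
    print(res.x)  # [10. 10. 10.]: growth is limited by the nutrient uptake bound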
The findings presented in this thesis provide new insights into the behavior of plant systems, lend further support to existing views for which experimental evidence is mounting, and posit novel hypotheses for further directed large-scale experiments.}, language = {en} } @phdthesis{Serrano2014, author = {Serrano, Paloma}, title = {Methanogens from Siberian permafrost as models for life on Mars : response to simulated martian conditions and biosignature characterization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72299}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Mars is one of the best candidates among planetary bodies for supporting life. The presence of water in the form of ice and atmospheric vapour, together with the availability of biogenic elements and energy, indicates the possibility of hosting life as we know it. The occurrence of permanently frozen ground (permafrost) is a common phenomenon on Mars, and it shows multiple morphological analogies with terrestrial permafrost. Despite the extremely inhospitable conditions, highly diverse microbial communities inhabit terrestrial permafrost in large numbers. Among these are methanogenic archaea, anaerobic chemotrophic microorganisms that meet many of the metabolic and physiological requirements for survival in the martian subsurface. Moreover, methanogens from Siberian permafrost are extremely resistant to different types of physiological stress as well as to simulated martian thermo-physical and subsurface conditions, making them promising model organisms for potential life on Mars. The main aims of this investigation are to assess the survival of methanogenic archaea under Mars conditions, focusing on methanogens from Siberian permafrost, and to characterize their biosignatures by means of Raman spectroscopy, a powerful technology for microbial identification that will be used in the ExoMars mission. For this purpose, methanogens from Siberian permafrost and non-permafrost habitats were subjected to simulated martian desiccation, to an ultra-low subfreezing temperature (-80°C), and to Mars regolith (S-MRS and P-MRS) and atmospheric analogues. They were also exposed to different concentrations of perchlorate, a strong oxidant found in martian soils. Moreover, the biosignatures of methanogens were characterized at the single-cell level using confocal Raman microspectroscopy (CRM). The results showed survival and methane production in all methanogenic strains after simulated martian desiccation. After exposure to subfreezing temperatures, Siberian permafrost strains had a faster metabolic recovery, whereas the membranes of non-permafrost methanogens remained intact to a greater extent. The strain Methanosarcina soligelidi SMA-21 from Siberian permafrost showed significantly higher methane production rates than all other strains after the exposure to martian soil and atmospheric analogues, and all strains survived the presence of perchlorate at the concentration found on Mars. Furthermore, CRM analyses revealed remarkable differences in the overall chemical composition of permafrost and non-permafrost strains of methanogens, regardless of their phylogenetic relationship. The convergence of the chemical composition in non-sister permafrost strains may be a consequence of adaptation to the environment and could explain their greater resistance compared to the non-permafrost strains.
As part of this study, Raman spectroscopy was evaluated as an analytical technique for remote detection of methanogens embedded in a mineral matrix. This thesis contributes to the understanding of the survival limits of methanogenic archaea under simulated martian conditions to further assess the hypothetical existence of life similar to methanogens on the martian subsurface. In addition, the overall chemical composition of methanogens was characterized for the first time by means of confocal Raman microspectroscopy, with potential implications for astrobiological research.}, language = {en} } @phdthesis{Lorey2014, author = {Lorey, Johannes}, title = {What's in a query : analyzing, predicting, and managing linked data access}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The term Linked Data refers to connected information sources comprising structured data about a wide range of topics and for a multitude of applications. In recent years, the conceptional and technical foundations of Linked Data have been formalized and refined. To this end, well-known technologies have been established, such as the Resource Description Framework (RDF) as a Linked Data model or the SPARQL Protocol and RDF Query Language (SPARQL) for retrieving this information. Whereas most research has been conducted in the area of generating and publishing Linked Data, this thesis presents novel approaches for improved management. In particular, we illustrate new methods for analyzing and processing SPARQL queries. Here, we present two algorithms suitable for identifying structural relationships between these queries. Both algorithms are applied to a large number of real-world requests to evaluate the performance of the approaches and the quality of their results. Based on this, we introduce different strategies enabling optimized access of Linked Data sources. We demonstrate how the presented approach facilitates effective utilization of SPARQL endpoints by prefetching results relevant for multiple subsequent requests. Furthermore, we contribute a set of metrics for determining technical characteristics of such knowledge bases. To this end, we devise practical heuristics and validate them through thorough analysis of real-world data sources. We discuss the findings and evaluate their impact on utilizing the endpoints. Moreover, we detail the adoption of a scalable infrastructure for improving Linked Data discovery and consumption. As we outline in an exemplary use case, this platform is eligible both for processing and provisioning the corresponding information.}, language = {en} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission that is expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. 
The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays. The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results for this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre. The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. The presented analysis features the lowest energy threshold that has ever been reached in ground-based γ-ray astronomy, opening a new window to the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: it provides the (preferably automated) means to extract and evaluate information from their artifacts, such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide their users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization. This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow-related aspects of software traces from shared-memory software systems: a trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level.
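To illustrate the underlying idea of such an icicle-plot trace visualization (a sketch of the general layout principle, not the thesis' actual implementation; the trace below is invented), each function call becomes a rectangle spanning its time interval, and nested calls are placed one level deeper.

    # Flatten a function-call tree into icicle-plot rectangles.
    from dataclasses import dataclass, field

    @dataclass
    class Call:
        name: str
        start: float
        end: float
        children: list = field(default_factory=list)

    def layout(call, depth=0, rects=None):
        # Each call yields (x, width, depth, label); children go one level deeper.
        if rects is None:
            rects = []
        rects.append((call.start, call.end - call.start, depth, call.name))
        for child in call.children:
            layout(child, depth + 1, rects)
        return rects

    trace = Call("main", 0.0, 10.0, [
        Call("parse", 0.5, 3.0),
        Call("compute", 3.0, 9.5, [Call("kernel", 4.0, 8.0)]),
    ])
    for rect in layout(trace):
        print(rect)  # e.g. (3.0, 6.5, 1, 'compute')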
The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading, such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: an extension to 3-dimensional space allows for the simultaneous use of additional variables, and interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @phdthesis{Schollaen2014, author = {Schollaen, Karina}, title = {Tracking climate signals in tropical trees}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71947}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The tropical warm pool waters surrounding Indonesia are one of the equatorial heat and moisture sources that are considered a driving force of the global climate system. The climate in Indonesia is dominated by the equatorial monsoon system, and has been linked to El Ni{\~n}o-Southern Oscillation (ENSO) events, which often result in severe droughts or floods over Indonesia, with profound societal and economic impacts on the population of the world's fourth most populous country. The latest IPCC report states that ENSO will remain the dominant mode in the tropical Pacific with global effects in the 21st century, and that ENSO-related precipitation extremes will intensify. However, no common agreement exists among climate simulation models on the projected change in ENSO and the Australian-Indonesian Monsoon. Exploring high-resolution palaeoclimate archives, like tree rings or varved lake sediments, provides insights into the natural climate variability of the past, and thus helps to improve and validate simulations of future climate change. Centennial tree-ring stable isotope records | Within this doctoral thesis the main goal was to explore the potential of tropical tree rings to record climate signals and to use them as palaeoclimate proxies. In detail, stable carbon (δ13C) and oxygen (δ18O) isotopes were extracted from teak trees in order to establish the first well-replicated centennial (AD 1900-2007) stable isotope records for Java, Indonesia. Furthermore, different climatic variables were tested as to whether they show significant correlations with tree-ring proxies (ring width, δ13C, δ18O). Moreover, highly resolved intra-annual oxygen isotope data were established to assess the transfer of the seasonal precipitation signal into the tree rings. Finally, the established oxygen isotope record was used to reveal possible correlations with ENSO events. Methodological achievements | A second goal of this thesis was to assess the applicability of novel techniques which facilitate and optimize high-resolution and high-throughput stable isotope analysis of tree rings.
Two different UV-laser-based microscopic dissection systems were evaluated as novel sampling tools for high-resolution stable isotope analysis. Furthermore, an improved procedure of tree-ring dissection from thin cellulose laths for stable isotope analysis was designed. The most important findings of this thesis are: I) The herein presented novel sampling techniques improve stable isotope analyses for tree-ring studies in terms of precision, efficiency and quality. UV-laser-based microdissection serves as a valuable tool for sampling plant tissue at ultrahigh resolution and with unprecedented precision. II) A guideline for a modified method of cellulose extraction from whole-wood cross-sections and subsequent tree-ring dissection was established. The novel technique optimizes the stable isotope analysis process in two ways: faster, high-throughput cellulose extraction and precise tree-ring separation at annual and higher resolution. III) The centennial tree-ring stable isotope records reveal significant correlations with regional precipitation. High-resolution stable oxygen isotope values furthermore allow distinguishing between dry-season and rainy-season rainfall. IV) The δ18O record reveals significant correlations with different ENSO flavors and demonstrates the importance of considering ENSO flavors when interpreting palaeoclimatic data in the tropics. The findings of my dissertation show that seasonally resolved δ18O records from Indonesian teak trees are a valuable proxy for multi-centennial reconstructions of regional precipitation variability (monsoon signals) and large-scale ocean-atmosphere phenomena (ENSO) for the Indo-Pacific region. Furthermore, the novel methodological achievements offer many unexplored avenues for multidisciplinary research in high-resolution palaeoclimatology.}, language = {en} } @phdthesis{Girbig2014, author = {Girbig, Dorothee}, title = {Analysing concerted criteria for local dynamic properties of metabolic systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72017}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Metabolic systems tend to exhibit steady states that can be measured in terms of their concentrations and fluxes. These measurements can be regarded as a phenotypic representation of all the complex interactions and regulatory mechanisms taking place in the underlying metabolic network. Such interactions determine the system's response to external perturbations and are responsible, for example, for its asymptotic stability or for oscillatory trajectories around the steady state. However, determining these perturbation responses in the absence of fully specified kinetic models remains an important challenge of computational systems biology. Structural kinetic modeling (SKM) is a framework to analyse whether a metabolic steady state remains stable under perturbation, without requiring detailed knowledge about individual rate equations. It provides a parameterised representation of the system's Jacobian matrix in which the model parameters encode information about the enzyme-metabolite interactions. Stability criteria can be derived by generating a large number of structural kinetic models (SK-models) with randomly sampled parameter sets and evaluating the resulting Jacobian matrices. The parameter space can be analysed statistically in order to detect network positions that contribute significantly to the perturbation response.
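The following is a minimal sketch of this Monte-Carlo procedure on a random stand-in network (the stoichiometry and the uniform sampling ranges are invented for illustration; in SKM proper the Jacobian is built from normalized stoichiometry and sampled saturation parameters): sample elasticities, build a Jacobian, and call the state stable if all eigenvalues have negative real parts.

    # Monte-Carlo stability screening in the spirit of structural kinetic modeling.
    import numpy as np

    rng = np.random.default_rng(0)
    n_metabolites, n_reactions = 3, 4
    N = rng.integers(-1, 2, size=(n_metabolites, n_reactions)).astype(float)

    def sample_jacobian():
        theta = rng.uniform(0.0, 1.0, size=(n_reactions, n_metabolites))  # elasticities
        return N @ theta  # structural Jacobian (up to positive scaling factors)

    n_samples = 10_000
    stable = sum(
        bool(np.all(np.linalg.eigvals(sample_jacobian()).real < 0))
        for _ in range(n_samples)
    )
    print(f"fraction of stable samples: {stable / n_samples:.3f}")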
Because the sampled parameters are equivalent to the elasticities used in metabolic control analysis (MCA), the results are easy to interpret biologically. In this project, the SKM framework was extended by several novel methodological improvements. These improvements were evaluated in a simulation study using a set of small example pathways with simple Michaelis-Menten rate laws. Afterwards, a detailed analysis of the dynamic properties of the neuronal TCA cycle was performed in order to demonstrate how the new insights obtained in this work could be used for the study of complex metabolic systems. The first improvement was achieved by examining the biological feasibility of the elasticity combinations created during Monte Carlo sampling. Using a set of small example systems, the findings showed that the majority of sampled SK-models would yield negative kinetic parameters if they were translated back into kinetic models. To overcome this problem, a simple criterion was formulated that eliminates such infeasible models, and the application of this criterion changed the conclusions of the SKM experiment. The second improvement of this work was the application of supervised machine-learning approaches in order to analyse SKM experiments. So far, SKM experiments have focused on the detection of individual enzymes to identify single reactions important for maintaining stability or oscillatory trajectories. In this work, this approach was extended by demonstrating how SKM enables the detection of ensembles of enzymes or metabolites that act together in an orchestrated manner to coordinate the pathway's response to perturbations. In doing so, stable and unstable states served as class labels, and classifiers were trained to detect elasticity regions associated with stability and instability. Classification was performed using decision trees and relevance vector machines (RVMs). The decision trees produced good classification accuracy in terms of model bias and generalizability. RVMs outperformed decision trees when applied to small models, but encountered severe problems when applied to larger systems because of their high runtime requirements. The decision-tree rulesets were analysed statistically and individually in order to explore the role of individual enzymes or metabolites in controlling the system's trajectories around steady states. The third improvement of this work was the establishment of a relationship between the SKM framework and the related field of MCA. In particular, it was shown how the sampled elasticities could be converted to flux control coefficients, which were then investigated for their predictive information content in classifier training. After evaluation on the small example pathways, the methodology was used to study two steady states of the neuronal TCA cycle with respect to their intrinsic mechanisms responsible for stability or instability. The findings showed that several elasticities were jointly coordinated to control stability, and that mutations in the enzyme alpha-ketoglutarate dehydrogenase were the main source of potential instabilities.}, language = {en} } @phdthesis{Dyachenko2014, author = {Dyachenko, Evgeniya}, title = {Elliptic problems with small parameter}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72056}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In this thesis we consider diverse aspects of existence and correctness of asymptotic solutions to elliptic differential and pseudodifferential equations.
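A standard model problem of this kind, given here only to illustrate where the small parameter enters (the thesis treats the general setting, not this particular equation), is

\begin{align*}
  -\varepsilon^2 \Delta u_\varepsilon + a(x)\, u_\varepsilon &= f(x) && \text{in } \Omega, \\
  u_\varepsilon &= g(x) && \text{on } \partial\Omega, \qquad a(x) \ge a_0 > 0,
\end{align*}

with a formal solution sought as a power series in the small parameter, $u_\varepsilon \sim \sum_{k \ge 0} \varepsilon^k u_k(x)$, supplemented by boundary-layer corrections near $\partial\Omega$.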
We begin our studies with the case of a general elliptic boundary value problem in partial derivatives. A small parameter enters the coefficients of the main equation as well as the boundary conditions. Such equations have already been investigated satisfactorily, but certain theoretical deficiencies remain. Our aim is to present the general theory of elliptic problems with a small parameter. For this purpose we examine in detail the case of a bounded domain with a smooth boundary. First of all, we construct formal solutions as power series in the small parameter. Then we examine their asymptotic properties. It suffices to carry out sharp two-sided \emph{a priori} estimates for the operators of boundary value problems which are uniform in the small parameter. Such estimates fail to hold in the function spaces used in classical elliptic theory. To circumvent this limitation we exploit norms depending on the small parameter for functions defined on a bounded domain. Similar norms are widely used in the literature, but their properties have not been investigated extensively. Our theoretical investigation shows that the usual elliptic technique can be correctly carried out in these norms. The obtained results also allow one to extend the norms to compact manifolds with boundary. We complete our investigation by formulating algebraic conditions on the operators and showing their equivalence to the existence of a priori estimates. In the second step, we extend the concept of ellipticity with a small parameter to more general classes of operators. Firstly, we want to compare the asymptotic patterns of the obtained series with expansions for similar differential problems. Therefore we investigate the heat equation in a bounded domain with a small parameter multiplying the time derivative. In this case the characteristics touch the boundary at a finite number of points. It is known a priori that the solutions are not regular in a neighbourhood of such points. We suppose, moreover, that the boundary at such points can be non-smooth, with cuspidal singularities. We find a formal asymptotic expansion and show that when a set of parameters passes through a threshold value, the expansions fail to be asymptotic. The last part of the work is devoted to the general concept of ellipticity with a small parameter. Several theoretical extensions to pseudodifferential operators have already been suggested in previous studies. As a new contribution we draw on the analysis on manifolds with edge singularities, which allows us to consider wider classes of perturbed elliptic operators. We show that the introduced classes possess a priori estimates of elliptic type. As a further application we demonstrate how the developed tools can be used to reduce singularly perturbed problems to regular ones.}, language = {en} } @phdthesis{Abedjan2014, author = {Abedjan, Ziawasch}, title = {Improving RDF data with data mining}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71334}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Linked Open Data (LOD) comprises very many and often large public data sets and knowledge bases. Those datasets are mostly presented in the RDF triple structure of subject, predicate, and object, where each triple represents a statement or fact. Unfortunately, the heterogeneity of available open data requires significant integration steps before it can be used in applications.
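To make the triple structure tangible, and to preview the association-rule mining described below, the following sketch applies a basket-style "mining configuration" (context = subject, target = predicate) to a handful of invented triples; the data and the configuration choice are illustrative only.

    # Group RDF triples by subject and count co-occurring predicates.
    from collections import defaultdict
    from itertools import combinations

    triples = [
        ("Berlin", "type", "City"), ("Berlin", "country", "Germany"),
        ("Berlin", "population", "3.5M"), ("Paris", "type", "City"),
        ("Paris", "country", "France"), ("Paris", "population", "2.2M"),
    ]

    # One "transaction" of predicates per subject, as in market-basket analysis.
    transactions = defaultdict(set)
    for s, p, o in triples:
        transactions[s].add(p)

    # Support counts for predicate pairs across all transactions.
    support = defaultdict(int)
    for predicates in transactions.values():
        for pair in combinations(sorted(predicates), 2):
            support[pair] += 1
    print(dict(support))  # each predicate pair co-occurs for both subjects here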
Meta information, such as ontological definitions and exact range definitions of predicates, is desirable and ideally provided by an ontology. However, in the context of LOD, ontologies are often incomplete or simply not available. Thus, it is useful to automatically generate meta information, such as ontological dependencies, range definitions, and topical classifications. Association rule mining, which was originally applied for sales analysis on transactional databases, is a promising and novel technique to explore such data. We designed an adaptation of this technique for mining RDF data and introduce the concept of "mining configurations", which allows us to mine RDF data sets in various ways. Different configurations enable us to identify schema and value dependencies that in combination result in interesting use cases. To this end, we present rule-based approaches for auto-completion, data enrichment, ontology improvement, and query relaxation. Auto-completion remedies the problem of inconsistent ontology usage, providing an editing user with a sorted list of commonly used predicates. A combination of different configurations extends this approach to create completely new facts for a knowledge base. We present two approaches for fact generation: a user-based approach, where a user selects the entity to be amended with new facts, and a data-driven approach, where an algorithm discovers entities that have to be amended with missing facts. As knowledge bases constantly grow and evolve, another approach to improve the usage of RDF data is to improve existing ontologies. Here, we present an association-rule-based approach to reconcile ontology and data. Interlacing different mining configurations, we derive an algorithm to discover synonymously used predicates. Those predicates can be used to expand query results and to support users during query formulation. We provide a wide range of experiments on real-world datasets for each use case. The experiments and evaluations show the added value of association rule mining for the integration and usability of RDF data and confirm the appropriateness of our mining configuration methodology.}, language = {en} } @phdthesis{Radeff2014, author = {Radeff, Giuditta}, title = {Geohistory of the Central Anatolian Plateau southern margin (southern Turkey)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71865}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The Adana Basin of southern Turkey, situated at the SE margin of the Central Anatolian Plateau, is ideally located to record Neogene topographic and tectonic changes in the easternmost Mediterranean realm. Using industry seismic reflection data, we correlate 34 seismic profiles with corresponding exposed units in the Adana Basin. The time-depth conversion of the interpreted seismic profiles allows us to reconstruct the subsidence curve of the Adana Basin and to outline the occurrence of a major increase in both subsidence and sedimentation rates at 5.45 - 5.33 Ma, leading to the deposition of almost 1500 km³ of conglomerates and marls. Our provenance analysis of the conglomerates reveals that most of the sediment derives from areas at and north of the SE margin of the Central Anatolian Plateau. A comparison of these results with the composition of recent conglomerates and the present drainage basins indicates major changes between the late Messinian and present-day source areas.
We suggest that these changes in source areas result from uplift and ensuing erosion of the SE margin of the plateau. This hypothesis is supported by the comparison of the Adana Basin subsidence curve with the subsidence curve of the Mut Basin, a mainly Neogene basin located on top of the Central Anatolian Plateau southern margin, showing that the Adana Basin subsidence event is coeval with an uplift episode of the plateau southern margin. The collection of several fault measurements in the Adana region shows different deformation styles for the NW and SE margins of the Adana Basin. The weakly seismic NW portion of the basin is characterized by extensional and transtensional structures cutting Neogene deposits, likely accommodating the differential uplift occurring between the basin and the SE margin of the plateau. We interpret the tectonic evolution of the southern flank of the Central Anatolian Plateau and the coeval subsidence and sedimentation in the Adana Basin to be related to deep lithospheric processes, particularly lithospheric delamination and slab break-off.}, language = {en} } @phdthesis{BacskaiAtkari2014, author = {Bacskai-Atkari, Julia}, title = {The syntax of comparative constructions : operators, ellipsis phenomena and functional left peripheries}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-301-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71255}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 310}, year = {2014}, abstract = {Adopting a minimalist framework, the dissertation provides an analysis for the syntactic structure of comparatives, with special attention paid to the derivation of the subclause. The proposed account explains how the comparative subclause is connected to the matrix clause, how the subclause is formed in the syntax, and what additional processes contribute to its final structure. In addition, it casts light upon these problems in cross-linguistic terms and provides a model that allows for synchronic and diachronic differences. This also enables one to give a more adequate explanation for the phenomena found in English comparatives, since the properties of English structures can then be linked to general settings of the language and hence need no longer be considered idiosyncratic features of the grammar of English. First, the dissertation provides a unified analysis of degree expressions, relating the structure of comparatives to that of other degrees. It is shown that gradable adjectives are located within a degree phrase (DegP), which in turn projects a quantifier phrase (QP), and that these two functional layers are always present, irrespective of whether there is a phonologically visible element in these layers. Second, the dissertation presents a novel analysis of Comparative Deletion by reducing it to an overtness constraint holding on operators: in this way, it is reduced to morphological differences, and cross-linguistic variation is not conditioned by postulating an arbitrary parameter. Cross-linguistic differences are ultimately dependent on whether a language has overt operators equipped with the relevant features, [+compr] and [+rel]. Third, the dissertation provides an adequate explanation for the phenomenon of Attributive Comparative Deletion, as attested in English, by relating it to the regular mechanism of Comparative Deletion.
I assume that Attributive Comparative Deletion is not a universal phenomenon, and that its presence in English can be conditioned by independent, more general rules, while the absence of such restrictions leads to its absence in other languages. Fourth, the dissertation accounts for certain phenomena related to diachronic changes, examining how changes in the status of comparative operators led to changes in whether Comparative Deletion is attested in a given language: I argue that only operators without a lexical XP can be grammaticalised. The underlying mechanisms are essentially general economy principles, and hence the processes are not language-specific or exceptional. Fifth, the dissertation accounts for optional ellipsis processes that play a crucial role in the derivation of typical comparative subclauses. These processes are not directly related to the structure of degree expressions, and hence to the elimination of the quantified expression from the subclause; nevertheless, they are shown to interact with the mechanisms underlying Comparative Deletion or the absence thereof.}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. Thirdly, we present a software tool providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @phdthesis{Federici2014, author = {Federici, Simone}, title = {Gamma-ray studies of the young shell-type SNR RX J1713.7-3946}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71734}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {One of the most significant current discussions in astrophysics relates to the origin of high-energy cosmic rays.
According to our current knowledge, the abundance distribution of the elements in cosmic rays at their point of origin indicates, within plausible error limits, that they were initially formed by nuclear processes in the interiors of stars. It is also believed that their energy distribution up to 10^18 eV has Galactic origins. But even though knowledge about potential sources of cosmic rays above ~10^15 eV, the "knee" of the cosmic-ray spectrum, is quite poor, there seems to be a wide consensus that up to the knee supernova remnants are the most likely candidates. Evidence for this comes from observations of non-thermal X-ray radiation, requiring synchrotron electrons with energies up to 10^14 eV, precisely in supernova remnants. To date, however, there is no conclusive evidence that they produce nuclei, the dominant component of cosmic rays, in addition to electrons. In light of this dearth of evidence, γ-ray observations of supernova remnants offer the most promising direct way to confirm whether or not these astrophysical objects are indeed the main source of cosmic-ray nuclei below the knee. Recent observations with space- and ground-based observatories have established shell-type supernova remnants as GeV-to-TeV γ-ray sources. The interpretation of these observations is, however, complicated by the different radiation processes, leptonic and hadronic, that can produce similar fluxes in this energy band, rendering the nature of the emission itself ambiguous. The aim of this work is to develop a deeper understanding of these radiation processes for a particular shell-type supernova remnant, namely RX J1713.7-3946, using observations of the LAT instrument onboard the Fermi Gamma-Ray Space Telescope. Furthermore, to obtain accurate spectra and morphology maps of the emission associated with this supernova remnant, an improved model of the diffuse Galactic γ-ray emission background is developed. The analyses of RX J1713.7-3946 carried out with this improved background show that the hard Fermi-LAT spectrum cannot be ascribed to hadronic emission, thus leading to the conclusion that the leptonic scenario is instead the most natural picture for the high-energy γ-ray emission of RX J1713.7-3946. The leptonic scenario, however, does not rule out the possibility that cosmic-ray nuclei are accelerated in this supernova remnant, but it suggests that the ambient density may not be high enough to produce significant hadronic γ-ray emission. Further investigations of other supernova remnants using the improved background developed in this work could allow compelling population studies, and hence prove or disprove the origin of Galactic cosmic-ray nuclei in these astrophysical objects. A breakthrough regarding the identification of the radiation mechanisms could lastly be achieved with a new generation of instruments such as CTA.}, language = {en} } @phdthesis{Trabant2014, author = {Trabant, Christoph}, title = {Ultrafast photoinduced phase transitions in complex materials probed by time-resolved resonant soft x-ray diffraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71377}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In processing and data storage, mainly ferromagnetic (FM) materials are being used. As physical limits are approached, new concepts have to be found for faster and smaller switches, higher data densities, and greater energy efficiency.
Some of the discussed new concepts involve the material classes of correlated oxides and materials with antiferromagnetic coupling. Their applicability depends critically on their switching behavior, i.e., on how fast and how energy-efficiently material properties can be manipulated. This thesis presents investigations of ultrafast non-equilibrium phase transitions in such new materials. In transition metal oxides (TMOs), the coupling of different degrees of freedom and the resulting low-energy excitation spectrum often lead to spectacular changes of macroscopic properties (colossal magnetoresistance, superconductivity, metal-to-insulator transitions), often accompanied by nanoscale order of spins, charges and orbital occupation and by lattice distortions, which make these materials attractive. Magnetite served as a prototype for functional TMOs, showing a metal-to-insulator transition (MIT) at T = 123 K. By probing the charge and orbital order as well as the structure after an optical excitation, we found that the electronic order and the structural distortion, characteristics of the insulating phase in thermal equilibrium, are destroyed within the experimental resolution of 300 fs. The MIT itself occurs on a 1.5 ps timescale. This shows that MITs in functional materials are several thousand times faster than switching processes in semiconductors. Recently, ferrimagnetic and antiferromagnetic (AFM) materials have become interesting. It was shown in ferrimagnetic GdFeCo that the transfer of angular momentum between two opposed FM subsystems with different time constants leads to a switching of the magnetization after laser-pulse excitation. In addition, it was theoretically predicted that demagnetization dynamics in AFM materials should occur faster than in FM materials, as no net angular momentum has to be transferred out of the spin system. We investigated two different AFM materials in order to learn more about their ultrafast dynamics. In Ho, a metallic AFM below T ≈ 130 K, we found that AFM order can be destroyed not only faster but also ten times more energy-efficiently than FM order in comparable metals. In EuTe, an AFM semiconductor below T ≈ 10 K, we compared the loss of magnetization and the laser-induced structural distortion in one and the same experiment. Our experiment shows that they are effectively disentangled. An exception is an ultrafast release of lattice dynamics, which we assign to the release of magnetostriction. The results presented here were obtained with time-resolved resonant soft x-ray diffraction at the Femtoslicing source of the Helmholtz-Zentrum Berlin and at the free-electron laser in Stanford (LCLS). In addition, the development and setup of a new UHV diffractometer for these experiments is reported.}, language = {en} } @phdthesis{Ermeydan2014, author = {Ermeydan, Mahmut Ali}, title = {Wood cell wall modification with hydrophobic molecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71325}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Wood is used for many applications because of its excellent mechanical properties and relative abundance, and because it is a renewable resource. However, its wider utilization as an engineering material is limited because it swells and shrinks upon moisture changes and is susceptible to degradation by microorganisms and/or insects. Chemical modifications of wood have been shown to improve dimensional stability, water repellence and/or durability, thus increasing the potential service life of wood materials.
However, current treatments are limited because it is difficult to introduce and fix such modifications deep inside the tissue and the cell wall. Within the scope of this thesis, novel chemical modification methods for wood cell walls were developed to improve both the dimensional stability and the water repellence of wood material. These methods were partly inspired by heartwood formation in living trees, a process that, for some species, results in the insertion of hydrophobic chemical substances into the cell walls of already-dead wood cells. In the first part of this thesis, a chemistry to modify wood cell walls was used that was inspired by the natural process of heartwood formation. Commercially available hydrophobic flavonoid molecules were effectively inserted into the cell walls of spruce, a softwood species with low natural durability, after a tosylation treatment, to obtain "artificial heartwood". Flavonoid-inserted cell walls show reduced moisture absorption, resulting in better dimensional stability, water repellency, and increased hardness. This approach is quite different from established modifications, which mainly address hydroxyl groups of cell wall polymers with hydrophilic substances. In the second part of the work, in-situ styrene polymerization inside the tosylated cell walls was studied. It is known that there is weak adhesion between hydrophobic polymers and hydrophilic cell wall components. The hydrophobic styrene monomers were inserted into the tosylated wood cell walls and subsequently polymerized to form polystyrene in the cell walls, which increased the dimensional stability of the bulk wood material and reduced the water uptake of the cell walls considerably when compared to controls. In the third part of the work, the grafting of another hydrophobic and also biodegradable polymer, poly(ɛ-caprolactone), into the wood cell walls by ring-opening polymerization of ɛ-caprolactone was studied at mild temperatures. The results indicated that polycaprolactone attached within the cell walls, causing permanent swelling of the cell walls of up to 5\%. The dimensional stability of the bulk wood material increased by 40\%, and water absorption was reduced by more than 35\%. A fully biodegradable and hydrophobized wood material was obtained with this method, which reduces the disposal problem of modified wood materials and has improved properties that extend the material's service life. Starting from a bio-inspired approach, which showed great promise as an alternative to standard cell wall modifications, we demonstrated the possibility of inserting hydrophobic molecules into the cell walls and supported this with in-situ styrene and ɛ-caprolactone polymerization in the cell walls. This thesis shows that, despite the extensive knowledge and long history of using wood as a material, there is still room for novel chemical modifications that could have a high impact on improving wood properties.}, language = {en} }