@phdthesis{Dahlsten2014, author = {Dahlsten, Ulf}, title = {World market governance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70168}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Democratic capitalism or liberal democracy, as the successful marriage of convenience between market liberalism and democracy is sometimes called, is in trouble. The market economy system has become global and there is a growing mismatch with the territoriality of the nation-states. The functional global networks and inter-governmental order can no longer keep pace with the rapid development of the global market economy and regulatory capture is all too common. Concepts like de-globalization, self-regulation, and global government are floated in the debate. The alternatives are analysed and found to be improper, inadequate or plainly impossible. The proposed route is instead to accept that the global market economy has developed into an independent fundamental societal system that needs its own governance. The suggestion is World Market Governance based on the Rule of Law in order to shape the fitness environment for the global market economy and strengthen the nation-states so that they can regain the sovereignty to decide upon the social and cultural conditions in each country. Elements in the proposed Rule of Law are international legislation decided by an Assembly supported by a Council, and an independent Judiciary. Existing international organisations would function as executors. The need for broad sustained demand for regulations in the common interest is identified.}, language = {en} } @phdthesis{Ermeydan2014, author = {Ermeydan, Mahmut Ali}, title = {Wood cell wall modification with hydrophobic molecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71325}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Wood is used for many applications because of its excellent mechanical properties, its relative abundance, and because it is a renewable resource. However, its wider utilization as an engineering material is limited because it swells and shrinks upon moisture changes and is susceptible to degradation by microorganisms and/or insects. Chemical modifications of wood have been shown to improve dimensional stability, water repellence and/or durability, thus increasing the potential service-life of wood materials. However, current treatments are limited because it is difficult to introduce and fix such modifications deep inside the tissue and cell wall. Within the scope of this thesis, novel chemical modification methods of wood cell walls were developed to improve both dimensional stability and water repellence of wood material. These methods were partly inspired by heartwood formation in living trees, a process that for some species results in an insertion of hydrophobic chemical substances into the cell walls of already dead wood cells. In the first part of this thesis, a chemistry to modify wood cell walls was used which was inspired by the natural process of heartwood formation. Commercially available hydrophobic flavonoid molecules were effectively inserted in the cell walls of spruce, a softwood species with low natural durability, after a tosylation treatment to obtain "artificial heartwood". Flavonoid-inserted cell walls show reduced moisture absorption, resulting in better dimensional stability, water repellency and increased hardness.
This approach was quite different from established modifications, which mainly address hydroxyl groups of cell wall polymers with hydrophilic substances. In the second part of the work, in-situ styrene polymerization inside the tosylated cell walls was studied. It is known that there is weak adhesion between hydrophobic polymers and hydrophilic cell wall components. The hydrophobic styrene monomers were inserted into the tosylated wood cell walls for further polymerization to form polystyrene in the cell walls, which increased the dimensional stability of the bulk wood material and reduced water uptake of the cell walls considerably when compared to controls. In the third part of the work, grafting of another hydrophobic and also biodegradable polymer, poly(ɛ-caprolactone), into the wood cell walls by ring-opening polymerization of ɛ-caprolactone was studied at mild temperatures. Results indicated that polycaprolactone attached within the cell walls and caused permanent swelling of the cell walls of up to 5\%. Dimensional stability of the bulk wood material increased by 40\% and water absorption was reduced by more than 35\%. A fully biodegradable and hydrophobized wood material was obtained with this method, which reduces the disposal problem of modified wood materials and has improved properties that extend the material's service-life. Starting from a bio-inspired approach which showed great promise as an alternative to standard cell wall modifications, we showed the possibility of inserting hydrophobic molecules in the cell walls and supported this finding with in-situ styrene and ɛ-caprolactone polymerization in the cell walls. It was shown in this thesis that, despite the extensive knowledge and long history of using wood as a material, there is still room for novel chemical modifications which could have a high impact on improving wood properties.}, language = {en} } @phdthesis{Lorey2014, author = {Lorey, Johannes}, title = {What's in a query : analyzing, predicting, and managing linked data access}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The term Linked Data refers to connected information sources comprising structured data about a wide range of topics and for a multitude of applications. In recent years, the conceptual and technical foundations of Linked Data have been formalized and refined. To this end, well-known technologies have been established, such as the Resource Description Framework (RDF) as a Linked Data model or the SPARQL Protocol and RDF Query Language (SPARQL) for retrieving this information. Whereas most research has been conducted in the area of generating and publishing Linked Data, this thesis presents novel approaches for improved management. In particular, we illustrate new methods for analyzing and processing SPARQL queries. Here, we present two algorithms suitable for identifying structural relationships between these queries. Both algorithms are applied to a large number of real-world requests to evaluate the performance of the approaches and the quality of their results. Based on this, we introduce different strategies enabling optimized access to Linked Data sources. We demonstrate how the presented approach facilitates effective utilization of SPARQL endpoints by prefetching results relevant for multiple subsequent requests. Furthermore, we contribute a set of metrics for determining technical characteristics of such knowledge bases.
To this end, we devise practical heuristics and validate them through thorough analysis of real-world data sources. We discuss the findings and evaluate their impact on utilizing the endpoints. Moreover, we detail the adoption of a scalable infrastructure for improving Linked Data discovery and consumption. As we outline in an exemplary use case, this platform is suitable both for processing and for provisioning the corresponding information.}, language = {en} } @phdthesis{Burdack2014, author = {Burdack, Doreen}, title = {Water management policies and their impact on irrigated crop production in the Murray-Darling Basin, Australia}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-306-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72245}, school = {Universit{\"a}t Potsdam}, pages = {307}, year = {2014}, abstract = {The economic impact analysis contained in this book shows how irrigation farming is particularly susceptible when applying certain water management policies in the Australian Murray-Darling Basin, one of the world's largest river basins and Australia's most fertile region. By comparing different pricing and non-pricing water management policies with the help of the Water Integrated Market Model, it is found that the impact of water-demand-reducing policies is most severe on crops that need to be intensively irrigated and are at the same time less water-productive. A combination of increasingly frequent and severe droughts and the application of policies that decrease agricultural water demand in the same region will create a situation in which the highly water-dependent crops rice and cotton cannot be cultivated at all.}, language = {en} } @phdthesis{Schmiedchen2014, author = {Schmiedchen, Bettina}, title = {Vitamin D and its linkage between chronic kidney disease and cardiovascular integrity}, pages = {113}, year = {2014}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: It provides the - preferably automated - means to extract and evaluate information from their artifacts such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide their users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization.
This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task, due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow related aspects of software traces from shared-memory software systems: A trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level. The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: An extension to 3-dimensional space allows for the use of additional variables simultaneously; interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @phdthesis{Tian2014, author = {Tian, Fang}, title = {Vegetation and environmental changes on millennial, centennial and decadal time-scales in central Mongolia and their driving forces}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2014}, language = {en} } @phdthesis{Cao2014, author = {Cao, Xianyong}, title = {Vegetation and climate change in eastern continental Asia during the last 22 ka inferred from pollen data synthesis}, pages = {156}, year = {2014}, language = {en} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Project for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS) which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. 
students achieved low scores in comparison with students in other countries (p. 1). The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78). They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. In particular, this dissertation carried out the following research tasks: - Studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam - Introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning - Carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam - Comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries - Studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies Hopefully, this dissertation will be a useful reference material for pre-service teachers at education universities to understand the nature of teaching and develop their teaching career.}, language = {en} } @phdthesis{Hutter2014, author = {Hutter, Anne}, title = {Unveiling the epoch of reionization by simulations and high-redshift galaxies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-76998}, school = {Universit{\"a}t Potsdam}, pages = {vi, 155}, year = {2014}, abstract = {The Epoch of Reionization marks after recombination the second major change in the ionization state of the universe, going from a neutral to an ionized state. It starts with the appearance of the first stars and galaxies; a fraction of high-energy photons emitted from galaxies permeate into the intergalactic medium (IGM) and gradually ionize the hydrogen, until the IGM is completely ionized at z~6 (Fan et al., 2006). 
While the progress of reionization is driven by galaxy evolution, it changes the ionization and thermal state of the IGM substantially and affects subsequent structure and galaxy formation by various feedback mechanisms. Understanding this interaction between reionization and galaxy formation is further impeded by limited knowledge of high-redshift galactic properties such as the dust distribution and the escape fraction of ionizing photons. Lyman Alpha Emitters (LAEs) represent a sample of high-redshift galaxies that are sensitive to all these galactic properties and the effects of reionization. In this thesis, we aim to understand the progress of reionization by performing cosmological simulations, which allows us to investigate the limits of constraining reionization by high-redshift galaxies such as LAEs, and to examine how galactic properties and the ionization state of the IGM affect the visibility and observed quantities of LAEs and Lyman Break galaxies (LBGs). In the first part of this thesis, we focus on performing radiative transfer calculations to simulate reionization. We have developed a mapping-sphere-scheme, which, starting from spherically averaged temperature and density fields, uses our 1D radiative transfer code and computes the effect of each source on the IGM temperature and ionization (HII, HeII, HeIII) profiles, which are subsequently mapped onto a grid. Furthermore, we have updated the 3D Monte-Carlo radiative transfer code pCRASH, enabling detailed reionization simulations which take individual source characteristics into account. In the second part of this thesis, we perform a reionization simulation by post-processing a smoothed-particle hydrodynamical (SPH) simulation (GADGET-2) with 3D radiative transfer (pCRASH), where the ionizing sources are modelled according to the characteristics of the stellar populations in the hydrodynamical simulation. Following the ionization fractions of hydrogen (HI) and helium (HeII, HeIII), and the temperature in our simulation, we find that reionization starts at z~11 and ends at z~6, and that high-density regions near sources are ionized earlier than low-density regions far from sources. In the third part of this thesis, we couple the cosmological SPH simulation and the radiative transfer simulations with a physically motivated, self-consistent model for LAEs, in order to understand the importance of the ionization state of the IGM, the escape fraction of ionizing photons from galaxies, and dust in the interstellar medium (ISM) for the visibility of LAEs. Comparison of our model results with the LAE Lyman Alpha (Lya) and UV luminosity functions at z~6.6 reveals a three-dimensional degeneracy between the ionization state of the IGM, the ionizing photon escape fraction and the ISM dust distribution, which implies that LAEs act not only as tracers of reionization but also of the ionizing photon escape fraction and of the ISM dust distribution. This degeneracy does not even break down when we compare simulated with observed clustering of LAEs at z~6.6. However, our results show that reionization has the largest impact on the amplitude of the LAE angular correlation functions, and its imprints are clearly distinguishable from those of properties on galactic scales. These results show that reionization cannot be constrained tightly by exclusively using LAE observations. Further observational constraints, e.g. tomographies of the redshifted hydrogen 21cm line, are required.
In addition, we use our LAE model to probe the question of when a galaxy is visible as a LAE or a LBG. Within our model, galaxies above a critical stellar mass can produce enough luminosity to be visible as a LBG and/or a LAE. By finding an increasing duty cycle of LBGs with Lya emission as the UV magnitude or stellar mass of the galaxy rises, our model reveals that the brightest (and most massive) LBGs most often show Lya emission. Predicting the Lya equivalent width (Lya EW) distribution and the fraction of LBGs showing Lya emission at z~6.6, we reproduce the observational trend of the Lya EWs with UV magnitude. However, the Lya EWs of the UV-brightest LBGs exceed observations and can only be reconciled by accounting for an increased Lya attenuation of massive galaxies, which implies that the observed Lya-brightest LAEs do not necessarily coincide with the UV-brightest galaxies. We have analysed the dependencies of LAE observables on the properties of the galactic and intergalactic medium and the LAE-LBG connection, and this enhances our understanding of the nature of LAEs.}, language = {en} } @phdthesis{Logačev2014, author = {Logačev, Pavel}, title = {Underspecification and parallel processing in sentence comprehension}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82047}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The aim of the present thesis is to answer the question to what degree the processes involved in sentence comprehension are sensitive to task demands. A central phenomenon in this regard is the so-called ambiguity advantage, which is the finding that ambiguous sentences can be easier to process than unambiguous sentences. This finding may appear counterintuitive, because more meanings should be associated with a higher computational effort. Currently, two theories exist that can explain this finding. The Unrestricted Race Model (URM) by van Gompel et al. (2001) assumes that several sentence interpretations are computed in parallel, whenever possible, and that the first interpretation to be computed is assigned to the sentence. Because the duration of each structure-building process varies from trial to trial, the parallelism in structure-building predicts that ambiguous sentences should be processed faster. This is because when two structures are permissible, the chances that some interpretation will be computed quickly are higher than when only one specific structure is permissible. Importantly, the URM is not sensitive to task demands such as the type of comprehension questions being asked. A radically different proposal is the strategic underspecification model by Swets et al. (2008). It assumes that readers do not attempt to resolve ambiguities unless it is absolutely necessary. In other words, they underspecify. According to the strategic underspecification hypothesis, all attested replications of the ambiguity advantage are due to the fact that in those experiments, readers were not required to fully understand the sentence. In this thesis, these two models of the parser's actions at choice-points in the sentence are presented and evaluated. First, it is argued that Swets et al.'s (2008) evidence against the URM and in favor of underspecification is inconclusive. Next, the precise predictions of the URM as well as the underspecification model are refined.
Subsequently, a self-paced reading experiment involving the attachment of pre-nominal relative clauses in Turkish is presented, which provides evidence against strategic underspecification. A further experiment is presented which investigated relative clause attachment in German using the speed-accuracy tradeoff (SAT) paradigm. The experiment provides evidence against strategic underspecification and in favor of the URM. Furthermore, the results of the experiment are used to argue that human sentence comprehension is fallible, and that theories of parsing should be able to account for that fact. Finally, a third experiment is presented, which provides evidence for the sensitivity to task demands in the treatment of ambiguities. Because this finding is incompatible with the URM, and because the strategic underspecification model has been ruled out, a new model of ambiguity resolution is proposed: the stochastic multiple-channel model of ambiguity resolution (SMCM). It is further shown that the quantitative predictions of the SMCM are in agreement with experimental data. In conclusion, it is argued that the human sentence comprehension system is parallel and fallible, and that it is sensitive to task demands.}, language = {en} } @phdthesis{Goswami2014, author = {Goswami, Bedartha}, title = {Uncertainties in climate data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Scientific inquiry requires that we formulate not only what we know, but also what we do not know and by how much. In climate data analysis, this involves an accurate specification of measured quantities and a consequent analysis that consciously propagates the measurement errors at each step. The dissertation presents a thorough analytical method to quantify errors of measurement inherent in paleoclimate data. An additional focus is on the uncertainties in assessing the coupling between different factors that influence the global mean temperature (GMT). Paleoclimate studies critically rely on `proxy variables' that record climatic signals in natural archives. However, such proxy records inherently involve uncertainties in determining the age of the signal. We present a generic Bayesian approach to analytically determine the proxy record along with its associated uncertainty, resulting in a time-ordered sequence of correlated probability distributions rather than a precise time series. We further develop a recurrence-based method to detect dynamical events from the proxy probability distributions. The methods are validated with synthetic examples and demonstrated with real-world proxy records. The proxy estimation step reveals the interrelations between proxy variability and uncertainty. The recurrence analysis of the East Asian Summer Monsoon during the last 9000 years confirms the well-known `dry' events at 8200 and 4400 BP, plus an additional significantly dry event at 6900 BP. We also analyze the network of dependencies surrounding GMT. We find an intricate, directed network with multiple links between the different factors at multiple time delays. We further uncover a significant feedback from the GMT to the El Ni{\~n}o Southern Oscillation at quasi-biennial timescales.
The analysis highlights the need for a more nuanced formulation of influences between different climatic factors, as well as the limitations in trying to estimate such dependencies.}, language = {en} } @phdthesis{Trabant2014, author = {Trabant, Christoph}, title = {Ultrafast photoinduced phase transitions in complex materials probed by time-resolved resonant soft x-ray diffraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71377}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In processing and data storage, mainly ferromagnetic (FM) materials are being used. As physical limits are approached, new concepts have to be found for faster, smaller switches, for higher data densities and for more energy efficiency. Some of the discussed new concepts involve the material classes of correlated oxides and materials with antiferromagnetic coupling. Their applicability depends critically on their switching behavior, i.e., how fast and how energy-efficiently material properties can be manipulated. This thesis presents investigations of ultrafast non-equilibrium phase transitions in such new materials. In transition metal oxides (TMOs), the coupling of different degrees of freedom and the resulting low-energy excitation spectrum often result in spectacular changes of macroscopic properties (colossal magnetoresistance, superconductivity, metal-to-insulator transitions), often accompanied by nanoscale order of spins, charges and orbital occupation and by lattice distortions, which make these materials attractive. Magnetite served as a prototype for functional TMOs showing a metal-to-insulator transition (MIT) at T = 123 K. By probing the charge and orbital order as well as the structure after an optical excitation, we found that the electronic order and the structural distortion, characteristics of the insulating phase in thermal equilibrium, are destroyed within the experimental resolution of 300 fs. The MIT itself occurs on a 1.5 ps timescale. It shows that MITs in functional materials are several thousand times faster than switching processes in semiconductors. Recently, ferrimagnetic and antiferromagnetic (AFM) materials have become of interest. It was shown in ferrimagnetic GdFeCo that the transfer of angular momentum between two opposed FM subsystems with different time constants leads to a switching of the magnetization after laser pulse excitation. In addition, it was theoretically predicted that demagnetization dynamics in AFM materials should occur faster than in FM materials, as no net angular momentum has to be transferred out of the spin system. We investigated two different AFM materials in order to learn more about their ultrafast dynamics. In Ho, a metallic AFM below T ≈ 130 K, we found that the AFM order can be destroyed not only faster but also ten times more energy-efficiently than the order in comparable FM metals. In EuTe, an AFM semiconductor below T ≈ 10 K, we compared the loss of magnetization and the laser-induced structural distortion in one and the same experiment. Our experiment shows that they are effectively disentangled. An exception is an ultrafast release of lattice dynamics, which we assign to the release of magnetostriction. The results presented here were obtained with time-resolved resonant soft x-ray diffraction at the Femtoslicing source of the Helmholtz-Zentrum Berlin and at the free-electron laser in Stanford (LCLS).
In addition, the development and setup of a new UHV diffractometer for these experiments will be reported.}, language = {en} } @phdthesis{Soja2014, author = {Soja, Aleksandra Maria}, title = {Transcriptomic and metabolomic analysis of Arabidopsis thaliana during abiotic stress}, pages = {134}, year = {2014}, language = {en} } @phdthesis{Schollaen2014, author = {Schollaen, Karina}, title = {Tracking climate signals in tropical trees}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71947}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The tropical warm pool waters surrounding Indonesia are one of the equatorial heat and moisture sources that are considered a driving force of the global climate system. The climate in Indonesia is dominated by the equatorial monsoon system, and has been linked to El Ni{\~n}o-Southern Oscillation (ENSO) events, which often result in severe droughts or floods over Indonesia with profound societal and economic impacts on the populations living in the world's fourth most populated country. The latest IPCC report states that ENSO will remain the dominant mode in the tropical Pacific with global effects in the 21st century and that ENSO-related precipitation extremes will intensify. However, no common agreement exists among climate simulation models on the projected change in ENSO and the Australian-Indonesian Monsoon. Exploring high-resolution palaeoclimate archives, like tree rings or varved lake sediments, provides insights into the natural climate variability of the past, and thus helps to improve and validate simulations of future climate changes. Centennial tree-ring stable isotope records | Within this doctoral thesis, the main goal was to explore the potential of tropical tree rings to record climate signals and to use them as palaeoclimate proxies. In detail, stable carbon (δ13C) and oxygen (δ18O) isotopes were extracted from teak trees in order to establish the first well-replicated centennial (AD 1900-2007) stable isotope records for Java, Indonesia. Furthermore, different climatic variables were tested to determine whether they show significant correlations with tree-ring proxies (ring-width, δ13C, δ18O). Moreover, highly resolved intra-annual oxygen isotope data were established to assess the transfer of the seasonal precipitation signal into the tree rings. Finally, the established oxygen isotope record was used to reveal possible correlations with ENSO events. Methodological achievements | A second goal of this thesis was to assess the applicability of novel techniques which facilitate and optimize high-resolution and high-throughput stable isotope analysis of tree rings. Two different UV-laser-based microscopic dissection systems were evaluated as novel sampling tools for high-resolution stable isotope analysis. Furthermore, an improved procedure of tree-ring dissection from thin cellulose laths for stable isotope analysis was designed. The most important findings of this thesis are: I) The novel sampling techniques presented herein improve stable isotope analyses for tree-ring studies in terms of precision, efficiency and quality. The UV-laser-based microdissection serves as a valuable tool for sampling plant tissue at ultrahigh resolution and with unprecedented precision. II) A guideline for a modified method of cellulose extraction from whole-wood cross-sections and subsequent tree-ring dissection was established.
The novel technique optimizes the stable isotope analysis process in two ways: faster and high-throughput cellulose extraction and precise tree-ring separation at annual to high-resolution scale. III) The centennial tree-ring stable isotope records reveal significant correlation with regional precipitation. High-resolution stable oxygen values, furthermore, allow distinguishing between dry and rainy season rainfall. IV) The δ18O record reveals significant correlation with different ENSO flavors and demonstrates the importance of considering ENSO flavors when interpreting palaeoclimatic data in the tropics. The findings of my dissertation show that seasonally resolved δ18O records from Indonesian teak trees are a valuable proxy for multi-centennial reconstructions of regional precipitation variability (monsoon signals) and large-scale ocean-atmosphere phenomena (ENSO) for the Indo-Pacific region. Furthermore, the novel methodological achievements offer many unexplored avenues for multidisciplinary research in high-resolution palaeoclimatology.}, language = {en} } @phdthesis{Lukas2014, author = {Lukas, Marcus}, title = {To breath or not to breathe - carbon budget regulation in Daphnia}, address = {Potsdam}, pages = {132 S.}, year = {2014}, language = {en} } @phdthesis{BacskaiAtkari2014, author = {Bacskai-Atkari, Julia}, title = {The syntax of comparative constructions : operators, ellipsis phenomena and functional left peripheries}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-301-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71255}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 310}, year = {2014}, abstract = {Adopting a minimalist framework, the dissertation provides an analysis for the syntactic structure of comparatives, with special attention paid to the derivation of the subclause. The proposed account explains how the comparative subclause is connected to the matrix clause, how the subclause is formed in the syntax and what additional processes contribute to its final structure. In addition, it casts light upon these problems in cross-linguistic terms and provides a model that allows for synchronic and diachronic differences. This also enables one to give a more adequate explanation for the phenomena found in English comparatives since the properties of English structures can then be linked to general settings of the language and hence need no longer be considered as idiosyncratic features of the grammar of English. First, the dissertation provides a unified analysis of degree expressions, relating the structure of comparatives to that of other degrees. It is shown that gradable adjectives are located within a degree phrase (DegP), which in turn projects a quantifier phrase (QP) and that these two functional layers are always present, irrespectively of whether there is a phonologically visible element in these layers. Second, the dissertation presents a novel analysis of Comparative Deletion by reducing it to an overtness constraint holding on operators: in this way, it is reduced to morphological differences and cross-linguistic variation is not conditioned by way of postulating an arbitrary parameter. Cross-linguistic differences are ultimately dependent on whether a language has overt operators equipped with the relevant - [+compr] and [+rel] - features. 
Third, the dissertation provides an adequate explanation for the phenomenon of Attributive Comparative Deletion, as attested in English, by way of relating it to the regular mechanism of Comparative Deletion. I assume that Attributive Comparative Deletion is not a universal phenomenon, and its presence in English can be conditioned by independent, more general rules, while the absence of such restrictions leads to its absence in other languages. Fourth, the dissertation accounts for certain phenomena related to diachronic changes, examining how the changes in the status of comparative operators led to changes in whether Comparative Deletion is attested in a given language: I argue that only operators without a lexical XP can be grammaticalised. The underlying mechanisms are essentially general economy principles, and hence the processes are not language-specific or exceptional. Fifth, the dissertation accounts for optional ellipsis processes that play a crucial role in the derivation of typical comparative subclauses. These processes are not directly related to the structure of degree expressions and hence the elimination of the quantified expression from the subclause; nevertheless, they are shown to be in interaction with the mechanisms underlying Comparative Deletion or the absence thereof.}, language = {en} } @phdthesis{Schmitt2014, author = {Schmitt, Clemens Nikolaus Zeno}, title = {The role of protein metal complexes in the mechanics of Mytilus californianus byssal threads}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-74216}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 93}, year = {2014}, abstract = {Protein-metal coordination complexes are well known as active centers in enzymatic catalysis, and they also contribute to signal transduction, gas transport, and hormone function. Additionally, they are now known to contribute as load-bearing cross-links to the mechanical properties of several biological materials, including the jaws of Nereis worms and the byssal threads of marine mussels. The primary aim of this thesis work is to better understand the role of protein-metal cross-links in the mechanical properties of biological materials, using the mussel byssus as a model system. Specifically, the focus is on histidine-metal cross-links as sacrificial bonds in the fibrous core of the byssal thread (Chapter 4) and L-3,4-dihydroxyphenylalanine (DOPA)-metal bonds in the protective thread cuticle (Chapter 5). Byssal threads are protein fibers, which mussels use to attach to various substrates at the seashore. These relatively stiff fibers have the ability to extend up to about 100 \% strain, dissipating large amounts of mechanical energy from crashing waves, for example. Remarkably, following damage from cyclic loading, initial mechanical properties are subsequently recovered by a material-intrinsic self-healing capability. Histidine residues coordinated to transition metal ions in the proteins comprising the fibrous thread core have been suggested as reversible sacrificial bonds that contribute to self-healing; however, this remains to be substantiated in situ. In the first part of this thesis, the role of metal coordination bonds in the thread core was investigated using several spectroscopic methods. In particular, X-ray absorption spectroscopy (XAS) was applied to probe the coordination environment of zinc in Mytilus californianus threads at various stages during stretching and subsequent healing.
Analysis of the extended X-ray absorption fine structure (EXAFS) suggests that tensile deformation of threads is correlated with the rupture of Zn-coordination bonds and that self-healing is connected with the reorganization of Zn-coordination bond topologies rather than the mere reformation of Zn-coordination bonds. These findings have interesting implications for the design of self-healing metallopolymers. The byssus cuticle is a protective coating surrounding the fibrous thread core that is both as hard as an epoxy and extensible up to 100 \% strain before cracking. It was shown previously that cuticle stiffness and hardness largely depend on the presence of Fe-DOPA coordination bonds. However, the byssus is known to concentrate a large variety of metals from seawater, some of which are also capable of binding DOPA (e.g. V). Therefore, the question arises whether natural variation of metal composition can affect the mechanical performance of the byssal thread cuticle. To investigate this hypothesis, nanoindentation and confocal Raman spectroscopy were applied to the cuticle of native threads, threads with metals removed (EDTA treated), and threads in which the metal ions in the native tissue were replaced by either Fe or V. Interestingly, replacement of metal ions with either Fe or V leads to the full recovery of native mechanical properties with no statistical difference between each other or the native properties. This likely indicates that a fixed number of metal coordination sites are maintained within the byssal thread cuticle - possibly achieved during thread formation - which may provide an evolutionarily relevant mechanism for maintaining reliable mechanics in an unpredictable environment. While the dynamic exchange of bonds plays a vital role in the mechanical behavior and self-healing in the thread core by allowing them to act as reversible sacrificial bonds, the compatibility of DOPA with other metals allows an inherent adaptability of the thread cuticle to changing circumstances. The requirements to both of these materials can be met by the dynamic nature of the protein-metal cross-links, whereas covalent cross-linking would fail to provide the adaptability of the cuticle and the self-healing of the core. 
In summary, these studies of the thread core and the thread cuticle serve to underline the important and dynamic roles of protein-metal coordination in the mechanical function of load-bearing protein fibers, such as the mussel byssus.}, language = {en} } @phdthesis{Schulz2014, author = {Schulz, Elisa}, title = {The role of flavonols and anthocyanins in the cold and UV-B acclimation of Arabidopsis thaliana (L.)}, pages = {159}, year = {2014}, language = {en} } @phdthesis{Gassmoeller2014, author = {Gaßm{\"o}ller, Ren{\'e}}, title = {The interaction of subducted slabs and plume generation zones in geodynamic models}, school = {Universit{\"a}t Potsdam}, pages = {158}, year = {2014}, language = {en} } @phdthesis{Gericke2014, author = {Gericke, Lutz}, title = {Tele-Board - Supporting and analyzing creative collaboration in synchronous and asynchronous scenarios}, pages = {186}, year = {2014}, language = {en} } @phdthesis{Zibulski2014, author = {Zibulski, Romy}, title = {Taxonomic composition and biochemical and isotopic characteristics of North-Siberian mosses and their application to the palaeoecological reconstruction of tundra polygon development}, pages = {ii, 127}, year = {2014}, language = {en} } @phdthesis{Fudickar2014, author = {Fudickar, Sebastian}, title = {Sub GHz transceiver for indoor localisation of smartphones}, school = {Universit{\"a}t Potsdam}, pages = {IV, 167}, year = {2014}, language = {en} } @phdthesis{Pagel2014, author = {Pagel, J{\"o}rn}, title = {Statistical process-based models for the understanding and prediction of range dynamics}, pages = {VII, 147}, year = {2014}, language = {en} } @phdthesis{Tetzner2014, author = {Tetzner, Julia}, title = {Stability and change in academic, social, and emotional development from early adolescence to young adulthood}, school = {Universit{\"a}t Potsdam}, pages = {191}, year = {2014}, language = {en} } @phdthesis{Maghsoudi2014, author = {Maghsoudi, Samira}, title = {Spatiotemporal microseismicity patterns and detection performance in mining environments}, address = {Potsdam}, pages = {104 S.}, year = {2014}, language = {en} } @phdthesis{Benz2014, author = {Benz, Verena}, title = {Sex-specific differences in the regulation of body weight dynamics and adipose tissue metabolism}, address = {Potsdam}, pages = {119 S.}, year = {2014}, language = {en} } @phdthesis{Pussak2014, author = {Pussak, Marcin}, title = {Seismic characterization of geothermal reservoirs by application of the common-reflection-surface stack method and attribute analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77565}, school = {Universit{\"a}t Potsdam}, pages = {viii, 140}, year = {2014}, abstract = {An important contribution of geosciences to the renewable energy production portfolio is the exploration and utilization of geothermal resources. For the development of a geothermal project at great depths, a detailed geological and geophysical exploration program is required in the first phase. With the help of active seismic methods, high-resolution images of the geothermal reservoir can be delivered. This allows potential transport routes for fluids to be identified as well as regions with high potential for heat extraction to be mapped, which indicates favorable conditions for geothermal exploitation. The presented work investigates the extent to which an improved characterization of geothermal reservoirs can be achieved with the new methods of seismic data processing.
The summation of traces (stacking) is a crucial step in the processing of seismic reflection data. The common-reflection-surface (CRS) stacking method can be applied as an alternative to the conventional normal moveout (NMO) or the dip moveout (DMO) stack. The advantages of the CRS stack, besides an automatic determination of stacking operator parameters, include adequate imaging of arbitrarily curved geological boundaries and a significant increase in signal-to-noise (S/N) ratio by stacking far more traces than are used in a conventional stack. A major innovation I have shown in this work is that, in particular, the quality of the signal attributes that characterize the seismic images can be significantly improved by this modified type of stacking. Improved attribute analysis facilitates the interpretation of seismic images and plays a significant role in the characterization of reservoirs. Variations of lithological and petrophysical properties are reflected by fluctuations of specific signal attributes (e.g., frequency or amplitude characteristics). Their further interpretation can provide a quality assessment of the geothermal reservoir with respect to the capacity of fluids within a hydrological system that can be extracted and utilized. The proposed methodological approach is demonstrated on the basis of two case studies. In the first example, I analyzed a series of 2D seismic profile sections through the Alberta sedimentary basin on the eastern edge of the Canadian Rocky Mountains. In the second application, a 3D seismic volume is characterized in the surroundings of a geothermal borehole, located in the central part of the Polish basin. Both sites were investigated with the modified and improved stacking attribute analyses. The results provide recommendations for the planning of future geothermal plants in both study areas.}, language = {en} } @phdthesis{Schulz2014, author = {Schulz, Anneli}, title = {Search for gamma-ray emission from bow shocks of runaway stars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73905}, school = {Universit{\"a}t Potsdam}, pages = {123}, year = {2014}, abstract = {The mystery of the origin of cosmic rays has been tackled for more than a hundred years and is still not solved. Cosmic rays are detected with energies spanning more than 10 orders of magnitude and reaching up to ~10²¹ eV, far higher than any man-made accelerator can reach. Different theories on the astrophysical objects and processes creating such highly energetic particles have been proposed. A very prominent explanation for a process producing highly energetic particles is shock acceleration. The observation of high-energy gamma rays from supernova remnants, some of them revealing a shell-like structure, is clear evidence that particles are accelerated to ultrarelativistic energies in the shocks of these objects. The environments of supernova remnants are complex and challenge detailed modelling of the processes leading to high-energy gamma-ray emission. The study of shock acceleration at bow shocks, created by the supersonic movement of individual stars through the interstellar medium, offers a unique possibility to determine the physical properties of shocks in a less complex environment. The shocked medium is heated by the stellar and the shock-excited radiation, leading to thermal infrared emission. Twenty-eight bow shocks have been discovered through their infrared emission.
Nonthermal radiation at radio and X-ray wavelengths has been detected from two bow shocks, pointing to the existence of relativistic particles in these systems. Theoretical models of the emission processes predict high-energy and very high-energy emission at a flux level within reach of current instruments. This work presents the search for gamma-ray emission from bow shocks of runaway stars in the energy regime from 100 MeV to ~100 TeV. The search is performed with the Large Area Telescope (LAT) on board the Fermi satellite and the H.E.S.S. telescopes located in the Khomas Highland in Namibia. The Fermi-LAT was launched in 2008 and has been continuously scanning the sky since then. It detects photons with energies from 20 MeV to over 300 GeV and has an unprecedented sensitivity. The all-sky coverage allows us to study all 28 bow shocks of runaway stars listed in the E-BOSS catalogue of infrared bow shocks. No significant emission was detected from any of the objects, although predicted by several theoretical models describing the non-thermal emission of bow shocks of runaway stars. The H.E.S.S. experiment is the most sensitive system of imaging atmospheric Cherenkov telescopes. It detects photons from several tens of GeV to ~100 TeV. Seven of the bow shocks have been observed with H.E.S.S., and the data analysis is presented in this thesis. The analyses of the very-high-energy data did not reveal significant emission from any of the sources either. This work presents the first systematic search for gamma-ray emission from bow shocks of runaway stars. For the first time, Fermi-LAT data were specifically analysed to reveal emission from bow shocks of runaway stars. In the TeV regime, no searches for emission from these objects have been published so far; the study presented here is the first in this energy regime. The level of the gamma-ray emission from bow shocks of runaway stars is constrained by the calculated upper limits over six orders of magnitude in energy. The upper limits calculated for the bow shocks of runaway stars in the course of this work constrain several models. For the best candidate, ζ Ophiuchi, the upper limits in the Fermi-LAT energy range are lower than the predictions by a factor of ~5. This challenges the assumptions made in this model and gives valuable input for further modelling approaches. The analyses were performed with the software packages provided by the H.E.S.S. and Fermi collaborations. The development of a unified analysis framework for gamma-ray data, namely GammaLib/ctools, is rapidly progressing within the CTA consortium. Recent implementations and cross-checks with current software frameworks are presented in the Appendix.}, language = {en} } @phdthesis{Mueller2014, author = {M{\"u}ller, J{\"o}rg}, title = {Response of bryophyte diversity to land-use and management in forest and grassland habitats}, school = {Universit{\"a}t Potsdam}, pages = {133}, year = {2014}, language = {en} } @phdthesis{Kirchhecker2014, author = {Kirchhecker, Sarah}, title = {Renewable imidazolium zwitterions as platform molecules for the synthesis of ionic liquids and materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77412}, school = {Universit{\"a}t Potsdam}, pages = {136}, year = {2014}, abstract = {Following the principles of green chemistry, a simple and efficient synthesis of functionalised imidazolium zwitterionic compounds from renewable resources was developed based on a modified one-pot Debus-Radziszewski reaction.
The combination of different carbohydrate-derived 1,2-dicarbonyl compounds and amino acids is a simple way to modulate the properties and introduce different functionalities. A representative compound was assessed as an acid catalyst, and converted into acidic ionic liquids by reaction with several strong acids. The reactivity of the double carboxylic functionality was explored by esterification with long and short chain alcohols, as well as functionalised amines, which led to the straightforward formation of surfactant-like molecules or bifunctional esters and amides. One of these di-esters is currently being investigated for the synthesis of poly(ionic liquids). The functionalisation of cellulose with one of the bifunctional esters was investigated and preliminary tests employing it for the functionalisation of filter papers were carried out successfully. The imidazolium zwitterions were converted into ionic liquids via hydrothermal decarboxylation in flow, a benign and scalable technique. This method provides access to imidazolium ionic liquids via a simple and sustainable methodology, whilst completely avoiding contamination with halide salts. Different ionic liquids can be generated depending on the functionality contained in the ImZw precursor. Two alanine-derived ionic liquids were assessed for their physicochemical properties and applications as solvents for the dissolution of cellulose and the Heck coupling.}, language = {en} } @phdthesis{Paepke2014, author = {P{\"a}pke, Carola}, title = {Regulation of respiration during low oxygen availability}, school = {Universit{\"a}t Potsdam}, pages = {144}, year = {2014}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. 
Thirdly, we present a software package providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @phdthesis{Stutz2014, author = {Stutz, Franziska}, title = {Reading motivation, reading amount, and reading comprehension in the early elementary years}, pages = {212}, year = {2014}, language = {en} } @phdthesis{Borchardt2014, author = {Borchardt, Sven}, title = {Rainfall, weathering and erosion}, pages = {x, 90}, year = {2014}, language = {en} } @phdthesis{Mayer2014, author = {Mayer, Michael}, title = {Pulsar wind nebulae at high energies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71504}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2014}, abstract = {Pulsar wind nebulae (PWNe) are the most abundant TeV gamma-ray emitters in the Milky Way. The radiative emission of these objects is powered by fast-rotating pulsars, which convert part of their rotational energy into winds of relativistic particles. This thesis presents an in-depth study of the detected population of PWNe at high energies. To outline general trends regarding their evolutionary behaviour, a time-dependent model is introduced and compared to the available data. In particular, this work presents two exceptional PWNe which stand out from the rest of the population, namely the Crab Nebula and N 157B. Both objects are driven by pulsars with extremely high rotational energy loss rates. Accordingly, they are often referred to as energetic twins. Modelling the non-thermal multi-wavelength emission of N 157B gives access to specific properties of this object, like the magnetic field inside the nebula. Comparing the derived parameters to those of the Crab Nebula reveals large intrinsic differences between the two PWNe. Possible origins of these differences are discussed in the context of the two otherwise similar pulsars. Compared to the TeV gamma-ray regime, the number of detected PWNe is much smaller in the MeV-GeV gamma-ray range. In the latter range, the Crab Nebula stands out due to the recent detection of gamma-ray flares. In general, the measured flux enhancements on short time scales of days to weeks were not expected in the theoretical understanding of PWNe. In this thesis, the variability of the Crab Nebula is analysed using data from the Fermi Large Area Telescope (Fermi-LAT). For the presented analysis, a new gamma-ray reconstruction method is used, providing a higher sensitivity and a lower energy threshold compared to previous analyses. The derived gamma-ray light curve of the Crab Nebula is investigated for flares and periodicity. The detected flares are analysed regarding their energy spectra, and their variety and commonalities are discussed. In addition, a dedicated analysis of the flare which occurred in March 2013 is performed. The derived short-term variability time scale is roughly 6 h, implying that a small region inside the Crab Nebula is responsible for the enigmatic flares. The most promising theories explaining the origins of the flux eruptions and gamma-ray variability are discussed in detail. In the technical part of this work, a new analysis framework is presented. The introduced software, called gammalib/ctools, is currently being developed for the future CTA observatory. The analysis framework is extensively tested using data from the H.E.S.S. experiment. To conduct proper data analysis in the likelihood framework of gammalib/ctools, a model describing the distribution of background events in H.E.S.S. data is presented.
The software provides the infrastructure to combine data from several instruments in one analysis. To study the gamma-ray emitting PWN population, data from Fermi-LAT and H.E.S.S. are combined in the likelihood framework of gammalib/ctools. In particular, the spectral peak, which usually lies in the overlap energy regime between these two instruments, is determined with the presented analysis framework. The derived measurements are compared to the predictions from the time-dependent model. The combined analysis supports the conclusion of a diverse population of gamma-ray emitting PWNe.}, language = {en} } @phdthesis{Rudorf2014, author = {Rudorf, Sophia}, title = {Protein Synthesis by Ribosomes}, pages = {xii, 145}, year = {2014}, language = {en} } @phdthesis{RoggeSolti2014, author = {Rogge-Solti, Andreas}, title = {Probabilistic Estimation of Unobserved Process Events}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70426}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Organizations try to gain competitive advantages and to increase customer satisfaction. To ensure the quality and efficiency of their business processes, they perform business process management. An important part of process management that happens on the daily operational level is process controlling. A prerequisite of controlling is process monitoring, i.e., keeping track of the performed activities in running process instances. Only by process monitoring can business analysts detect delays and react to deviations from the expected or guaranteed performance of a process instance. To enable monitoring, process events need to be collected from the process environment. When a business process is orchestrated by a process execution engine, monitoring is available for all orchestrated process activities. Many business processes, however, do not lend themselves to automatic orchestration, e.g., because of required freedom of action. This situation is often encountered in hospitals, where most business processes are manually enacted. Hence, in practice it is often inefficient or infeasible to document and monitor every process activity. Additionally, manual process execution and documentation are prone to errors, e.g., documentation of activities can be forgotten. Thus, organizations face the challenge of process events that occur but are not observed by the monitoring environment. These unobserved process events can serve as a basis for operational process decisions, even without exact knowledge of when they happened or when they will happen. An exemplary decision is whether to invest more resources to manage timely completion of a case, anticipating that the process end event will occur too late. This thesis offers means to reason about unobserved process events in a probabilistic way. We address decisive questions of process managers (e.g., "when will the case be finished?", or "when did we perform the activity that we forgot to document?") in this thesis. As the main contribution, we introduce an advanced probabilistic model to business process management that is based on a stochastic variant of Petri nets. We present a holistic approach to use the model effectively along the business process lifecycle. Therefore, we provide techniques to discover such models from historical observations, to predict the termination time of processes, and to ensure quality by missing data management.
We propose mechanisms to optimize the configuration for monitoring and prediction, i.e., to offer guidance in selecting important activities to monitor. An implementation is provided as a proof of concept. For evaluation, we compare the accuracy of the approach with that of state-of-the-art approaches using real process data from a hospital. Additionally, we show its more general applicability in other domains by applying the approach to process data from logistics and finance.}, language = {en} } @phdthesis{Rafiee2014, author = {Rafiee, Hosnieh}, title = {Privacy and security issues in IPv6 networks}, address = {Potsdam}, pages = {141 S.}, year = {2014}, language = {en} } @phdthesis{Schreck2014, author = {Schreck, Simon Frederik}, title = {Potential energy surfaces, femtosecond dynamics and nonlinear X-ray-matter interactions from resonant inelastic soft X-ray scattering}, pages = {164}, year = {2014}, language = {en} } @phdthesis{Secker2014, author = {Secker, Christian}, title = {Polypeptoid block copolymers}, pages = {128}, year = {2014}, language = {en} } @phdthesis{Mathew2014, author = {Mathew, Simi}, title = {Polymeric microparticles as vaccine carriers}, address = {Potsdam}, pages = {143 S.}, year = {2014}, language = {en} } @phdthesis{Trost2014, author = {Trost, Gerda}, title = {Poly(A) Polymerase 1 (PAPS1) influences organ size and pathogen response in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72345}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Polyadenylation of pre-mRNAs is critical for efficient nuclear export, stability, and translation of the mature mRNAs, and thus for gene expression. The bulk of pre-mRNAs are processed by canonical nuclear poly(A) polymerase (PAPS). Both vertebrate and higher-plant genomes encode more than one isoform of this enzyme, and these are coexpressed in different tissues. However, in neither case is it known whether the isoforms fulfill different functions or polyadenylate distinct subsets of pre-mRNAs. This thesis shows that the three canonical nuclear PAPS isoforms in Arabidopsis are functionally specialized owing to their evolutionarily divergent C-terminal domains. A moderate loss-of-function mutant in PAPS1 leads to an increase in floral organ size, whereas leaf size is reduced. A strong loss-of-function mutation causes a male gametophytic defect, whereas a weak allele leads to reduced leaf growth. By contrast, plants lacking both PAPS2 and PAPS4 function are viable with wild-type leaf growth. Polyadenylation of SMALL AUXIN UP RNA (SAUR) mRNAs depends specifically on PAPS1 function. The resulting reduction in SAUR activity in paps1 mutants contributes to their reduced leaf growth, providing a causal link between polyadenylation of specific pre-mRNAs by a particular PAPS isoform and plant growth. Additionally, the opposite effects of PAPS1 on leaf and flower growth reflect the different identities of these organs. The overgrowth of paps1 mutant petals is due to increased recruitment of founder cells into early organ primordia, whereas the reduced leaf size is due to an ectopic pathogen response. This constitutive immune response leads to increased resistance to the biotrophic oomycete Hyaloperonospora arabidopsidis and reflects activation of the salicylic acid-independent signalling pathway downstream of ENHANCED DISEASE SUSCEPTIBILITY1 (EDS1)/PHYTOALEXIN DEFICIENT4 (PAD4). Immune responses are accompanied by intracellular redox changes.
Consistent with this, the redox status of the chloroplast is altered in paps1-1 mutants. The molecular effects of the paps1-1 mutation were analysed using an RNA sequencing approach that distinguishes between long- and short-tailed mRNAs. The results shown here suggest the existence of an additional layer of regulation of gene expression in plants, and possibly in vertebrates, whereby the relative activities of canonical nuclear PAPS isoforms control de novo synthesized poly(A) tail length and hence the expression of specific subsets of mRNAs.}, language = {en} } @phdthesis{Bamberg2014, author = {Bamberg, Marlene}, title = {Planetary mapping tools applied to floor-fractured craters on Mars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72104}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Planetary research is often user-based and requires considerable skill, time, and effort. Unfortunately, self-defined boundary conditions, definitions, and rules are often not documented or not easy to comprehend due to the complexity of the research. This makes a comparison to other studies, or an extension of the already existing research, complicated. Comparisons are often distorted because results rely on different, poorly defined, or even unknown boundary conditions. The purpose of this research is to develop a standardized analysis method for planetary surfaces which is adaptable to several research topics. The method provides a consistent quality of results. This also includes achieving reliable and comparable results and reducing the time and effort of conducting such studies. A standardized analysis method is provided by automated analysis tools that focus on statistical parameters. Specific key parameters and boundary conditions are defined for the tool application. The analysis relies on a database in which all key parameters are stored. These databases can be easily updated and adapted to various research questions. This increases the flexibility, reproducibility, and comparability of the research. However, the quality of the database and the reliability of the definitions directly influence the results. To ensure a high quality of results, the rules and definitions need to be well defined and based on previously conducted case studies. The tools then produce parameters, which are obtained by defined geostatistical techniques (measurements, calculations, classifications). The idea of an automated statistical analysis is tested to demonstrate the benefits, but also the potential problems, of this method. In this study, I adapt automated tools for floor-fractured craters (FFCs) on Mars. These impact craters show a variety of surface features, occur in different Martian environments, and have different fracturing origins. They provide a complex morphological and geological field of application. 433 FFCs are classified by the analysis tools according to their fracturing process. Spatial data, environmental context, and crater interior data are analyzed to distinguish between the processes involved in floor fracturing. Related geologic processes, such as glacial and fluvial activity, are too similar to be separately classified by the automated tools. Glacial and fluvial fracturing processes are therefore merged for the classification. The automated tools provide probability values for each origin model. To guarantee the quality and reliability of the results, classification tools need to achieve an origin probability above 50 \%.
This analysis method shows that 15 \% of the FFCs are fractured by intrusive volcanism, 20 \% by tectonic activity, and 43 \% by water \& ice related processes. In total, 75 \% of the FFCs are assigned to an origin type. The remaining cases can be explained by a combination of origin models, superposition or erosion of key parameters, or an unknown fracturing model. These cases have to be analyzed manually in detail. Another possibility would be the improvement of the key parameters and rules for the classification. This research shows that it is possible to conduct an automated statistical analysis of morphologic and geologic features based on analysis tools. Analysis tools provide additional information to the user and are therefore considered assistance systems.}, language = {en} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission that is expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays. The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results for this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre. The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. The presented analysis features the lowest energy threshold that has ever been reached in ground-based γ-ray astronomy, opening a new window to the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Ahmad2014, author = {Ahmad, Nadeem}, title = {People centered HMI's for deaf and functionally illiterate users}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70391}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The objective and motivation behind this research are to provide applications with easy-to-use interfaces to communities of deaf and functionally illiterate users, enabling them to work without any human assistance. Although recent years have witnessed technological advancements, the availability of technology does not ensure accessibility to information and communication technologies (ICT). Extensive use of text, from menus to document contents, means that deaf or functionally illiterate users cannot access services implemented in most computer software. Consequently, most existing computer applications pose an accessibility barrier to those who are unable to read fluently.
Online technologies intended for such groups should be developed in continuous partnership with primary users and include a thorough investigation into their limitations, requirements and usability barriers. In this research, I investigated existing tools in voice, web and other multimedia technologies to identify learning gaps and explored ways to enhance information literacy for deaf and functionally illiterate users. I worked on the development of user-centered interfaces to increase the capabilities of deaf and low-literacy users by enhancing lexical resources and by evaluating several multimedia interfaces for them. The interface of the platform-independent Italian Sign Language (LIS) Dictionary has been developed to enhance the lexical resources for deaf users. The Sign Language Dictionary accepts Italian lemmas as input and provides their representation in the Italian Sign Language as output. The Sign Language Dictionary contains 3082 signs as a set of avatar animations in which each sign is linked to a corresponding Italian lemma. I integrated the LIS lexical resources with the MultiWordNet (MWN) database to form the first LIS MultiWordNet (LMWN). LMWN contains information about lexical relations between words, semantic relations between lexical concepts (synsets), correspondences between Italian and sign language lexical concepts and semantic fields (domains). The approach enhances the deaf users' understanding of written Italian and shows that a relatively small lexicon can cover a significant portion of MWN. The integration of LIS signs with MWN made it a useful tool for computational linguistics and natural language processing. The rule-based translation process from written Italian text to LIS has been transformed into a service-oriented system. The translation process is composed of various modules, including a parser, a semantic interpreter, a generator, and a spatial allocation planner. This translation procedure has been implemented in the Java Application Building Center (jABC), which is a framework for extreme model-driven design (XMDD). The XMDD approach focuses on bringing software development closer to conceptual design, so that the functionality of a software solution can be understood by someone who is unfamiliar with programming concepts. The transformation addresses the heterogeneity challenge and enhances the reusability of the system. For enhancing the e-participation of functionally illiterate users, two detailed studies were conducted in the Republic of Rwanda. In the first study, a traditional (textual) interface was compared with a virtual-character-based interactive interface. The study helped to identify usability barriers, and users evaluated these interfaces according to three fundamental areas of usability, i.e. effectiveness, efficiency and satisfaction. In another study, we developed four different interfaces to analyze the usability and effects of online assistance (consistent help) for functionally illiterate users, and compared the effect of different help modes, including textual, vocal and virtual-character help, on the performance of semi-literate users. In our newly designed interfaces the instructions were automatically translated into Swahili. All the interfaces were evaluated on the basis of task accomplishment, time consumption, System Usability Scale (SUS) rating and the number of times help was acquired. The results show that the performance of semi-literate users improved significantly when using the online assistance.
The dissertation thus introduces a new development approach in which virtual characters are used as additional support for barely literate or naturally challenged users. Such components enhanced the application utility by offering a variety of services like translating content into the local language, providing additional vocal information, and performing automatic translation from text to sign language. Obviously, there is no such thing as one design solution that fits all in the underlying domain. Context sensitivity, literacy and mental abilities are key factors on which I concentrated, and the results emphasize that computer interfaces must be based on a thoughtful definition of target groups, purposes and objectives.}, language = {en} } @phdthesis{Strauss2014, author = {Strauß, Jens}, title = {Organic carbon in ice-rich permafrost}, doi = {10.25932/publishup-7523}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75236}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 107, 102}, year = {2014}, abstract = {Permafrost, defined as ground that is frozen for at least two consecutive years, is a distinct feature of the terrestrial unglaciated Arctic. It covers approximately one quarter of the land area of the Northern Hemisphere (23,000,000 km²). Arctic landscapes, especially those underlain by permafrost, are threatened by climate warming and may degrade in different ways, including active layer deepening, thermal erosion, and the development of rapid thaw features. In Siberian and Alaskan late Pleistocene ice-rich Yedoma permafrost, rapid and deep thaw processes (called thermokarst) can mobilize deep organic carbon (below 3 m depth) by surface subsidence due to loss of ground ice. Increased permafrost thaw could cause a feedback loop of global significance if its stored frozen organic carbon is reintroduced into the active carbon cycle as greenhouse gases, which accelerate warming and induce more permafrost thaw and carbon release. To assess this concern, the major objective of the thesis was to enhance the understanding of the origin of Yedoma as well as to assess the associated organic carbon pool size and carbon quality (concerning degradability). The key research questions were: - How did Yedoma deposits accumulate? - How much organic carbon is stored in the Yedoma region? - What is the susceptibility of the Yedoma region's carbon to future decomposition? To address these three research questions, an interdisciplinary approach, including detailed field studies and sampling in Siberia and Alaska as well as methods of sedimentology, organic biogeochemistry, remote sensing, statistical analyses, and computational modeling, was applied. To provide a panarctic context, this thesis additionally includes results both from a newly compiled northern circumpolar carbon database and from a model assessment of carbon fluxes in a warming Arctic. The Yedoma samples show a homogeneous grain-size composition. All samples were poorly sorted with a multi-modal grain-size distribution, indicating various (re-) transport processes. This contradicts the popular pure loess deposition hypothesis for the origin of Yedoma permafrost. The absence of large-scale grinding processes via glaciers and ice sheets in the northeast Siberian lowlands, processes which are necessary to create loess as a material source, suggests a polygenetic origin of Yedoma deposits.
Based on the largest available data set of the key parameters, including organic carbon content, bulk density, ground ice content, and deposit volume (thickness and coverage) from Siberian and Alaskan study sites, this thesis further shows that deep frozen organic carbon in the Yedoma region consists of two distinct major reservoirs, Yedoma deposits and thermokarst deposits (formed in thaw-lake basins). Yedoma deposits contain ~80 Gt and thermokarst deposits ~130 Gt organic carbon, or a total of ~210 Gt. Depending on the approach used for calculating uncertainty, the range for the total Yedoma region carbon store is ±75 \% and ±20 \% for conservative single and multiple bootstrapping calculations, respectively. Despite the fact that these findings reduce the Yedoma region carbon pool by nearly a factor of two compared to previous estimates, this frozen organic carbon is still capable of inducing a permafrost carbon feedback to climate warming. The complete northern circumpolar permafrost region contains between 1100 and 1500 Gt organic carbon, of which ~60 \% is perennially frozen and decoupled from the short-term carbon cycle. When this carbon is thawed and reintroduced into the active carbon cycle, the quality of the organic matter becomes relevant. Furthermore, investigations of Yedoma and thermokarst organic matter quality showed that Yedoma and thermokarst organic matter exhibit no depth-dependent quality trend. This is evidence that, after freezing, the ancient organic matter is preserved in a state of constant quality. The applied alkane and fatty-acid-based biomarker proxies, including the carbon-preference and the higher-land-plant-fatty-acid indices, show a broad range of organic matter quality and thus no significantly different qualities of the organic matter stored in thermokarst deposits compared to Yedoma deposits. This lack of quality differences shows that the organic matter biodegradability depends on different decomposition trajectories and the previous decomposition/incorporation history. Finally, the fate of the organic matter has been assessed by implementing deep carbon pools and thermokarst processes in a permafrost carbon model. Under various warming scenarios for the northern circumpolar permafrost region, model results show a carbon release from permafrost regions of up to ~140 Gt and ~310 Gt by the years 2100 and 2300, respectively. The additional warming caused by the carbon release from newly thawed permafrost contributes 0.03 to 0.14°C by the year 2100. The model simulations predict that a further increase by the 23rd century will add 0.4°C to global mean surface air temperatures. In conclusion, Yedoma deposit formation during the late Pleistocene was dominated by water-related (alluvial/fluvial/lacustrine) as well as aeolian processes under periglacial conditions. The circumarctic permafrost region, including the Yedoma region, contains a substantial amount of currently frozen organic carbon. The carbon of the Yedoma region is well preserved and therefore available for decomposition after thaw. The missing quality-depth trend shows that permafrost preserves the quality of ancient organic matter.
When the organic matter is mobilized by deep degradation processes, the northern permafrost region may add up to 0.4°C to global warming by the year 2300.}, language = {en} } @phdthesis{Braun2014, author = {Braun, Andreas}, title = {Open innovation - an analysis of the individual level}, address = {Potsdam}, pages = {219 S.}, year = {2014}, language = {en} } @phdthesis{Conrad2014, author = {Conrad, Claudia}, title = {Open cluster groups and complexes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77605}, school = {Universit{\"a}t Potsdam}, pages = {xii, 172}, year = {2014}, abstract = {It is generally agreed that stars typically form in open clusters and stellar associations, but little is known about the structure of the open cluster system. Do open clusters and stellar associations form in isolation, or do they prefer to form in groups and complexes? Open cluster groups and complexes could indicate that star-forming regions are larger than expected, which would explain the chemical homogeneity over large areas in the Galactic disk. They would also define an additional level in the hierarchy of star formation and could be used as tracers for the scales of fragmentation in giant molecular clouds. Furthermore, open cluster groups and complexes could affect Galactic dynamics and should be considered in investigations and simulations of dynamical processes, such as radial migration, disc heating, differential rotation, kinematic resonances, and spiral structure. In the past decade there were a few studies on open cluster pairs (de La Fuente Marcos \& de La Fuente Marcos 2009a,b,c) and on open cluster groups and complexes (Piskunov et al. 2006). The former only considered spatial proximity for the identification of the pairs, while the latter also required tangential velocities to be similar for the members. In this work I used the full set of 6D phase-space information to draw a more detailed picture of these structures. For this purpose I utilised the most homogeneous cluster catalogue available, namely the Catalogue of Open Cluster Data (COCD; Kharchenko et al. 2005a,b), which contains parameters for 650 open clusters and compact associations, as well as for their uniformly selected members. Additional radial velocity (RV) and metallicity ([M/H]) information on the members was obtained from the RAdial Velocity Experiment (RAVE; Steinmetz et al. 2006; Kordopatis et al. 2013) for 110 and 81 clusters, respectively. The RAVE sample was cleaned considering quality parameters and flags provided by RAVE (Matijevič et al. 2012; Kordopatis et al. 2013). To ensure that only real members were included in the mean values, the cluster membership as provided by Kharchenko et al. (2005a,b) was also considered for the stars cross-matched in RAVE. 6D phase-space information could be derived for 432 out of the 650 COCD objects, and I used an adaptation of the Friends-of-Friends algorithm, as used in cosmology, to identify potential groupings. The vast majority of the 19 identified groupings were pairs, but I also found four groups of 4-5 members and one complex with 15 members. For the verification of the identified structures, I compared the results to a randomly selected subsample of the catalogue of the Milky Way global survey of Star Clusters (MWSC; Kharchenko et al. 2013), which became available recently and was used as a reference sample.
Furthermore, I implemented Monte-Carlo simulations with randomised samples created from two distinct input distributions for the spatial and velocity parameters: on the one hand, a uniform distribution in the Galactic disc, and on the other hand, the COCD data distributions, assumed to be representative of the whole open cluster population. The results suggested that the majority of the identified pairs are rather chance alignments, but the groups and the complex seemed to be genuine. A comparison of my results to the pairs, groups and complexes proposed in the literature yielded a partial overlap, which was most likely due to selection effects and the different parameters considered. This is further verification of the existence of such structures. The characteristics of the identified groupings favour the interpretation that the members of an open cluster grouping originate from a common giant molecular cloud and formed in a single, but possibly sequential, star formation event. Moreover, the fact that the young open cluster population showed smaller spatial separations between nearest neighbours than the old cluster population indicated that the lifetime of open cluster groupings is most likely comparable to that of the Galactic open cluster population itself. Still, even among the old open clusters I could identify groupings, which suggests that the detected structures could in some cases be longer lived than one might think. In this thesis I could only present a pilot study on structures in the Galactic open cluster population, since the data sample used was highly incomplete. For further investigations a far more complete sample would be required. One step in this direction would be to use data from large current surveys, like SDSS, RAVE, Gaia-ESO and VVV, as well as to include results from studies of individual clusters. Later, the sample can be completed with data from upcoming missions, like Gaia and 4MOST. Future studies using this more complete open cluster sample will reveal the implications of open cluster groupings for star formation theory and their significance for the kinematics, dynamics and evolution of the Milky Way, and thereby of spiral galaxies.}, language = {en} } @phdthesis{Wienhoefer2014, author = {Wienh{\"o}fer, Jan}, title = {On the role of structure-process interactions in controlling terrestrial systems: an example of hillslope hydrology and a slow-moving landslide}, address = {Potsdam}, pages = {135 S.}, year = {2014}, language = {en} }
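The Conrad2014 entry above describes adapting the cosmological Friends-of-Friends algorithm to 6D phase-space data of open clusters. The following minimal Python sketch illustrates such a grouping step under the assumption of separate linking lengths for the spatial (pc) and velocity (km/s) coordinates; the function name, the linking-length values, and the toy data are invented for illustration and are not taken from the thesis or from the COCD catalogue.

import numpy as np

def friends_of_friends(xyz, uvw, d_link, v_link):
    """Group objects whose spatial AND velocity separations both fall
    below the linking lengths; returns a list of index lists."""
    n = len(xyz)
    parent = list(range(n))

    def find(i):
        # Union-find root lookup with path compression.
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    def union(i, j):
        parent[find(i)] = find(j)

    # Link every pair that is a "friend" in both position and velocity space.
    for i in range(n):
        for j in range(i + 1, n):
            if (np.linalg.norm(xyz[i] - xyz[j]) < d_link
                    and np.linalg.norm(uvw[i] - uvw[j]) < v_link):
                union(i, j)

    # Collect connected components; keep only groupings with more than one member.
    groups = {}
    for i in range(n):
        groups.setdefault(find(i), []).append(i)
    return [g for g in groups.values() if len(g) > 1]

# Invented toy data: four clusters forming two close pairs in both space and velocity.
xyz = np.array([[0.0, 0.0, 0.0], [15.0, 5.0, 0.0], [500.0, 0.0, 0.0], [505.0, 2.0, 1.0]])
uvw = np.array([[10.0, 5.0, 0.0], [11.0, 6.0, 1.0], [-30.0, 0.0, 0.0], [-29.0, 1.0, 0.0]])
print(friends_of_friends(xyz, uvw, d_link=100.0, v_link=10.0))  # -> [[0, 1], [2, 3]]

The quadratic pair test with a union-find structure keeps the sketch short for a few hundred clusters; for larger samples a k-d tree neighbour search would be the natural refinement.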