@phdthesis{Koelsch2014, author = {K{\"o}lsch, Jonas David}, title = {Entwicklung neuer farbstoffmarkierter Polymere zur Visualisierung des LCST-Phasen{\"u}bergangs in w{\"a}ssriger L{\"o}sung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72531}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 147}, year = {2014}, abstract = {Ziel der Arbeit war die Entwicklung von farbstoffmarkierten Polymeren, die einen temperaturgetriebenen Kn{\"a}uel-Kollaps-Phasen{\"u}bergang in w{\"a}ssriger L{\"o}sung ("thermo-responsive Polymere") zeigen und diesen in ein optisches Signal {\"u}bersetzen k{\"o}nnen. Solche Polymere unterliegen innerhalb eines kleinen Temperaturintervalls einer massiven {\"A}nderung ihres Verhaltens, z. B. ihrer Konformation und ihres Quellungsgrads. Diese {\"A}nderungen sind mit einem Wechsel der L{\"o}seeigenschaften von hydrophil zu hydrophob verbunden. Als Matrixpolymere wurden Poly-N-isopropylacrylamid (polyNIPAm), Poly(oligoethylenglykolacrylat) (polyOEGA) und Poly(oligoethylenglykolmethacrylat) (polyOEGMA) eingesetzt, in die geeignete Farbstoffe durch Copolymerisation eingebaut wurden. Als besonders geeignet, um den Phasen{\"u}bergang in ein optisches Signal zu {\"u}bersetzen, erwiesen sich hierf{\"u}r kompakte, solvatochrome Cumarin- und Naphthalimidderivate. Diese beeintr{\"a}chtigten weder das Polymerisationsverhalten noch den Phasen{\"u}bergang, reagierten aber sowohl bez{\"u}glich Farbe als auch Fluoreszenz stark auf die Polarit{\"a}t des L{\"o}semittels. Weiterhin wurden Systeme entwickelt, die mittels Energietransfer (FRET) ein an den Phasen{\"u}bergang gekoppeltes optisches Signal erzeugen. Hierbei wurde ein Cumarin als Donor- und ein Polythiophen als Akzeptorfarbstoff eingesetzt. Es zeigte sich, dass trotz scheinbarer {\"A}hnlichkeit bestimmte Polymere ausgepr{\"a}gt auf einen Temperaturstimulus mit {\"A}nderung ihrer spektralen Eigenschaften reagieren, andere aber nicht. Hierf{\"u}r wurden die molekularen Ursachen untersucht. Als wahrscheinliche Gr{\"u}nde f{\"u}r das Ausbleiben einer spektralen {\"A}nderung in Oligo(ethylenglykol)-basierten Polymeren sind zum einen die fehlende Dehydratationseffektivit{\"a}t infolge des Fehlens eines selbstgen{\"u}genden Wasserstoffbr{\"u}ckenbindungsmotivs zu nennen und zum anderen die sterische Abschirmung der Farbstoffe durch die Oligo(ethylenglykol)-Seitenketten. Als Prinzipbeweis f{\"u}r die N{\"u}tzlichkeit solcher Systeme f{\"u}r die Bioanalytik wurde ein System entwickelt, das die L{\"o}slichkeitseigenschaft eines thermoresponsiven Polymers durch Antik{\"o}rper-Antigen-Reaktion {\"a}nderte. Die Bindung selbst kleiner Mengen eines Antik{\"o}rpers ließ sich so direkt optisch auslesen und war bereits mit dem bloßen Auge zu erkennen.}, language = {de} } @phdthesis{Nguyen2014, author = {Nguyen, Van Manh}, title = {Large-scale floodplain sediment dynamics in the Mekong Delta : present state and future prospects}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72512}, school = {Universit{\"a}t Potsdam}, pages = {ix, 95}, year = {2014}, abstract = {The Mekong Delta (MD) sustains the livelihood and food security of millions of people in Vietnam and Cambodia. It is known as the "rice bowl" of South East Asia and has one of the world's most productive fisheries. Sediment dynamics play a major role in the high productivity of agriculture and fishery in the delta. However, the MD is threatened by climate change, sea level rise and unsustainable development activities in the Mekong Basin. 
But despite its importance and the expected threats, the understanding of the present and future sediment dynamics in the MD is very limited. This is a consequence of its large extent, the intricate system of rivers, channels and floodplains and the scarcity of observations. Thus, this thesis aimed at (1) the quantification of suspended sediment dynamics and associated sediment-nutrient deposition in floodplains of the MD, and (2) the assessment of the impacts of likely future boundary changes on the sediment dynamics in the MD. The applied methodology combines field experiments and numerical simulation to quantify and predict the sediment dynamics in the entire delta in a spatially explicit manner. The experimental part consists of a comprehensive procedure to monitor quantity and spatial variability of sediment and associated nutrient deposition for large and complex river floodplains, including an uncertainty analysis. The measurement campaign deployed 450 sediment mat traps in 19 floodplains over the MD for a complete flood season. The data also support the quantification of nutrient deposition in floodplains based on laboratory analysis of nutrient fractions of the trapped sediment. The main findings are that the distributions of grain size and nutrient fractions of suspended sediment are homogeneous over the Vietnamese floodplains. But the sediment deposition within and between ring dike floodplains shows very high spatial variability due to a high level of human interference. The experimental findings provide the essential data for the setup and calibration of a large-scale sediment transport model for the MD. For the simulation studies, a large-scale hydrodynamic model was developed in order to quantify large-scale floodplain sediment dynamics. The complex river-channel-floodplain system of the MD is described by a quasi-2D model linking a hydrodynamic and a cohesive sediment transport model. The floodplains are described as quasi-2D representations linked to rivers and channels modeled in 1D by using control structures. The model setup, based on the experimental findings, ignored erosion and re-suspension processes due to a very high degree of human interference during the flood season. A two-stage calibration with six objective functions was developed in order to calibrate both the hydrodynamic and sediment transport modules. The objective functions include hydraulic and sediment transport parameters in main rivers, channels and floodplains. The model results show, for the first time, the spatio-temporal distribution of sediment and associated nutrient deposition rates in the whole MD. The patterns of sediment transport and deposition are quantified for different sub-systems. The main factors influencing spatial sediment dynamics are the network of rivers, channels and dike-rings, sluice gate operations, magnitude of the floods and tidal influences. The superposition of these factors leads to high spatial variability of the sediment transport and deposition, in particular in the Vietnamese floodplains. Depending on the flood magnitude, annual sediment loads reaching the coast vary from 48\% to 60\% of the sediment load at Kratie, the upper boundary of the MD. Deposited sediment varies from 19\% to 23\% of the annual load at Kratie in Cambodian floodplains, and from 1\% to 6\% in the compartmented and diked floodplains in Vietnam. 
Annual deposited nutrients (N, P, K), which are associated with the sediment deposition, provide on average more than 50\% of the mineral fertilizers typically applied for rice crops in non-flooded ring dike compartments in Vietnam. This large-scale quantification provides a basis for estimating the benefits of the annual Mekong floods for agriculture and fishery, for assessing the impacts of future changes on the delta system, and for further studies on coastal deposition/erosion. For the estimation of future prospects, a sensitivity-based approach is applied to assess the response of floodplain hydraulics and sediment dynamics to the changes in the delta boundaries, including hydropower development, climate change in the Mekong River Basin and effective sea level rise. The developed sediment model is used to simulate the mean sediment transport and sediment deposition in the whole delta system for the baseline (2000-2010) and future (2050-2060) periods. For each driver, we derive a plausible range of future changes and discretize it into five levels, resulting in altogether 216 possible factor combinations. Our results thus cover all plausible future pathways of sediment dynamics in the delta based on current knowledge. The uncertainty of the range of the resulting impacts can be decreased in case more information on these drivers becomes available. Our results indicate that the hydropower development dominates the changes in sediment dynamics of the Mekong Delta, while sea level rise has the smallest effect. The floodplains of the Vietnamese Mekong Delta are much more sensitive to the changes compared to the other subsystems of the delta. In terms of median changes of the three combined drivers, the inundation extent is predicted to increase slightly, but the overall floodplain sedimentation would be reduced by approximately 40\%, while the sediment load to the sea would diminish to half of the current rates. These findings provide new and valuable information on the possible impacts of future development on the delta, and indicate the most vulnerable areas. Thus, the presented results are a significant contribution to the ongoing international discussion on the hydropower development in the Mekong basin and its impact on the Mekong delta.}, language = {en} } @phdthesis{Kruegel2014, author = {Kr{\"u}gel, Andr{\'e}}, title = {Eye movement control during reading : factors and principles of computing the word center for saccade planning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72599}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Reading is a complex cognitive task based on the analyses of visual stimuli. Due to the physiology of the eye, only a small number of letters around the fixation position can be extracted with high visual acuity, while the visibility of words and letters outside this so-called foveal region quickly drops with increasing eccentricity. As a consequence, saccadic eye movements are needed to repeatedly shift the fovea to new words for visual word identification during reading. Moreover, even within a foveated word, fixation positions near the word center are superior to other fixation positions for efficient word recognition (O'Regan, 1981; Brysbaert, Vitu, and Schroyens, 1996). Thus, most reading theories assume that readers aim specifically at word centers during reading (for a review see Reichle, Rayner, \& Pollatsek, 2003). 
However, saccades' landing positions within words during reading are in fact systematically modulated by the distance of the launch site from the word center (McConkie, Kerr, Reddix, \& Zola, 1988). In general, it is largely unknown how readers identify the center of upcoming target words, and there is no computational model of the sensorimotor translation of the decision for a target word into spatial word center coordinates. Here we present a series of three studies which aim at advancing the current knowledge about the computation of saccade target coordinates during saccade planning in reading. Based on a large corpus analysis, we first identified word skipping as a further factor beyond the launch-site distance with a likewise systematic and surprisingly large effect on within-word landing positions. Most importantly, we found that the end points of saccades after skipped words are shifted two or more letters to the left compared to one-step saccades (i.e., from word N to word N+1) with equal launch-site distances. Then we present evidence from a single-saccade experiment suggesting that the word-skipping effect results from highly automatic low-level perceptual processes, which are essentially based on the localization of blank spaces between words. Finally, in the third part, we present a Bayesian model of the computation of the word center from primary sensory measurements of inter-word spaces. We demonstrate that the model simultaneously accounts for launch-site and saccade-type contingent modulations of within-word landing positions in reading. Our results show that the spatial saccade target during reading is the result of complex estimations of the word center based on incomplete sensory information, which also leads to specific systematic deviations of saccades' landing positions from the word center. Our results have important implications for current reading models and experimental reading research.}, language = {en} } @phdthesis{Riemer2014, author = {Riemer, Martin}, title = {Vom Phenol zum Naturstoff : Entwicklung nachhaltiger Mikrowellen-vermittelter SUZUKI-MIYAURA-Kupplungen und Tandem-Reaktionen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72525}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Ziel dieser Arbeit war die Entwicklung von Methoden zur Synthese von auf Phenol basierenden Naturstoffen. Insbesondere wurde bei der Methodenentwicklung die Nachhaltigkeit in den Vordergrund ger{\"u}ckt. Dies bedeutet, dass durch die Zusammenfassung mehrerer Syntheseschritte zu einem (Tandem-Reaktion) beispielsweise unn{\"o}tige Reaktionsschritte vermieden werden sollten. Ferner sollten im Sinne der Nachhaltigkeit m{\"o}glichst ungiftige Reagenzien und L{\"o}sungsmittel verwendet werden, ebenso wie mehrfach wiederverwertbare Katalysatoren zum Einsatz kommen. Im Rahmen dieser Arbeit wurden Methoden zum Aufbau von Biphenolen mittels Pd/C-katalysierter Suzuki-Miyaura-Kupplungen entwickelt. Diese Methoden sind insofern {\"a}ußerst effizient, als der ansonsten gebr{\"a}uchliche Syntheseweg {\"u}ber drei Reaktionsschritte auf lediglich eine Reaktionsstufe reduziert wurde. Weiterhin wurden die Reaktionsbedingungen so gestaltet, dass einfaches Wasser als vollkommen ungiftiges L{\"o}sungsmittel verwendet werden konnte. Des Weiteren wurde f{\"u}r diese Reaktionen ein Katalysator gew{\"a}hlt, der einfach durch Filtration vom Reaktionsgemisch abgetrennt und f{\"u}r weitere Reaktionen mehrfach wiederverwendet werden konnte. 
Dar{\"u}ber hinaus konnte durch die Synthese von mehr als 100 Verbindungen die breite Anwendbarkeit der Methoden aufgezeigt werden. Mit den entwickelten Methoden konnten 14 Naturstoffe - z. T. erstmals - synthetisiert werden. Derartige Stoffe werden u. a. von den {\"o}konomisch bedeutenden Kernobstgew{\"a}chsen ({\"A}pfeln, Birnen) als Abwehrmittel gegen{\"u}ber Sch{\"a}dlingen erzeugt. Folglich konnte mit Hilfe dieser Methoden ein Syntheseweg f{\"u}r potentielle Pflanzenschutzmittel entwickelt werden. Im zweiten Teil dieser Arbeit wurde ein Zugang zu den sich ebenfalls vom Phenol ableitenden Chromanonen, Chromonen und Cumarinen untersucht. Bei diesen Untersuchungen konnte durch die Entwicklung zweier neuer Tandem-Reaktionen ein nachhaltiger und stufen{\"o}konomischer Syntheseweg zur Darstellung substituierter Benzo(dihydro)pyrone aufgezeigt werden. Durch die erstmalige Kombination der Claisen-Umlagerung mit einer Oxa-Michael-Addition bzw. konjugierten-Addition wurden zwei vollkommen atom{\"o}konomische Reaktionen miteinander verkn{\"u}pft und somit eine {\"u}beraus effiente Synthese von allyl- bzw. prenylsubstituierten Chromanonen und Chromonen erm{\"o}glicht. Ferner konnten durch die Anwendung einer Claisen-Umlagerung-Wittig-Laktonisierungs-Reaktion allyl- bzw. prenylsubstituierte Cumarine erhalten werden. Herausragendes Merkmal dieser Methoden war, dass in nur einem Schritt der jeweilige Naturstoffgrundk{\"o}rper aufgebaut und eine lipophile Seitenkette generiert werden konnte. Die Entwicklung dieser Methoden ist von hohem pharmazeutischem Stellenwert, da auf diesen Wegen Verbindungen synthetisiert werden k{\"o}nnen, die zum einem {\"u}ber das notwendige pharmakologische Grundger{\"u}st verf{\"u}gen und zum anderen {\"u}ber eine Seitenkette, welche die Aufnahmef{\"a}higkeit und damit die Wirksamkeit im Organismus betr{\"a}chtlich erh{\"o}ht. Insgesamt konnten mittels der entwickelten Methoden 15 Chromanon-, Chromon- und Cumarin-Naturstoffe z. T. erstmals synthetisiert werden.}, language = {de} } @phdthesis{Steinhaus2014, author = {Steinhaus, Sebastian Peter}, title = {Constructing quantum spacetime}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72558}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Despite remarkable progress made in the past century, which has revolutionized our understanding of the universe, there are numerous open questions left in theoretical physics. Particularly important is the fact that the theories describing the fundamental interactions of nature are incompatible. Einstein's theory of general relative describes gravity as a dynamical spacetime, which is curved by matter and whose curvature determines the motion of matter. On the other hand we have quantum field theory, in form of the standard model of particle physics, where particles interact via the remaining interactions - electromagnetic, weak and strong interaction - on a flat, static spacetime without gravity. A theory of quantum gravity is hoped to cure this incompatibility by heuristically replacing classical spacetime by quantum spacetime'. Several approaches exist attempting to define such a theory with differing underlying premises and ideas, where it is not clear which is to be preferred. Yet a minimal requirement is the compatibility with the classical theory, they attempt to generalize. Interestingly many of these models rely on discrete structures in their definition or postulate discreteness of spacetime to be fundamental. 
Besides the direct advantages discretisations provide, e.g. permitting numerical simulations, they come with serious caveats requiring thorough investigation: In general, discretisations break the fundamental diffeomorphism symmetry of gravity and are generically not unique. Both complicate establishing the connection to the classical continuum theory. The main focus of this thesis lies in the investigation of this relation for spin foam models. This is done on different levels of the discretisation / triangulation, ranging from few simplices up to the continuum limit. In the regime of very few simplices, we confirm and deepen the connection of spin foam models to discrete gravity. Moreover, we discuss dynamical conditions, e.g. diffeomorphism invariance in the discrete, to fix the ambiguities of the models. In order to satisfy these conditions, the discrete models have to be improved in a renormalisation procedure, which also allows us to study their continuum dynamics. Applied to simplified spin foam models, we uncover a rich, non-trivial fixed point structure, which we summarize in a phase diagram. Inspired by these methods, we propose a method to consistently construct the continuum theory, which comes with a unique vacuum state.}, language = {en} } @phdthesis{GuzmanPerez2014, author = {Guzman-Perez, Valentina}, title = {Effect of benzylglucosinolate on signaling pathways associated with type 2 diabetes prevention}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72351}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Type 2 diabetes (T2D) is a health problem throughout the world. In 2010, there were nearly 230 million individuals with diabetes worldwide, and it is estimated that in economically advanced countries the number of cases will increase by about 50\% in the next twenty years. Insulin resistance is one of the major features of T2D, which is also a risk factor for metabolic and cardiovascular complications. Epidemiological and animal studies have shown that the consumption of vegetables and fruits can delay or prevent the development of the disease, although the underlying mechanisms of these effects are still unclear. Brassicales species such as broccoli (Brassica oleracea var. italica) and nasturtium (Tropaeolum majus) possess a high content of bioactive phytochemicals, e.g. nitrogen-sulfur compounds (glucosinolates and isothiocyanates) and polyphenols, largely associated with the prevention of cancer. Isothiocyanates (ITCs) display their anti-carcinogenic potential by inducing detoxifying phase II enzymes and increasing glutathione (GSH) levels in tissues. In T2D, an increase in gluconeogenesis and triglyceride synthesis and a reduction in fatty acid oxidation, accompanied by the presence of reactive oxygen species (ROS), are observed; altogether this is the result of an inappropriate response to insulin. Forkhead box O (FOXO) transcription factors play a crucial role in the regulation of insulin effects on gene expression and metabolism, and alterations in FOXO function could contribute to metabolic disorders in diabetes. 
In this study, using stably transfected human osteosarcoma cells (U-2 OS) with constitutive expression of FOXO1 protein labeled with GFP (green fluorescent protein) and human hepatoma (HepG2) cell cultures, the ability of benzylisothiocyanate (BITC), derived from benzylglucosinolate extracted from nasturtium, to modulate i) the insulin-signaling pathway, ii) the intracellular localization of FOXO1 and iii) the expression of proteins involved in glucose metabolism, ROS detoxification, cell cycle arrest and DNA repair was evaluated. BITC promoted oxidative stress and, in response, induced FOXO1 translocation from the cytoplasm into the nucleus, antagonizing the insulin effect. The BITC stimulus was able to down-regulate gluconeogenic enzymes, which can be considered an anti-diabetic effect; to promote antioxidant resistance, expressed by the up-regulation of manganese superoxide dismutase (MnSOD) and detoxification enzymes; to modulate autophagy by induction of BECLIN1 and down-regulation of the mammalian target of rapamycin complex 1 (mTORC1) pathway; and to promote cell cycle arrest and DNA damage repair by up-regulation of the cyclin-dependent kinase inhibitor (p21CIP) and Growth Arrest / DNA Damage Repair (GADD45). Except for the nuclear factor (erythroid-derived 2)-like 2 (NRF2) and its influence on detoxification enzyme gene expression, all the observed effects were independent of FOXO1, protein kinase B (AKT/PKB) and NAD-dependent deacetylase sirtuin-1 (SIRT1). The current study provides evidence that, besides their anticarcinogenic potential, isothiocyanates might have a role in T2D prevention. The BITC stimulus mimics the fasting state, in which insulin signaling is not triggered and FOXO proteins remain in the nucleus, modulating expression of their target genes, with the advantage of a down-regulation of gluconeogenesis instead of its increase. These effects suggest that BITC might be considered a promising substance in the prevention or treatment of T2D; therefore, the factors behind its modulatory effects need further investigation.}, language = {en} } @phdthesis{Merkel2014, author = {Merkel, Roswitha}, title = {Untersuchung zur Synthese und Eigenschaften von komplexen Oligospiroketalen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72561}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Es ist in dieser Arbeit gelungen, starre Oligospiroketal(OSK)-St{\"a}be als Grundbausteine f{\"u}r komplexe 2D- und 3D-Systeme zu verwenden. Dazu wurde ein difunktionalisierter starrer Stab synthetisiert, der mit seinesgleichen und anderen verzweigten Funktionalisierungseinheiten in Azid-Alkin-Klickreaktionen eingesetzt wurde. An zwei {\"u}ber Klickreaktion verkn{\"u}pften OSK-St{\"a}ben konnten mittels theoretischer Berechnungen Aussagen {\"u}ber die neuartige Bimodalit{\"a}t der Konformation getroffen werden. Es wurde daf{\"u}r der Begriff Gelenkstab eingef{\"u}hrt, da die Molek{\"u}le um ein Gelenk gedreht sowohl gestreckt als auch geknickt vorliegen k{\"o}nnen. Aufbauend auf diesen Erkenntnissen konnte gezeigt werden, dass nicht nur gezielt große Polymere aus bis zu vier OSK-St{\"a}ben synthetisiert werden k{\"o}nnen, sondern es auch m{\"o}glich ist, durch gezielte {\"A}nderung von Reaktionsbedingungen der Klickreaktion auch Cyclen aus starren OSK-St{\"a}ben herzustellen. 
Die neu entwickelte Substanzklasse der Gelenkst{\"a}be wurde im Hinblick auf die Steuerung des vorliegenden Gleichgewichts zwischen geknicktem und gestrecktem Gelenkstab untersucht. Daf{\"u}r wurde der Gelenkstab mit Pyrenylresten in terminaler Position versehen. Es wurde durch Fluoreszenzmessungen festgestellt, dass das Gleichgewicht z. B. durch die Temperatur oder die Wahl des L{\"o}sungsmittels beeinflussbar ist. F{\"u}r vielfache Anwendungen wurde eine vereinfachte Synthesestrategie gefunden, mit der eine beliebige Funktionalisierung in nur einem Syntheseschritt erreicht werden konnte. Es konnten photoaktive Gelenkst{\"a}be synthetisiert werden, die gezielt zur intramolekularen Dimerisierung gef{\"u}hrt werden konnten. Zus{\"a}tzlich wurde durch Aminos{\"a}uren ein Verkn{\"u}pfungselement am Ende der Gelenkst{\"a}be gefunden, das eine stereoselektive Synthese von Mehrfachfunktionalisierungen zul{\"a}sst. Die Synthese der komplexen Gelenkst{\"a}be wurde als ein neuartiges Gebiet aufgezeigt und bietet ein breites Forschungspotential f{\"u}r weitere Anwendungen, z. B. in der Biologie (als molekulare Schalter f{\"u}r Ionentransporte) und in der Materialchemie (als Ladungs- oder Energietransporteure).}, language = {de} } @phdthesis{Burdack2014, author = {Burdack, Doreen}, title = {Water management policies and their impact on irrigated crop production in the Murray-Darling Basin, Australia}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-306-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72245}, school = {Universit{\"a}t Potsdam}, pages = {307}, year = {2014}, abstract = {The economic impact analysis contained in this book shows how irrigation farming is particularly susceptible when applying certain water management policies in the Australian Murray-Darling Basin, one of the world's largest river basins and Australia's most fertile region. By comparing different pricing and non-pricing water management policies with the help of the Water Integrated Market Model, it is found that the impact of water-demand-reducing policies is most severe on crops that need to be intensively irrigated and are at the same time less water productive. A combination of increasingly frequent and severe droughts and the application of policies that decrease agricultural water demand, in the same region, will create a situation in which the highly water-dependent crops rice and cotton cannot be cultivated at all.}, language = {en} } @phdthesis{Ziese2014, author = {Ziese, Ramona}, title = {Geometric electroelasticity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72504}, school = {Universit{\"a}t Potsdam}, pages = {vi, 113}, year = {2014}, abstract = {In this work, a differential geometric formulation of the theory of electroelasticity is developed which also includes thermal and magnetic influences. We study the motion of bodies consisting of an elastic material that are deformed by the influence of mechanical forces, heat and an external electromagnetic field. To this end, physical balance laws (conservation of mass, balance of momentum, angular momentum and energy) are established. These provide an equation that describes the motion of the body during the deformation. Here the body and the surrounding space are modeled as Riemannian manifolds, and we allow the body to have a lower dimension than the surrounding space. 
In this way, one is not (as usual) restricted to the description of the deformation of three-dimensional bodies in a three-dimensional space, but one can also describe the deformation of membranes and the deformation in a curved space. Moreover, we formulate so-called constitutive relations that encode the properties of the used material. Balance of energy as a scalar law can easily be formulated on a Riemannian manifold. The remaining balance laws are then obtained by demanding that balance of energy is invariant under the action of arbitrary diffeomorphisms on the surrounding space. This generalizes a result by Marsden and Hughes that pertains to bodies that have the same dimension as the surrounding space and does not allow the presence of electromagnetic fields. Usually, in works on electroelasticity, the entropy inequality is used to decide which otherwise allowed deformations are physically admissible and which are not. It is also employed to derive restrictions on the possible forms of constitutive relations describing the material. Unfortunately, the opinions on the physically correct statement of the entropy inequality diverge when electromagnetic fields are present. Moreover, it is unclear how to formulate the entropy inequality in the case of a membrane that is subjected to an electromagnetic field. Thus, we show that one can replace the use of the entropy inequality by the demand that, for a given process, balance of energy is invariant under the action of arbitrary diffeomorphisms on the surrounding space and under linear rescalings of the temperature. On the one hand, this demand also yields the desired restrictions on the form of the constitutive relations. On the other hand, it needs much weaker assumptions than the arguments in the physics literature that employ the entropy inequality. Again, our result generalizes a theorem of Marsden and Hughes. This time, our result is, like theirs, only valid for bodies that have the same dimension as the surrounding space.}, language = {en} } @phdthesis{Balzer2014, author = {Balzer, Arnim}, title = {Crab flare observations with H.E.S.S. phase II}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72545}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The H.E.S.S. array is a third-generation Imaging Atmospheric Cherenkov Telescope (IACT) array. It is located in the Khomas Highland in Namibia, and measures very high energy (VHE) gamma-rays. In Phase I, the array started data taking in 2004 with its four identical 13 m telescopes. Since then, H.E.S.S. has emerged as the most successful IACT experiment to date. Among the almost 150 sources of VHE gamma-ray radiation found so far, even the oldest detection, the Crab Nebula, keeps surprising the scientific community with unexplained phenomena such as the recently discovered very energetic flares of high energy gamma-ray radiation. During its most recent flare, which was detected by the Fermi satellite in March 2013, the Crab Nebula was simultaneously observed with the H.E.S.S. array for six nights. The results of the observations will be discussed in detail during the course of this work. During the nights of the flare, the new 24 m × 32 m H.E.S.S. II telescope was still being commissioned, but participated in the data taking for one night. To be able to reconstruct and analyze the data of the H.E.S.S. Phase II array, the algorithms and software used by the H.E.S.S. Phase I array had to be adapted. 
The most prominent advanced shower reconstruction technique developed by de Naurois and Rolland, the template-based model analysis, compares real shower images taken by the Cherenkov telescope cameras with shower templates obtained using a semi-analytical model. To find the best-fitting image and, therefore, the relevant parameters that describe the air shower best, a pixel-wise log-likelihood fit is performed. The adaptation of this advanced shower reconstruction technique to the heterogeneous H.E.S.S. Phase II array for stereo events (i.e. air showers seen by at least two telescopes of any kind), its performance using Monte Carlo simulations, as well as its application to real data will be described.}, language = {en} } @phdthesis{Bamberg2014, author = {Bamberg, Marlene}, title = {Planetary mapping tools applied to floor-fractured craters on Mars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72104}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Planetary research is often user-based and requires considerable skill, time, and effort. Unfortunately, self-defined boundary conditions, definitions, and rules are often not documented or not easy to comprehend due to the complexity of research. This makes a comparison to other studies, or an extension of the already existing research, complicated. Comparisons are often distorted because results rely on different, not well-defined, or even unknown boundary conditions. The purpose of this research is to develop a standardized analysis method for planetary surfaces, which is adaptable to several research topics. The method provides a consistent quality of results. This also includes achieving reliable and comparable results and reducing the time and effort of conducting such studies. A standardized analysis method is provided by automated analysis tools that focus on statistical parameters. Specific key parameters and boundary conditions are defined for the tool application. The analysis relies on a database in which all key parameters are stored. These databases can be easily updated and adapted to various research questions. This increases the flexibility, reproducibility, and comparability of the research. However, the quality of the database and reliability of definitions directly influence the results. To ensure a high quality of results, the rules and definitions need to be well defined and based on previously conducted case studies. The tools then produce parameters, which are obtained by defined geostatistical techniques (measurements, calculations, classifications). The idea of an automated statistical analysis is tested to reveal the benefits but also potential problems of this method. In this study, I adapt automated tools for floor-fractured craters (FFCs) on Mars. These impact craters show a variety of surface features, occurring in different Martian environments, and having different fracturing origins. They provide a complex morphological and geological field of application. 433 FFCs are classified by the analysis tools according to their fracturing process. Spatial data, environmental context, and crater interior data are analyzed to distinguish between the processes involved in floor fracturing. Related geologic processes, such as glacial and fluvial activity, are too similar to be separately classified by the automated tools. Glacial and fluvial fracturing processes are merged for the classification. The automated tools provide probability values for each origin model. 
To guarantee the quality and reliability of the results, classification tools need to achieve an origin probability above 50 \%. This analysis method shows that 15 \% of the FFCs are fractured by intrusive volcanism, 20 \% by tectonic activity, and 43 \% by water \& ice related processes. In total, 75 \% of the FFCs are assigned to an origin type. The remaining cases can be explained by a combination of origin models, superposition or erosion of key parameters, or an unknown fracturing model. Those features have to be analyzed manually in detail. Another possibility would be the improvement of key parameters and rules for the classification. This research shows that it is possible to conduct an automated statistical analysis of morphologic and geologic features based on analysis tools. Analysis tools provide additional information to the user and are therefore considered assistance systems.}, language = {en} } @phdthesis{Takouna2014, author = {Takouna, Ibrahim}, title = {Energy-efficient and performance-aware virtual machine management for cloud data centers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72399}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Virtualisierte Cloud Datenzentren stellen nach Bedarf Ressourcen zur Verf{\"u}gung, erm{\"o}glichen agile Ressourcenbereitstellung und beherbergen heterogene Applikationen mit verschiedenen Anforderungen an Ressourcen. Solche Datenzentren verbrauchen enorme Mengen an Energie, was die Erh{\"o}hung der Betriebskosten, der W{\"a}rme innerhalb der Zentren und des Kohlendioxidausstoßes verursacht. Der Anstieg des Energieverbrauches kann durch ein ineffektives Ressourcenmanagement, das die ineffiziente Ressourcenausnutzung verursacht, entstehen. Die vorliegende Dissertation stellt detaillierte Modelle und neue Verfahren f{\"u}r virtualisiertes Ressourcenmanagement in Cloud Datenzentren vor. Die vorgestellten Verfahren ziehen das Service-Level-Agreement (SLA) und die Heterogenit{\"a}t der Auslastung bez{\"u}glich des Bedarfs an Speicherzugriffen und Kommunikationsmustern von Web- und HPC- (High Performance Computing) Applikationen in Betracht. Um die pr{\"a}sentierten Techniken zu evaluieren, verwenden wir Simulationen und echte Protokollierung der Auslastungen von Web- und HPC- Applikationen. Außerdem vergleichen wir unsere Techniken und Verfahren mit anderen aktuellen Verfahren durch die Anwendung von verschiedenen Performance-Metriken. Die Hauptbeitr{\"a}ge dieser Dissertation sind die folgenden: Ein proaktives, auf robuster Optimierung basierendes Ressourcenbereitstellungsverfahren. Dieses Verfahren erh{\"o}ht die F{\"a}higkeit der Hosts, mehr VMs bereitzustellen. Gleichzeitig aber wird der unn{\"o}tige Energieverbrauch minimiert. Zus{\"a}tzlich mindert diese Technik unerw{\"u}nschte {\"A}nderungen im Energiezustand des Servers. Die vorgestellte Technik nutzt einen intervallbasierten Vorhersagealgorithmus zur Implementierung einer robusten Optimierung. Dabei werden unsichere Anforderungen in Betracht gezogen. Ein adaptives, intervallbasiertes Verfahren zur Vorhersage des Arbeitsaufkommens mit hohen, in kurzer Zeit auftretenden Schwankungen. Die intervallbasierte Vorhersage ist in der Standardabweichungs-Variante und in der Variante der medianen absoluten Abweichung implementiert. Die Intervall-{\"A}nderungen basieren auf einem adaptiven Vertrauensfenster, um die Schwankungen des Arbeitsaufkommens zu bew{\"a}ltigen. 
Eine robuste VM-Zusammenlegung f{\"u}r ein effizientes Energie- und Performance-Management. Dies erm{\"o}glicht es, die gegenseitige Abh{\"a}ngigkeit zwischen der Energie und der Performance zu minimieren. Unser Verfahren reduziert die Anzahl der VM-Migrationen im Vergleich mit den vor kurzem vorgestellten Verfahren. Dies tr{\"a}gt auch zur Reduzierung des durch das Netzwerk verursachten Energieverbrauches bei. Außerdem reduziert dieses Verfahren SLA-Verletzungen und die Anzahl von {\"A}nderungen an Energiezust{\"a}nden. Ein generisches Modell f{\"u}r das Netzwerk eines Datenzentrums, um die verz{\"o}gerte Kommunikation und ihre Auswirkung auf die VM-Performance und auf die Netzwerkenergie zu simulieren. Außerdem wird ein generisches Modell f{\"u}r einen Memory-Bus des Servers vorgestellt. Dieses Modell beinhaltet auch Modelle f{\"u}r die Latenzzeit und den Energieverbrauch f{\"u}r verschiedene Memory-Frequenzen. Dies erlaubt eine Simulation der Memory-Verz{\"o}gerung und ihrer Auswirkung auf die VM-Performance und auf den Memory-Energieverbrauch. Kommunikationsbewusste und energieeffiziente Zusammenlegung f{\"u}r parallele Applikationen, um die dynamische Entdeckung von Kommunikationsmustern und das Umplanen von VMs zu erm{\"o}glichen. Das Umplanen von VMs benutzt eine auf den entdeckten Kommunikationsmustern basierende Migration. Eine neue Technik zur Entdeckung von dynamischen Mustern wurde implementiert. Sie basiert auf der Signalverarbeitung des Netzwerks von VMs, anstatt die Informationen der virtuellen Umstellung der Hosts oder der Initiierung der VMs zu nutzen. Das Ergebnis zeigt, dass unsere Methode die durchschnittliche Nutzung des Netzwerks reduziert und aufgrund der Reduzierung der aktiven Umstellungen Energie spart. Außerdem bietet sie eine bessere VM-Performance im Vergleich zu der CPU-basierten Platzierung. Memory-bewusste VM-Zusammenlegung f{\"u}r unabh{\"a}ngige VMs. Sie nutzt die Vielfalt des Memory-Zugriffs der VMs, um die Nutzung des Memory-Busses der Hosts zu balancieren. Die vorgestellte Technik, Memory-Bus Load Balancing (MLB), verteilt die VMs reaktiv neu in Bezug auf ihre Nutzung des Memory-Busses. Sie nutzt die VM-Migration, um die Performance des gesamten Systems zu verbessern. Außerdem werden die dynamische Spannungs- und Frequenzskalierung des Memory und die MLB-Methode kombiniert, um ein besseres Energiesparen zu erreichen.}, language = {de} } @phdthesis{Memczak2014, author = {Memczak, Henry}, title = {Entwicklung influenzabindender Peptide f{\"u}r die Biosensorik}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72470}, school = {Universit{\"a}t Potsdam}, pages = {X, 117}, year = {2014}, abstract = {Das Influenzavirus infiziert S{\"a}ugetiere und V{\"o}gel. Der erste Schritt im Infektionszyklus ist die Anbindung des Virus {\"u}ber sein Oberfl{\"a}chenprotein H{\"a}magglutinin (HA) an Zuckerstrukturen auf Epithelzellen des respiratorischen Traktes im Wirtsorganismus. Aus den drei komplementarit{\"a}tsbestimmenden Regionen (complementarity determining regions, CDRs) der schweren Kette eines monoklonalen H{\"a}magglutinin-bindenden Antik{\"o}rpers wurden drei lineare Peptide abgeleitet. Die Bindungseigenschaften der drei Peptide wurden experimentell mittels Oberfl{\"a}chenplasmonenresonanzspektroskopie untersucht. Es zeigte sich, dass in {\"U}bereinstimmung mit begleitenden Molekulardynamik-Simulationen zwei der drei Peptide (PeB und PeC) analog zur Bindef{\"a}higkeit des Antik{\"o}rpers in der Lage sind, Influenzaviren vom Stamm X31 (H3N2 A/Aichi/2/1968) zu binden. 
Die Interaktion des Peptids PeB, welches potentiell mit der konservierten Rezeptorbindestelle im HA interagiert, wurde anschließend n{\"a}her charakterisiert. Die Detektion der Influenzaviren war unter geeigneten Immobilisationsbedingungen im diagnostisch relevanten Bereich m{\"o}glich. Die Spezifit{\"a}t der PeB-Virus-Bindung wurde mittels geeigneter Kontrollen auf der Seite des Analyten und des Liganden nachgewiesen. Des Weiteren war das Peptid PeB in der Lage, die Bindung von X31-Viren an Mimetika seines nat{\"u}rlichen Rezeptors zu inhibieren, was die spezifische Interaktion mit der Rezeptorbindungsstelle im H{\"a}magglutinin belegt. Anschließend wurde die Prim{\"a}rsequenz von PeB durch eine vollst{\"a}ndige Substitutionsanalyse im Microarray-Format hinsichtlich der Struktur-Aktivit{\"a}ts-Beziehungen charakterisiert. Dies f{\"u}hrte außerdem zu verbesserten Peptidvarianten mit erh{\"o}hter Affinit{\"a}t und breiterer Spezifit{\"a}t gegen aktuelle Influenzast{\"a}mme verschiedener Serotypen (z.B. H1N1/2009, H5N1/2004, H7N1/2013). Schließlich konnte durch Verwendung einer in der Prim{\"a}rsequenz angepassten h{\"o}her affinen Peptidvariante die Influenzainfektion in vitro inhibiert werden. Damit stellen die vom urspr{\"u}nglichen Peptid PeB abgeleiteten Varianten Rezeptormolek{\"u}le in biosensorischen Testsystemen sowie potentielle Wirkstoffe dar.}, language = {de} } @phdthesis{Schaefer2014, author = {Schaefer, Laura}, title = {Synchronisationsph{\"a}nomene myotendin{\"o}ser Oszillationen interagierender neuromuskul{\"a}rer Systeme}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72445}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Muskeln oszillieren nachgewiesenermaßen mit einer Frequenz um 10 Hz. Doch was geschieht mit myofaszialen Oszillationen, wenn zwei neuromuskul{\"a}re Systeme interagieren? Die Dissertation widmet sich dieser Fragestellung bei isometrischer Interaktion. W{\"a}hrend der Testmessungen ergaben sich Hinweise auf das Vorhandensein von m{\"o}glicherweise zwei verschiedenen Formen der Isometrie. Arbeiten zwei Personen isometrisch gegeneinander, k{\"o}nnen subjektiv zwei Modi eingenommen werden: man kann entweder isometrisch halten - der Kraft des Partners widerstehen - oder isometrisch dr{\"u}cken - gegen den isometrischen Widerstand des Partners arbeiten. Daher wurde zus{\"a}tzlich zu den Messungen zur Interaktion zweier Personen an einzelnen Individuen gepr{\"u}ft, ob m{\"o}glicherweise zwei Formen der Isometrie existieren. Die Promotion besteht demnach aus zwei inhaltlich und methodisch getrennten Teilen: I „Single-Isometrie" und II „Paar-Isometrie". F{\"u}r Teil I wurden mithilfe eines pneumatisch betriebenen Systems die hypothetischen Messmodi Halten und Dr{\"u}cken w{\"a}hrend isometrischer Aktion untersucht. Bei n = 10 Probanden erfolgte parallel zur Aufzeichnung des Drucksignals w{\"a}hrend der Messungen die Erfassung der Kraft (DMS) und der Beschleunigung sowie die Aufnahme der mechanischen Muskeloszillationen folgender myotendin{\"o}ser Strukturen via Mechanomyo- (MMG) bzw. Mechanotendografie (MTG): M. triceps brachii (MMGtri), Trizepssehne (MTGtri), M. obliquus externus abdominis (MMGobl). Pro Proband wurden bei 80 \% der MVC sowohl sechs 15-Sekunden-Messungen (jeweils drei im haltenden bzw. dr{\"u}ckenden Modus; Pause: 1 Minute) als auch vier Erm{\"u}dungsmessungen (jeweils zwei im haltenden bzw. dr{\"u}ckenden Modus; Pause: 2 Minuten) durchgef{\"u}hrt. 
Zum Vergleich der Messmodi Halten und Dr{\"u}cken wurden die Amplituden der myofaszialen Oszillationen sowie die Kraftausdauer herangezogen. Signifikante Unterschiede zwischen dem haltenden und dem dr{\"u}ckenden Modus zeigten sich insbesondere im Bereich der Erm{\"u}dungscharakteristik. So lassen Probanden im haltenden Modus signifikant fr{\"u}her nach als im dr{\"u}ckenden Modus (t(9) = 3,716; p = .005). Im dr{\"u}ckenden Modus macht das l{\"a}ngste isometrische Plateau durchschnittlich 59,4 \% der Gesamtdauer aus, im haltenden sind es 31,6 \% (t(19) = 5,265, p = .000). Die Amplituden der Single-Isometrie-Messungen unterscheiden sich nicht signifikant. Allerdings variieren die Amplituden des MMGobl zwischen den Messungen im dr{\"u}ckenden Modus signifikant st{\"a}rker als im haltenden Modus. Aufgrund dieser teils signifikanten Unterschiede zwischen den beiden Messmodi wurde dieses Setting auch im zweiten Teil „Paar-Isometrie" ber{\"u}cksichtigt. Dort wurden n = 20 Probanden - eingeteilt in zehn gleichgeschlechtliche Paare - w{\"a}hrend isometrischer Interaktion untersucht. Die Sensorplatzierung erfolgte analog zu Teil I. Die Oszillationen der erfassten MTG- sowie MMG-Signale wurden u.a. mit Algorithmen der Nichtlinearen Dynamik auf ihre Koh{\"a}renz hin untersucht. Durch die Paar-Isometrie-Messungen zeigte sich, dass die Muskeln und die Sehnen beider neuromuskul{\"a}rer Systeme bei Interaktion im bekannten Frequenzbereich von 10 Hz oszillieren. Außerdem waren sie in der Lage, sich bei Interaktion so aufeinander abzustimmen, dass sich eine signifikante Koh{\"a}renz entwickelte, die sich von Zufallspaarungen signifikant unterscheidet (Patchanzahl: t(29) = 3,477; p = .002; Summe der 4 l{\"a}ngsten Patches: t(29) = 7,505; p = .000). Es wird der Schluss gezogen, dass neuromuskul{\"a}re Komplement{\"a}rpartner in der Lage sind, sich im Sinne koh{\"a}renten Verhaltens zu synchronisieren. Bez{\"u}glich der Parameter zur Untersuchung der m{\"o}glicherweise vorhandenen zwei Formen der Isometrie zeigte sich bei den Paar-Isometrie-Messungen zwischen Halten und Dr{\"u}cken ein signifikanter Unterschied bei der Erm{\"u}dungscharakteristik sowie bez{\"u}glich der Amplitude der MMGobl. Die Ergebnisse beider Teilstudien best{\"a}rken die Hypothese, dass zwei Formen der Isometrie existieren. Fraglich ist, ob man {\"u}berhaupt von Isometrie sprechen kann, da jede isometrische Muskelaktion aus feinen Oszillationen besteht, die eine per Definition postulierte Isometrie ausschließen. Es wird der Vorschlag unterbreitet, den Begriff der Isometrie durch den der Hom{\"o}ometrie zu ersetzen. Die Ergebnisse der Paar-Isometrie-Messungen zeigen u.a., dass neuromuskul{\"a}re Systeme in der Lage sind, ihre myotendin{\"o}sen Oszillationen so aufeinander abzustimmen, dass koh{\"a}rentes Verhalten entsteht. Es wird angenommen, dass hierzu beide neuromuskul{\"a}ren Systeme funktionell intakt sein m{\"u}ssen. Das Verfahren k{\"o}nnte f{\"u}r die Diagnostik funktioneller St{\"o}rungen relevant werden.}, language = {de} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Association for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). 
Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS) which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. students achieved low scores in comparison with students in other countries (p. 1). The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78). They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. In particular, this dissertation carried out the following research tasks: (1) studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam; (2) introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning; (3) carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam; (4) comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries; and (5) studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies. Hopefully, this dissertation will be useful reference material for pre-service teachers at education universities to understand the nature of teaching and develop their teaching careers.}, language = {en} } @phdthesis{Lotkowska2014, author = {Lotkowska, Magda Ewa}, title = {Functional analysis of MYB112 transcription factor in the model plant Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72131}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Transcription factors (TFs) are ubiquitous gene expression regulators and play essential roles in almost all biological processes. This Ph.D. 
project is primarily focused on the functional characterisation of MYB112 - a member of the R2R3-MYB TF family from the model plant Arabidopsis thaliana. This gene was selected due to its increased expression during senescence based on previous qRT-PCR expression profiling experiments of 1880 TFs in Arabidopsis leaves at three developmental stages (15 mm leaf, 30 mm leaf and 20\% yellowing leaf). MYB112 promoter GUS fusion lines were generated to further investigate the expression pattern of MYB112. Employing transgenic approaches in combination with metabolomics and transcriptomics we demonstrate that MYB112 exerts a major role in regulation of plant flavonoid metabolism. We report enhanced and impaired anthocyanin accumulation in MYB112 overexpressors and MYB112-deficient mutants, respectively. Expression profiling reveals that MYB112 acts as a positive regulator of the transcription factor PAP1 leading to increased anthocyanin biosynthesis, and as a negative regulator of MYB12 and MYB111, which both control flavonol biosynthesis. We also identify MYB112 early responsive genes using a combination of several approaches. These include gene expression profiling (Affymetrix ATH1 micro-arrays and qRT-PCR) and transactivation assays in leaf mesophyll cell protoplasts. We show that MYB112 binds to an 8-bp DNA fragment containing the core sequence (A/T/G)(A/C)CC(A/T)(A/G/T)(A/C)(T/C). By electrophoretic mobility shift assay (EMSA) and chromatin immunoprecipitation coupled to qPCR (ChIP-qPCR) we demonstrate that MYB112 binds in vitro and in vivo to MYB7 and MYB32 promoters revealing them as direct downstream target genes. MYB TFs were previously reported to play an important role in controlling flavonoid biosynthesis in plants. Many factors acting upstream of the anthocyanin biosynthesis pathway show enhanced expression levels during nitrogen limitation, or elevated sucrose content. In addition to the mentioned conditions, other environmental parameters including salinity or high light stress may trigger anthocyanin accumulation. In contrast to several other MYB TFs affecting anthocyanin biosynthesis pathway genes, MYB112 expression is not controlled by nitrogen limitation, or carbon excess, but rather is stimulated by salinity and high light stress. Thus, MYB112 constitutes a previously uncharacterised regulatory factor that modifies anthocyanin accumulation under conditions of abiotic stress.}, language = {en} } @phdthesis{Sarrar2014, author = {Sarrar, Lea}, title = {Kognitive Funktionen bei adoleszenten Patienten mit Anorexia nervosa und unipolaren Affektiven St{\"o}rungen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72439}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Anorexia nervosa und unipolare Affektive St{\"o}rungen stellen h{\"a}ufige und schwerwiegende kinder- und jugendpsychiatrische St{\"o}rungsbilder dar, deren Pathogenese bislang nicht vollst{\"a}ndig entschl{\"u}sselt ist. Verschiedene Studien zeigen bei erwachsenen Patienten gravierende Auff{\"a}lligkeiten in den kognitiven Funktionen. Dahingegen scheinen bei adoleszenten Patienten lediglich leichtere Einschr{\"a}nkungen in den kognitiven Funktionen vorzuliegen. Die Pr{\"a}valenz der Anorexia nervosa und unipolaren Affektiven St{\"o}rung ist mit Beginn der Adoleszenz deutlich erh{\"o}ht. 
Es ist anzunehmen, dass kognitive Dysfunktionen, die sich bereits in diesem Alter abzeichnen, den weiteren Krankheitsverlauf bis in das Erwachsenenalter, die Behandlungsergebnisse und die Prognose maßgeblich beeintr{\"a}chtigen k{\"o}nnten. Zudem ist von einem h{\"o}heren Chronifizierungsrisiko auszugehen. In der vorliegenden Arbeit wurden daher kognitive Funktionen bei adoleszenten Patientinnen mit Anorexia nervosa sowie Patienten mit unipolaren Affektiven St{\"o}rungen untersucht. Die {\"U}berpr{\"u}fung der kognitiven Funktionen bei Patientinnen mit Anorexia nervosa erfolgte vor und nach Gewichtszunahme. Weiterhin wurden zugrundeliegende biologische Mechanismen {\"u}berpr{\"u}ft. Zudem wurde die Spezifit{\"a}t kognitiver Dysfunktionen f{\"u}r beide St{\"o}rungsbilder untersucht und bei Patienten mit unipolaren Affektiven St{\"o}rungen geschlechtsbezogene Unterschiede exploriert. Insgesamt gingen 47 Patientinnen mit Anorexia nervosa (mittleres Alter 16,3 + 1,6 Jahre), 39 Patienten mit unipolaren Affektiven St{\"o}rungen (mittleres Alter 15,5 + 1,3 Jahre) sowie 78 Kontrollprobanden (mittleres Alter 16,5 + 1,3 Jahre) in die Untersuchung ein. S{\"a}mtliche Studienteilnehmer durchliefen eine neuropsychologische Testbatterie, bestehend aus Verfahren zur {\"U}berpr{\"u}fung der kognitiven Flexibilit{\"a}t sowie visuellen und psychomotorischen Verarbeitungsgeschwindigkeit. Neben einem Intelligenzscreening wurden zudem das Ausmaß der depressiven Symptomatik sowie die allgemeine psychische Belastung erfasst. Die Ergebnisse legen nahe, dass bei adoleszenten Patientinnen mit Anorexia nervosa, sowohl im akut untergewichtigen Zustand als auch nach Gewichtszunahme, lediglich milde Beeintr{\"a}chtigungen in den kognitiven Funktionen vorliegen. Im akut untergewichtigen Zustand offenbarten sich deutliche Zusammenh{\"a}nge zwischen dem appetitregulierenden Peptid Agouti-related Protein und kognitiver Flexibilit{\"a}t, nicht jedoch zwischen Agouti-related Protein und visueller oder psychomotorischer Verarbeitungsgeschwindigkeit. Bei dem Vergleich von Anorexia nervosa und unipolaren Affektiven St{\"o}rungen pr{\"a}dizierte die Zugeh{\"o}rigkeit zu der Patientengruppe Anorexia nervosa ein Risiko f{\"u}r das Vorliegen kognitiver Dysfunktionen. Es zeigte sich zudem, dass adoleszente Patienten mit unipolaren Affektiven St{\"o}rungen lediglich in der psychomotorischen Verarbeitungsgeschwindigkeit tendenziell schw{\"a}chere Leistungen offenbarten als gesunde Kontrollprobanden. Es ergab sich jedoch ein genereller geschlechtsbezogener Vorteil f{\"u}r weibliche Probanden in der visuellen und psychomotorischen Verarbeitungsgeschwindigkeit. Die vorliegenden Befunde unterstreichen die Notwendigkeit der {\"U}berpr{\"u}fung kognitiver Funktionen bei adoleszenten Patienten mit Anorexia nervosa sowie unipolaren Affektiven St{\"o}rungen in der klinischen Routinediagnostik. Die Patienten k{\"o}nnten von spezifischen Therapieprogrammen profitieren, die Beeintr{\"a}chtigungen in den kognitiven Funktionen mildern bzw. pr{\"a}ventiv behandeln.}, language = {de} } @phdthesis{Trost2014, author = {Trost, Gerda}, title = {Poly(A) Polymerase 1 (PAPS1) influences organ size and pathogen response in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72345}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Polyadenylation of pre-mRNAs is critical for efficient nuclear export, stability, and translation of the mature mRNAs, and thus for gene expression. 
The bulk of pre-mRNAs are processed by canonical nuclear poly(A) polymerase (PAPS). Both vertebrate and higher-plant genomes encode more than one isoform of this enzyme, and these are coexpressed in different tissues. However, in neither case is it known whether the isoforms fulfill different functions or polyadenylate distinct subsets of pre-mRNAs. This thesis shows that the three canonical nuclear PAPS isoforms in Arabidopsis are functionally specialized owing to their evolutionarily divergent C-terminal domains. A moderate loss-of-function mutant in PAPS1 leads to increase in floral organ size, whereas leaf size is reduced. A strong loss-of-function mutation causes a male gametophytic defect, whereas a weak allele leads to reduced leaf growth. By contrast, plants lacking both PAPS2 and PAPS4 function are viable with wild-type leaf growth. Polyadenylation of SMALL AUXIN UP RNA (SAUR) mRNAs depends specifically on PAPS1 function. The resulting reduction in SAUR activity in paps1 mutants contributes to their reduced leaf growth, providing a causal link between polyadenylation of specific pre-mRNAs by a particular PAPS isoform and plant growth. Additionally, opposite effects of PAPS1 on leaf and flower growth reflect the different identities of these organs. The overgrowth of paps1 mutant petals is due to increased recruitment of founder cells into early organ primordia whereas the reduced leaf size is due to an ectopic pathogen response. This constitutive immune response leads to increased resistance to the biotrophic oomycete Hyaloperonospora arabidopsidis and reflects activation of the salicylic acid-independent signalling pathway downstream of ENHANCED DISEASE SUSCEPTIBILITY1 (EDS1)/PHYTOALEXIN DEFICIENT4 (PAD4). Immune responses are accompanied by intracellular redox changes. Consistent with this, the redox-status of the chloroplast is altered in paps1-1 mutants. The molecular effects of the paps1-1 mutation were analysed using an RNA sequencing approach that distinguishes between long- and short tailed mRNA. The results shown here suggest the existence of an additional layer of regulation in plants and possibly vertebrate gene expression, whereby the relative activities of canonical nuclear PAPS isoforms control de novo synthesized poly(A) tail length and hence expression of specific subsets of mRNAs.}, language = {en} } @phdthesis{Arnold2014, author = {Arnold, Anne}, title = {Modeling photosynthesis and related metabolic processes : from detailed examination to consideration of the metabolic context}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72277}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Mathematical modeling of biological systems is a powerful tool to systematically investigate the functions of biological processes and their relationship with the environment. To obtain accurate and biologically interpretable predictions, a modeling framework has to be devised whose assumptions best approximate the examined scenario and which copes with the trade-off of complexity of the underlying mathematical description: with attention to detail or high coverage. Correspondingly, the system can be examined in detail on a smaller scale or in a simplified manner on a larger scale. In this thesis, the role of photosynthesis and its related biochemical processes in the context of plant metabolism was dissected by employing modeling approaches ranging from kinetic to stoichiometric models. 
The Calvin-Benson cycle, as primary pathway of carbon fixation in C3 plants, is the initial step for producing starch and sucrose, necessary for plant growth. Based on an integrative analysis for model ranking applied on the largest compendium of (kinetic) models for the Calvin-Benson cycle, those suitable for development of metabolic engineering strategies were identified. Driven by the question why starch rather than sucrose is the predominant transitory carbon storage in higher plants, the metabolic costs for their synthesis were examined. The incorporation of the maintenance costs for the involved enzymes provided a model-based support for the preference of starch as transitory carbon storage, by only exploiting the stoichiometry of synthesis pathways. Many photosynthetic organisms have to cope with processes which compete with carbon fixation, such as photorespiration whose impact on plant metabolism is still controversial. A systematic model-oriented review provided a detailed assessment for the role of this pathway in inhibiting the rate of carbon fixation, bridging carbon and nitrogen metabolism, shaping the C1 metabolism, and influencing redox signal transduction. The demand of understanding photosynthesis in its metabolic context calls for the examination of the related processes of the primary carbon metabolism. To this end, the Arabidopsis core model was assembled via a bottom-up approach. This large-scale model can be used to simulate photoautotrophic biomass production, as an indicator for plant growth, under so-called optimal, carbon-limiting and nitrogen-limiting growth conditions. Finally, the introduced model was employed to investigate the effects of the environment, in particular, nitrogen, carbon and energy sources, on the metabolic behavior. This resulted in a purely stoichiometry-based explanation for the experimental evidence for preferred simultaneous acquisition of nitrogen in both forms, as nitrate and ammonium, for optimal growth in various plant species. The findings presented in this thesis provide new insights into plant system's behavior, further support existing opinions for which mounting experimental evidences arise, and posit novel hypotheses for further directed large-scale experiments.}, language = {en} } @phdthesis{Serrano2014, author = {Serrano, Paloma}, title = {Methanogens from Siberian permafrost as models for life on Mars : response to simulated martian conditions and biosignature characterization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72299}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Mars is one of the best candidates among planetary bodies for supporting life. The presence of water in the form of ice and atmospheric vapour together with the availability of biogenic elements and energy are indicators of the possibility of hosting life as we know it. The occurrence of permanently frozen ground - permafrost, is a common phenomenon on Mars and it shows multiple morphological analogies with terrestrial permafrost. Despite the extreme inhospitable conditions, highly diverse microbial communities inhabit terrestrial permafrost in large numbers. Among these are methanogenic archaea, which are anaerobic chemotrophic microorganisms that meet many of the metabolic and physiological requirements for survival on the martian subsurface. 
Moreover, methanogens from Siberian permafrost are extremely resistant against different types of physiological stresses as well as simulated martian thermo-physical and subsurface conditions, making them promising model organisms for potential life on Mars. The main aims of this investigation are to assess the survival of methanogenic archaea under Mars conditions, focusing on methanogens from Siberian permafrost, and to characterize their biosignatures by means of Raman spectroscopy, a powerful technology for microbial identification that will be used in the ExoMars mission. For this purpose, methanogens from Siberian permafrost and non-permafrost habitats were subjected to simulated martian desiccation by exposure to an ultra-low subfreezing temperature (-80ºC) and to Mars regolith (S-MRS and P-MRS) and atmospheric analogues. They were also exposed to different concentrations of perchlorate, a strong oxidant found in martian soils. Moreover, the biosignatures of methanogens were characterized at the single-cell level using confocal Raman microspectroscopy (CRM). The results showed survival and methane production in all methanogenic strains under simulated martian desiccation. After exposure to subfreezing temperatures, Siberian permafrost strains had a faster metabolic recovery, whereas the membranes of non-permafrost methanogens remained intact to a greater extent. The strain Methanosarcina soligelidi SMA-21 from Siberian permafrost showed significantly higher methane production rates than all other strains after the exposure to martian soil and atmospheric analogues, and all strains survived the presence of perchlorate at the concentration on Mars. Furthermore, CRM analyses revealed remarkable differences in the overall chemical composition of permafrost and non-permafrost strains of methanogens, regardless of their phylogenetic relationship. The convergence of the chemical composition in non-sister permafrost strains may be the consequence of adaptations to the environment, and could explain their greater resistance compared to the non-permafrost strains. As part of this study, Raman spectroscopy was evaluated as an analytical technique for remote detection of methanogens embedded in a mineral matrix. This thesis contributes to the understanding of the survival limits of methanogenic archaea under simulated martian conditions to further assess the hypothetical existence of life similar to methanogens on the martian subsurface. In addition, the overall chemical composition of methanogens was characterized for the first time by means of confocal Raman microspectroscopy, with potential implications for astrobiological research.}, language = {en} } @phdthesis{Lorey2014, author = {Lorey, Johannes}, title = {What's in a query : analyzing, predicting, and managing linked data access}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The term Linked Data refers to connected information sources comprising structured data about a wide range of topics and for a multitude of applications. In recent years, the conceptional and technical foundations of Linked Data have been formalized and refined. To this end, well-known technologies have been established, such as the Resource Description Framework (RDF) as a Linked Data model or the SPARQL Protocol and RDF Query Language (SPARQL) for retrieving this information. 
Whereas most research has been conducted in the area of generating and publishing Linked Data, this thesis presents novel approaches for improved management. In particular, we illustrate new methods for analyzing and processing SPARQL queries. Here, we present two algorithms suitable for identifying structural relationships between these queries. Both algorithms are applied to a large number of real-world requests to evaluate the performance of the approaches and the quality of their results. Based on this, we introduce different strategies enabling optimized access of Linked Data sources. We demonstrate how the presented approach facilitates effective utilization of SPARQL endpoints by prefetching results relevant for multiple subsequent requests. Furthermore, we contribute a set of metrics for determining technical characteristics of such knowledge bases. To this end, we devise practical heuristics and validate them through thorough analysis of real-world data sources. We discuss the findings and evaluate their impact on utilizing the endpoints. Moreover, we detail the adoption of a scalable infrastructure for improving Linked Data discovery and consumption. As we outline in an exemplary use case, this platform is eligible both for processing and provisioning the corresponding information.}, language = {en} } @phdthesis{Tietz2014, author = {Tietz, Marcel}, title = {Europ{\"a}isches Verwaltungsmanagement : Vergleich von Hauptst{\"a}dten neuer und alter Mitgliedsstaaten der EU am Beispiel der B{\"u}rgerdienste}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72171}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Im Rahmen der Dissertation wird die Anwendung und Wirkung von Kernelementen des New Public Management (NPM) am Beispiel der B{\"u}rgerdienste der sechs europ{\"a}ischen Hauptst{\"a}dte Berlin, Br{\"u}ssel, Kopenhagen, Madrid, Prag und Warschau analysiert. Hierbei steht der Vergleich von Hauptst{\"a}dten der MOE-Staaten mit Hauptst{\"a}dten alter EU-Mitgliedsstaaten im Vordergrund. Es wird die folgende Forschungshypothese untersucht: Die Verwaltungen in den Hauptst{\"a}dten der {\"o}stlichen Mitgliedsstaaten der EU haben in Folge der grunds{\"a}tzlichen gesellschaftlichen und politischen Umbr{\"u}che in den 1990er Jahren bedeutend mehr Kernelemente des NPM beim Neuaufbau ihrer {\"o}ffentlichen Verwaltungen eingef{\"u}hrt. Durch den folgerichtigen Aufbau kundenorientierter und moderner Verwaltungen sowie der strikten Anwendung der Kernelemente des New Public Management arbeiten die B{\"u}rgerdienste in den Hauptst{\"a}dten {\"o}stlicher EU-Mitgliedsstaaten effizienter und wirkungsvoller als vergleichbare B{\"u}rgerdienste in den Hauptst{\"a}dten westlicher EU-Mitgliedsstaaten. Zur {\"U}berpr{\"u}fung der Forschungshypothese werden die Vergleichsst{\"a}dte zun{\"a}chst den entsprechenden Rechts- und Verwaltungstraditionen (kontinentaleurop{\"a}isch deutsch, napoleonisch und skandinavisch) zugeordnet und bez{\"u}glich ihrer Ausgangslage zum Aufbau einer modernen Verwaltung (Westeurop{\"a}ische Verwaltung, Wiedervereinigungsverwaltung und Transformations-verwaltung) kategorisiert. Im Anschluss werden die institutionellen Voraussetzungen hinterfragt, was die deskriptive Darstellung der Stadt- und Verwaltungsgeschichte sowie die Untersuchung von organisatorischen Strukturen der B{\"u}rgerdienste, die Anwendung der NPM-Instrumente als auch die Innen- und Außenperspektive des NPM umfasst. 
Es wird festgestellt, ob und in welcher Form die B{\"u}rgerdienste der Vergleichsst{\"a}dte die Kernelemente des NPM anwenden. Im Anschluss werden die Vergleichsst{\"a}dte bez{\"u}glich der Anwendung der Kernelemente miteinander verglichen, wobei der Fokus auf dem pers{\"o}nlichen Vertriebsweg und der Kundenorientierung liegt. Der folgende Teil der Dissertation befasst sich mit dem Output der B{\"u}rgerdienste, der auf operative Resultate untersucht und verglichen wird. Hierbei stellt sich insbesondere die Frage nach den Leistungsmengen und der Produktivit{\"a}t des Outputs. Es werden aber auch die Ergebnisse von Verwaltungsprozessen untersucht, insbesondere in Bezug auf die Kundenorientierung. Hierf{\"u}r wird ein Effizienzvergleich der B{\"u}rgerdienste in den Vergleichsst{\"a}dten anhand einer relativen Effizienzmessung und der Free Disposal Hull (FDH)-Methode nach Bouckaert durchgef{\"u}hrt. Es ist eine Konzentration auf popul{\"a}re Dienstleistungen aus dem Portfolio der B{\"u}rgerdienste notwendig. Daher werden die vergleichbaren Dienstleistungen Melde-, Personalausweis-, F{\"u}hrerschein- und Reisepass-angelegenheiten unter Einbeziehung des Vollzeit{\"a}quivalents zur Berechnung der Effizienz der B{\"u}rgerdienste herangezogen. Hierf{\"u}r werden Daten aus den Jahren 2009 bis 2011 genutzt, die teilweise aus verwaltungsinternen Datenbanken stammen. Anschließend wird der Versuch unternommen, den Outcome in die Effizienzanalyse der B{\"u}rgerdienste einfließen zu lassen. In diesem Zusammenhang wird die Anwendbarkeit von verschiedenen erweiterten Best-Practice-Verfahren und auch eine Erweiterung der relativen Effizienzmessung und der FDH-Methode gepr{\"u}ft. Als Gesamtfazit der Dissertation kann festgehalten werden, dass die B{\"u}rgerdienste in den untersuchten Hauptst{\"a}dten der MOE-Staaten nicht mehr Kernelemente des NPM anwenden, als die Hauptst{\"a}dte der westlichen Mitgliedsstaaten der EU. Im Gegenteil wendet Prag deutlich weniger NPM-Instrumente als andere Vergleichsst{\"a}dte an, wohingegen Warschau zwar viele NPM-Instrumente anwendet, jedoch immer von einer westeurop{\"a}ischen Vergleichsstadt {\"u}bertroffen wird. Auch die Hypothese, dass die B{\"u}rgerdienste in den Hauptst{\"a}dten der MOE-Staaten effizienter arbeiten als vergleichbare B{\"u}rgerdienste in den Hauptst{\"a}dten westlicher EU-Mitgliedsstaaten wurde durch die Dissertation entkr{\"a}ftet. Das Gegenteil ist der Fall, da Prag und Warschau im Rahmen des Effizienzvergleichs lediglich durchschnittliche oder schlechte Performances aufweisen. Die aufgestellte Hypothese ist durch die Forschungsergebnisse widerlegt, lediglich das gute Abschneiden der Vergleichsstadt Warschau bei der Anwendungsanalyse kann einen Teil der These im gewissen Umfang best{\"a}tigen.}, language = {de} } @phdthesis{Metzner2014, author = {Metzner, Christiane}, title = {Freiwilligenmanagement als Instrument zur F{\"o}rderung B{\"u}rgerschaftlichen Engagements in Nonprofit-Organisationen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72180}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Diese Arbeit untersucht, was passiert, wenn in Non-Profit-Organisation (NPO) der Anspruch des B{\"u}rgerschaftlichen Engagements auf Praktiken des Freiwilligenmanagements trifft. Ausgangspunkt dieser Fragestellung ist eine doppelte Diagnose: Zum einen setzen NPOs aufgrund mehrerer Faktoren - u.a. Ressourcenknappheit, Wettbewerb und Nachahmungseffekten - vermehrt auf Freiwilligenmanagement. 
Mit dieser von der BWL inspirierten, aber f{\"u}r NPO entwickelten Personalf{\"u}hrungsmethode wollen sie mehr und bessere Freiwillige gewinnen und deren Einsatz effizienter strukturieren. Zum anderen haben sich gleichzeitig viele NPO dem Ziel des b{\"u}rgerschaftlichen Engagements verschrieben. Damit reagieren sie auf den aus Politik und Wissenschaft zu vernehmenden Anspruch, die Zivilgesellschaft m{\"o}ge die knappen Kassen der {\"o}ffentlichen Hand kompensieren und das wachsende Partizipationsbed{\"u}rfnis weiter Teile der Bev{\"o}lkerung durch eine neue Kultur der Teilhabe der B{\"u}rgerinnen und B{\"u}rger befriedigen. Bei n{\"a}herer Betrachtung zeigt sich jedoch: W{\"a}hrend Freiwilligenmanagement einer {\"o}konomischen Handlungslogik folgt, ist b{\"u}rgerschaftliches Engagement Ausdruck einer Handlungslogik der Zivilgesellschaft. Beide sind unter gegenw{\"a}rtigen Bedingungen weder theoretisch noch praktisch miteinander vereinbar. Um beide Entwicklungen miteinander zu vers{\"o}hnen, muss Freiwilligenmanagement unter dem Banner des B{\"u}rgerschaftlichen neu gedacht werden. Dieses Argument unterf{\"u}ttert die Arbeit sowohl theoretisch und empirisch. Der Theorieteil gliedert sich in drei Teile. Zun{\"a}chst wird der Begriff der NPO n{\"a}her eingegrenzt. Dazu wird die bestehende Literatur zum Dritten Sektor und Non-Profit-Organisationen zu einem operationalisierbaren Begriff von NPO kondensiert. Daran anschließend werden aktuelle Trends im Feld der NPO identifiziert, die zeigen, dass NPO tats{\"a}chlich oft von widerstreitenden Handlungslogiken gekennzeichnet sind, darunter eine {\"o}konomische und eine b{\"u}rgerschaftliche. Die beiden folgenden Kapitel untersuchen dann jeweils eine der beiden Logiken. Zun{\"a}chst wird das Leitbild des b{\"u}rgerschaftlichen Engagements als Ausdruck einer zivilgesellschaftlichen Handlungslogik n{\"a}her definiert. Dabei zeigt sich, dass dieser Begriff oft sehr unscharf verwendet wird. Daher greift die Arbeit auf die politiktheoretische Diskussion um Zivil- und B{\"u}rgergesellschaft auf und schmiedet daraus eine qualifizierte Definition von b{\"u}rgerschaftlichem Engagement, die sich maßgeblich am Ideal von gesellschaftlich-politischer Partizipation und b{\"u}rgerschaftlicher Kompetenz orientiert. Dem wird im dritten und letzten Kapitel des Theorieteils die {\"o}konomische Handlungslogik in Form der Theorie des Freiwilligenmanagements gegen{\"u}bergestellt. Bei der Darstellung zeigt sich schnell, dass dessen Grundprinzipien - anders als oft vorgebracht - mit den qualifizierten Idealen von Partizipation und Konkurrenz im Konflikt stehen. In der empirischen Analyse wird dann in den 8 Interviews den Widerspr{\"u}chen zwischen b{\"u}rgerschaftlichem Engagement und Freiwilligenmanagement in der Praxis nachgegangen. Die Ergebnisse dieser Untersuchung lassen sich in 5 Punkten zusammenfassen: 1. Freiwilligenmanagement orientiert sich erstens im wesentlichen an einer Zahl: Dem Zugewinn oder Verlust von freiwilliger Arbeit. 2. Freiwilligenmanagement installiert ein umfassendes System der Selektion von „passenden" Freiwilligen. 3. Positiv hervorzuheben ist die institutionalisierte Ansprechbarkeit, die im Rahmen von Freiwilligenmanagement in NPO Einzug erh{\"a}lt. 4. Freiwilligenmanagement ist eng mit dem Anspruch verbunden, die Arbeit der Freiwilligen zu kontrollieren. Der Eigensinn des Engagements, die Notwendigkeit von Spielr{\"a}umen, die M{\"o}glichkeit des Ausprobierens oder der Anspruch der Freiwilligen, an Entscheidungen zu partizipieren bzw. 
gar selbstorganisiert und -verantwortlich zu handeln, r{\"u}ckt dabei in den Hintergrund. 5. In den Interviews wird eine starke {\"O}konomisierung des Engagements sichtbar. Freiwillige werden als Ressource betrachtet, ihr Engagement als „Zeitspende" statistisch erfasst, ihre (Dienst-)Leistung monet{\"a}r bewertet. Im Zuge dessen erh{\"a}lt auch der Managerialism verst{\"a}rkt Einfluss auf die Arbeit in NPO und begr{\"u}ndet ein stark hierarchisches Verh{\"a}ltnis: W{\"a}hrend die Freiwilligenmanagerin aktiv handelt, wird die freiwillig Engagierte zum Objekt von Management-Techniken. Dass dies dem Anspruch der Partizipation entgegenl{\"a}uft, ergibt sich dabei von selbst. Angesichts dieser Diagnose, dass real-existierendes Freiwilligenmanagement nicht mit dem Ideal des b{\"u}rgerschaftlichen Engagements im engeren Sinne zusammenpasst, formuliert das Fazit Vorschl{\"a}ge f{\"u}r ein b{\"u}rgerschaftlich orientiertes, engagement-sensibles Freiwilligenmanagement.
}, language = {de} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission that is expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays. The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results of this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre. The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. The presented analysis features the lowest energy threshold that has ever been reached in ground-based γ-ray astronomy, opening a new window to the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: It provides - the preferably automated - means to extract and evaluate information from their artifacts such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide its users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization. This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. 
This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task, due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow related aspects of software traces from shared-memory software systems: A trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level. The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: An extension to 3-dimensional space allows for the use of additional variables simultaneously; interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @phdthesis{Schollaen2014, author = {Schollaen, Karina}, title = {Tracking climate signals in tropical trees}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71947}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The tropical warm pool waters surrounding Indonesia are one of the equatorial heat and moisture sources that are considered as a driving force of the global climate system. The climate in Indonesia is dominated by the equatorial monsoon system, and has been linked to El Ni{\~n}o-Southern Oscillation (ENSO) events, which often result in severe droughts or floods over Indonesia with profound societal and economic impacts on the populations living in the world's fourth most populated country. The latest IPCC report states that ENSO will remain the dominant mode in the tropical Pacific with global effects in the 21st century and ENSO-related precipitation extremes will intensify. However, no common agreement exists among climate simulation models for projected change in ENSO and the Australian-Indonesian Monsoon. Exploring high-resolution palaeoclimate archives, like tree rings or varved lake sediments, provides insights into the natural climate variability of the past, and thus helps to improve and validate simulations of future climate changes. Centennial tree-ring stable isotope records | Within this doctoral thesis the main goal was to explore the potential of tropical tree rings to record climate signals and to use them as palaeoclimate proxies. 
In detail, stable carbon (δ13C) and oxygen (δ18O) isotopes were extracted from teak trees in order to establish the first well-replicated centennial (AD 1900-2007) stable isotope records for Java, Indonesia. Furthermore, different climatic variables were tested whether they show significant correlation with tree-ring proxies (ring-width, δ13C, δ18O). Moreover, highly resolved intra-annual oxygen isotope data were established to assess the transfer of the seasonal precipitation signal into the tree rings. Finally, the established oxygen isotope record was used to reveal possible correlations with ENSO events. Methodological achievements | A second goal of this thesis was to assess the applicability of novel techniques which facilitate and optimize high-resolution and high-throughput stable isotope analysis of tree rings. Two different UV-laser-based microscopic dissection systems were evaluated as a novel sampling tool for high-resolution stable isotope analysis. Furthermore, an improved procedure of tree-ring dissection from thin cellulose laths for stable isotope analysis was designed. The most important findings of this thesis are: I) The herein presented novel sampling techniques improve stable isotope analyses for tree-ring studies in terms of precision, efficiency and quality. The UV-laser-based microdissection serve as a valuable tool for sampling plant tissue at ultrahigh-resolution and for unprecedented precision. II) A guideline for a modified method of cellulose extraction from wholewood cross-sections and subsequent tree-ring dissection was established. The novel technique optimizes the stable isotope analysis process in two ways: faster and high-throughput cellulose extraction and precise tree-ring separation at annual to high-resolution scale. III) The centennial tree-ring stable isotope records reveal significant correlation with regional precipitation. High-resolution stable oxygen values, furthermore, allow distinguishing between dry and rainy season rainfall. IV) The δ18O record reveals significant correlation with different ENSO flavors and demonstrates the importance of considering ENSO flavors when interpreting palaeoclimatic data in the tropics. The findings of my dissertation show that seasonally resolved δ18O records from Indonesian teak trees are a valuable proxy for multi-centennial reconstructions of regional precipitation variability (monsoon signals) and large-scale ocean-atmosphere phenomena (ENSO) for the Indo-Pacific region. Furthermore, the novel methodological achievements offer many unexplored avenues for multidisciplinary research in high-resolution palaeoclimatology.}, language = {en} } @phdthesis{Girbig2014, author = {Girbig, Dorothee}, title = {Analysing concerted criteria for local dynamic properties of metabolic systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72017}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Metabolic systems tend to exhibit steady states that can be measured in terms of their concentrations and fluxes. These measurements can be regarded as a phenotypic representation of all the complex interactions and regulatory mechanisms taking place in the underlying metabolic network. Such interactions determine the system's response to external perturbations and are responsible, for example, for its asymptotic stability or for oscillatory trajectories around the steady state. 
However, determining these perturbation responses in the absence of fully specified kinetic models remains an important challenge of computational systems biology. Structural kinetic modeling (SKM) is a framework to analyse whether a metabolic steady state remains stable under perturbation, without requiring detailed knowledge about individual rate equations. It provides a parameterised representation of the system's Jacobian matrix in which the model parameters encode information about the enzyme-metabolite interactions. Stability criteria can be derived by generating a large number of structural kinetic models (SK-models) with randomly sampled parameter sets and evaluating the resulting Jacobian matrices. The parameter space can be analysed statistically in order to detect network positions that contribute significantly to the perturbation response. Because the sampled parameters are equivalent to the elasticities used in metabolic control analysis (MCA), the results are easy to interpret biologically. In this project, the SKM framework was extended by several novel methodological improvements. These improvements were evaluated in a simulation study using a set of small example pathways with simple Michaelis Menten rate laws. Afterwards, a detailed analysis of the dynamic properties of the neuronal TCA cycle was performed in order to demonstrate how the new insights obtained in this work could be used for the study of complex metabolic systems. The first improvement was achieved by examining the biological feasibility of the elasticity combinations created during Monte Carlo sampling. Using a set of small example systems, the findings showed that the majority of sampled SK-models would yield negative kinetic parameters if they were translated back into kinetic models. To overcome this problem, a simple criterion was formulated that mitigates such infeasible models and the application of this criterion changed the conclusions of the SKM experiment. The second improvement of this work was the application of supervised machine-learning approaches in order to analyse SKM experiments. So far, SKM experiments have focused on the detection of individual enzymes to identify single reactions important for maintaining the stability or oscillatory trajectories. In this work, this approach was extended by demonstrating how SKM enables the detection of ensembles of enzymes or metabolites that act together in an orchestrated manner to coordinate the pathways response to perturbations. In doing so, stable and unstable states served as class labels, and classifiers were trained to detect elasticity regions associated with stability and instability. Classification was performed using decision trees and relevance vector machines (RVMs). The decision trees produced good classification accuracy in terms of model bias and generalizability. RVMs outperformed decision trees when applied to small models, but encountered severe problems when applied to larger systems because of their high runtime requirements. The decision tree rulesets were analysed statistically and individually in order to explore the role of individual enzymes or metabolites in controlling the system's trajectories around steady states. The third improvement of this work was the establishment of a relationship between the SKM framework and the related field of MCA. 
In particular, it was shown how the sampled elasticities could be converted to flux control coefficients, which were then investigated for their predictive information content in classifier training. After evaluation on the small example pathways, the methodology was used to study two steady states of the neuronal TCA cycle with respect to their intrinsic mechanisms responsible for stability or instability. The findings showed that several elasticities were jointly coordinated to control stability and that the main source of potential instabilities was mutations in the enzyme alpha-ketoglutarate dehydrogenase.}, language = {en} } @phdthesis{Schipper2014, author = {Schipper, Florian}, title = {Biomass derived carbon for new energy storage technologies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72045}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The thesis deals with the production and evaluation of porous carbon materials for energy storage technologies, namely supercapacitors and lithium-sulfur batteries.}, language = {de} } @phdthesis{Muksin2014, author = {Muksin, Umar}, title = {A fault-controlled geothermal system in Tarutung (North Sumatra, Indonesia) investigated by seismological analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72065}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The seismic structure (Vp, Vp/Vs, and Qp anomalies) contributes to the physical properties and the lithology of rocks and possible fluid distribution in the region. The Vp model images the geometry of the Tarutung and the Sarulla basins. Both basins have a depth of around 2.0 km. High Vp/Vs and high attenuation (low Qp) anomalies are observed along the Sarulla graben associated with a weak zone caused by volcanic activities along the graben. Low Vp/Vs and low conductivity anomalies are found in the west of the Tarutung basin. This anomaly is interpreted as dry, compact, and rigid granitic rock in the region as also found by geological observations. Low Vp, high Vp/Vs and low Qp anomalies are found at the east of the Tarutung basin, which appear to be associated with the three big geothermal manifestations in Sipoholon, Hutabarat, and Panabungan area. These anomalies are connected with high Vp/Vs and low Qp anomalies below the Tarutung basin at depths of around 3 - 10 km. This suggests that these geothermal manifestations are fed by the same source of the hot fluid below the Tarutung basin. The hot fluids from below the Tarutung basin propagate to the more dilatational and more permeable zone in the northeast. Granite found in the west of the Tarutung basin could also be abundant underneath the basin at a certain depth so that it prevents the hot fluid from being transported directly to the Tarutung basin. High seismic attenuation and low Vp/Vs anomalies are found in the southwest of the Tarutung basin below the Martimbang volcano. These anomalies are associated with hot rock below the volcano with little or no partial melting. There is no indication that the volcano controls the geothermal system around the Tarutung basin. The geothermal resources around the Tarutung basin constitute a fault-controlled system as a result of deep circulation of fluids. Outside of the basin, the seismicity delineation and the focal mechanism correlate with the shape and the characteristics of the strike-slip Sumatran fault. Within the Tarutung basin, the seismicity is distributed more broadly which coincides with the margin of the basin. 
An extensional duplex system in the Tarutung basin is derived from the seismicity and focal mechanism analysis which is also consistent with the geological observations. The vertical distribution of the seismicity suggests the presence of a negative flower structure within the Tarutung basin.}, language = {de} } @phdthesis{Dyachenko2014, author = {Dyachenko, Evgeniya}, title = {Elliptic problems with small parameter}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72056}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In this thesis we consider diverse aspects of existence and correctness of asymptotic solutions to elliptic differential and pseudodifferential equations. We begin our studies with the case of a general elliptic boundary value problem in partial derivatives. A small parameter enters the coefficients of the main equation as well as the boundary conditions. Such equations have already been investigated satisfactorily, but there still exist certain theoretical deficiencies. Our aim is to present the general theory of elliptic problems with a small parameter. For this purpose we examine in detail the case of a bounded domain with a smooth boundary. First of all, we construct formal solutions as power series in the small parameter. Then we examine their asymptotic properties. It suffices to carry out sharp two-sided \emph{a priori} estimates for the operators of boundary value problems which are uniform in the small parameter. Such estimates failed to hold in functional spaces used in classical elliptic theory. To circumvent this limitation we exploit norms depending on the small parameter for the functions defined on a bounded domain. Similar norms are widely used in literature, but their properties have not been investigated extensively. Our theoretical investigation shows that the usual elliptic technique can be correctly carried out in these norms. The obtained results also allow one to extend the norms to compact manifolds with boundaries. We complete our investigation by formulating algebraic conditions on the operators and showing their equivalence to the existence of a priori estimates. In the second step, we extend the concept of ellipticity with a small parameter to more general classes of operators. Firstly, we want to compare the difference in asymptotic patterns between the obtained series and expansions for similar differential problems. Therefore we investigate the heat equation in a bounded domain with a small parameter near the time derivative. In this case the characteristics touch the boundary at a finite number of points. It is known that the solutions are not regular in a neighbourhood of such points in advance. We suppose moreover that the boundary at such points can be non-smooth but have cuspidal singularities. We find a formal asymptotic expansion and show that when a set of parameters comes through a threshold value, the expansions fail to be asymptotic. The last part of the work is devoted to the general concept of ellipticity with a small parameter. Several theoretical extensions to pseudodifferential operators have already been suggested in previous studies. As a new contribution we involve the analysis on manifolds with edge singularities which allows us to consider wider classes of perturbed elliptic operators. We show that the introduced classes possess a priori estimates of elliptic type. 
As a further application we demonstrate how developed tools can be used to reduce singularly perturbed problems to regular ones.}, language = {en} } @phdthesis{Fischer2014, author = {Fischer, Jost Leonhardt}, title = {Nichtlineare Kopplungsmechanismen akustischer Oszillatoren am Beispiel der Synchronisation von Orgelpfeifen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71975}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In dieser Arbeit werden nichtlineare Kopplungsmechanismen von akustischen Oszillatoren untersucht, die zu Synchronisation f{\"u}hren k{\"o}nnen. Aufbauend auf die Fragestellungen vorangegangener Arbeiten werden mit Hilfe theoretischer und experimenteller Studien sowie mit Hilfe numerischer Simulationen die Elemente der Tonentstehung in der Orgelpfeife und die Mechanismen der gegenseitigen Wechselwirkung von Orgelpfeifen identifiziert. Daraus wird erstmalig ein vollst{\"a}ndig auf den aeroakustischen und fluiddynamischen Grundprinzipien basierendes nichtlinear gekoppeltes Modell selbst-erregter Oszillatoren f{\"u}r die Beschreibung des Verhaltens zweier wechselwirkender Orgelpfeifen entwickelt. Die durchgef{\"u}hrten Modellrechnungen werden mit den experimentellen Befunden verglichen. Es zeigt sich, dass die Tonentstehung und die Kopplungsmechanismen von Orgelpfeifen durch das entwickelte Oszillatormodell in weiten Teilen richtig beschrieben werden. Insbesondere kann damit die Ursache f{\"u}r den nichtlinearen Zusammenhang von Kopplungsst{\"a}rke und Synchronisation des gekoppelten Zwei-Pfeifen Systems, welcher sich in einem nichtlinearen Verlauf der Arnoldzunge darstellt, gekl{\"a}rt werden. Mit den gewonnenen Erkenntnissen wird der Einfluss des Raumes auf die Tonentstehung bei Orgelpfeifen betrachtet. Daf{\"u}r werden numerische Simulationen der Wechselwirkung einer Orgelpfeife mit verschiedenen Raumgeometrien, wie z. B. ebene, konvexe, konkave, und gezahnte Geometrien, exemplarisch untersucht. Auch der Einfluss von Schwellk{\"a}sten auf die Tonentstehung und die Klangbildung der Orgelpfeife wird studiert. In weiteren, neuartigen Synchronisationsexperimenten mit identisch gestimmten Orgelpfeifen, sowie mit Mixturen wird die Synchronisation f{\"u}r verschiedene, horizontale und vertikale Pfeifenabst{\"a}nde in der Ebene der Schallabstrahlung, untersucht. Die dabei erstmalig beobachteten r{\"a}umlich isotropen Unstetigkeiten im Schwingungsverhalten der gekoppelten Pfeifensysteme, deuten auf abstandsabh{\"a}ngige Wechsel zwischen gegen- und gleichphasigen Sychronisationsregimen hin. Abschließend wird die M{\"o}glichkeit dokumentiert, das Ph{\"a}nomen der Synchronisation zweier Orgelpfeifen durch numerische Simulationen, also der Behandlung der kompressiblen Navier-Stokes Gleichungen mit entsprechenden Rand- und Anfangsbedingungen, realit{\"a}tsnah abzubilden. Auch dies stellt ein Novum dar.}, language = {de} } @phdthesis{Abedjan2014, author = {Abedjan, Ziawasch}, title = {Improving RDF data with data mining}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71334}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Linked Open Data (LOD) comprises very many and often large public data sets and knowledge bases. Those datasets are mostly presented in the RDF triple structure of subject, predicate, and object, where each triple represents a statement or fact. Unfortunately, the heterogeneity of available open data requires significant integration steps before it can be used in applications. 
Meta information, such as ontological definitions and exact range definitions of predicates, is desirable and ideally provided by an ontology. However, in the context of LOD, ontologies are often incomplete or simply not available. Thus, it is useful to automatically generate meta information, such as ontological dependencies, range definitions, and topical classifications. Association rule mining, which was originally applied for sales analysis on transactional databases, is a promising and novel technique to explore such data. We design an adaptation of this technique for mining RDF data and introduce the concept of "mining configurations", which allows us to mine RDF data sets in various ways. Different configurations enable us to identify schema and value dependencies that in combination result in interesting use cases. To this end, we present rule-based approaches for auto-completion, data enrichment, ontology improvement, and query relaxation. Auto-completion remedies the problem of inconsistent ontology usage, providing an editing user with a sorted list of commonly used predicates. A combination of different configuration steps extends this approach to create completely new facts for a knowledge base. We present two approaches for fact generation, a user-based approach where a user selects the entity to be amended with new facts and a data-driven approach where an algorithm discovers entities that have to be amended with missing facts. As knowledge bases constantly grow and evolve, another approach to improve the usage of RDF data is to improve existing ontologies. Here, we present an association rule based approach to reconcile ontology and data. Interlacing different mining configurations, we infer an algorithm to discover synonymously used predicates. Those predicates can be used to expand query results and to support users during query formulation. We provide a wide range of experiments on real-world datasets for each use case. The experiments and evaluations show the added value of association rule mining for the integration and usability of RDF data and confirm the appropriateness of our mining configuration methodology.}, language = {en} } @phdthesis{Radeff2014, author = {Radeff, Giuditta}, title = {Geohistory of the Central Anatolian Plateau southern margin (southern Turkey)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71865}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The Adana Basin of southern Turkey, situated at the SE margin of the Central Anatolian Plateau, is ideally located to record Neogene topographic and tectonic changes in the easternmost Mediterranean realm. Using industry seismic reflection data we correlate 34 seismic profiles with corresponding exposed units in the Adana Basin. The time-depth conversion of the interpreted seismic profiles allows us to reconstruct the subsidence curve of the Adana Basin and to outline the occurrence of a major increase in both subsidence and sedimentation rates at 5.45 - 5.33 Ma, leading to the deposition of almost 1500 km³ of conglomerates and marls. Our provenance analysis of the conglomerates reveals that most of the sediment is derived from and north of the SE margin of the Central Anatolian Plateau. A comparison of these results with the composition of recent conglomerates and the present drainage basins indicates major changes between late Messinian and present-day source areas. 
We suggest that these changes in source areas result from uplift and ensuing erosion of the SE margin of the plateau. This hypothesis is supported by the comparison of the Adana Basin subsidence curve with the subsidence curve of the Mut Basin, a mainly Neogene basin located on top of the Central Anatolian Plateau southern margin, showing that the Adana Basin subsidence event is coeval with an uplift episode of the plateau southern margin. The collection of several fault measurements in the Adana region shows different deformation styles for the NW and SE margins of the Adana Basin. The weakly seismic NW portion of the basin is characterized by extensional and transtensional structures cutting Neogene deposits, likely accommodating the differential uplift occurring between the basin and the SE margin of the plateau. We interpret the tectonic evolution of the southern flank of the Central Anatolian Plateau and the coeval subsidence and sedimentation in the Adana Basin to be related to deep lithospheric processes, particularly lithospheric delamination and slab break-off.}, language = {en} } @phdthesis{BacskaiAtkari2014, author = {Bacskai-Atkari, Julia}, title = {The syntax of comparative constructions : operators, ellipsis phenomena and functional left peripheries}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-301-5}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71255}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 310}, year = {2014}, abstract = {Adopting a minimalist framework, the dissertation provides an analysis for the syntactic structure of comparatives, with special attention paid to the derivation of the subclause. The proposed account explains how the comparative subclause is connected to the matrix clause, how the subclause is formed in the syntax and what additional processes contribute to its final structure. In addition, it casts light upon these problems in cross-linguistic terms and provides a model that allows for synchronic and diachronic differences. This also enables one to give a more adequate explanation for the phenomena found in English comparatives since the properties of English structures can then be linked to general settings of the language and hence need no longer be considered as idiosyncratic features of the grammar of English. First, the dissertation provides a unified analysis of degree expressions, relating the structure of comparatives to that of other degrees. It is shown that gradable adjectives are located within a degree phrase (DegP), which in turn projects a quantifier phrase (QP) and that these two functional layers are always present, irrespectively of whether there is a phonologically visible element in these layers. Second, the dissertation presents a novel analysis of Comparative Deletion by reducing it to an overtness constraint holding on operators: in this way, it is reduced to morphological differences and cross-linguistic variation is not conditioned by way of postulating an arbitrary parameter. Cross-linguistic differences are ultimately dependent on whether a language has overt operators equipped with the relevant - [+compr] and [+rel] - features. Third, the dissertation provides an adequate explanation for the phenomenon of Attributive Comparative Deletion, as attested in English, by way of relating it to the regular mechanism of Comparative Deletion. 
I assume that Attributive Comparative Deletion is not a universal phenomenon, and its presence in English can be conditioned by independent, more general rules, while the absence of such restrictions leads to its absence in other languages. Fourth, the dissertation accounts for certain phenomena related to diachronic changes, examining how the changes in the status of comparative operators led to changes in whether Comparative Deletion is attested in a given language: I argue that only operators without a lexical XP can be grammaticalised. The underlying mechanisms are essentially general economy principles and hence the processes are not language-specific or exceptional. Fifth, the dissertation accounts for optional ellipsis processes that play a crucial role in the derivation of typical comparative subclauses. These processes are not directly related to the structure of degree expressions and hence the elimination of the quantified expression from the subclause; nevertheless, they are shown to be in interaction with the mechanisms underlying Comparative Deletion or the absence thereof.}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. Thirdly, we present a software tool providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @phdthesis{Federici2014, author = {Federici, Simone}, title = {Gamma-ray studies of the young shell-type SNR RX J1713.7-3946}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71734}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {One of the most significant current discussions in Astrophysics relates to the origin of high-energy cosmic rays. 
According to our current knowledge, the abundance distribution of the elements in cosmic rays at their point of origin indicates, within plausible error limits, that they were initially formed by nuclear processes in the interiors of stars. It is also believed that their energy distribution up to 10¹⁸ eV has Galactic origins. But even though the knowledge about potential sources of cosmic rays is quite poor above ≈ 10¹⁵ eV, that is, the "knee" of the cosmic-ray spectrum, up to the knee there seems to be a wide consensus that supernova remnants are the most likely candidates. Evidence of this comes from observations of non-thermal X-ray radiation, requiring synchrotron electrons with energies up to 10¹⁴ eV, exactly in the remnants of supernovae. To date, however, there is no conclusive evidence that they produce nuclei, the dominant component of cosmic rays, in addition to electrons. In light of this dearth of evidence, γ-ray observations from supernova remnants can offer the most promising direct way to confirm whether or not these astrophysical objects are indeed the main source of cosmic-ray nuclei below the knee. Recent observations with space- and ground-based observatories have established shell-type supernova remnants as GeV-to-TeV γ-ray sources. The interpretation of these observations is however complicated by the different radiation processes, leptonic and hadronic, that can produce similar fluxes in this energy band, rendering ambiguous the nature of the emission itself. The aim of this work is to develop a deeper understanding of these radiation processes from a particular shell-type supernova remnant, namely RX J1713.7-3946, using observations of the LAT instrument onboard the Fermi Gamma-Ray Space Telescope. Furthermore, to obtain accurate spectra and morphology maps of the emission associated with this supernova remnant, an improved model of the diffuse Galactic γ-ray emission background is developed. The analyses of RX J1713.7-3946 carried out with this improved background show that the hard Fermi-LAT spectrum cannot be ascribed to the hadronic emission, leading thus to the conclusion that the leptonic scenario is instead the most natural picture for the high-energy γ-ray emission of RX J1713.7-3946. The leptonic scenario however does not rule out the possibility that cosmic-ray nuclei are accelerated in this supernova remnant, but it suggests that the ambient density may not be high enough to produce a significant hadronic γ-ray emission. Further investigations involving other supernova remnants using the improved background developed in this work could allow compelling population studies, and hence prove or disprove the origin of Galactic cosmic-ray nuclei in these astrophysical objects. A breakthrough regarding the identification of the radiation mechanisms could finally be achieved with a new generation of instruments such as CTA.}, language = {en} } @phdthesis{Steyrleuthner2014, author = {Steyrleuthner, Robert}, title = {Korrelation von Struktur, optischen Eigenschaften und Ladungstransport in einem konjugierten Naphthalindiimid-Bithiophen Copolymer mit herausragender Elektronenmobilit{\"a}t}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71413}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Organische Halbleiter besitzen neue, bemerkenswerte Materialeigenschaften, die sie f{\"u}r die grundlegende Forschung wie auch aktuelle technologische Entwicklung (bspw. org. Leuchtdioden, org. Solarzellen) interessant werden lassen. 
Aufgrund der starken konformativen Freiheit der konjugierten Polymerketten f{\"u}hren die Vielzahl der m{\"o}glichen Anordnungen und die schwache intermolekulare Wechselwirkung f{\"u}r gew{\"o}hnlich zu geringer struktureller Ordnung im Festk{\"o}rper. Die Morphologie hat gleichzeitig direkten Einfluss auf die elektronische Struktur der organischen Halbleiter, was sich meistens in einer deutlichen Reduktion der Ladungstr{\"a}gerbeweglichkeit gegen{\"u}ber den anorganischen Verwandten zeigt. So stellt die Beweglichkeit der Ladungen im Halbleiter einen der limitierenden Faktoren f{\"u}r die Leistungsf{\"a}higkeit bzw. den Wirkungsgrad von funktionellen organischen Bauteilen dar. Im Jahr 2009 wurde ein neues auf Naphthalindiimid und Bithiophen basierendes Donor/Akzeptor-Copolymer vorgestellt [P(NDI2OD‑T2)], welches sich durch seine außergew{\"o}hnlich hohe Ladungstr{\"a}germobilit{\"a}t auszeichnet. In dieser Arbeit wird die Ladungstr{\"a}germobilit{\"a}t in P(NDI2OD‑T2) bestimmt, und der Transport durch eine geringe energetische Unordnung charakterisiert. Obwohl dieses Material zun{\"a}chst als amorph beschrieben wurde, zeigt eine detaillierte Analyse der optischen Eigenschaften von P(NDI2OD‑T2), dass bereits in L{\"o}sung geordnete Vorstufen supramolekularer Strukturen (Aggregate) existieren. Quantenchemische Berechnungen belegen die beobachteten spektralen {\"A}nderungen. Mithilfe der NMR-Spektroskopie kann die Bildung der Aggregate unabh{\"a}ngig von optischer Spektroskopie best{\"a}tigt werden. Die analytische Ultrazentrifugation an P(NDI2OD‑T2) L{\"o}sungen legt nahe, dass sich die Aggregation innerhalb der einzelnen Ketten unter Reduktion des hydrodynamischen Radius vollzieht. Die Ausbildung supramolekularer Strukturen nimmt auch eine signifikante Rolle bei der Filmbildung ein und verhindert gleichzeitig die Herstellung amorpher P(NDI2OD‑T2) Filme. Durch chemische Modifikation der P(NDI2OD‑T2)-Kette und verschiedener Prozessierungs-Methoden wurde eine {\"A}nderung des Kristallinit{\"a}tsgrades und gleichzeitig der Orientierung der kristallinen Dom{\"a}nen erreicht und mittels R{\"o}ntgenbeugung quantifiziert. In hochaufl{\"o}senden Elektronenmikroskopie-Messungen werden die Netzebenen und deren Einbettung in die semikristallinen Strukturen direkt abgebildet. Aus der Kombination der verschiedenen Methoden erschließt sich ein Gesamtbild der Nah- und Fernordnung in P(NDI2OD‑T2). {\"U}ber die Messung der Elektronenmobilit{\"a}t dieser Schichten wird die Anisotropie des Ladungstransports in den kristallographischen Raumrichtungen von P(NDI2OD‑T2) charakterisiert und die Bedeutung der intramolekularen Wechselwirkung f{\"u}r effizienten Ladungstransport herausgearbeitet. Gleichzeitig wird deutlich, wie die Verwendung von gr{\"o}ßeren und planaren funktionellen Gruppen zu h{\"o}heren Ladungstr{\"a}germobilit{\"a}ten f{\"u}hrt, welche im Vergleich zu klassischen semikristallinen Polymeren weniger sensitiv auf die strukturelle Unordnung im Film sind.}, language = {de} } @phdthesis{Trabant2014, author = {Trabant, Christoph}, title = {Ultrafast photoinduced phase transitions in complex materials probed by time-resolved resonant soft x-ray diffraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71377}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In processing and data storage, mainly ferromagnetic (FM) materials are used.
Approaching physical limits, new concepts have to be found for faster, smaller switches, for higher data densities and more energy efficiency. Some of the discussed new concepts involve the material classes of correlated oxides and materials with antiferromagnetic coupling. Their applicability depends critically on their switching behavior, i.e., how fast and how energy efficiently material properties can be manipulated. This thesis presents investigations of ultrafast non-equilibrium phase transitions on such new materials. In transition metal oxides (TMOs) the coupling of different degrees of freedom and the resulting low-energy excitation spectrum often result in spectacular changes of macroscopic properties (colossal magneto resistance, superconductivity, metal-to-insulator transitions), often accompanied by nanoscale order of spins, charges, orbital occupation and by lattice distortions, which make these materials attractive. Magnetite served as a prototype for functional TMOs showing a metal-to-insulator-transition (MIT) at T = 123 K. By probing the charge and orbital order as well as the structure after an optical excitation we found that the electronic order and the structural distortion, characteristics of the insulating phase in thermal equilibrium, are destroyed within the experimental resolution of 300 fs. The MIT itself occurs on a 1.5 ps timescale. It shows that MITs in functional materials are several thousand times faster than switching processes in semiconductors. Recently, ferrimagnetic and antiferromagnetic (AFM) materials have become interesting. It was shown in ferrimagnetic GdFeCo that the transfer of angular momentum between two opposed FM subsystems with different time constants leads to a switching of the magnetization after laser pulse excitation. In addition it was theoretically predicted that demagnetization dynamics in AFM should occur faster than in FM materials as no net angular momentum has to be transferred out of the spin system. We investigated two different AFM materials in order to learn more about their ultrafast dynamics. In Ho, a metallic AFM below T ≈ 130 K, we found that the AFM order can be destroyed not only faster but also ten times more energy efficiently than the FM order in comparable metals. In EuTe, an AFM semiconductor below T ≈ 10 K, we compared the loss of magnetization and laser-induced structural distortion in one and the same experiment. Our experiment shows that they are effectively disentangled. An exception is an ultrafast release of lattice dynamics, which we assign to the release of magnetostriction. The results presented here were obtained with time-resolved resonant soft x-ray diffraction at the Femtoslicing source of the Helmholtz-Zentrum Berlin and at the free-electron laser in Stanford (LCLS). In addition, the development and setup of a new UHV-diffractometer for these experiments is reported.}, language = {en} } @phdthesis{Busching2014, author = {Busching, Robert}, title = {Affektive und kognitive Desensibilisierung als Konsequenz von Mediengewaltkonsum : eine experimentelle Untersuchung auf Basis lerntheoretischer {\"U}berlegungen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71360}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {NutzerInnen von gewalthaltigen Medien geben einerseits oftmals zu, dass sie fiktionale, gewalthaltige Medien konsumieren, behaupten jedoch gleichzeitig, dass dies nicht ihr Verhalten außerhalb des Medienkontexts beeinflusst.
Sie argumentieren, dass sie leicht zwischen Dingen, die im fiktionalen Kontext gelernt wurden, und Dingen, die in der Realit{\"a}t gelernt wurden, unterscheiden k{\"o}nnen. Im Kontrast zu diesen Aussagen zeigen Metaanalysen Effektst{\"a}rken im mittleren Bereich f{\"u}r den Zusammenhang zwischen Gewaltmedienkonsum und aggressivem Verhalten. Diese Ergebnisse k{\"o}nnen nur erkl{\"a}rt werden, wenn MediennutzerInnen gewalthaltige Lernerfahrungen auch außerhalb des Medienkontexts anwenden. Ein Prozess, der Lernerfahrungen innerhalb des Medienkontexts mit dem Verhalten in der realen Welt verkn{\"u}pft, ist Desensibilisierung, die oftmals als eine Reduktion des negativen Affektes gegen{\"u}ber Gewalt definiert wird. Zur Untersuchung des Desensibilisierungsprozesses wurden vier Experimente durchgef{\"u}hrt. Die erste in dieser Arbeit untersuchte Hypothese war, dass je h{\"a}ufiger Personen Gewaltmedien konsumieren, desto weniger negativen Affekt zeigen sie gegen{\"u}ber Bildern mit realer Gewalt. Jedoch wurde angenommen, dass diese Bewertung auf Darstellungen von realer Gewalt beschr{\"a}nkt ist und nicht bei Bildern ohne Gewaltbezug, die einen negativen Affekt ausl{\"o}sen, zu finden ist. Die zweite Hypothese bezog sich auf den Affekt w{\"a}hrend des Konsums von Mediengewalt. Hier wurde angenommen, dass besonders Personen, die Freude an Gewalt in den Medien empfinden, weniger negativen Affekt gegen{\"u}ber realen Gewaltdarstellungen zeigen. Die letzte Hypothese besch{\"a}ftigte sich mit kognitiver Desensibilisierung und sagte vorher, dass Gewaltmedienkonsum zu einem Transfer von Reaktionen, die normalerweise gegen{\"u}ber gewalthaltigen Reizen gezeigt werden, auf urspr{\"u}nglich neutrale Reize f{\"u}hrt. Das erste Experiment (N = 57) untersuchte, ob die habituelle Nutzung von gewalthaltigen Medien den selbstberichteten Affekt (Valenz und Aktivierung) gegen{\"u}ber Darstellungen von realer Gewalt und nichtgewalthaltigen Darstellungen, die negativen Affekt ausl{\"o}sen, vorhersagt. Die habituelle Nutzung von gewalthaltigen Medien sagte weniger negative Valenz und weniger allgemeine Aktivierung gegen{\"u}ber gewalthaltigen und nichtgewalthaltigen Bildern vorher. Das zweite Experiment (N = 103) untersuchte auch die Beziehung zwischen habituellem Gewaltmedienkonsum und den affektiven Reaktionen gegen{\"u}ber Bildern realer Gewalt und negativen affektausl{\"o}senden Bildern. Als weiterer Pr{\"a}diktor wurde der Affekt beim Betrachten von gewalthaltigen Medien hinzugef{\"u}gt. Der Affekt gegen{\"u}ber den Bildern wurde zus{\"a}tzlich durch psychophysiologische Maße (Valenz: C. Supercilii; Aktivierung: Hautleitreaktion) erhoben. Wie zuvor sagte habitueller Gewaltmedienkonsum weniger selbstberichtete Erregung und weniger negative Valenz f{\"u}r die gewalthaltigen und die negativen, gewalthaltfreien Bilder vorher. Die physiologischen Maße replizierten dieses Ergebnis. Jedoch zeigte sich ein anderes Muster f{\"u}r den Affekt beim Konsum von Gewalt in den Medien. Personen, die Gewalt in den Medien st{\"a}rker erfreute, zeigten eine Reduktion der Responsivit{\"a}t gegen{\"u}ber Gewalt auf allen vier Maßen. Weiterhin war bei drei dieser vier Maße (selbstberichtete Valenz, Aktivit{\"a}t des C. Supercilii und Hautleitreaktion) dieser Zusammenhang auf die gewalthaltigen Bilder beschr{\"a}nkt, mit keinem oder nur einem kleinen Effekt auf die negativen, aber nichtgewalthaltigen Bilder. Das dritte Experiment (N = 73) untersuchte den Affekt, w{\"a}hrend die Teilnehmer ein Computerspiel spielten.
Das Spiel wurde eigens f{\"u}r dieses Experiment programmiert, sodass einzelne Handlungen im Spiel mit der Aktivit{\"a}t des C. Supercilii, dem Indikator f{\"u}r negativen Affekt, in Bezug gesetzt werden konnten. Die Analyse des C. Supercilii zeigte, dass wiederholtes Durchf{\"u}hren von aggressiven Spielz{\"u}gen zu einem R{\"u}ckgang von negativem Affekt f{\"u}hrte, der die aggressiven Spielhandlungen begleitete. Der negative Affekt w{\"a}hrend gewalthaltiger Spielz{\"u}ge wiederum sagte die affektive Reaktion gegen{\"u}ber Darstellungen von gewalthaltigen Bildern vorher, nicht jedoch gegen{\"u}ber den negativen Bildern. Das vierte Experiment (N = 77) untersuchte kognitive Desensibilisierung, die die Entwicklung von Verkn{\"u}pfungen zwischen neutralen und aggressiven Kognitionen beinhaltete. Die Teilnehmer spielten einen Ego-Shooter entweder auf einem Schiff- oder einem Stadtlevel. Die Beziehung zwischen den neutralen Konstrukten (Schiff/Stadt) und den aggressiven Kognitionen wurde mit einer lexikalischen Entscheidungsaufgabe gemessen. Das Spielen im Schiff-/Stadt-Level f{\"u}hrte zu einer k{\"u}rzeren Reaktionszeit f{\"u}r aggressive W{\"o}rter, wenn sie einem Schiff- bzw. Stadtprime folgten. Dies zeigte, dass die im Spiel enthaltenen neutralen Konzepte mit aggressiven Knoten verkn{\"u}pft werden. Die Ergebnisse dieser vier Experimente wurden im Rahmen eines lerntheoretischen Ansatzes zur Konzeptualisierung von Desensibilisierung diskutiert.}, language = {de} } @phdthesis{Ermeydan2014, author = {Ermeydan, Mahmut Ali}, title = {Wood cell wall modification with hydrophobic molecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71325}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Wood is used for many applications because of its excellent mechanical properties, relative abundance and because it is a renewable resource. However, its wider utilization as an engineering material is limited because it swells and shrinks upon moisture changes and is susceptible to degradation by microorganisms and/or insects. Chemical modifications of wood have been shown to improve dimensional stability, water repellence and/or durability, thus increasing potential service-life of wood materials. However, current treatments are limited because it is difficult to introduce and fix such modifications deep inside the tissue and cell wall. Within the scope of this thesis, novel chemical modification methods of wood cell walls were developed to improve both dimensional stability and water repellence of wood material. These methods were partly inspired by the heartwood formation in living trees, a process that for some species results in an insertion of hydrophobic chemical substances into the cell walls of already dead wood cells. In the first part of this thesis, a chemistry to modify wood cell walls was used which was inspired by the natural process of heartwood formation. Commercially available hydrophobic flavonoid molecules were effectively inserted in the cell walls of spruce, a softwood species with low natural durability, after a tosylation treatment to obtain "artificial heartwood". Flavonoid-inserted cell walls show a reduced moisture absorption, resulting in better dimensional stability, water repellency and increased hardness. This approach was quite different from established modifications, which mainly address hydroxyl groups of cell wall polymers with hydrophilic substances.
In the second part of the work, in-situ styrene polymerization inside the tosylated cell walls was studied. It is known that there is a weak adhesion between hydrophobic polymers and hydrophilic cell wall components. The hydrophobic styrene monomers were inserted into the tosylated wood cell walls for further polymerization to form polystyrene in the cell walls, which increased the dimensional stability of the bulk wood material and reduced water uptake of the cell walls considerably when compared to controls. In the third part of the work, grafting of another hydrophobic and also biodegradable polymer, poly(ɛ-caprolactone), into the wood cell walls by ring-opening polymerization of ɛ-caprolactone was studied at mild temperatures. Results indicated that polycaprolactone attached to the cell walls and caused permanent swelling of the cell walls of up to 5\%. Dimensional stability of the bulk wood material increased by 40\% and water absorption was reduced by more than 35\%. A fully biodegradable and hydrophobized wood material was obtained with this method, which reduces the disposal problem of the modified wood materials and has improved properties to extend the material's service-life. Starting from a bio-inspired approach that showed great promise as an alternative to standard cell wall modifications, we showed the possibility of inserting hydrophobic molecules in the cell walls and supported this fact with in-situ styrene and ɛ-caprolactone polymerization into the cell walls. It was shown in this thesis that despite the extensive knowledge and long history of using wood as a material there is still room for novel chemical modifications which could have a high impact on improving wood properties.}, language = {en} } @phdthesis{Steinert2014, author = {Steinert, Bastian}, title = {Built-in recovery support for explorative programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71305}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {This work introduces concepts and corresponding tool support to enable a complementary approach in dealing with recovery. Programmers need to recover a development state, or a part thereof, when previously made changes reveal undesired implications. However, when the need arises suddenly and unexpectedly, recovery often involves expensive and tedious work. To avoid tedious work, literature recommends keeping away from unexpected recovery demands by following a structured and disciplined approach, which consists of the application of various best practices including working only on one thing at a time, performing small steps, as well as making proper use of versioning and testing tools. However, the attempt to avoid unexpected recovery is both time-consuming and error-prone. On the one hand, it requires disproportionate effort to minimize the risk of unexpected situations. On the other hand, applying recommended practices selectively, which saves time, can hardly avoid recovery. In addition, the constant need for foresight and self-control has unfavorable implications. It is exhausting and impedes creative problem solving. This work proposes to make recovery fast and easy and introduces corresponding support called CoExist. Such dedicated support turns situations of unanticipated recovery from tedious experiences into pleasant ones. It makes recovery fast and easy to accomplish, even if explicit commits are unavailable or tests have been ignored for some time.
When mistakes and unexpected insights are no longer associated with tedious corrective actions, programmers are encouraged to change source code as a means to reason about it, as opposed to making changes only after structuring and evaluating them mentally. This work further reports on an implementation of the proposed tool support in the Squeak/Smalltalk development environment. The development of the tools has been accompanied by regular performance and usability tests. In addition, this work investigates whether the proposed tools affect programmers' performance. In a controlled lab study, 22 participants improved the design of two different applications. Using a repeated measurement setup, the study examined the effect of providing CoExist on programming performance. The result of analyzing 88 hours of programming suggests that built-in recovery support as provided with CoExist has a positive effect on programming performance in explorative programming tasks.}, language = {en} } @phdthesis{Yadavalli2014, author = {Yadavalli, Nataraja Sekhar}, title = {Advances in experimental methods to probe surface relief grating formation mechanism in photosensitive materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71213}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {When azobenzene-modified photosensitive polymer films are irradiated with light interference patterns, topographic variations in the film develop that follow the electric field vector distribution, resulting in the formation of a surface relief grating (SRG). The exact correspondence of the electric field vector orientation in the interference pattern to the presence of local topographic minima or maxima of the SRG is in general difficult to determine. In my thesis, we have established a systematic procedure to accomplish the correlation between different interference patterns and the topography of SRG. For this, we devise a new setup combining an atomic force microscope and a two-beam interferometer (IIAFM). With this set-up, it is possible to track the topography change in-situ, while at the same time changing polarization and phase of the impinging interference pattern. To validate our results, we have compared two photosensitive materials referred to in short as PAZO and trimer. This is the first time that an absolute correspondence between the local distribution of electric field vectors of the interference pattern and the local topography of the relief grating could be established exhaustively. In addition, using our IIAFM we found that for a certain polarization combination of two orthogonally polarized interfering beams, namely the SP (↕, ↔) interference pattern, the topography forms SRG with only half the period of the interference patterns. Exploiting this phenomenon we are able to fabricate surface relief structures below the diffraction limit with characteristic features measuring only 140 nm, by using far field optics with a wavelength of 491 nm. We have also probed for the stresses induced during the polymer mass transport by placing an ultra-thin gold film on top (5-30 nm). During irradiation, the metal film not only deforms along with the SRG formation, but ruptures in a regular and complex manner. The morphology of the cracks differs strongly depending on the electric field distribution in the interference pattern even when the magnitude and the kinetics of the strain are kept constant. This implies a complex local distribution of the opto-mechanical stress along the topography grating.
The neutron reflectivity measurements of the metal/polymer interface indicate the penetration of the metal layer into the polymer, resulting in the formation of a bonding layer that confirms the transduction of light-induced stresses from the polymer layer to the metal film.}, language = {en} } @phdthesis{Schmidl2014, author = {Schmidl, Ricarda}, title = {Empirical essays on job search behavior, active labor market policies, and propensity score balancing methods}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In Chapter 1 of the dissertation, the role of social networks is analyzed as an important determinant in the search behavior of the unemployed. Based on the hypothesis that the unemployed generate information on vacancies through their social network, search theory predicts that individuals with large social networks should experience an increased productivity of informal search, and reduce their search in formal channels. Due to the higher productivity of search, unemployed with a larger network are also expected to have a higher reservation wage than unemployed with a small network. The model-theoretic predictions are tested and confirmed empirically. It is found that the search behavior of the unemployed is significantly affected by the presence of social contacts, with larger networks implying a stronger substitution away from formal search channels towards informal channels. The substitution is particularly pronounced for passive formal search methods, i.e., search methods that generate rather non-specific types of job offer information at low relative cost. We also find small but significant positive effects of an increase of the network size on the reservation wage. These results have important implications for the analysis of the job search monitoring or counseling measures that are usually targeted at formal search only. Chapter 2 of the dissertation addresses the labor market effects of vacancy information during the early stages of unemployment. The outcomes considered are the speed of exit from unemployment, the effects on the quality of employment and the short- and medium-term effects on active labor market program (ALMP) participation. It is found that vacancy information significantly increases the speed of entry into employment; at the same time the probability to participate in ALMP is significantly reduced. Whereas the long-term reduction in the ALMP arises in consequence of the earlier exit from unemployment, we also observe a short-run decrease for some labor market groups, which suggests that caseworkers use high and low intensity activation measures interchangeably, which is clearly questionable from an efficiency point of view. For unemployed who find a job through vacancy information, we observe a small negative effect on the weekly number of hours worked. In Chapter 3, the long-term effects of participation in ALMP are assessed for unemployed youth under 25 years of age. Complementary to the analysis in Chapter 2, the effects of participation in time- and cost-intensive measures of active labor market policies are examined. In particular, we study the effects of job creation schemes, wage subsidies, short- and long-term training measures and measures to promote the participation in vocational training. The outcome variables of interest are the probability to be in regular employment, and participation in further education during the 60 months following program entry.
The analysis shows that all programs, except job creation schemes, have positive and long-term effects on the employment probability of youth. In the short run, only short-term training measures generate positive effects, as long-term training programs and wage subsidies exhibit significant ``locking-in'' effects. Measures to promote vocational training are found to increase the probability of attending education and training significantly, whereas all other programs have either no or a negative effect on training participation. Effect heterogeneity with respect to the pre-treatment level of education shows that young people with higher pre-treatment educational levels benefit more from participation in most programs. However, for longer-term wage subsidies we also find strong positive effects for young people with low initial education levels. The relative benefit of training measures is higher in West than in East Germany. In the evaluation studies of Chapters 2 and 3, semi-parametric balancing methods of Propensity Score Matching (PSM) and Inverse Probability Weighting (IPW) are used to eliminate the effects of confounding factors that influence both the treatment participation and the outcome variable of interest, and to establish a causal relation between program participation and outcome differences. While PSM and IPW are intuitive and methodologically attractive as they do not require parametric assumptions, the practical implementation may become quite challenging due to their sensitivity to various data features. Given the importance of these methods in the evaluation literature, and the vast number of recent methodological contributions in this field, Chapter 4 aims to reduce the knowledge gap between the methodological and applied literature by summarizing new findings of the empirical and statistical literature and by providing practical guidelines for future applied research. In contrast to previous publications, this study does not only focus on the estimation of causal effects, but stresses that the balancing challenge can and should be discussed independently of the question of causal identification of treatment effects in most empirical applications. Following a brief outline of the practical implementation steps required for PSM and IPW, these steps are presented in detail chronologically, outlining practical advice for each step. Subsequently, the topics of effect estimation, inference, sensitivity analysis and the combination with parametric estimation methods are discussed. Finally, new extensions of the methodology and avenues for future research are presented.}, language = {en} } @phdthesis{Hebig2014, author = {Hebig, Regina}, title = {Evolution of model-driven engineering settings in practice}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70761}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Nowadays, software systems are getting more and more complex. To tackle this challenge, diverse techniques, such as design patterns, service oriented architectures (SOA), software development processes, and model-driven engineering (MDE), are used to improve productivity, while time to market and quality of the products stay stable. Several of these techniques are used in parallel to profit from their benefits. While the use of sophisticated software development processes is standard today, MDE is only now being adopted in practice. However, research has shown that the application of MDE is not always successful.
It is not fully understood when the advantages of MDE can be exploited and to what degree MDE can also be disadvantageous for productivity. Further, when combining different techniques that aim to affect the same factor (e.g. productivity), the question arises whether these techniques really complement each other or, in contrast, cancel each other's effects. Due to that, there is the concrete question how MDE and other techniques, such as software development processes, are interrelated. Both aspects (advantages and disadvantages for productivity as well as the interrelation to other techniques) need to be understood to identify risks relating to the productivity impact of MDE. Before studying MDE's impact on productivity, it is necessary to investigate the range of validity that can be reached for the results. This includes two questions. First, there is the question whether MDE's impact on productivity is similar for all approaches of adopting MDE in practice. Second, there is the question whether MDE's impact on productivity for an approach of using MDE in practice remains stable over time. The answers to both questions are crucial for handling risks of MDE, but also for the design of future studies on MDE success. This thesis addresses these questions with the goal of supporting the adoption of MDE in the future. To enable a differentiated discussion about MDE, the term ``MDE setting'' is introduced. An MDE setting refers to the applied technical setting, i.e. the employed manual and automated activities, artifacts, languages, and tools. An MDE setting's possible impact on productivity is studied with a focus on changeability and the interrelation to software development processes. This is done by introducing a taxonomy of changeability concerns that might be affected by an MDE setting. Further, three MDE traits are identified and it is studied for which manifestations of these MDE traits software development processes are impacted. To enable the assessment and evaluation of an MDE setting's impacts, the Software Manufacture Model language is introduced. This is a process modeling language that allows one to reason about how relations between (modeling) artifacts (e.g. models or code files) change during application of manual or automated development activities. On that basis, risk analysis techniques are provided. These techniques allow identifying changeability risks and assessing the manifestations of the MDE traits (and with them an MDE setting's impact on software development processes). To address the range of validity, MDE settings from practice and their evolution histories were captured in the context of this thesis. First, this data is used to show that MDE settings cover the whole spectrum concerning their impact on changeability or interrelation to software development processes. It is neither seldom that MDE settings are neutral for processes nor seldom that MDE settings have an impact on processes. Similarly, the impact on changeability differs considerably. Second, a taxonomy of evolution of MDE settings is introduced. In that context it is discussed to what extent different types of changes to an MDE setting can influence this MDE setting's impact on changeability and the interrelation to processes. The category of structural evolution, which can change these characteristics of an MDE setting, is identified. The captured MDE settings from practice are used to show that structural evolution exists and is common.
In addition, some examples of structural evolution steps are collected that actually led to a change in the characteristics of the respective MDE settings. Two implications follow: First, the assessed diversity of MDE settings demonstrates the need for the analysis techniques presented in this thesis. Second, evolution is one explanation for the diversity of MDE settings in practice. To summarize, this thesis studies the nature and evolution of MDE settings in practice. As a result, support for the adoption of MDE settings is provided in the form of techniques for the identification of risks relating to productivity impacts.}, language = {en} } @phdthesis{BrandtKobele2014, author = {Brandt-Kobele, Oda-Christina}, title = {Comprehension of verb inflection in German-speaking children}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-216-2}, issn = {1869-3822}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62046}, school = {Universit{\"a}t Potsdam}, pages = {xx, 325}, year = {2014}, abstract = {Previous studies on the acquisition of verb inflection in normally developing children have revealed an astonishing pattern: children use correctly inflected verbs in their own speech but fail to make use of verb inflections when comprehending sentences uttered by others. Thus, a three-year-old might well be able to say something like 'The cat sleeps on the bed', but fails to understand that the same sentence, when uttered by another person, refers to only one sleeping cat and not more than one. The previous studies that have examined children's comprehension of verb inflections have employed a variant of a picture selection task in which the child was asked to explicitly indicate (via pointing) what semantic meaning she had inferred from the test sentence. Recent research on other linguistic structures, such as pronouns or focus particles, has indicated that earlier comprehension abilities can be found when methods are used that do not require an explicit reaction, like preferential looking tasks. This dissertation aimed to examine whether children are truly not able to understand the connection between the verb form and the meaning of the sentence subject until the age of five years or whether earlier comprehension can be found when a different measure, preferential looking, is used. Additionally, children's processing of subject-verb agreement violations was examined. The three experiments of this thesis that examined children's comprehension of verb inflections revealed the following: German-speaking three- to four-year-old children looked more at a picture showing one actor when hearing a sentence with a singular inflected verb but only when their eye gaze was tracked and they did not have to perform a picture selection task. When they were asked to point to the matching picture, they performed at chance level. This pattern indicates asymmetries in children's language performance even within the receptive modality. The fourth experiment examined sensitivity to subject-verb agreement violations and did not reveal evidence for sensitivity toward agreement violations in three- and four-year-old children, but only found that children's looking patterns were influenced by the grammatical violations at the age of five.
The results from these experiments are discussed in relation to the existence of a production-comprehension asymmetry in the use of verb inflections and children's underlying grammatical knowledge.}, language = {en} } @phdthesis{Kollosche2014, author = {Kollosche, David}, title = {Gesellschaft, Mathematik und Unterricht : ein Beitrag zum soziologisch-kritischen Verst{\"a}ndnis der gesellschaftlichen Funktionen des Mathematikunterrichts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70726}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Die vorliegende Studie untersucht die gesellschaftliche Rolle des gegenw{\"a}rtigen Mathematikunterrichts an deutschen allgemeinbildenden Schulen aus einer soziologisch-kritischen Perspektive. Im Zentrum des Interesses steht die durch den Mathematikunterricht erfahrene Sozialisation. Die Studie umfasst unter anderem eine Literaturdiskussion, die Ausarbeitung eines soziologischen Rahmens auf der Grundlage des Werks von Michel Foucault und zwei Teilstudien zur Soziologie der Logik und des Rechnens. Abschließend werden Dispositive des Mathematischen beschrieben, die darlegen, in welcher Art und mit welchen pers{\"o}nlichen und gesellschaftlichen Folgen der gegenw{\"a}rtige Mathematikunterricht eine spezielle Geisteshaltung etabliert.}, language = {de} } @phdthesis{Schubert2014, author = {Schubert, Marcel}, title = {Elementary processes in layers of electron transporting Donor-acceptor copolymers : investigation of charge transport and application to organic solar cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70791}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Donor-acceptor (D-A) copolymers have revolutionized the field of organic electronics over the last decade. Comprised of an electron-rich and an electron-deficient molecular unit, these copolymers facilitate the systematic modification of the material's optoelectronic properties. The ability to tune the optical band gap and to optimize the molecular frontier orbitals as well as the manifold of structural sites that enable chemical modifications has created a tremendous variety of copolymer structures. Today, these materials reach or even exceed the performance of amorphous inorganic semiconductors. Most impressively, the charge carrier mobility of D-A copolymers has been pushed to the technologically important value of 10 cm^{2}V^{-1}s^{-1}. Furthermore, owing to their enormous variability, they are the material of choice for the donor component in organic solar cells, which have recently surpassed the efficiency threshold of 10\%. Because of the great number of available D-A copolymers and due to their fast chemical evolution, there is a significant lack of understanding of the fundamental physical properties of these materials. Furthermore, the complex chemical and electronic structure of D-A copolymers in combination with their semi-crystalline morphology impedes a straightforward identification of the microscopic origin of their superior performance. In this thesis, two aspects of prototype D-A copolymers were analysed. These are the investigation of electron transport in several copolymers and the application of low band gap copolymers as the acceptor component in organic solar cells. In the first part, the investigation of a series of chemically modified fluorene-based copolymers is presented.
The charge carrier mobility varies strongly between the different derivatives, although only moderate changes to the copolymer structure were made. Furthermore, rather unusual photocurrent transients were observed for one of the copolymers. Numerical simulations of the experimental results reveal that this behavior arises from a severe trapping of electrons in an exponential distribution of trap states. Based on the comparison of simulation and experiment, the general impact of charge carrier trapping on the shape of photo-CELIV and time-of-flight transients is discussed. In addition, the high performance naphthalenediimide (NDI)-based copolymer P(NDI2OD-T2) was characterized. It is shown that the copolymer possesses one of the highest electron mobilities reported so far, which makes it attractive to be used as the electron accepting component in organic photovoltaic cells.\par Solar cells were prepared from two NDI-containing copolymers, blended with the hole transporting polymer P3HT. I demonstrate that the use of appropriate, high boiling point solvents can significantly increase the power conversion efficiency of these devices. Spectroscopic studies reveal that the pre-aggregation of the copolymers is suppressed in these solvents, which has a strong impact on the blend morphology. Finally, a systematic study of P3HT:P(NDI2OD-T2) blends is presented, which quantifies the processes that limit the efficiency of devices. The major loss channel for excited states was determined by transient and steady state spectroscopic investigations: the majority of initially generated electron-hole pairs is annihilated by an ultrafast geminate recombination process. Furthermore, exciton self-trapping in P(NDI2OD-T2) domains accounts for an additional reduction of the efficiency. The correlation of the photocurrent with microscopic morphology parameters was used to disclose the factors that limit the charge generation efficiency. Our results suggest that the orientation of the donor and acceptor crystallites relative to each other represents the main factor that determines the free charge carrier yield in this material system. This provides an explanation for the overall low efficiencies that are generally observed in all-polymer solar cells.}, language = {en} } @phdthesis{Orgis2014, author = {Orgis, Thomas}, title = {Unstetige Galerkin-Diskretisierung niedriger Ordnung in einem atmosph{\"a}rischen Multiskalenmodell}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70687}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Die Dynamik der Atmosph{\"a}re der Erde umfasst einen Bereich von mikrophysikalischer Turbulenz {\"u}ber konvektive Prozesse und Wolkenbildung bis zu planetaren Wellenmustern. F{\"u}r Wettervorhersage und zur Betrachtung des Klimas {\"u}ber Jahrzehnte und Jahrhunderte ist diese Gegenstand der Modellierung mit numerischen Verfahren. Mit voranschreitender Entwicklung der Rechentechnik sind Neuentwicklungen der dynamischen Kerne von Klimamodellen, die mit der feiner werdenden Aufl{\"o}sung auch entsprechende Prozesse aufl{\"o}sen k{\"o}nnen, notwendig. Der dynamische Kern eines Modells besteht in der Umsetzung (Diskretisierung) der grundlegenden dynamischen Gleichungen f{\"u}r die Entwicklung von Masse, Energie und Impuls, so dass sie mit Computern numerisch gel{\"o}st werden k{\"o}nnen. Die vorliegende Arbeit untersucht die Eignung eines unstetigen Galerkin-Verfahrens niedriger Ordnung f{\"u}r atmosph{\"a}rische Anwendungen.
Diese Eignung f{\"u}r Gleichungen mit Wirkungen von externen Kr{\"a}ften wie Erdanziehungskraft und Corioliskraft ist aus der Theorie nicht selbstverst{\"a}ndlich. Es werden n{\"o}tige Anpassungen beschrieben, die das Verfahren stabilisieren, ohne sogenannte „slope limiter" einzusetzen. F{\"u}r das unmodifizierte Verfahren wird belegt, dass es nicht geeignet ist, atmosph{\"a}rische Gleichgewichte stabil darzustellen. Das entwickelte stabilisierte Modell reproduziert eine Reihe von Standard-Testf{\"a}llen der atmosph{\"a}rischen Dynamik mit Euler- und Flachwassergleichungen in einem weiten Bereich von r{\"a}umlichen und zeitlichen Skalen. Die L{\"o}sung der thermischen Windgleichung entlang der mit den Isobaren identischen charakteristischen Kurven liefert atmosph{\"a}rische Gleichgewichtszust{\"a}nde mit durch vorgegebenem Grundstrom einstellbarer Neigung zu(barotropen und baroklinen)Instabilit{\"a}ten, die f{\"u}r die Entwicklung von Zyklonen wesentlich sind. Im Gegensatz zu fr{\"u}heren Arbeiten sind diese Zust{\"a}nde direkt im z-System(H{\"o}he in Metern)definiert und m{\"u}ssen nicht aus Druckkoordinaten {\"u}bertragen werden.Mit diesen Zust{\"a}nden, sowohl als Referenzzustand, von dem lediglich die Abweichungen numerisch betrachtet werden, und insbesondere auch als Startzustand, der einer kleinen St{\"o}rung unterliegt, werden verschiedene Studien der Simulation von barotroper und barokliner Instabilit{\"a}t durchgef{\"u}hrt. Hervorzuheben ist dabei die durch die Formulierung von Grundstr{\"o}men mit einstellbarer Baroklinit{\"a}t erm{\"o}glichte simulationsgest{\"u}tzte Studie des Grades der baroklinen Instabilit{\"a}t verschiedener Wellenl{\"a}ngen in Abh{\"a}ngigkeit von statischer Stabilit{\"a}t und vertikalem Windgradient als Entsprechung zu Stabilit{\"a}tskarten aus theoretischen Betrachtungen in der Literatur.}, language = {de} } @phdthesis{Tenenboim2014, author = {Tenenboim, Yehezkel}, title = {Characterization of a Chlamydomonas protein involved in cell division and autophagy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70650}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The contractile vacuole (CV) is an osmoregulatory organelle found exclusively in algae and protists. In addition to expelling excessive water out of the cell, it also expels ions and other metabolites and thereby contributes to the cell's metabolic homeostasis. The interest in the CV reaches beyond its immediate cellular roles. The CV's function is tightly related to basic cellular processes such as membrane dynamics and vesicle budding and fusion; several physiological processes in animals, such as synaptic neurotransmission and blood filtration in the kidney, are related to the CV's function; and several pathogens, such as the causative agents of sleeping sickness, possess CVs, which may serve as pharmacological targets. The green alga Chlamydomonas reinhardtii has two CVs. They are the smallest known CVs in nature, and they remain relatively untouched in the CV-related literature. Many genes that have been shown to be related to the CV in other organisms have close homologues in C. reinhardtii. We attempted to silence some of these genes and observe the effect on the CV. One of our genes, VMP1, caused striking, severe phenotypes when silenced. Cells exhibited defective cytokinesis and aberrant morphologies. The CV, incidentally, remained unscathed. In addition, mutant cells showed some evidence of disrupted autophagy. 
Several important regulators of the cell cycle as well as autophagy were found to be underexpressed in the mutant. Lipidomic analysis revealed many meaningful changes between wild-type and mutant cells, reinforcing the compromised-autophagy observation. VMP1 is a singular protein, with homologues in numerous eukaryotic organisms (aside from fungi), but usually with no relatives in each particular genome. Since its first characterization in 2002 it has been associated with several cellular processes and functions, namely autophagy, programmed cell-death, secretion, cell adhesion, and organelle biogenesis. It has been implicated in several human diseases: pancreatitis, diabetes, and several types of cancer. Our results reiterate some of the observations in VMP1's six reported homologues, but, importantly, show for the first time an involvement of this protein in cell division. The mechanisms underlying this involvement in Chlamydomonas, as well as other key aspects, such as VMP1's subcellular localization and interaction partners, still await elucidation.}, language = {en} } @phdthesis{Ahmad2014, author = {Ahmad, Nadeem}, title = {People centered HMI's for deaf and functionally illiterate users}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70391}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The objective and motivation behind this research is to provide applications with easy-to-use interfaces to communities of deaf and functionally illiterate users, which enable them to work without any human assistance. Although recent years have witnessed technological advancements, the availability of technology does not ensure accessibility to information and communication technologies (ICT). Extensive use of text from menus to document contents means that deaf or functionally illiterate users cannot access services implemented in most computer software. Consequently, most existing computer applications pose an accessibility barrier to those who are unable to read fluently. Online technologies intended for such groups should be developed in continuous partnership with primary users and include a thorough investigation into their limitations, requirements and usability barriers. In this research, I investigated existing tools in voice, web and other multimedia technologies to identify learning gaps and explored ways to enhance the information literacy for deaf and functionally illiterate users. I worked on the development of user-centered interfaces to increase the capabilities of deaf and low literacy users by enhancing lexical resources and by evaluating several multimedia interfaces for them. The interface of the platform-independent Italian Sign Language (LIS) Dictionary has been developed to enhance the lexical resources for deaf users. The Sign Language Dictionary accepts Italian lemmas as input and provides their representation in the Italian Sign Language as output. The Sign Language dictionary has 3082 signs as a set of Avatar animations in which each sign is linked to a corresponding Italian lemma. I integrated the LIS lexical resources with the MultiWordNet (MWN) database to form the first LIS MultiWordNet (LMWN). LMWN contains information about lexical relations between words, semantic relations between lexical concepts (synsets), correspondences between Italian and sign language lexical concepts and semantic fields (domains).
The approach enhances the deaf users' understanding of written Italian and shows that a relatively small lexicon can cover a significant portion of MWN. Integration of LIS signs with MWN made it a useful tool for computational linguistics and natural language processing. The rule-based translation process from written Italian text to LIS has been transformed into a service-oriented system. The translation process is composed of various modules including a parser, a semantic interpreter, a generator, and a spatial allocation planner. This translation procedure has been implemented in the Java Application Building Center (jABC), which is a framework for extreme model driven design (XMDD). The XMDD approach focuses on bringing software development closer to conceptual design, so that the functionality of a software solution can be understood by someone who is unfamiliar with programming concepts. The transformation addresses the heterogeneity challenge and enhances the re-usability of the system. For enhancing the e-participation of functionally illiterate users, two detailed studies were conducted in the Republic of Rwanda. In the first study, the traditional (textual) interface was compared with the virtual character-based interactive interface. The study helped to identify usability barriers and users evaluated these interfaces according to three fundamental areas of usability, i.e. effectiveness, efficiency and satisfaction. In another study, we developed four different interfaces to analyze the usability and effects of online assistance (consistent help) for functionally illiterate users and compared the effect of different help modes (textual, vocal and virtual character) on the performance of semi-literate users. In our newly designed interfaces, the instructions were automatically translated into Swahili. All the interfaces were evaluated on the basis of task accomplishment, time consumption, System Usability Scale (SUS) rating and number of times the help was acquired. The results show that the performance of semi-literate users improved significantly when using the online assistance. The dissertation thus introduces a new development approach in which virtual characters are used as additional support for barely literate or naturally challenged users. Such components enhanced the application utility by offering a variety of services like translating contents into the local language, providing additional vocal information, and performing automatic translation from text to sign language. Obviously, there is no such thing as one design solution that fits all in the underlying domain. Context sensitivity, literacy and mental abilities are key factors on which I concentrated, and the results emphasize that computer interfaces must be based on a thoughtful definition of target groups, purposes and objectives.}, language = {en} } @phdthesis{Bathke2014, author = {Bathke, Hannes}, title = {An investigation of complex deformation patterns detected by using InSAR at Llaima and Tend{\"u}rek volcanoes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70522}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Surface displacement at volcanic edifices is related to subsurface processes associated with magma movements, fluid transfers within the volcano edifice and gravity-driven deformation processes. Understanding of associated ground displacements is of importance for assessment of volcanic hazards.
For example, volcanic unrest is often preceded by surface uplift, caused by magma intrusion, and followed by subsidence after the withdrawal of magma. Continuous monitoring of the surface displacement at volcanoes therefore might allow the forecasting of upcoming eruptions to some extent. In geophysics, the measured surface displacements allow the parameters of possible deformation sources to be estimated through analytical or numerical modeling. This is one way to improve the understanding of subsurface processes acting at volcanoes. Although the monitoring of volcanoes has significantly improved in the last decades (in terms of technical advancements and number of monitored volcanoes), the forecasting of volcanic eruptions remains puzzling. In this work I contribute towards the understanding of the subsurface processes at volcanoes and thus to the improvement of volcano eruption forecasting. I have investigated the displacement field of Llaima volcano in Chile and of Tend{\"u}rek volcano in East Turkey by using synthetic aperture radar interferometry (InSAR). Through modeling of the deformation sources with the extracted displacement data, it was possible to gain insights into potential subsurface processes occurring at these two volcanoes that had been barely studied before. The two volcanoes, although of very different origin, composition and geometry, both show a complexity of interacting deformation sources. At Llaima volcano, the InSAR technique was difficult to apply, due to the large decorrelation of the radar signal between image acquisitions. I developed a model-based unwrapping scheme, which allows the production of reliable displacement maps at the volcano that I used for deformation source modeling. The modeling results show significant differences in pre- and post-eruptive magmatic deformation source parameters. Therefore, I conjecture that two magma chambers exist below Llaima volcano: a post-eruptive deep one and a shallow one, possibly due to the pre-eruptive ascent of magma. Similar reservoir depths at Llaima have been confirmed by independent petrologic studies. These reservoirs are interpreted to be temporally coupled. At Tend{\"u}rek volcano I have found long-term subsidence of the volcanic edifice, which can be described by a large, magmatic, sill-like source that is subject to cooling contraction. The displacement data in conjunction with high-resolution optical images, however, reveal arcuate fractures at the eastern and western flank of the volcano. These are most likely the surface expressions of concentric ring-faults around the volcanic edifice that show low magnitudes of slip over a long time. This might be an alternative mechanism for the development of large caldera structures, which are so far assumed to be generated during large catastrophic collapse events. To investigate the potential subsurface geometry and relation of the two proposed interacting sources at Tend{\"u}rek, a sill-like magmatic source and ring-faults, I have applied a more sophisticated numerical modeling approach. The optimum source geometries show that the size of the sill-like source was overestimated in the simple models and that it is difficult to determine the dip angle of the ring-faults with surface displacement data only. However, considering physical and geological criteria, a combination of outward-dipping reverse faults in the west and inward-dipping normal faults in the east seems to be the most likely.
Consequently, the underground structure at the Tend{\"u}rek volcano consists of a small, sill-like, contracting, magmatic source below the western summit crater that causes a trapdoor-like faulting along the ring-faults around the volcanic edifice. Therefore, the magmatic source and the ring-faults are also interpreted to be temporally coupled. In addition, a method for data reduction has been improved. The modeling of subsurface deformation sources requires only a relatively small number of well distributed InSAR observations at the earth's surface. Satellite radar images, however, consist of several millions of these observations. Therefore, the large amount of data needs to be reduced by several orders of magnitude for source modeling, to save computation time and increase model flexibility. I have introduced a model-based subsampling approach in particular for heterogeneously-distributed observations. It allows a fast calculation of the data error variance-covariance matrix, also supports the modeling of time dependent displacement data and is, therefore, an alternative to existing methods.}, language = {en} } @phdthesis{Raiser2014, author = {Raiser, Christoph}, title = {Kompromisse im Europ{\"a}ischen Parlament : eine kultursoziologische Analyse von Entscheidungsprozessen in einer supranationalen Institution}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-283-4}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69560}, school = {Universit{\"a}t Potsdam}, pages = {xxiii, 313}, year = {2014}, abstract = {Das Europ{\"a}ische Parlament ist zweifelsohne die m{\"a}chtigste parlamentarische Versammlung auf supranationaler Ebene. Das provoziert die Frage, wie Entscheidungen in diesem Parlament gef{\"a}llt werden und wie sie begr{\"u}ndet werden k{\"o}nnen. Darin liegt das Hauptanliegen dieser Arbeit, die zur Beantwortung dieser Frage auf soziologische Ans{\"a}tze der Erkl{\"a}rung sozialen Handelns zur{\"u}ckgreift und damit einen neuen Zugang zur Beobachtung parlamentarischen Handelns schafft. Dabei arbeitet sie heraus, wie wichtig es ist, bei der Analyse politischer Entscheidungsprozesse zu beachten, wie politische Probleme von Akteuren interpretiert und gegen{\"u}ber Verhandlungspartnern dargestellt werden. An den Fallbeispielen der Entscheidungsprozesse zur Dienstleistungsrichtlinie, zur Chemikalien-Verordnung REACH und dem TDIP (CIA)-Ausschuss in der Legislaturperiode 2004-2009, wird der soziale Mechanismus dargestellt, der hinter Einigungen im Europ{\"a}ischen Parlament steckt. Kultur als Interpretation der Welt wird so zum Schl{\"u}ssel des Verst{\"a}ndnisses politischer Entscheidungen auf supranationaler Ebene.}, language = {de} } @phdthesis{Herenz2014, author = {Herenz, Peter}, title = {A study of the absorption characteristics of gaseous galaxy halos in the local Universe}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70513}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Today, it is well known that galaxies like the Milky Way consist not only of stars but also of gas and dust. The galactic halo, a sphere of gas that surrounds the stellar disk of a galaxy, is especially interesting. It provides a wealth of information about in and outflowing gaseous material towards and away from galaxies and their hierarchical evolution. 
For the Milky Way, the so-called high-velocity clouds (HVCs), fast-moving neutral gas complexes in the halo that can be traced by absorption-line measurements, are believed to play a crucial role in the overall matter cycle of our Galaxy. Over the last decades, the properties of these halo structures and their connection to the local circumgalactic and intergalactic medium (CGM and IGM, respectively) have been investigated in great detail by many different groups. So far, however, it remains unclear to what extent the results of these studies can be transferred to other galaxies in the local Universe. In this thesis, we study the absorption properties of Galactic HVCs and compare the HVC absorption characteristics with those of intervening QSO absorption-line systems at low redshift. The goal of this project is to improve our understanding of the spatial extent and physical conditions of gaseous galaxy halos in the local Universe. In the first part of the thesis we use HST/STIS ultraviolet spectra of more than 40 extragalactic background sources to statistically analyze the absorption properties of the HVCs in the Galactic halo. We determine fundamental absorption-line parameters, including covering fractions of different weakly, intermediately and highly ionized metals, with a particular focus on SiII and MgII. Due to the similarity in the ionization properties of SiII and MgII, we are able to estimate the contribution of HVC-like halo structures to the cross section of intervening strong MgII absorbers at z = 0. Our study implies that only the most massive HVCs would be regarded as strong MgII absorbers if the Milky Way halo were seen as a QSO absorption-line system from an exterior vantage point. Combining the observed absorption cross section of Galactic HVCs with the well-known number density of intervening strong MgII absorbers at z = 0, we conclude that the contribution of infalling gas clouds (i.e., HVC analogs) in the halos of Milky Way-type galaxies to the cross section of strong MgII absorbers is 34\%. This result indicates that only about one third of the strong MgII absorption can be associated with HVC analogs around other galaxies, while the majority of the strong MgII systems is possibly related to galaxy outflows and winds. The second part of this thesis focuses on the properties of intervening metal absorbers at low redshift. The analysis of the frequency and physical conditions of intervening metal systems in QSO spectra and their relation to nearby galaxies offers new insights into the typical conditions of gaseous galaxy halos. One major aspect of our study was to regard intervening metal systems as possible HVC analogs. We perform a detailed analysis of absorption-line properties and line statistics for 57 metal absorbers along 78 QSO sightlines using newly obtained ultraviolet spectra from HST/COS. We find clear evidence for a bimodal distribution of the HI column density of the absorbers, a trend that we interpret as a sign of two different classes of absorption systems (with HVC analogs at the high-column-density end). With the help of the strong transitions of SiII λ1260, SiIII λ1206, and CIII λ977, we have set up Cloudy photoionization models to estimate the local ionization conditions, gas densities, and metallicities. We find that the intervening absorption systems studied here have, on average, physical conditions similar to those of Galactic HVC absorbers, providing evidence that many of them represent HVC analogs in the vicinity of other galaxies.
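The cross-section argument used above (combining a halo absorption cross section and covering fraction with the number density of absorbers) can be made concrete with a short, schematic Python calculation. All numbers below are hypothetical placeholders chosen only to show the structure of the estimate; they are not values from the thesis.

import math

C_KM_S = 2.998e5   # speed of light [km/s]
H0 = 70.0          # Hubble constant [km/s/Mpc], an assumed value

def absorbers_per_unit_redshift(n_gal_per_mpc3, r_halo_kpc, covering_fraction):
    # Expected dN/dz at z ~ 0 for halos of radius r_halo with a given covering
    # fraction, following the standard absorber-statistics relation
    # dN/dz = (c / H0) * n_gal * pi * R^2 * f_c.
    sigma_mpc2 = math.pi * (r_halo_kpc / 1000.0) ** 2
    return (C_KM_S / H0) * n_gal_per_mpc3 * sigma_mpc2 * covering_fraction

expected = absorbers_per_unit_redshift(n_gal_per_mpc3=0.01, r_halo_kpc=60.0,
                                        covering_fraction=0.3)
observed_strong_mgii = 0.3   # assumed observed dN/dz of strong MgII absorbers
print(f"halo contribution: {100 * expected / observed_strong_mgii:.0f} percent of strong MgII absorbers")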
We therefore determine typical halo sizes for SiII, SiIII, and CIII for L = 0.01L∗ and L = 0.05L∗ galaxies. Based on the covering fractions of the different ions in the Galactic halo, we find that, for example, the typical halo size for SiIII is ∼ 160 kpc for L = 0.05L∗ galaxies. We test the plausibility of this result by searching for known galaxies close to the QSO sightlines and at redshifts similar to those of the absorbers. We find that more than 34\% of the measured SiIII absorbers have galaxies associated with them, with the majority of the absorbers indeed being at impact parameters ρ ≤ 160 kpc.}, language = {en} } @phdthesis{RoggeSolti2014, author = {Rogge-Solti, Andreas}, title = {Probabilistic Estimation of Unobserved Process Events}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70426}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Organizations try to gain competitive advantages and to increase customer satisfaction. To ensure the quality and efficiency of their business processes, they perform business process management. An important part of process management that happens on the daily operational level is process controlling. A prerequisite of controlling is process monitoring, i.e., keeping track of the performed activities in running process instances. Only by process monitoring can business analysts detect delays and react to deviations from the expected or guaranteed performance of a process instance. To enable monitoring, process events need to be collected from the process environment. When a business process is orchestrated by a process execution engine, monitoring is available for all orchestrated process activities. Many business processes, however, do not lend themselves to automatic orchestration, e.g., because of the freedom of action they require. This situation is often encountered in hospitals, where most business processes are enacted manually. Hence, in practice it is often inefficient or infeasible to document and monitor every process activity. Additionally, manual process execution and documentation are prone to errors, e.g., the documentation of activities can be forgotten. Thus, organizations face the challenge of process events that occur but are not observed by the monitoring environment. These unobserved process events can serve as a basis for operational process decisions, even without exact knowledge of when they happened or when they will happen. An exemplary decision is whether to invest more resources to manage the timely completion of a case, anticipating that the process end event will otherwise occur too late. This thesis offers means to reason about unobserved process events in a probabilistic way. In it, we address decisive questions of process managers (e.g., "when will the case be finished?" or "when did we perform the activity that we forgot to document?"). As the main contribution, we introduce an advanced probabilistic model to business process management that is based on a stochastic variant of Petri nets. We present a holistic approach to use the model effectively along the business process lifecycle. To this end, we provide techniques to discover such models from historical observations, to predict the termination time of processes, and to ensure quality through missing-data management. We propose mechanisms to optimize the configuration for monitoring and prediction, i.e., to offer guidance in selecting important activities to monitor. An implementation is provided as a proof of concept.
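To make the idea of probabilistic remaining-time prediction tangible, the following toy Python sketch runs a Monte Carlo simulation over assumed duration distributions of the activities not yet observed in a running case. It is deliberately much simpler than the stochastic Petri net model introduced in the thesis; the activity names and duration parameters are invented for illustration only.

import random
import statistics

# Hypothetical duration models (mean, standard deviation in hours) for the
# activities still missing in the running case, e.g. fitted from historical logs.
remaining_activities = {
    "lab_test": (4.0, 1.0),
    "diagnosis": (2.0, 0.5),
    "discharge": (1.0, 0.25),
}

def sample_remaining_time():
    # Draw one possible completion scenario for the case.
    total = 0.0
    for mean, std in remaining_activities.values():
        total += max(0.0, random.gauss(mean, std))   # durations cannot be negative
    return total

samples = sorted(sample_remaining_time() for _ in range(10000))
mean_remaining = statistics.mean(samples)
p90 = samples[int(0.9 * len(samples))]
print(f"predicted remaining time: {mean_remaining:.1f} h (90th percentile: {p90:.1f} h)")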
For evaluation, we compare the accuracy of the approach with that of state-of-the-art approaches using real process data from a hospital. Additionally, we show its more general applicability in other domains by applying the approach to process data from logistics and finance.}, language = {en} } @phdthesis{Grigoli2014, author = {Grigoli, Francesco}, title = {Automated seismic event location by waveform coherence analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70329}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Automated location of seismic events is a very important task in microseismic monitoring operations as well as for local and regional seismic monitoring. Since microseismic records are generally characterised by low signal-to-noise ratios, such methods are required to be noise-robust and sufficiently accurate. Most standard automated location routines are based on the automated picking, identification and association of the first arrivals of P and S waves and on the minimization of the residuals between theoretical and observed arrival times of the considered seismic phases. Although current methods can accurately pick P onsets, the automatic picking of the S onset is still problematic, especially when the P coda overlaps the S-wave onset. In this thesis I developed a picking-free automated method that uses the Short-Term-Average/Long-Term-Average (STA/LTA) traces at different stations as observed data. I used the STA/LTA of several characteristic functions in order to increase the sensitivity to the P and S waves. For the P phases we use the STA/LTA traces of the vertical energy function, while for the S phases we use the STA/LTA traces of the horizontal energy trace and, in addition, a more optimized characteristic function obtained using principal component analysis. The orientation of the horizontal components can be retrieved by a robust, linear approach based on waveform comparison between stations within a network, using seismic sources outside the network (chapter 2). To locate a seismic event, we scan the space of possible hypocentral locations and origin times and stack the STA/LTA traces along the theoretical arrival-time surface for both P and S phases. Iterating this procedure on a three-dimensional grid, we retrieve a multidimensional matrix whose absolute maximum corresponds to the spatial and temporal coordinates of the seismic event. Location uncertainties are then estimated by perturbing the STA/LTA parameters (i.e., the lengths of both the long and short time windows) and relocating each event several times. In order to test the location method I first applied it to a set of 200 synthetic events. We then applied it to two different real datasets. The first is related to mining-induced microseismicity in a coal mine in northern Germany (chapter 3). In this case we successfully located 391 microseismic events with magnitudes between 0.5 and 2.0 Ml. To further validate the location method I compared the retrieved locations with those obtained by a manual picking procedure. The second dataset consists of a pilot application performed in the Campania-Lucania region (southern Italy) using a 33-station seismic network (Irpinia Seismic Network) with an aperture of about 150 km (chapter 4). We located 196 crustal earthquakes (depth < 20 km) with magnitudes in the range 1.1 < Ml < 2.7.
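The picking-free location principle described in this abstract can be sketched in a few lines of Python: compute an STA/LTA characteristic function per station and stack it along theoretical travel times for a grid of trial hypocentres. The snippet below is a strongly simplified illustration under an assumed constant P velocity and invented geometry, not the implementation used in the thesis; station coordinates and grid nodes are expected as NumPy arrays.

import numpy as np

def sta_lta(trace, fs, sta_win=0.5, lta_win=5.0):
    # Short-term-average / long-term-average ratio of the squared trace.
    energy = trace ** 2
    sta = np.convolve(energy, np.ones(int(sta_win * fs)) / (sta_win * fs), "same")
    lta = np.convolve(energy, np.ones(int(lta_win * fs)) / (lta_win * fs), "same")
    return sta / np.maximum(lta, 1e-12)

def locate(traces, stations, grid_nodes, fs, v_p=5500.0):
    # Brute-force grid search: stack P-phase STA/LTA traces along the
    # theoretical arrival times of each trial hypocentre and keep the node
    # with the highest coherence over all candidate origin times.
    cfs = [sta_lta(tr, fs) for tr in traces]
    best_node, best_val = None, -np.inf
    for node in grid_nodes:
        travel_times = [np.linalg.norm(node - st) / v_p for st in stations]
        # Shifting each trace by its travel time aligns energy radiated at a common origin time.
        shifted = [np.roll(cf, -int(t * fs)) for cf, t in zip(cfs, travel_times)]
        stack = np.sum(shifted, axis=0)
        if stack.max() > best_val:
            best_val, best_node = stack.max(), node
    return best_node, best_val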
A subset of these locations was compared with accurate locations retrieved by a manual location procedure based on a double-difference technique. In both cases the results indicate good agreement with the manual locations. Moreover, the waveform-stacking location method proves to be noise-robust and performs better than classical location methods based on the automatic picking of P- and S-wave first arrivals.}, language = {en} } @phdthesis{Schultheiss2014, author = {Schultheiss, Corinna}, title = {Die Bewertung der pharyngalen Schluckphase mittels Bioimpedanz}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-284-1}, issn = {1869-3822}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69589}, school = {Universit{\"a}t Potsdam}, pages = {XV, 108}, year = {2014}, abstract = {Swallowing is a vital process whose diagnosis and therapy pose an enormous challenge. The detection and assessment of swallowing and swallowing disorders require technically demanding procedures such as videofluoroscopy (VFSS) and fiberoptic endoscopic evaluation of swallowing (FEES), which place a considerable burden on patients. Both procedures are used as the gold standard in the diagnosis of swallowing disorders. They are usually performed by medical staff, and the evaluation of the diagnostic image material requires considerable experience. In therapy, functional electrical stimulation is increasingly used alongside classical methods such as dietary modifications and swallowing maneuvers. The aim of this dissertation is the evaluation of a bioimpedance (BI) and electromyography (EMG) measurement system developed in the joint project BigDysPro. It was examined whether the BI and EMG measurement system is suitable for use in diagnostics as well as in therapy, both as a stand-alone measurement system and as part of a swallowing neuroprosthesis. In several studies, healthy subjects were examined to assess the reproducibility of the measurements (intra- and inter-rater reliability), the distinguishability of swallowing and head movements, and the influence of various factors (sex of the subjects, conductivity, consistency and amount of the food) on the biosignals (BI, EMG). Additional examinations with patients tested, on the one hand, the influence of the electrode type; on the other hand, endoscopic (FEES) and radiological swallowing examinations (VFSS) were performed in parallel to the BI and EMG measurements in order to test the correlation of the biosignals with the movement of anatomical structures (VFSS) and with swallowing quality (FEES). In total, 31 healthy subjects with 1819 swallows and 60 patients with 715 swallows were examined. The measurement curves showed a typical, reproducible signal course that correlated with the anatomical and functional changes during the pharyngeal swallowing phase seen in the VFSS (r > 0.7). Features could be extracted from the bioimpedance signal that correlated with physiological characteristics of a swallow, such as delayed laryngeal closure and laryngeal elevation, and that allowed an assessment of swallowing quality in agreement with FEES.
In the signal courses of the biosignals, significant differences between swallowing and head movements and between different food amounts and consistencies could be demonstrated. In contrast to the amount and consistency of the food, the conductivity of the swallowed food, the sex of the subjects and the type of electrodes showed no significant influence on the measurement signals. The results of the evaluation demonstrate that the BI and EMG measurement system provides a novel, non-invasive method that allows a reproducible representation of the pharyngeal swallowing phase and its changes. This opens up versatile applications in diagnostics, e.g. long-term measurement of swallowing frequency and assessment of swallowing quality, and in therapy, e.g. use in a swallowing neuroprosthesis or as biofeedback for the visualization of swallows and swallowing disorders.}, language = {de} } @phdthesis{Dahlsten2014, author = {Dahlsten, Ulf}, title = {World market governance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70168}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Democratic capitalism or liberal democracy, as the successful marriage of convenience between market liberalism and democracy is sometimes called, is in trouble. The market economy system has become global, and there is a growing mismatch with the territoriality of the nation-states. The functional global networks and the inter-governmental order can no longer keep pace with the rapid development of the global market economy, and regulatory capture is all too common. Concepts like de-globalization, self-regulation, and global government are floated in the debate. These alternatives are analysed and found to be improper, inadequate or plainly impossible. The proposed route is instead to accept that the global market economy has developed into an independent, fundamental societal system that needs its own governance. The suggestion is World Market Governance based on the Rule of Law, in order to shape the fitness environment for the global market economy and to strengthen the nation-states so that they can regain the sovereignty to decide upon the social and cultural conditions in each country. Elements of the proposed Rule of Law are international legislation decided by an Assembly supported by a Council, and an independent Judiciary. Existing international organisations would function as executors. The need for broad, sustained demand for regulations in the common interest is identified.}, language = {en} } @phdthesis{Andree2014, author = {Andree, Kai}, title = {Horizontale Fusionen bei r{\"a}umlichem Wettbewerb}, series = {Potsdamer Schriften zur Raumwirtschaft}, journal = {Potsdamer Schriften zur Raumwirtschaft}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-279-7}, issn = {2190-8702}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69209}, school = {Universit{\"a}t Potsdam}, pages = {xi, 227}, year = {2014}, abstract = {Mergers are a central building block of industrial economics. This book examines the influence that the spatial dimension exerts on a merger. A basic model is developed and, beyond it, a large number of extensions are presented.
The reader is thus given the opportunity to gain a deep understanding of mergers under spatial competition.}, language = {de} } @phdthesis{Biegholdt2014, author = {Biegholdt, Georg}, title = {Theorie und Praxis der Lerngruppensprache im Musikunterricht}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69657}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The use of musical terminology is called for in most curricula for music lessons at lower secondary level. However, a substantive elaboration of this requirement is missing not only from the curricula but also from the literature on music didactics. There is therefore no clarity about the content, scope and aim of the musical terminology to be used in school. Empirical studies on the linguistic content of music lessons are likewise lacking, and in many other school subjects, too, the state of research on linguistic content is limited. The use of language, however, involves not only communication processes but also learning processes within language itself, from the expansion of vocabulary to the establishment of content-related and thematic connections. These learning processes are influenced by the word choices of learners and teachers. At the same time, the learners' choice of words allows conclusions to be drawn about the state of their knowledge and how it is interconnected. On this basis, the linguistic content of music lessons is the subject of the present thesis. The aim of the study was to find out to what extent the manner and extent of the use of subject-specific terminology in music lessons can make learning processes more effective and more successful and align them better with the present and future needs of learners.}, language = {de} }