@phdthesis{Weiss2018, author = {Weiß, Katharina}, title = {Three Essays on EFRAG}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415355}, school = {Universit{\"a}t Potsdam}, pages = {II, 180}, year = {2018}, abstract = {This cumulative doctoral thesis consists of three papers that deal with the role of one specific European accounting player in international accounting standard-setting, namely the European Financial Reporting Advisory Group (EFRAG). The first paper examines whether and how EFRAG generally fulfills its role in articulating Europe's interests toward the International Accounting Standards Board (IASB). The qualitative data from the conducted interviews reveal that EFRAG influences the IASB's decision making at a very early stage, long before other constituents are officially asked to comment on the IASB's proposals. The second paper uses quantitative data and investigates the formal participation behavior of European constituents that seek to determine EFRAG's voice. More precisely, this paper analyzes the nature of the constituents' participation in EFRAG's due process in terms of representation (constituent groups and geographical distribution) and the drivers of their participation behavior. EFRAG's official decision making process is dominated by some specific constituent groups (such as preparers and the accounting profession) and by constituents from some specific countries (e.g. those with effective enforcement regimes). The third paper investigates, in a first step, which European constituents choose which lobbying channel (participation only at the IASB, only at EFRAG, or at both institutions) and, in a second step, unveils possible reasons for their lobbying choices. The paper comprises quantitative and qualitative data. It reveals that English skills, time issues, the size of the constituent, and the country of origin are factors that can explain why the majority participates only in the IASB's due process.}, language = {en} } @phdthesis{Weiss2011, author = {Weiß, Jan}, title = {Synthesis and self-assembly of multiple thermoresponsive amphiphilic block copolymers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53360}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In the present thesis, the self-assembly of multi thermoresponsive block copolymers in dilute aqueous solution was investigated by a combination of turbidimetry, dynamic light scattering, TEM measurements, NMR and fluorescence spectroscopy. The successive conversion of such block copolymers from a hydrophilic into a hydrophobic state includes intermediate amphiphilic states with a variable hydrophilic-to-lipophilic balance. As a result, the self-organization does not follow an all-or-none principle; instead, a multistep aggregation in dilute solution was observed. The synthesis of double thermoresponsive diblock copolymers as well as triple thermoresponsive triblock copolymers was realized using twofold TMS-labeled RAFT agents, which provide direct information about the average molar mass as well as residual end group functionality from a routine proton NMR spectrum. First, a set of double thermosensitive diblock copolymers, poly(N-n-propylacrylamide)-b-poly(N-ethylacrylamide), was synthesized which differed only in the relative size of the two blocks. Depending on the relative block lengths, different aggregation pathways were found. 
Furthermore, the complementary TMS-labeled end groups served as NMR probes for the self-assembly of these diblock copolymers in dilute solution. Reversible, temperature-sensitive peak splitting of the TMS signals in NMR spectroscopy was indicative of the formation of mixed star-/flower-like micelles in some cases. Moreover, triple thermoresponsive triblock copolymers from poly(N-n-propylacrylamide) (A), poly(methoxydiethylene glycol acrylate) (B) and poly(N-ethylacrylamide) (C) were obtained from sequential RAFT polymerization in all possible block sequences (ABC, BAC, ACB). Their self-organization behavior in dilute aqueous solution was found to be rather complex and dependent on the positioning of the different blocks within the terpolymers. In particular, the localization of the low-LCST block (A) had a large influence on the aggregation behavior. Above the first cloud point, aggregates were only observed when the A block was located at one terminus. Once placed in the middle, unimolecular micelles were observed which showed aggregation only above the second phase transition temperature of the B block. Carrier abilities of such triple thermosensitive triblock copolymers, tested by fluorescence spectroscopy using the solvatochromic dye Nile Red, suggested that the hydrophobic probe is less efficiently incorporated by the polymer with the BAC sequence as compared to ABC or ACB polymers above the first phase transition temperature. In addition, due to the problem of increasing loss of end group functionality during the subsequent polymerization steps, a novel concept for the one-step synthesis of multi thermoresponsive block copolymers was developed. This made it possible to synthesize double thermoresponsive di- and triblock copolymers in a single polymerization step. The copolymerization of different N-substituted maleimides with a thermosensitive styrene derivative (4-vinylbenzyl methoxytetrakis(oxyethylene) ether) led to alternating copolymers with variable LCST. Consequently, an excess of this styrene-based monomer allowed the synthesis of double thermoresponsive tapered block copolymers in a single polymerization step.}, language = {en} } @phdthesis{Weidlich2011, author = {Weidlich, Matthias}, title = {Behavioural profiles : a relational approach to behaviour consistency}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55590}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business Process Management (BPM) emerged as a means to control, analyse, and optimise business operations. Conceptual models are of central importance for BPM. Most prominently, process models define the behaviour that is performed to achieve a business value. In essence, a process model is a mapping of properties of the original business process to the model, created for a purpose. Different modelling purposes, therefore, result in different models of a business process. Against this background, the misalignment of process models often observed in the field of BPM is no surprise. Even if the same business scenario is considered, models created for strategic decision making differ in content significantly from models created for process automation. Despite their differences, process models that refer to the same business process should be consistent, i.e., free of contradictions. Apparently, there is a trade-off between strictness of a notion of consistency and appropriateness of process models serving different purposes. 
Existing work on consistency analysis builds upon behaviour equivalences and hierarchical refinements between process models. Hence, these approaches are computationally hard and do not offer the flexibility to gradually relax consistency requirements towards a certain setting. This thesis presents a framework for the analysis of behaviour consistency that takes a fundamentally different approach. As a first step, an alignment between corresponding elements of related process models is constructed. Then, this thesis conducts behavioural analysis grounded on a relational abstraction of the behaviour of a process model, its behavioural profile. Different variants of these profiles are proposed, along with efficient computation techniques for a broad class of process models. Using behavioural profiles, consistency of an alignment between process models is judged by different notions and measures. The consistency measures are also adjusted to assess conformance of process logs that capture the observed execution of a process. Further, this thesis proposes various complementary techniques to support consistency management. It elaborates on how to implement consistent change propagation between process models, addresses the exploration of behavioural commonalities and differences, and proposes a model synthesis for behavioural profiles.}, language = {en} } @phdthesis{Weidenfeld2003, author = {Weidenfeld, Andrea}, title = {Interpretation of and reasoning with conditionals : probabilities, mental models, and causality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5207}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {In everyday conversation \"if\" is one of the most frequently used conjunctions. This dissertation investigates what meaning an everyday conditional transmits and what inferences it licenses. It is suggested that the nature of the relation between the two propositions in a conditional might play a major role for both questions. Thus, in the experiments reported here conditional statements that describe a causal relationship (e.g., \"If you touch that wire, you will receive an electric shock\") were compared to arbitrary conditional statements in which there is no meaningful relation between the antecedent and the consequent proposition (e.g., \"If Napoleon is dead, then Bristol is in England\"). Initially, central assumptions from several approaches to the meaning and the reasoning from causal conditionals will be integrated into a common model. In the model the availability of exceptional situations that have the power to generate exceptions to the rule described in the conditional (e.g., the electricity is turned off), reduces the subjective conditional probability of the consequent, given the antecedent (e.g., the probability of receiving an electric shock when touching the wire). This conditional probability determines people's degree of belief in the conditional, which in turn affects their willingness to accept valid inferences (e.g., \"Peter touches the wire, therefore he receives an electric shock\") in a reasoning task. Additionally to this indirect pathway, the model contains a direct pathway: Cognitive availability of exceptional situations directly reduces the readiness to accept valid conclusions. 
The first experimental series tested the integrated model for conditional statements embedded in pseudo-natural cover stories that either established a causal relation between the antecedent and the consequent event (causal conditionals) or did not connect the propositions in a meaningful way (arbitrary conditionals). The model was supported for the causal, but not for the arbitrary conditional statements. Furthermore, participants assigned lower degrees of belief to arbitrary than to causal conditionals. Is this effect due to the presence versus absence of a semantic link between antecedent and consequent in the conditionals? This question was one of the starting points for the second experimental series. Here, the credibility of the conditionals was manipulated by adding explicit frequency information about possible combinations of presence or absence of antecedent and consequent events to the problems (i.e., frequencies of cases of 1. true antecedent with true consequent, 2. true antecedent with false consequent, 3. false antecedent with true consequent, 4. false antecedent with false consequent). This paradigm allows testing different approaches to the meaning of conditionals (Experiment 4) as well as theories of conditional reasoning against each other (Experiment 5). The results of Experiment 4 supported mainly the conditional probability approach to the meaning of conditionals (Edgington, 1995) according to which the degree of belief a listener has in a conditional statement equals the conditional probability that the consequent is true given the antecedent (e.g., the probability of receiving an electric shock when touching the wire). Participants again assigned lower degrees of belief to the arbitrary than the causal conditionals, although the conditional probability of the consequent given the antecedent was held constant within every condition of explicit frequency information. This supports the hypothesis that the mere presence of a causal link enhances the believability of a conditional statement. In Experiment 5 participants solved conditional reasoning tasks from problems that contained explicit frequency information about possible relevant cases. The data favored the probabilistic approach to conditional reasoning advanced by Oaksford, Chater, and Larkin (2000). The two experimental series reported in this dissertation provide strong support for recent probabilistic theories: for the conditional probability approach to the meaning of conditionals by Edgington (1995) and the probabilistic approach to conditional reasoning by Oaksford et al. (2000). In the domain of conditional reasoning, there was additionally support for the modified mental model approaches by Markovits and Barrouillet (2002) and Schroyens and Schaeken (2003). 
Probabilistic and mental model approaches could be reconciled within a dual-process model as suggested by Verschueren, Schaeken, and d'Ydewalle (2003).}, subject = {Konditional}, language = {en} } @phdthesis{Wegerich2010, author = {Wegerich, Franziska}, title = {Engineered human cytochrome c : investigation of superoxide and protein-protein interaction and application in bioelectronic systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50782}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The aim of this thesis is the design, expression and purification of human cytochrome c mutants and their characterization with regard to electrochemical and structural properties as well as with respect to the reaction with the superoxide radical and the selected proteins human sulfite oxidase and fungal bilirubin oxidase. All three interaction partners are studied here for the first time with human cyt c and with mutant forms of cyt c. A further aim is the incorporation of the different cyt c forms in two bioelectronic systems: an electrochemical superoxide biosensor with an enhanced sensitivity and a protein multilayer assembly with and without bilirubin oxidase on electrodes. The first part of the thesis is dedicated to the design, expression and characterization of the mutants. A focus here is the electrochemical characterization of the protein in solution and immobilized on electrodes. Further, the reaction of these mutants with superoxide was investigated and the possible reaction mechanisms are discussed. In the second part of the work an amperometric superoxide biosensor with selected human cytochrome c mutants was constructed and the performance of the sensor electrodes was studied. The human wild-type and four of the five mutant electrodes could be applied successfully for the detection of the superoxide radical. In the third part of the thesis the reaction of horse heart cyt c, the human wild-type and seven human cyt c mutants with the two proteins sulfite oxidase and bilirubin oxidase was studied electrochemically, and the influence of the mutations on the electron transfer reactions was discussed. Finally, protein multilayer electrodes with different cyt c forms, including the mutants G77K and N70K which exhibit different reaction rates towards BOD, were investigated, and BOD together with the wild-type and engineered cyt c was embedded in the multilayer assembly. The relevant electron transfer steps and the kinetic behavior of the multilayer electrodes are investigated, since the functionality of electroactive multilayer assemblies with incorporated redox proteins is often limited by the electron transfer abilities of the proteins within the multilayer. The formation via the layer-by-layer technique and the kinetic behavior of the mono- and bi-protein multilayer systems are studied by SPR and cyclic voltammetry. In conclusion, this thesis shows that protein engineering is a helpful instrument to study protein reactions as well as electron transfer mechanisms of complex bioelectronic systems (such as bi-protein multilayers). 
Furthermore, the possibility to design tailored recognition elements for the construction of biosensors with an improved performance is demonstrated.}, language = {en} } @phdthesis{WegerCoenen2021, author = {Weger Coenen, Lindsey}, title = {Exploring potential impacts from transitions in German and European energy on GHG and air pollutant emissions and on ozone air quality}, doi = {10.25932/publishup-49698}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-496986}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 161}, year = {2021}, abstract = {Energy is at the heart of the climate crisis—but also at the heart of any efforts for climate change mitigation. Energy consumption is namely responsible for approximately three quarters of global anthropogenic greenhouse gas (GHG) emissions. Therefore, central to any serious plans to stave off a climate catastrophe is a major transformation of the world's energy system, which would move society away from fossil fuels and towards a net-zero energy future. Considering that fossil fuels are also a major source of air pollutant emissions, the energy transition has important implications for air quality as well, and thus also for human and environmental health. Both Europe and Germany have set the goal of becoming GHG neutral by 2050, and moreover have demonstrated their deep commitment to a comprehensive energy transition. Two of the most significant developments in energy policy over the past decade have been the interest in expansion of shale gas and hydrogen, which accordingly have garnered great interest and debate among public, private and political actors. In this context, sound scientific information can play an important role by informing stakeholder dialogue and future research investments, and by supporting evidence-based decision-making. This thesis examines anticipated environmental impacts from possible, relevant changes in the European energy system, in order to impart valuable insight and fill critical gaps in knowledge. Specifically, it investigates possible future shale gas development in Germany and the United Kingdom (UK), as well as a hypothetical, complete transition to hydrogen mobility in Germany. Moreover, it assesses the impacts on GHG and air pollutant emissions, and on tropospheric ozone (O3) air quality. The analysis is facilitated by constructing emission scenarios and performing air quality modeling via the Weather Research and Forecasting model coupled with chemistry (WRF-Chem). The work of this thesis is presented in three research papers. The first paper finds that methane (CH4) leakage rates from upstream shale gas development in Germany and the UK would range between 0.35\% and 1.36\% in a realistic, business-as-usual case, while they would be significantly lower - between 0.08\% and 0.15\% - in an optimistic, strict regulation and high compliance case, thus demonstrating the value and potential of measures to substantially reduce emissions. Yet, while the optimistic case is technically feasible, it is unlikely that the practices and technologies assumed would be applied and accomplished on a systematic, regular basis, owing to economics and limited monitoring resources. The realistic CH4 leakage rates estimated in this study are comparable to values reported by studies carried out in the US and elsewhere. In contrast, the optimistic rates are similar to official CH4 leakage data from upstream gas production in Germany and in the UK. 
Considering that there is a lack of systematic, transparent and independent reports supporting the official values, this study further highlights the need for more research efforts in this direction. Compared with national energy sector emissions, this study suggests that shale gas emissions of volatile organic compounds (VOCs) could be significant, though relatively insignificant for other air pollutants. Similar to CH4, measures could be effective for reducing VOCs emissions. The second paper shows that VOC and nitrogen oxides (NOx) emissions from a future shale gas industry in Germany and the UK have potentially harmful consequences for European O3 air quality on both the local and regional scale. The results indicate a peak increase in maximum daily 8-hour average O3 (MDA8) ranging from 3.7 µg m-3 to 28.3 µg m-3. Findings suggest that shale gas activities could result in additional exceedances of MDA8 at a substantial percentage of regulatory measurement stations both locally and in neighboring and distant countries, with up to circa one third of stations in the UK and one fifth of stations in Germany experiencing additional exceedances. Moreover, the results reveal that the shale gas impact on the cumulative health-related metric SOMO35 (annual Sum of Ozone Means Over 35 ppb) could be substantial, with a maximum increase of circa 28\%. Overall, the findings suggest that shale gas VOC emissions could play a critical role in O3 enhancement, while NOx emissions would contribute to a lesser extent. Thus, the results indicate that stringent regulation of VOC emissions would be important in the event of future European shale gas development to minimize deleterious health outcomes. The third paper demonstrates that a hypothetical, complete transition of the German vehicle fleet to hydrogen fuel cell technology could contribute substantially to Germany's climate and air quality goals. The results indicate that if the hydrogen were to be produced via renewable-powered water electrolysis (green hydrogen), German carbon dioxide equivalent (CO2eq) emissions would decrease by 179 MtCO2eq annually, though if electrolysis were powered by the current electricity mix, emissions would instead increase by 95 MtCO2eq annually. The findings generally reveal a notable anticipated decrease in German energy emissions of regulated air pollutants. The results suggest that vehicular hydrogen demand is 1000 PJ annually, which would require between 446 TWh and 525 TWh for electrolysis, hydrogen transport and storage. When only the heavy duty vehicle segment (HDVs) is shifted to green hydrogen, the results of this thesis show that vehicular hydrogen demand drops to 371 PJ, while a deep emissions cut is still realized (-57 MtCO2eq), suggesting that HDVs are a low-hanging fruit for contributing to decarbonization of the German road transport sector with hydrogen energy.}, language = {en} } @phdthesis{Weege2017, author = {Weege, Stefanie}, title = {Climatic drivers of retrogressive thaw slump activity and resulting sediment and carbon release to the nearshore zone of Herschel Island, Yukon Territory, Canada}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397947}, school = {Universit{\"a}t Potsdam}, pages = {163}, year = {2017}, abstract = {The Yukon Coast in Canada is an ice-rich permafrost coast and highly sensitive to changing environmental conditions. 
Retrogressive thaw slumps are a common thermoerosion feature along this coast, and develop through the thawing of exposed ice-rich permafrost on slopes and removal of accumulating debris. They contribute large amounts of sediment, including organic carbon and nitrogen, to the nearshore zone. The objective of this study was to 1) identify the climatic and geomorphological drivers of sediment-meltwater release, 2) quantify the amount of released meltwater, sediment, organic carbon and nitrogen, and 3) project the evolution of sediment-meltwater release of retrogressive thaw slumps in a changing future climate. The analysis is based on data collected over 18 days in July 2013 and 18 days in August 2012. A cut-throat flume was set up in the main sediment-meltwater channel of the largest retrogressive thaw slump on Herschel Island. In addition, two weather stations, one on top of the undisturbed tundra and one on the slump floor, measured incoming solar radiation, air temperature, wind speed and precipitation. The discharge volume eroding from the ice-rich permafrost and retreating snowbanks was measured and compared to the meteorological data collected in real time with a resolution of one minute. The results show that the release of sediment-meltwater from thawing of the ice-rich permafrost headwall is strongly related to snowmelt, incoming solar radiation and air temperature. Snowmelt led to seasonal differences, especially because the additional contribution of water to the sediment-meltwater eroding from the ablating headwall led to a dilution of the sediment-meltwater composition. Incoming solar radiation and air temperature were the main drivers of diurnal and inter-diurnal fluctuations. In July (2013), the retrogressive thaw slump released about 25 000 m³ of sediment-meltwater, containing 225 kg dissolved organic carbon and 2050 t of sediment, which in turn included 33 t organic carbon and 4 t total nitrogen. In August (2012), just 15 600 m³ of sediment-meltwater was released, since there was no additional contribution from snowmelt. However, even without the additional dilution, 281 kg dissolved organic carbon was released. The sediment concentration was twice as high as in July, with sediment contents of up to 457 g l-1 and 3058 t of sediment, including 53 t organic carbon and 5 t nitrogen, being released. In addition, the data from the 36 days of observations from Slump D were upscaled to cover the main summer season of 1 July to 31 August (62 days) and to include all 229 active retrogressive thaw slumps along the Yukon Coast. In total, all retrogressive thaw slumps along the Yukon Coast contribute a minimum of 1.4 million m³ of sediment-meltwater each thawing season, containing a minimum of 172 000 t sediment with 3119 t organic carbon, 327 t nitrogen and 17 t dissolved organic carbon. Therefore, in addition to the coastal erosion input to the Beaufort Sea, retrogressive thaw slumps release a further 3 \% of sediment and 8 \% of organic carbon into the ocean. Finally, the future evolution of retrogressive thaw slumps under a warming scenario with summer air temperatures increasing by 2-3 °C by 2081-2100 would lead to an increase of 109-114\% in the release of sediment-meltwater. 
It can be concluded that retrogressive thaw slumps are sensitive to climatic conditions and under projected future Arctic warming will contribute larger amounts of thawed permafrost material (including organic carbon and nitrogen) to the environment.}, language = {en} } @phdthesis{Wechakama2013, author = {Wechakama, Maneenate}, title = {Multi-messenger constraints and pressure from dark matter annihilation into electron-positron pairs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67401}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Despite striking evidence for the existence of dark matter from astrophysical observations, dark matter has so far escaped any direct or indirect detection. A proof of its existence and the revelation of its nature therefore remain among the most intriguing challenges of present-day cosmology and particle physics. The present work investigates the nature of dark matter through indirect signatures from dark matter annihilation into electron-positron pairs in two different ways: pressure from dark matter annihilation, and multi-messenger constraints on the dark matter annihilation cross-section. We focus on dark matter annihilation into electron-positron pairs and adopt a model-independent approach, where all the electrons and positrons are injected with the same initial energy E_0 ~ m_dm*c^2. The propagation of these particles is determined by solving the diffusion-loss equation, considering inverse Compton scattering, synchrotron radiation, Coulomb collisions, bremsstrahlung, and ionization. The first part of this work, focusing on pressure from dark matter annihilation, demonstrates that dark matter annihilation into electron-positron pairs may affect the observed rotation curve by a significant amount. The injection rate of this calculation is constrained by INTEGRAL, Fermi, and H.E.S.S. data. The pressure of the relativistic electron-positron gas is computed from the energy spectrum predicted by the diffusion-loss equation. For values of the gas density and magnetic field that are representative of the Milky Way, it is estimated that the pressure gradients are strong enough to balance gravity in the central parts if E_0 < 1 GeV. The exact value depends somewhat on the astrophysical parameters, and it changes dramatically with the slope of the dark matter density profile. For very steep slopes, such as those expected from adiabatic contraction, the rotation curves of spiral galaxies would be affected on kiloparsec scales for most values of E_0. By comparing the predicted rotation curves with observations of dwarf and low surface brightness galaxies, we show that the pressure from dark matter annihilation may improve the agreement between theory and observations in some cases, but it also imposes severe constraints on the model parameters (most notably, the inner slope of the halo density profile, as well as the mass and the annihilation cross-section of dark matter particles into electron-positron pairs). In the second part, upper limits on the dark matter annihilation cross-section into electron-positron pairs are obtained by combining observed data at different wavelengths (from Haslam, WMAP, and Fermi all-sky intensity maps) with recent measurements of the electron and positron spectra in the solar neighbourhood by PAMELA, Fermi, and H.E.S.S. We consider synchrotron emission in the radio and microwave bands, as well as inverse Compton scattering and final-state radiation at gamma-ray energies. 
For most values of the model parameters, the tightest constraints are imposed by the local positron spectrum and synchrotron emission from the central regions of the Galaxy. According to our results, the annihilation cross-section should not be higher than the canonical value for a thermal relic if the mass of the dark matter candidate is smaller than a few GeV. In addition, we derive a stringent upper limit on the inner logarithmic slope α of the density profile of the Milky Way dark matter halo (α < 1 if m_dm < 5 GeV, α < 1.3 if m_dm < 100 GeV and α < 1.5 if m_dm < 2 TeV) assuming a dark matter annihilation cross-section into electron-positron pairs (σv) = 3*10^-26 cm^3 s^-1, as predicted for thermal relics from the big bang.}, language = {en} } @phdthesis{Weber2004, author = {Weber, Michael H.}, title = {Robotic telescopes \& Doppler imaging : measuring differential rotation on long-period active stars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001834}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Many phenomena connected to solar magnetic activity can be seen on the Sun. The magnetic field responsible for them is generated by a dynamo, which is presumably located at the bottom of the convection zone, in the so-called tachocline. The dynamo is driven partly by differential rotation and partly by the magnetic turbulence in the convection zone. Differential rotation can be measured on the solar surface by observing the motions of sunspots. To obtain a larger parameter space for testing dynamo theories, these measurements can also be extended to other stars. The primary problem is that stellar surfaces cannot be observed directly. Indirectly, however, this can be achieved with the Doppler imaging method, which makes use of the Doppler broadening of the spectral lines of rapidly rotating stars. To obtain an image of the stellar surface, however, many high-resolution spectroscopic observations are needed, distributed evenly over one stellar rotation. For stars with long rotation periods such observations are difficult to carry out. The new robotic observatory STELLA addresses this problem and offers an observation scheduling tailored to Doppler imaging. This will not only make such observations easier to perform but also more effective. As a preview of the results that can be expected from STELLA, a study of seven stars is presented, all of which have long rotation periods (between seven and 25 days). All stars show differential rotation, but owing to the unsatisfactory data quality the measurement errors are of the same order of magnitude as the results, a problem that will not arise with STELLA. To check the consistency of the results, both a cross-correlation analysis and the sheared-image method were applied where possible. Four of these seven stars exhibit differential rotation in the direction opposite to that seen on the Sun. 
The remaining three stars show weak differential rotation that is solar-like in direction. Finally, these new measurements are combined with previously published values, and the resulting data are examined for correlations between differential rotation, rotation period, evolutionary status, spectral type, and the presence of a binary companion. Taken together, all stars show a significant correlation between the magnitude of differential rotation and the rotation period. If the directions of differential rotation are distinguished, only a correlation for the stars with anti-solar behaviour remains. Furthermore, it is found that binary stars show weaker differential rotation.}, language = {en} } @phdthesis{Wattenbach2008, author = {Wattenbach, Martin}, title = {The hydrological effects of changes in forest area and species composition in the federal state of Brandenburg, Germany}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27394}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This thesis aims to quantify the human impact on the natural resource water at the landscape scale. The drivers in the federal state of Brandenburg (Germany), the area under investigation, are land-use changes induced by policy decisions at European and federal state level. The water resources of the federal state are particularly sensitive to changes in land-use due to low precipitation rates in the summer combined with sandy soils and high evapotranspiration rates. Key elements in landscape hydrology are forests because of their unique capacity to transport water from the soil to the atmosphere. Given these circumstances, decisions made at any level of administration that may have effects on the forest sector in the state are critical in relation to the water cycle. It is therefore essential to evaluate any decision that may change forest area and structure in such a sensitive region. Thus, as a first step, it was necessary to develop and implement a model able to simulate possible interactions and feedbacks between forested surfaces and the hydrological cycle at the landscape scale. The result is a model for simulating the hydrological properties of forest stands based on a robust computation of the temporal and spatial LAI (leaf area index) dynamics. The approach allows the simulation of all relevant hydrological processes with a low parameter demand. It includes the interception of precipitation and transpiration of forest stands with and without groundwater in the rooting zone. The model also considers phenology, biomass allocation, as well as mortality and simple management practices. It has been implemented as a module in the eco-hydrological model SWIM (Soil and Water Integrated Model). This model has been tested in two pre-studies to verify the applicability of its hydrological process description for the hydrological conditions typical for the state. The newly implemented forest module has been tested for Scots Pine (Pinus sylvestris) and in parts for Common Oak (Quercus robur and Q. petraea) in Brandenburg. For Scots Pine the results demonstrate a good simulation of annual biomass increase and LAI in addition to the satisfactory simulation of litter production. A comparison of the simulated and measured data of the May sprout for Scots pine and leaf unfolding for Oak, as well as the evaluation against daily transpiration measurements for Scots Pine, does support the applicability of the approach. 
The interception of precipitation has also been simulated and compared with weekly observed data for a Scots Pine stand which displays satisfactory results in both the vegetation periods and annual sums. After the development and testing phase, the model is used to analyse the effects of two scenarios. The first scenario is an increase in forest area on abandoned agricultural land that is triggered by a decrease in European agricultural production support. The second one is a shift in species composition from predominant Scots Pine to Common Oak that is based on decisions of the regional forestry authority to support a more natural species composition. The scenario effects are modelled for the federal state of Brandenburg on a 50m grid utilising spatially explicit land-use patterns. The results, for the first scenario, suggest a negative impact of an increase in forest area (9.4\% total state area) on the regional water balance, causing an increase in mean long-term annual evapotranspiration of 3.7\% at 100\% afforestation when compared to no afforestation. The relatively small annual change conceals a much more pronounced seasonal effect of a mean long-term evapotranspiration increase by 25.1\% in the spring causing a pronounced reduction in groundwater recharge and runoff. The reduction causes a lag effect that aggravates the scarcity of water resources in the summer. In contrast, in the second scenario, a change in species composition in existing forests (29.2\% total state area) from predominantly Scots Pine to Common Oak decreases the long-term annual mean evapotranspiration by 3.4\%, accompanied by a much weaker, but apparent, seasonal pattern. Both scenarios exhibit a high spatial heterogeneity because of the distinct natural conditions in the different regions of the state. Areas with groundwater levels near the surface are particularly sensitive to changes in forest area and regions with relatively high proportion of forest respond strongly to the change in species composition. In both cases this regional response is masked by a smaller linear mean effect for the total state area. Two critical sources of uncertainty in the model results have been investigated. The first one originates from the model calibration parameters estimated in the pre-study for lowland regions, such as the federal state. The combined effect of the parameters, when changed within their physical meaningful limits, unveils an overestimation of the mean water balance by 1.6\%. However, the distribution has a wide spread with 14.7\% for the 90th percentile and -9.9\% for the 10th percentile. The second source of uncertainty emerges from the parameterisation of the forest module. The analysis exhibits a standard deviation of 0.6 \% over a ten year period in the mean of the simulated evapotranspiration as a result of variance in the key forest parameters. The analysis suggests that the combined uncertainty in the model results is dominated by the uncertainties of calibration parameters. Therefore, the effect of the first scenario might be underestimated because the calculated increase in evapotranspiration is too small. This may lead to an overestimation of the water balance towards runoff and groundwater recharge. 
The opposite can be assumed for the second scenario in which the decrease in evapotranspiration might be overestimated.}, language = {en} } @phdthesis{Wattebled2006, author = {Wattebled, Laurent}, title = {Oligomeric surfactants as novel type of amphiphiles : structure - property relationships and behaviour with additives}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12855}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The properties of a series of well-defined new surfactant oligomers (dimers to tetramers) were examined. From a molecular point of view, these oligomeric surfactants consist of simple monomeric cationic surfactant fragments coupled via the hydrophilic ammonium chloride head groups by spacer groups (different in nature and length). Properties of these cationic surfactant oligomers in aqueous solution such as solubility, micellization and surface activity, micellar size and aggregation number were discussed with respect to the two new molecular variables introduced, i.e. degree of oligomerization and spacer group, in order to establish structure - property relationships. Thus, increasing the degree of oligomerization results in a pronounced decrease of the critical micellization concentration (CMC). Both reduced spacer length and increased spacer hydrophobicity lead to a decrease of the CMC, but to a lesser extent. For these particular compounds, the formed micelles are relatively small and their aggregation number decreases with increasing degree of oligomerization, increasing spacer length and steric hindrance. In addition, pseudo-phase diagrams were established for the dimeric surfactants in more complex systems, namely inverse microemulsions, demonstrating again the important influence of the spacer group on the surfactant behaviour. Furthermore, the influence of additives on the property profile of the dimeric compounds was examined, in order to see if the solution properties can be improved while using less material. Strong synergistic effects were observed by adding special organic salts (e.g. sodium salicylate, sodium vinyl benzoate, etc.) to the surfactant dimers in stoichiometric amounts. For such mixtures, the critical aggregation concentration is strongly shifted to lower concentration, the effect being more pronounced for dimers than for analogous monomers. A sharp decrease of the surface tension can also be attained. Many of the organic anions produce viscoelastic solutions when added to the relatively short-chain dimers in aqueous solution, as evidenced by rheological measurements. This behaviour reflects the formation of entangled wormlike micelles due to strong interactions of the anions with the cationic surfactants, decreasing the curvature of the micellar aggregates. It is found that the associative behaviour is enhanced by dimerization. For a given counterion, the spacer group may also induce a stronger viscosifying effect depending on its length and hydrophobicity. Oppositely charged surfactants were also combined with the cationic dimers. First, some mixtures with the conventional anionic surfactant SDS revealed vesicular aggregates in solution. In view of these catanionic mixtures, a novel anionic dimeric surfactant based on EDTA was also synthesized and studied. The synthesis route is relatively simple and the compound exhibits particularly appealing properties such as low CMC and σCMC values, good solubilization capacity of hydrophobic probes and high tolerance to hard water. 
Notably, mixtures with particular cationic dimers gave rise to viscous solutions, reflecting the micelle growth.}, language = {en} } @phdthesis{Wasiolka2007, author = {Wasiolka, Bernd}, title = {The impact of overgrazing on reptile diversity and population dynamics of Pedioplanis l. lineoocellata in the southern Kalahari}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-16611}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Vegetation composition and structure, for example the different architecture of trees, shrubs, grasses and herbs, provide a wide range of habitats and niches, which in turn enable a high animal diversity in the savanna systems of southern Africa. However, this ecosystem has been profoundly altered worldwide over decades by intensive anthropogenic land use (e.g. livestock farming), which has strongly changed the composition, diversity and structure of the vegetation. Overgrazing in savanna systems leads to habitat degradation, accompanied by the loss of perennial grasses and herbaceous vegetation and an increase in bare ground. Both the loss of perennial grasses and herbaceous vegetation and the increase in bare ground improve the establishment conditions for shrubs (e.g. Rhigozum trichotomum, Acacia mellifera) and in the long term lead to heavily shrub-encroached areas. Animal diversity in savannas is decisively affected by these changes: with decreasing structural diversity, animal diversity also declines. While the influence of overgrazing on vegetation is relatively well studied, information on its influence on animal diversity, especially on reptiles, is rather scarce. In addition, very little is known about its influence on the population dynamics (e.g. behavioural adaptations, space use, survival rate, mortality rate) of individual reptile species. The aim of my doctoral thesis is to investigate the influence of overgrazing by commercial farming on the reptile community and on different aspects of the population dynamics of the lizard Pedioplanis lineoocellata lineoocellata. With regard to specific conservation measures it is important to understand, on the one hand, what effects overgrazing has on the entire reptile community and, on the other hand, how key factors of population dynamics are affected. Both lead to a better understanding of the response of reptiles to habitat degradation. The results of my doctoral thesis clearly show a negative influence of overgrazing and the resulting habitat degradation on (1) the entire reptile community and (2) individual aspects of the population dynamics of P. lineoocellata. Part 1 describes the significant reduction of reptile diversity and abundance in degraded habitats. The second part shows that P. lineoocellata is able to adapt its behaviour to the deteriorated living conditions: the species moves both more frequently and for longer periods of time and thereby covers greater distances. In addition, the species enlarged its home range (Part 3). 
The final part describes the negative influence of overgrazing on the population dynamics of P. lineoocellata: in degraded habitats the population size of adult and juvenile lizards decreases, survival and birth rates decline, and the predation risk additionally increases. This is caused, on the one hand, by the likewise reduced food availability (arthropods) in degraded areas, which results in a declining population size and reduced individual fitness, reflected in lower survival and birth rates. On the other hand, the reduction in vegetation cover and the decline of perennial grasses have a negative effect: as a consequence, niches and microhabitats are lost and the opportunities for reptiles to thermoregulate are reduced. Furthermore, this loss of perennial grass cover also results in an increased predation risk. In summary, not only trees and shrubs play an important role for diversity, as shown in other studies, but perennial grasses are also important for faunal diversity. It was further shown that habitat degradation affects not only the population as a whole but also the behaviour and population parameters of individual species. Moreover, reptiles are able to respond to deteriorated environmental conditions through behavioural flexibility.}, language = {en} } @phdthesis{Washuettl2004, author = {Wash{\"u}ttl, Albert}, title = {EI Eridani and the art of doppler imaging : a long-term study}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001714}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Understanding magnetically induced activity on stars and the underlying dynamo processes is of fundamental importance for understanding the formation and evolution of stars and of life in the universe. Visible manifestations of this stellar activity include starspots, which serve as indicators of the underlying magnetic field. Such spots cannot be observed directly on stars other than the Sun, especially since resolving the surface of even the nearest stars is impossible with today's technical means. An indirect reconstruction method called 'Doppler imaging', however, allows the temperature distribution on the stellar surface to be inferred. For this work, eleven years of continuous spectroscopic observations of the active binary star EI Eridani were used to produce a total of 34 Doppler maps. On this basis, an attempt is made to establish a foundation for the analysis of the two-dimensional information content of these maps. Three surface-map parameters are proposed: mean temperature, separately for different stellar latitude bands; relative spot coverage; and, for the purpose of evaluating the structural temperature distribution, the longitudinal and latitudinal location function of starspot concentration. The resulting values clearly show that there is no temporal connection with the photometric activity cycle. The morphology of the spot distribution remains essentially constant over the complete observing period. 
In contrast to the Sun, there is thus, within the observed time span and the available accuracy, no spot cycle on the active star EI Eri. In addition, a detailed study of the stellar parameters of EI Eri and a preliminary estimate of the differential rotation on EI Eri were carried out; the latter appears to show an anti-solar orientation, i.e. the pole rotates faster than the equator.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Yongbo}, title = {Late glacial to Holocene climate and vegetation changes on the Tibetan Plateau inferred from fossil pollen records in lacustrine sediments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-63155}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The past climate in central Asia, and especially on the Tibetan Plateau (TP), is of great importance for an understanding of global climate processes and for predicting the future climate. As a major influence on the climate in this region, the Asian Summer Monsoon (ASM) and its evolutionary history are of vital importance for accurate predictions. However, neither the evolutionary pattern of the summer monsoon nor the driving mechanisms behind it are yet clearly understood. For this research, I first synthesized previously published Late Glacial to Holocene climatic records from monsoonal central Asia in order to extract the general climate signals and the associated summer monsoon intensities. New climate and vegetation sequences were then established using improved quantitative methods, focusing on fossil pollen records recovered from Tibetan lakes and also incorporating new modern datasets. The pollen-vegetation and vegetation-climate relationships on the TP were also evaluated in order to achieve a better understanding of fossil pollen records. The synthesis of previously published moisture-related palaeoclimate records in monsoonal central Asia revealed generally different temporal patterns for the two monsoonal subsystems, i.e. the Indian Summer Monsoon (ISM) and East Asian Summer Monsoon (EASM). The ISM appears to have experienced maximum wet conditions during the early Holocene, while many records from the area affected by the EASM indicate relatively dry conditions at that time, particularly in north-central China where the maximum moisture levels occurred during the middle Holocene. A detailed consideration of possible driving factors affecting the summer monsoon, including summer solar insolation and sea surface temperatures, revealed that the ISM was primarily driven by variations in northern hemisphere solar insolation, and that the EASM may have been constrained by the ISM resulting in asynchronous patterns of evolution for these two subsystems. This hypothesis is further supported by modern monsoon indices estimated using the NCEP/NCAR Reanalysis data from the last 50 years, which indicate a significant negative correlation between the two summer monsoon subsystems. By analogy with the early Holocene, intensification of the ISM during coming decades could lead to increased aridification elsewhere as a result of the asynchronous nature of the monsoon subsystems, as can already be observed in the meteorological data from the last 15 years. A quantitative climate reconstruction using fossil pollen records was achieved through analysis of a sediment core recovered from Lake Donggi Cona (in the north-eastern part of the TP) which has been dated back to the Last Glacial Maximum (LGM). 
A new data-set of modern pollen collected from large lakes in arid to semi-arid regions of central Asia is also presented herein. The concept of "pollen source area" was introduced to modern climate calibration based on pollen from large lakes, and was applied to the fossil pollen sequence from Lake Donggi Cona. Extremely dry conditions were found to have dominated the LGM, and a subsequent gradually increasing trend in moisture during the Late Glacial period was terminated by an abrupt reversion to a dry phase that lasted for about 1000 years and coincided with the first Heinrich Event of the northern Atlantic region. Subsequent periods corresponding to the warm B{\o}lling-Aller{\o}d period and the Younger Dryas cold event were followed by moist conditions during the early Holocene, with annual precipitation of up to about 400 mm. A slightly drier trend after 9 cal ka BP was then followed by a second wet phase during the middle Holocene that lasted until 4.5 cal ka BP. Relatively steady conditions with only slight fluctuations then dominated the late Holocene, resulting in the present climatic conditions. In order to investigate the relationship between vegetation and climate, temporal variations in the possible driving factors for vegetation change on the northern TP were examined using a high resolution late Holocene pollen record from Lake Kusai. Moving-window Redundancy Analyses (RDAs) were used to evaluate the correlations between pollen assemblages and individual sedimentary proxies. These analyses have revealed frequent fluctuations in the relative abundances of alpine steppe and alpine desert components, and in particular a decrease in the total vegetation cover at around 1500 cal a BP. The climate was found to have had an important influence on vegetation changes when conditions were relatively wet and stable. However, after the 1500 cal a BP threshold in vegetation cover was crossed the vegetation appears to have been affected more by extreme events such as dust storms or fluvial erosion than by the general climatic trends. In addition, pollen spectra over the last 600 years have been revealed by Procrustes analysis to be significantly different from those recovered from older samples, which is attributed to an increased human impact that resulted in unprecedented changes to the composition of the vegetation. Theoretical models that have been developed and widely applied to the European area (i.e. the Extended R-Value (ERV) model and the Regional Estimates of Vegetation Abundance from Large Sites (REVEALS) model) have been applied to the high alpine TP ecosystems in order to investigate the pollen-vegetation relationships, as well as for quantitative reconstructions of vegetation abundance. The modern pollen-vegetation relationships for four common pollen species on the TP have been investigated using Poaceae as the reference taxa. The ERV Submodel 2 yielded relatively high PPEs for the steppe and desert taxa (Artemisia Chenopodiaceae), and low PPEs for the Cyperaceae that are characteristic of the alpine Kobresia meadows. The plant abundances on the central and north-eastern TP were quantified by applying these PPEs to four post-Late Glacial fossil pollen sequences. The reconstructed vegetation assemblages for the four pollen sequences always yielded smaller compositional species turnovers than suggested by the pollen spectra, indicating that the strength of the previously-reported vegetation changes may therefore have been overestimated. 
In summary, the key findings of this thesis are that (a) the two ASM subsystems show asynchronous patterns during both the Holocene and modern time periods, (b) fossil pollen records from large lakes reflect regional signals for which the pollen source areas need to be taken into account, (c) climate is not always the main driver for vegetation change, and (d) previously reported vegetation changes on the TP may have been overestimated because they ignored inter-species variations in pollen productivity.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Xia}, title = {Reef ecosystem recovery following the Middle Permian (Capitanian) mass extinction}, doi = {10.25932/publishup-48750}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487502}, school = {Universit{\"a}t Potsdam}, pages = {XI, 144}, year = {2020}, abstract = {To anticipate the future of present-day reef ecosystem turnover under environmental stresses such as global warming and ocean acidification, analogue studies from the geologic past are needed. As a critical time of reef ecosystem innovation, the Permian-Triassic transition witnessed the most severe demise of Phanerozoic reef builders, and the establishment of modern-style symbiotic relationships within the reef-building organisms. Being the initial stage of this transition, the Middle Permian (Capitanian) mass extinction caused a reef eclipse in the early Late Permian, which led to a gap in the understanding of the post-extinction Wuchiapingian reef ecosystem, shortly before the radiation of Changhsingian reefs. Here, this thesis presents detailed biostratigraphic, sedimentological, and palaeoecological studies of the Wuchiapingian reef recovery following the Middle Permian (Capitanian) mass extinction, on the only recorded Wuchiapingian reef setting, outcropping in South China at the Tieqiao section. Conodont biostratigraphic zonations were revised from the Early Permian Artinskian to the Late Permian Wuchiapingian in the Tieqiao section. Twenty main and seven subordinate conodont zones are determined at the Tieqiao section, including two conodont zones below and above the Tieqiao reef complex. The age of the Tieqiao reef was constrained to the early to middle Wuchiapingian. After constraining the reef age, detailed two-dimensional outcrop mapping combined with a lithofacies study was carried out on the Wuchiapingian Tieqiao Section to investigate the reef growth pattern stratigraphically as well as the lateral changes of reef geometry on the outcrop scale. Semi-quantitative studies of the reef-building organisms were used to find out their evolution pattern within the reef recovery. Six reef growth cycles were determined within six transgressive-regressive cycles in the Tieqiao section. The reefs developed within the upper part of each regressive phase and were dominated by different biotas. The timing of initial reef recovery after the Middle Permian (Capitanian) mass extinction was updated to the Clarkina leveni conodont zone, which is earlier than previously understood. Metazoans such as sponges were not the major components of the Wuchiapingian reefs until the 5th and 6th cycles. Thus, the recovery of the metazoan reef ecosystem after the Middle Permian (Capitanian) mass extinction was clearly delayed. 
In addition, although the importance of metazoan reef builders such as sponges did increase following the recovery process, encrusting organisms such as Archaeolithoporella and Tubiphytes, combined with microbial carbonate precipitation, still played significant roles in the reef-building process and reef recovery after the mass extinction. Based on the results from outcrop mapping and sedimentological studies, a quantitative composition analysis of the Tieqiao reef complex was applied to selected thin sections to further investigate the functioning of reef-building components and the reef evolution after the Middle Permian (Capitanian) mass extinction. Data sets of skeletal grains and whole rock components were analyzed. The results show eleven biocommunity clusters/eight rock composition clusters dominated by different skeletal grains/rock components. Sponges, Archaeolithoporella and Tubiphytes were the most ecologically important components within the Wuchiapingian Tieqiao reef, while the clotted micrites and syndepositional cements are the additional important rock components for reef cores. The sponges were important throughout the whole reef recovery. Tubiphytes were broadly distributed in different environments and played a key role in the initial reef communities. Archaeolithoporella concentrated in the shallower part of reef cycles (i.e., the upper part of the reef core) and was functionally significant for the enlargement of reef volume. In general, the reef recovery after the Middle Permian (Capitanian) mass extinction has some similarities with the reef recovery following the end-Permian mass extinction. It shows a delayed recovery of metazoan reefs and a stepwise recovery pattern that was controlled by both ecological and environmental factors. The importance of encrusting organisms and microbial carbonates is also similar to most of the other post-extinction reef ecosystems. These findings can help to extend our understanding of reef ecosystem evolution under environmental perturbation or stresses.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Weishi}, title = {Influence of river reconstruction at a bank filtration site}, doi = {10.25932/publishup-49023}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490234}, school = {Universit{\"a}t Potsdam}, pages = {IIV, 120}, year = {2020}, abstract = {Bank filtration is an effective water treatment technique and is widely adopted in Europe along major rivers. It is the process where surface water penetrates the riverbed, flows through the aquifer, and then is extracted by near-bank production wells. As the water flows along the subsurface flow passage, its quality can be improved by a series of beneficial processes. Long-term riverbank filtration also produces colmation layers on the riverbed. The colmation layer may act as a bioactive zone that is governed by biochemical and physical processes owing to its enrichment of microbes and organic matter. Its low permeability may strongly limit the surface water infiltration and further lead to a decreasing recoverable ratio of the production wells. The removal of the colmation layer is therefore a trade-off between treatment capacity and treatment efficiency. The goal of this Ph.D. thesis is to focus on the temporal and spatial change of the water quality and quantity along the flow path of a hydrogeologically heterogeneous riverbank filtration site adjacent to an artificially reconstructed (bottom excavation and bank reconstruction) canal in Potsdam, Germany. 
To quantify the changes in infiltration rate, travel time distribution, and the thermal field brought about by the canal reconstruction, a three-dimensional flow and heat transport model was created. This model covers two scenarios: 1) 'with' canal reconstruction and 2) 'without' canal reconstruction. Overall, the model calibration results of both water heads and temperatures matched those observed in the field study. In comparison to the model without reconstruction, the reconstruction model led to more water being infiltrated into the aquifer on that section, on average 521 m3/d, which corresponded to around 9\% of the total pumping rate. The subsurface travel-time distribution substantially shifted towards shorter travel times. Flow paths with travel times <200 days increased by ~10\% and those with <300 days by 15\%. Furthermore, the thermal distribution in the aquifer showed that the seasonal variation in the scenario with reconstruction reaches deeper and laterally propagates further. By scatter plotting of δ18O versus δ2H, the infiltrated river water could be differentiated from water flowing in the deep aquifer, which may contain remnant landside groundwater from further north. In contrast, the increase of the river water contribution due to decolmation could be shown by a Piper plot. Geological heterogeneity caused a substantial spatial difference in redox zonation among different flow paths, both horizontally and vertically. A Wilcoxon rank test showed that the reconstruction changed the redox potential differently across the observation wells. However, given the small absolute concentration levels, the change is also relatively minor. The treatment efficiency for both organic matter and inorganic matter is consistent after the reconstruction, except for ammonium. The inconsistent results for ammonium could be explained by changes in the Cation Exchange Capacity (CEC) in the newly paved riverbed. Because the bed is new, it was not yet capable of retaining the newly produced ammonium by sorption, which led to the breakthrough of the ammonium plume. It is estimated that the peak of the ammonium plume will reach the most distant observation well before February 2024, while the peak concentration could be further dampened by sorption and diluted by the subsequent low-ammonium flow. The consistent DOC and SUVA levels suggest that there was no clear preference for organic matter removal along the flow path.}, language = {en} } @phdthesis{Wang2016, author = {Wang, Victor-C.}, title = {Injury and illness risk factors for elite athletes in training environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100925}, school = {Universit{\"a}t Potsdam}, pages = {viii, 84, ix}, year = {2016}, abstract = {Since 1998, elite athletes' sport injuries have been monitored at single-sport events, which led to the development of the first comprehensive injury surveillance system for the multi-sport Olympic Games in 2008. However, injuries and illnesses occurring in training phases have not been systematically studied, owing to their multi-faceted, potentially interacting risk factors. The present thesis aims to address the feasibility of establishing a validated measure of injury/illness, training environment and psychosocial risk factors by creating an evaluation tool, namely the risk of injury questionnaire (Risk-IQ) for elite athletes, which is based on the content of the preparticipation evaluation (PPE) and periodic health exam (PHE) recommended in the 2009 IOC consensus statement. 
A total of 335 top-level athletes and 88 medical care providers from Germany and Taiwan participated in two "cross-sectional plus longitudinal" Risk-IQ and MCPQ surveys, respectively. Four categories of injury/illness-related risk factor questions were asked in the Risk-IQ for athletes, while injury-risk and psychology-related questions were asked in the MCPQ for the MCP cohorts. Answers were quantified scale-wise/subscale-wise before being analyzed with other factors/scales. In addition, adapted variables such as sport format were introduced for different analysis tasks. Validated with two-way translation and test-retest reliabilities, the Risk-IQ proved to be of a good standard, which was further confirmed by the analyzed results of the official surveys in both Germany and Taiwan. The results of the Risk-IQ revealed that elite athletes' accumulated total injuries, in general, were multi-factor dependent, with influencing factors including but not limited to background experiences, medical history, PHE and PPE medical resources, as well as stress from life events. Injuries to different body parts were sport-format and location specific. Additionally, the medical support for PPE and PHE indicated significant differences between Germany and Taiwan. The results of the present thesis confirmed that it is feasible to construct a comprehensive evaluation instrument for risk factor analysis of injuries/illnesses occurring in heterogeneous elite athlete cohorts during their non-competition periods. On average, and with many moderators involved, German elite athletes have superior medical care support yet suffered more severe injuries than their Taiwanese counterparts. Opinions on injury-related psychological issues differed among the various MCP groups irrespective of nationality. In general, influencing factors and interactions among relevant factors existed in both studies, which implies that further investigation with multiple regression analysis is needed for a better understanding.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distribution of the web users drive the urgent need for web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs across various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" shows that our focus is on locating and measuring the differences and changes among usage patterns. 
This thesis presents methodologies for discovering usage interest on three kinds of web sites: public information portal sites, e-learning sites that provide various kinds of streaming lectures, and social sites that host public discussions on IT issues. On different sites, we concentrate on different issues related to mining usage interest. The educational information portal sites were the first implementation scenarios for discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild the individual behaviors from the usage history. We give a systematic study on how to rebuild individual behaviors. In addition, this thesis presents a new strategy for building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure displays the distance between the destinations from the usage side and the expectations from the design side. Moreover, we study the problem of tracking the changes of usage patterns over their life cycles. The changes are described from the internal side, integrating conceptual and structural features, and from the external side for the physical features; they are further described from the local side, measuring the difference between two time spans, and from the global side, showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. E-learning sites provide teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest in streaming lectures, such as RealMedia, MP4 and Flash clips. Compared to the information portal site, the usage of streaming lectures encapsulates variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering six questions, which cover finding the relations between pieces of lectures and the preference among different forms of lectures. We focus on detecting the changes of learning interest in the same course across different semesters. The differences in content and structure between two courses underlie the changes in the learning interest. We give an algorithm for measuring the difference in learning interest, integrated with a similarity comparison between courses. A search engine, TASK-Moniminer, is created to help teachers query the learning interest in their streaming lectures on the tele-TASK site. A social site acts as an online community that attracts web users to discuss common topics and share information of interest. Compared to the public information portal site and the e-learning web site, the rich interactions among users and web content bring, on the one hand, a wider range of content quality and, on the other hand, more possibilities to express and model usage interest. We propose a framework for finding and recommending high-reputation articles in a social site. We observed that the reputation is classified into global and local categories, and that the quality of articles with high reputation is related to their content features. 
Based on these observations, our framework is implemented by firstly finding the articles with global or local reputation, secondly clustering the articles based on their content relations, and then selecting and recommending articles from each cluster based on their reputation ranks.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Jingwen}, title = {Electret properties of polypropylene with surface chemical modification and crystalline reconstruction}, doi = {10.25932/publishup-47027}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470271}, school = {Universit{\"a}t Potsdam}, pages = {vi, 121}, year = {2020}, abstract = {As one of the most-produced commodity polymers, polypropylene draws considerable scientific and commercial interest as an electret material. In the present thesis, the influence of the surface chemical modification and crystalline reconstruction on the electret properties of the polypropylene thin films will be discussed. The chemical treatment with orthophosphoric acid can significantly improve the surface charge stability of the polypropylene electrets by introducing phosphorus- and oxygen-containing structures onto the modified surface. The thermally stimulated discharge measurement and charge profiling by means of piezoelectrically generated pressure steps are used to investigate the electret behaviour. It is concluded that deep traps of limited number density are created during the treatment with inorganic chemicals. Hence, the improvement dramatically decreases when the surface-charge density is substantially higher than ±1.2×10^(-3) C·m^(-2). The newly formed traps also show a higher trapping energy for negative charges. The energetic distributions of the traps in the non-treated and chemically treated samples offer an insight regarding the surface and foreign-chemical dominance on the charge storage and transport in the polypropylene electrets. Additionally, different electret properties are observed on the polypropylene films with the spherulitic and transcrystalline structures. This indicates the dependence of the charge storage and transport on the crystallite and molecular orientations in the crystalline phase. In general, a more diverse crystalline growth in the spherulitic samples can result in a more complex energetic trap distribution, in comparison to that in a transcrystalline polypropylene. The double-layer transcrystalline polypropylene film with a crystalline interface in the middle can be obtained by crystallising the film in contact with rough moulding surfaces on both sides. A layer of heterocharges appears on each side of the interface in the double-layer transcrystalline polypropylene electrets after the thermal poling. However, there is no charge captured within the transcrystalline layers. The phenomenon reveals the importance of the crystalline interface in terms of creating traps with the higher activation energy in polypropylene. The present studies highlight the fact that even slight variations in the polypropylene film may lead to dramatic differences in its electret properties.}, language = {en} } @phdthesis{Wallenta2015, author = {Wallenta, Daniel}, title = {Sequences of compact curvature}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-87489}, school = {Universit{\"a}t Potsdam}, pages = {viii, 73}, year = {2015}, abstract = {By perturbing the differential of a (cochain-)complex by "small" operators, one obtains what is referred to as quasicomplexes, i.e. 
a sequence whose curvature is not equal to zero in general. In this situation the cohomology is no longer defined. Note that it depends on the structure of the underlying spaces whether or not an operator is "small." This leads to a magical mix of perturbation and regularisation theory. In the general setting of Hilbert spaces, compact operators are "small." In order to develop this theory, many elements of diverse mathematical disciplines, such as functional analysis, differential geometry, partial differential equations, homological algebra and topology, have to be combined. All essential basics are summarised in the first chapter of this thesis. This contains classical elements of index theory, such as Fredholm operators, elliptic pseudodifferential operators and characteristic classes. Moreover we study the de Rham complex and introduce Sobolev spaces of arbitrary order as well as the concept of operator ideals. In the second chapter, the abstract theory of (Fredholm) quasicomplexes of Hilbert spaces will be developed. From the very beginning we will consider quasicomplexes with curvature in an ideal class. We introduce the Euler characteristic, the cone of a quasiendomorphism and the Lefschetz number. In particular, we generalise Euler's identity, which will allow us to develop the Lefschetz theory on nonseparable Hilbert spaces. Finally, in the third chapter the abstract theory will be applied to elliptic quasicomplexes with pseudodifferential operators of arbitrary order. We will show that the Atiyah-Singer index formula holds true for those objects and, as an example, we will compute the Euler characteristic of the connection quasicomplex. In addition to this we introduce geometric quasiendomorphisms and prove a generalisation of the Lefschetz fixed point theorem of Atiyah and Bott.}, language = {en} } @phdthesis{Walczak2019, author = {Walczak, Ralf}, title = {Molecular design of nitrogen-doped nanoporous noble carbon materials for gas adsorption}, doi = {10.25932/publishup-43524}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435241}, school = {Universit{\"a}t Potsdam}, pages = {II, 155}, year = {2019}, abstract = {In modern societies, a steadily increasing energy demand leads to the growing consumption of fossil fuels such as coal, oil, and gas. The combustion of these carbon-based fuels inevitably releases greenhouse gases, above all CO2. CO2 capture directly at the combustion plants or from the air, together with the regulation of CO2-producing energy sectors (e.g. cooling systems), can reduce CO2 emissions. However, particularly for CO2 capture, the low CO2 concentrations and the uptake of competing gases lead to low CO2 capacities and selectivities. The interplay of the guest molecules with porous materials is essential here. Porous carbon materials possess attractive properties, including electrical conductivity, tunable porosity, as well as chemical and thermal stability. However, the low polarizability of these materials results in a low affinity towards polar molecules (e.g. CO2, H2O, or NH3). This affinity can be increased by the incorporation of nitrogen. Such materials are often "nobler" than pure carbons, meaning that they tend to act as oxidants rather than being oxidized themselves. 
The challenge lies in incorporating a high and uniformly distributed nitrogen content into the carbon framework. The objective of this dissertation is to explore new synthesis routes for nitrogen-doped noble carbon materials and to develop a fundamental understanding of their application in gas adsorption and electrochemical energy storage. A template-free synthesis of nitrogen-rich, noble, and microporous carbon materials was developed by direct condensation of a nitrogen-rich organic molecule serving as the precursor. This yielded materials with high adsorption capacities for H2O and CO2 at low concentrations and moderate CO2/N2 selectivities. To improve the CO2/N2 selectivities, the molecular structure and porosity of the carbon materials were controlled by adjusting the degree of condensation. These materials act as a molecular sieve for CO2 over N2, which leads to outstanding CO2/N2 selectivities. The ultrahydrophilic character of the pore surfaces and the small micropores of these carbon materials enable fundamental investigations of the interactions with molecules that are more polar than CO2, namely H2O and NH3. A further series of nitrogen-doped carbon materials was synthesized by condensation of a conjugated microporous polymer, and its structural features were investigated as an anode material for sodium-ion batteries. This dissertation contributes to the research on nitrogen-doped carbon materials and their interactions with different guest molecules.}, language = {en} } @phdthesis{Waha2012, author = {Waha, Katharina}, title = {Climate change impacts on agricultural vegetation in sub-Saharan Africa}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64717}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Agriculture is one of the most important human activities, providing food and other agricultural goods for seven billion people around the world, and is of special importance in sub-Saharan Africa. The majority of people depend on the agricultural sector for their livelihoods and will suffer from negative climate change impacts on agriculture towards the middle and end of the 21st century, even more so if weak governments, economic crises or violent conflicts endanger the countries' food security. The impact of temperature increases and changing precipitation patterns on agricultural vegetation motivated this thesis in the first place. Analyzing the potential for reducing negative climate change impacts by adapting crop management to the changing climate is a second objective of the thesis. As a precondition for simulating climate change impacts on agricultural crops with a global crop model, the timing of sowing in the tropics was first improved and validated, as this is an important factor determining the length and timing of the crops' development phases, the occurrence of water stress and the final crop yield. Crop yields are projected to decline in most regions, which is evident from the results of this thesis, but the uncertainties that exist in climate projections and in the efficiency of adaptation options due to political, economic or institutional obstacles have to be considered. 
The effect of temperature increases and changing precipitation patterns on crop yields can be analyzed separately and varies in space across the continent. Southern Africa is clearly the region most susceptible to climate change, especially to precipitation changes. The Sahel north of 13° N and parts of Eastern Africa with short growing seasons below 120 days and limited wet season precipitation of less than 500 mm are also vulnerable to precipitation changes, while in most other parts of East and Central Africa, in contrast, the effect of temperature increase on crops outweighs the precipitation effect and is most pronounced in a band stretching from Angola to Ethiopia in the 2060s. The results of this thesis confirm the findings from previous studies on the magnitude of climate change impacts on crops in sub-Saharan Africa but, beyond that, help to understand the drivers of these changes and the potential of certain management strategies for adaptation in more detail. Crop yield changes depend on the initial growing conditions, on the magnitude of climate change, and on the crop, cropping system and adaptive capacity of African farmers, which only now becomes evident from this comprehensive study for sub-Saharan Africa. Furthermore, this study improves the representation of tropical cropping systems in a global crop model and considers the major food crops cultivated in sub-Saharan Africa and climate change impacts throughout the continent.}, language = {en} } @phdthesis{VasquezParra2007, author = {V{\´a}squez Parra, M{\´o}nica Fernanda}, title = {Mafic magmatism in the Eastern Cordillera and Putumayo Basin, Colombia : causes and consequences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13183}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {The Eastern Cordillera of Colombia is mainly composed of sedimentary rocks deposited since early Mesozoic times. Magmatic rocks are scarce. They are represented only by a few locally restricted occurrences of dykes and sills of mafic composition, presumably emplaced in the Cretaceous, and of volcanic rocks of Neogene age. This work is focused on the study of the Cretaceous magmatism with the intention of understanding the processes causing the genesis of these rocks and their significance in the regional tectonic setting of the Northern Andes. The magmatic rocks cut the Cretaceous sedimentary succession of black shales and marlstones that crop out on both flanks of the Eastern Cordillera. The studied rocks were classified as gabbros (C{\´a}ceres, Pacho, Rodrigoque), tonalites (C{\´a}ceres, La Corona), diorites and syenodiorites (La Corona), pyroxene-hornblende gabbros (Pacho), and pyroxene-hornblendites (Pajarito). The gabbroic samples are mainly composed of plagioclase, clinopyroxene, and/or green to brown hornblende, whereas the tonalitic rocks are mainly composed of plagioclase and quartz. The samples are highly variable in crystal size, from fine- to coarse-grained. Accessory minerals such as biotite, titanite and zircon are present. Some samples are characterized by moderate to strong alteration, and show the presence of epidote, actinolite and chlorite. Major and trace element compositions of the rocks as well as the rock-forming minerals show significant differences in the geochemical and petrological characteristics for the different localities, suggesting that this magmatism does not result from a single melting process. 
The wide compositional spectrum of trace elements in the intrusions is characteristic of different degrees of mantle melting and enrichment of incompatible elements. MORB- and OIB-like compositions suggest at least two different sources of magma with tholeiitic and alkaline affinity, respectively. Evidence of slab-derived fluids can be recognized in the western part of the basin, reflected in higher Ba/Nb and Sr/P ratios and also in the radiogenic Sr isotope ratios, which is possibly a consequence of metasomatism in the mantle due to processes related to the presence of a previously subducted slab. The trace element patterns evidence an extensional setting in the Cretaceous basin producing a continental rift, with continental crust being stretched until oceanic crust was generated in the last stages of this extension. Electron microprobe analyses (EMPA) of the major elements and synchrotron radiation micro-X-ray fluorescence (μ-SRXRF) analyses of the trace element composition of the early crystallized minerals of the intrusions (clinopyroxenes and amphiboles) reflect the same dual character that has been found in the bulk-rock analyses. Despite the observed alteration of the rocks, the mineral composition shows evidence for an enriched and a relatively depleted magma source. Even the normalization of the trace element concentrations of clinopyroxenes and amphiboles to the whole rock nearly follows the pattern predicted by published partition coefficients, suggesting that the alteration did not change the original trace element compositions of the investigated minerals. Sr-Nd-Pb isotope data reveal a large isotopic variation but still suggest an initial origin of the magmas in the mantle. Samples have moderate to highly radiogenic compositions of 143Nd/144Nd and high 87Sr/86Sr ratios and follow a trend towards enriched mantle compositions, like the local South American Paleozoic crust. The melts experienced variable degrees of contamination by sediments, crust, and seawater. The age-corrected Pb isotope ratios show two separate groups of samples. This suggests that the chemical composition of the mantle below the Northern Andes has been modified by the interaction with other components, resulting in a heterogeneous combination of materials of diverse origins. Although previous K/Ar age dating has shown that the magmatism took place in the Cretaceous, the high error of the analyses and the altered nature of the investigated minerals precluded reliable interpretations. In the present work, 40Ar/39Ar dating was carried out. The results show a prolonged history of magmatism during the Cretaceous over more than 60 Ma, from ~136 to ~74 Ma (Hauterivian to Campanian). Pre-Cretaceous rifting phases occurred in the Triassic-Jurassic for the western part of the basin and in the Paleozoic for the eastern part. Those previous rifting phases are decisive mechanisms controlling the localization and composition of the Cretaceous magmatism. Therefore, it is the structural position and not the age of the intrusions which preconditions the kind of magmatism and the degree of melting. The divergences in ages are a consequence of the segmentation of the basin into several sub-basins whose stretching, thermal evolution and subsidence rates evolved independently. The first hypothesis formulated at the beginning of this investigation was that the Cretaceous gabbroic intrusions identified in northern Ecuador could be correlated with the intrusions described in the Eastern Cordillera. 
The mafic occurrences should mark the location of the most strongly subsiding parts of the large Cretaceous basin in northern South America. For this reason, the gabbroic intrusions cutting the Cretaceous succession in the Putumayo Basin, southern Colombia, were investigated. The results of the studies were quite unexpected. The petrologic and geochemical character of the magmatic rocks indicates subduction-related magmatism. K/Ar dating of amphibole yields a Late Miocene to Pliocene age (6.1 ± 0.7 Ma) for the igneous event in the basin. Although there is no correlation between this magmatic event and the Cretaceous magmatic event, the data obtained have significant tectonic and economic implications. The emplacement of the Neogene gabbroic rocks coincides with the late Miocene/Pliocene Andean orogenic uplift as well as with a significant pulse of hydrocarbon generation and expulsion.}, language = {en} } @phdthesis{Vu2012, author = {Vu, Thi Thanh Van}, title = {Local government on the way to good governance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93943}, school = {Universit{\"a}t Potsdam}, pages = {vii, 254}, year = {2012}, abstract = {Bad governance causes economic, social, developmental and environmental problems in many developing countries. Developing countries have adopted a number of reforms that have assisted in achieving good governance. The success of governance reform depends on the starting point of each country - what institutional arrangements exist at the outset and who the people implementing reforms within the existing institutional framework are. This dissertation focuses on how formal institutions (laws and regulations) and informal institutions (culture, habit and conception) impact on good governance. Three characteristics central to good governance - transparency, participation and accountability - are studied in the research. A number of key findings emerged: Governance in Hanoi and Berlin represents the two extremes of the scale; while governance in Berlin is almost at the top of the scale, governance in Hanoi is at the bottom. Good governance in Hanoi is still far from achieved. In Berlin, information about public policies, administrative services and public finance is available, reliable and understandable. People do not encounter any problems accessing public information. In Hanoi, however, public information is not easy to access. There are big differences between Hanoi and Berlin in the three forms of participation. While voting in Hanoi to elect local deputies is formal and forced, elections in Berlin are fair and free. The candidates in local elections in Berlin come from different parties, whereas the candidacy of local deputies in Hanoi is thoroughly controlled by the Fatherland Front. Even though the turnout of voters in local deputy elections is close to 90 percent in Hanoi, the legitimacy of both the elections and the process of representation is non-existent because the local deputy candidates are decided by the Communist Party. The involvement of people in solving local problems is encouraged by the government in Berlin. The different initiatives include citizenry budget, citizen activity, citizen initiatives, etc. Individual citizens are free to participate either individually or through an association. With transparency and participation lacking, the quality of public service in Hanoi is poor. Citizens seldom get their services on time as required by the regulations. 
Citizens who want to receive public services can bribe officials directly, use the power of relationships, or pay a third person - the mediator ("C{\`o}" in Vietnamese). In contrast, public service delivery in Berlin follows the customer-orientated principle. The quality of service is high in relation to time and cost. Paying speed money, bribery and using relationships to gain preferential public service do not exist in Berlin. From the examples of Berlin and Hanoi, it is clear how transparency, participation and accountability are interconnected and influence each other. Without free and fair elections as well as the participation of non-governmental organisations, civil organisations, and the media in political decision-making and public actions, it is hard to hold the Hanoi local government accountable. The key differences in formal institutions (regulative and cognitive) between Berlin and Hanoi reflect the three main principles: rule of law vs. rule by law, pluralism vs. Party monopoly in politics, and social market economy vs. market economy with socialist orientation. In Berlin, the logic of appropriateness and the codes of conduct are respect for laws, respect for individual freedom and ideas, and awareness of community development. People in Berlin take for granted that public services are delivered to them fairly. Ideas such as using money or relationships to shorten public administrative procedures do not exist in the minds of either public officials or citizens. In Hanoi, under a weak formal framework of good governance, new values and norms (prosperity, achievement) generated in the economic transition interact with the habits of the centrally-planned economy (lying, dependence, passivity) and with traditional values (hierarchy, harmony, family, collectivism), influencing the behaviours of those involved. In Hanoi, "doing the right thing", such as compliance with the law, does not become "the way it is". The unintended consequence of the deliberate reform actions of the Party is the prevalence of corruption. The socialist orientation seems not to have been achieved, as the gap between the rich and the poor has widened. Good governance is not achievable if citizens and officials are concerned only with their self-interest. State and society depend on each other. Theoretically, to achieve good governance in Hanoi, institutions (formal and informal) that are able to create good citizens, officials and deputies should be generated. Good citizens are good by habit rather than by nature. The rule of law principle is necessary for the professional performance of local administrations and People's Councils. When the rule of law is applied consistently, the room for informal institutions to function will be reduced. Promoting good governance in Hanoi is dependent on the need and desire to change the government and people themselves. Good governance in Berlin can be seen to be the result of the efforts of the local government and citizens after a long period of development and continuous adjustment. Institutional transformation is always a long and complicated process because the change in formal regulations, as well as in the way they are implemented, may meet strong resistance from established practice. This study has attempted to point out the weaknesses of the institutions of Hanoi and has identified factors affecting future development towards good governance. 
But it is not easy to determine how long it will take to change the institutional setting of Hanoi in order to achieve good governance.}, language = {en} } @phdthesis{Vu2022, author = {Vu, Nils Leif}, title = {A task-based parallel elliptic solver for numerical relativity with discontinuous Galerkin methods}, doi = {10.25932/publishup-56226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-562265}, school = {Universit{\"a}t Potsdam}, pages = {172}, year = {2022}, abstract = {Elliptic partial differential equations are ubiquitous in physics. In numerical relativity---the study of computational solutions to the Einstein field equations of general relativity---elliptic equations govern the initial data that seed every simulation of merging black holes and neutron stars. In the quest to produce detailed numerical simulations of these most cataclysmic astrophysical events in our Universe, numerical relativists resort to the vast computing power offered by current and future supercomputers. To leverage these computational resources, numerical codes for the time evolution of general-relativistic initial value problems are being developed with a renewed focus on parallelization and computational efficiency. Their capability to solve elliptic problems for accurate initial data must keep pace with the increasing detail of the simulations, but elliptic problems are traditionally hard to parallelize effectively. In this thesis, I develop new numerical methods to solve elliptic partial differential equations on computing clusters, with a focus on initial data for orbiting black holes and neutron stars. I develop a discontinuous Galerkin scheme for a wide range of elliptic equations, and a stack of task-based parallel algorithms for their iterative solution. The resulting multigrid-Schwarz preconditioned Newton-Krylov elliptic solver proves capable of parallelizing over 200 million degrees of freedom to at least a few thousand cores, and already solves initial data for a black hole binary about ten times faster than the numerical relativity code SpEC. I also demonstrate the applicability of the new elliptic solver across physical disciplines, simulating the thermal noise in thin mirror coatings of interferometric gravitational-wave detectors to unprecedented accuracy. The elliptic solver is implemented in the new open-source SpECTRE numerical relativity code, and set up to support simulations of astrophysical scenarios for the emerging era of gravitational-wave and multimessenger astronomy.}, language = {en} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Project for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS) which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. students achieved low scores in comparison with students in other countries (p. 1). 
The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78). They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. In particular, this dissertation carried out the following research tasks: - Studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam - Introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning - Carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam - Comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries - Studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies Hopefully, this dissertation will be a useful reference material for pre-service teachers at education universities to understand the nature of teaching and develop their teaching career.}, language = {en} } @phdthesis{Vranic2019, author = {Vranic, Marija}, title = {3D Structure of the biomarker hepcidin-25 in its native state}, doi = {10.25932/publishup-45929}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459295}, school = {Universit{\"a}t Potsdam}, pages = {xii, 135}, year = {2019}, abstract = {Hepcidin-25 (Hep-25) plays a crucial role in the control of iron homeostasis. Since the dysfunction of the hepcidin pathway leads to multiple diseases as a result of iron imbalance, hepcidin represents a potential target for the diagnosis and treatment of disorders of iron metabolism. Despite intense research in the last decade targeted at developing a selective immunoassay for iron disorder diagnosis and treatment and better understanding the ferroportin-hepcidin interaction, questions remain. The key to resolving these underlying questions is acquiring exact knowledge of the 3D structure of native Hep-25. 
Since it was determined that the N-terminus, which is responsible for the bioactivity of Hep-25, contains a small Cu(II)-binding site known as the ATCUN motif, it was assumed that the Hep-25-Cu(II) complex is the native, bioactive form of hepcidin. This structure has thus far not been elucidated in detail. Owing to the lack of structural information on metal-bound Hep-25, little is known about its possible biological role in iron metabolism. Therefore, this work is focused on structurally characterizing the metal-bound Hep-25 by NMR spectroscopy and molecular dynamics simulations. For the present work, a protocol was developed to prepare and purify properly folded Hep-25 in high quantities. In order to overcome the low solubility of Hep-25 at neutral pH, we introduced the C-terminal DEDEDE solubility tag. The metal binding was investigated through a series of NMR spectroscopic experiments to identify the most affected amino acids that mediate metal coordination. Based on the obtained NMR data, a structural calculation was performed in order to generate a model structure of the Hep-25-Ni(II) complex. The DEDEDE tag was excluded from the structural calculation due to a lack of NMR restraints. The dynamic nature and fast exchange of some of the amide protons with the solvent reduced the overall number of NMR restraints needed for a high-quality structure. The NMR data revealed that the 20 C-terminal Hep-25 amino acids experienced no significant conformational changes, compared to published results, as a result of a pH change from pH 3 to pH 7 and metal binding. A 3D model of the Hep-25-Ni(II) complex was constructed from NMR data recorded for the hexapeptide-Ni(II) complex and the Hep-25-DEDEDE-Ni(II) complex in combination with the fixed conformation of the 19 C-terminal amino acids. The NMR data of the Hep-25-DEDEDE-Ni(II) complex indicate that the ATCUN motif moves independently from the rest of the structure. The 3D model structure of the metal-bound Hep-25 allows future work to elucidate hepcidin's interaction with its receptor ferroportin and should serve as a starting point for the development of antibodies with improved selectivity.}, language = {en} } @phdthesis{Voss2005, author = {Voß, Rebecca}, title = {Mesoporous organosilica materials with amine functions : surface characteristics and chirality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5287}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {In this work, mesoporous organosilica materials are synthesized through the silica sol-gel process. For this, a new class of precursors which are also surfactants is synthesized and self-assembled. This leads to a high surface-area functionality which is analyzed with copper(II) and water adsorption.}, subject = {Silicate}, language = {en} } @phdthesis{Vossenkuhl2015, author = {Vossenkuhl, Birgit}, title = {Transmission of MRSA along the meat supply chain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-85918}, school = {Universit{\"a}t Potsdam}, pages = {141}, year = {2015}, abstract = {Methicillin-resistant Staphylococcus aureus (MRSA) are among the most important antibiotic-resistant pathogens and are widespread above all in hospitals but also outside healthcare facilities. For some years, a new generation of MRSA has been on the rise, colonizing livestock herds in particular as a new niche. 
These so-called livestock-associated MRSA have repeatedly been detected in economically important livestock as well as in the meat obtained from them. In the present work, a methodological approach was pursued to confirm the hypothesis of a possible transmission of livestock-associated MRSA along the food chain from the animal to its meat. Adapted to the differences in the available data, two new concepts were developed for this purpose. To analyze the transmission of MRSA along the slaughter chain, a mathematical model of the pig slaughter process was developed, which is suited to quantifying the course of MRSA prevalence along the slaughter chain and to identifying process steps that are critical for MRSA transmission. Based on prevalence data, the model is able to estimate the average MRSA elimination and contamination rates of each individual process step, which subsequently feed into a Monte Carlo simulation. As a result, it could be shown that it is generally possible to reduce the MRSA prevalence over the course of the slaughter process to a low final level of between 0.15 and 1.15\%. In particular, scalding and singeing of the carcasses were identified as critical processes with regard to MRSA decontamination. In Germany, MRSA prevalence and typing data are regularly collected at all stages of the food chains of various livestock species. To analyze the MRSA data of this cross-sectional study with regard to a possible transmission along the chain, a new statistical approach was developed. For this purpose, a chi-square statistic was combined with the calculation of the Czekanowski similarity index in order to quantify differences in the distribution of strain-specific characteristics between MRSA from the barn, from carcasses after slaughter, and from meat at retail. The method was implemented using the example of the turkey meat chain and was also applied to the analysis of the veal meat chain. The consistently high similarity values between the individual samples point to a possible transmission of MRSA along the food chain. The methods developed are not specific to particular process chains or pathogens. They thus offer a broad range of applications and extend the spectrum of methods for assessing bacterial transmission routes.}, language = {en} } @phdthesis{Vosloh2011, author = {Vosloh, Daniel}, title = {Subcellular compartmentation of primary carbon metabolism in mesophyll cells of Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55534}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Metabolism in plant cells is highly compartmentalized. Many metabolic pathways have reactions in more than one compartment. For example, during photosynthesis in plant mesophyll cells, carbon is synthesized in the form of starch in the chloroplasts, while in the cytosol it is produced in the form of sucrose and stored in the vacuole. These reactions are strictly regulated in order to maintain a balance between the carbon pools of the different compartments and to ensure the supply of energy for anabolic reactions in all parts of the cell. 
I apply a method in which the cells are fractionated under non-aqueous conditions, so that the metabolic status that prevailed at the time of harvest is maintained throughout the entire separation procedure. By combining non-aqueous fractionation with different mass spectrometry techniques (liquid chromatography- and gas chromatography-based mass spectrometry), it is possible to determine the intracellular distribution of most intermediates of photosynthetic carbon metabolism and of the products of the downstream metabolic reactions. The knowledge of the in vivo concentrations of these metabolites was used to determine the change in Gibbs free energy in vivo. This makes it possible to determine which reactions are at equilibrium and which are far from it. The concentrations of the enzymes and their Km values were compared with the in vivo metabolite concentrations in order to establish which enzymes are substrate-limited and therefore sensitive to changes in substrate concentration. Several intermediates of the Calvin-Benson cycle are at the same time substrates for other metabolic pathways, namely dihydroxyacetone phosphate (DHAP, sucrose synthesis), fructose 6-phosphate (Fru6P, starch synthesis), erythrose 4-phosphate (E4P, shikimate pathway) and ribose 5-phosphate (R5P, nucleotide biosynthesis). The enzymes that metabolize these intermediates are located at the branch points to these pathways. These are triose phosphate isomerase (DHAP), transketolase (E4P), sedoheptulose-1,7-bisphosphate aldolase (E4P) and ribose-5-phosphate isomerase (R5P), which are not saturated with their substrates, since the respective substrate concentration is lower than the corresponding Km value. For metabolic control this means that these steps are the most sensitive to changes in substrate concentrations. In contrast, the regulated irreversible steps catalysed by fructose-1,6-bisphosphatase and sedoheptulose-1,7-bisphosphatase are relatively insensitive to changes in substrate concentration. For the sucrose synthesis pathway it could be shown that the cytosolic aldolase has a lower binding site concentration than substrate concentration (DHAP), and that the concentration of sucrose 6-phosphate is lower than the Km value of the synthesizing enzyme sucrose phosphatase. Both sucrose phosphate synthase and sucrose phosphatase are far from equilibrium in vivo. In wild-type Arabidopsis thaliana Columbia-0 leaves, the entire pool of ADPGlc was found in the chloroplast. The enzyme ADPGlc pyrophosphorylase is localized in the chloroplast and synthesizes ADPGlc from ATP and Glc1P. This distribution pattern clearly argues against the hypothesis of Pozueta-Romero and colleagues that ADPGlc is generated in the cytosol by ADP-mediated cleavage of sucrose by sucrose synthase. Based on this observation and other published results, it was concluded that the generally accepted pathway of starch synthesis via ADPGlc production by ADPGlc pyrophosphorylase in the chloroplasts is correct, and that the hypothesis of the alternative pathway is untenable. 
Within the pathway of starch synthesis, the concentration of ADPGlc was found to be lower than the Km value of starch synthase, indicating that this enzyme is substrate-limited. A general observation is that many enzymes of the Calvin-Benson cycle have binding site concentrations similar to the metabolite concentrations, whereas in the synthesis pathways of sucrose and starch the binding site concentrations of the enzymes are much lower than the metabolite concentrations.}, language = {en} } @phdthesis{Voroshnin2023, author = {Voroshnin, Vladimir}, title = {Control over spin and electronic structure of MoS₂ monolayer via interactions with substrates}, doi = {10.25932/publishup-59070}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-590709}, school = {Universit{\"a}t Potsdam}, pages = {viii, 125}, year = {2023}, abstract = {The molybdenum disulfide (MoS2) monolayer is a semiconductor with a direct bandgap and is at the same time a robust and affordable material. It is a candidate for applications in optoelectronics and field-effect transistors. MoS2 features a strong spin-orbit coupling, which makes its spin structure promising for realising the Kane-Mele topological concept with corresponding applications in spintronics and valleytronics. From the optical point of view, the MoS2 monolayer features two valleys in the regions of the K and K' points. These valleys are differentiated by opposite spins and a related valley-selective circular dichroism. In this study we aim to manipulate the MoS2 monolayer spin structure in the vicinity of the K and K' points to explore the possibility of gaining control over the optical and electronic properties. We focus on two different substrates to demonstrate two distinct routes: a gold substrate to introduce a Rashba effect and a graphene/cobalt substrate to introduce a magnetic proximity effect in MoS2. The Rashba effect is proportional to the out-of-plane projection of the electric field gradient. Such a strong change of the electric field occurs at the surfaces of high atomic number materials and acts on conduction electrons effectively like an in-plane magnetic field. Molybdenum and sulfur are relatively light atoms; thus, as in many other 2D materials, the intrinsic Rashba effect in the MoS2 monolayer is vanishingly small. However, the proximity of a high atomic number substrate may enhance the Rashba effect in a 2D material, as was previously demonstrated for graphene. Another way to modify the spin structure is to apply an external magnetic field of high magnitude (several tesla) and cause a Zeeman splitting of the conduction electrons. However, a similar effect can be achieved via magnetic proximity, which allows the external magnetic field to be reduced significantly or even to zero. The graphene-on-cobalt interface is ferromagnetic and stable under MoS2 monolayer synthesis. Cobalt is not the strongest magnet; therefore, stronger magnets may lead to more significant results. Nowadays most experimental studies on the dichalcogenides (MoS2 included) are performed on encapsulated heterostructures that are produced by mechanical exfoliation. While mechanical exfoliation (the scotch-tape method) allows a huge variety of structures to be produced, the shape and size of the samples as well as the distance between layers in heterostructures are impossible to control reproducibly. In our study we used molecular beam epitaxy (MBE) methods to synthesise both MoS2/Au(111) and MoS2/graphene/Co systems. 
We chose to use MBE as it is a scalable and reproducible approach, so that industry may later adopt it. We used graphene/cobalt instead of just a cobalt substrate because direct contact between the MoS2 monolayer and a metallic substrate may lead to photoluminescence (PL) quenching by the metallic substrate. Graphene and hexagonal boron nitride monolayers are considered building blocks of a new generation of electronics and are also commonly used as encapsulating materials for PL studies. Moreover, graphene has proven to be a suitable substrate for the MBE growth of transition metal dichalcogenides (TMDCs). In chapter 1, we start with an introduction to TMDCs. Then we focus on the state of the art of MoS2 monolayer research in the fields of application scenarios; synthesis approaches; electronic, spin, and optical properties; and interactions with magnetic fields and magnetic materials. We briefly touch on the basics of magnetism in solids and move on to discuss various magnetic exchange interactions and the magnetic proximity effect. Then we describe MoS2 optical properties in more detail. We start from basic exciton physics and its manifestation in the MoS2 monolayer. We consider optical selection rules in the MoS2 monolayer and such properties as chirality, spin-valley locking, and the coexistence of bright and dark excitons. Chapter 2 contains an overview of the employed surface science methods: angle-integrated, angle-resolved, and spin-resolved photoemission; low energy electron diffraction; and scanning tunneling microscopy. In chapter 3, we describe the MoS2 monolayer synthesis details for two substrates: a gold single crystal with a (111) surface and graphene on a cobalt thin film with Co(111) surface orientation. The synthesis descriptions are followed by a detailed characterisation of the obtained structures: fingerprints of MoS2 monolayer formation; MoS2 monolayer symmetry and its relation to the substrate below; and characterisation of MoS2 monolayer coverage, domain distribution, sizes and shapes, and moir{\'e} structures. In chapter 4, we start our discussion with the MoS2/Au(111) electronic and spin structure. Combining density functional theory (DFT) computations and spin-resolved photoemission studies, we demonstrate that the MoS2 monolayer band structure features an in-plane Rashba spin splitting. This confirms the possibility of manipulating the MoS2 monolayer spin structure via a substrate. Then we investigate the influence of magnetic proximity in the MoS2/graphene/Co system on the MoS2 monolayer spin structure. We focus our investigation on the MoS2 high symmetry points G and K. First, using spin-resolved measurements, we confirm that electronic states are spin-split at the G point via a magnetic proximity effect. Second, combining spin-resolved measurements and DFT computations for the MoS2 monolayer in the K point region, we demonstrate the appearance of a small in-plane spin polarisation at the valence band top and predict a full in-plane spin polarisation for the conduction band bottom. We then discuss how these findings are related to the MoS2 monolayer optical properties, in particular the possibility of observing dark excitons. Additionally, we speculate on the control of the MoS2 valley energy via magnetic proximity from cobalt. As graphene spatially buffers the MoS2 monolayer from the Co thin film, we speculate on the role of graphene in the magnetic proximity transfer by replacing graphene with vacuum and other 2D materials in our computations. 
We finish our discussion by investigating the K-doped MoS2/graphene/Co system and the influence of this doping on the electronic and spin structure as well as on the magnetic proximity effect. In summary, using a scalable MBE approach we synthesised MoS2/Au(111) and MoS2/graphene/Co systems. We found a Rashba effect taking place in MoS2/Au(111), which proves that the MoS2 monolayer in-plane spin structure can be modified. In MoS2/graphene/Co the in-plane magnetic proximity effect indeed takes place, which raises the possibility of fine-tuning the MoS2 optical properties via manipulation of the substrate magnetisation.}, language = {en} } @phdthesis{Vorogushyn2008, author = {Vorogushyn, Sergiy}, title = {Analysis of flood hazard under consideration of dike breaches}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27646}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {River reaches protected by dikes exhibit high damage potential due to strong value accumulation in the hinterland areas. While providing an efficient protection against low magnitude flood events, dikes may fail under the load of extreme water levels and long flood durations. Hazard and risk assessments for river reaches protected by dikes have not adequately considered the fluvial inundation processes up to now. Particularly, the processes of dike failures and their influence on the hinterland inundation and flood wave propagation lack comprehensive consideration. This study focuses on the development and application of a new modelling system which allows a comprehensive flood hazard assessment along diked river reaches under consideration of dike failures. The proposed Inundation Hazard Assessment Model (IHAM) represents a hybrid probabilistic-deterministic model. It comprises three models interactively coupled at runtime. These are: (1) a 1D unsteady hydrodynamic model of river channel and floodplain flow between dikes, (2) a probabilistic dike breach model which determines possible dike breach locations, breach widths and breach outflow discharges, and (3) a 2D raster-based diffusion wave storage cell model of the hinterland areas behind the dikes. Due to the unsteady nature of the 1D and 2D coupled models, the dependence between hydraulic loads at various locations along the reach is explicitly considered. The probabilistic dike breach model describes dike failures due to three failure mechanisms: overtopping, piping and slope instability caused by the seepage flow through the dike core (micro-instability). The 2D storage cell model driven by the breach outflow boundary conditions computes an extended spectrum of flood intensity indicators such as water depth, flow velocity, impulse, inundation duration and rate of water rise. IHAM is embedded in a Monte Carlo simulation in order to account for the natural variability of the flood generation processes reflected in the form of input hydrographs and for the randomness of dike failures given by breach locations, times and widths. The model was developed and tested on a ca. 91 km heavily diked river reach on the German part of the Elbe River between gauges Torgau and Vockerode. The reach is characterised by low slope and fairly flat extended hinterland areas. The scenario calculations for the developed synthetic input hydrographs for the main river and tributary were carried out for floods with return periods of T = 100, 200, 500, 1000 a. 
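For orientation, and as a generic flood-hydrology convention rather than a result specific to this thesis, a return period T corresponds to an annual exceedance probability \[ P\bigl(Q \ge q_T\bigr) = \frac{1}{T}, \] so the T = 100 a scenario, for example, refers to a flood peak that is reached or exceeded with a probability of 0.01 in any given year.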
Based on the modelling results, probabilistic dike hazard maps could be generated that indicate the failure probability of each discretised dike section for every scenario magnitude. In the disaggregated display mode, the dike hazard maps indicate the failure probabilities for each considered breach mechanism. Besides the binary inundation patterns that indicate the probability of raster cells being inundated, IHAM generates probabilistic flood hazard maps. These maps display spatial patterns of the considered flood intensity indicators and their associated return periods. Finally, scenarios of polder deployment for the extreme floods with T = 200, 500, 1000 were simulated with IHAM. The developed IHAM simulation system represents a new scientific tool for studying fluvial inundation dynamics under extreme conditions incorporating effects of technical flood protection measures. With its major outputs in form of novel probabilistic inundation and dike hazard maps, the IHAM system has a high practical value for decision support in flood management.}, language = {en} } @phdthesis{vonNordheim2018, author = {von Nordheim, Danny}, title = {Dielectric non-linearities of P(VDF-TrFE) single and multilayers for memory applications}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421778}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 109}, year = {2018}, abstract = {Poly(vinylidene fluoride-trifluoroethylene) (P(VDF-TrFE)) ferroelectric thin films of different molar ratio have been studied with regard to data memory applications. Therefore, films with thicknesses of 200 nm and less have been spin coated from solution. Observations gained from single layers have been extended to multilayer capacitors and three terminal transistor devices. Besides conventional hysteresis measurements, the measurement of dielectric non-linearities has been used as a main tool of characterisation. Being a very sensitive and non-destructive method, non-linearity measurements are well suited for polarisation readout and property studies. Samples have been excited using a high quality, single-frequency sinusoidal voltage with an amplitude significantly smaller than the coercive field of the samples. The response was then measured at the excitation frequency and its higher harmonics. Using the measurement results, the linear and non-linear dielectric permittivities ɛ₁, ɛ₂ and ɛ₃ have been determined. The permittivities have been used to derive the temperature-dependent polarisation behaviour as well as the polarisation state and the order of the phase transitions. The coercive field in VDF-TrFE copolymers is high if compared to their ceramic competitors. Therefore, the film thickness had to be reduced significantly. Considering a switching voltage of 5 V and a coercive field of 50 MV/m, the film thickness has to be 100 nm and below. If the thickness becomes substantially smaller than the other dimensions, surface and interface layer effects become more pronounced. For thicker films of P(VDF-TrFE) with a molar fraction of 56/44 a second-order phase transition without a thermal hysteresis for an ɛ₁(T) temperature cycle has been predicted and observed. This however, could not be confirmed by the measurements of thinner films. A shift of transition temperatures as well as a temperature independent, non-switchable polarisation and a thermal hysteresis for P(VDF-TrFE) 56/44 have been observed. 
The impact of static electric fields on the polarisation and the phase transition has therefore been studied and simulated, showing that all aforementioned phenomena including a linear temperature dependence of the polarisation might originate from intrinsic electric fields. In further experiments the knowledge gained from single layer capacitors has been extended to bilayer copolymer thin films of different molar composition. Bilayers have been deposited by succeeding cycles of spin coating from solution. Single layers and their bilayer combination have been studied individually in order to prove the layers stability. The individual layers have been found to be physically stable. But while the bilayers reproduced the main ɛ₁(T) properties of the single layers qualitatively, quantitative numbers could not be explained by a simple serial connection of capacitors. Furthermore, a linear behaviour of the polarisation throughout the measured temperature range has been observed. This was found to match the behaviour predicted considering a constant electric field. Retention time is an important quantity for memory applications. Hence, the retention behaviour of VDF-TrFE copolymer thin films has been determined using dielectric non-linearities. The polarisation loss in P(VDF-TrFE) poled samples has been found to be less than 20\% if recorded over several days. The loss increases significantly if the samples have been poled with lower amplitudes, causing an unsaturated polarisation. The main loss was attributed to injected charges. Additionally, measurements of dielectric non-linearities have been proven to be a sensitive and non-destructive tool to measure the retention behaviour. Finally, a ferroelectric field effect transistor using mainly organic materials (FerrOFET) has been successfully studied. DiNaphtho[2,3-b:2',3'-f]Thieno[3,2-b]Thiophene (DNTT) has proven to be a stable, suitable organic semiconductor to build up ferroelectric memory devices. Furthermore, an oxidised aluminium bottom electrode and additional dielectric layers, i.e. parylene C, have proven to reduce the leakage current and therefore enhance the performance significantly.}, language = {en} } @phdthesis{vonKaphengst2019, author = {von Kaphengst, Dragana}, title = {Project's management quality in development cooperation}, doi = {10.25932/publishup-43099}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-430992}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 237}, year = {2019}, abstract = {In light of the debate on the consequences of competitive contracting out of traditionally public services, this research compares two mechanisms used to allocate funds in development cooperation—direct awarding and competitive contracting out—aiming to identify their potential advantages and disadvantages. The agency theory is applied within the framework of rational-choice institutionalism to study the institutional arrangements that surround two different money allocation mechanisms, identify the incentives they create for the behavior of individual actors in the field, and examine how these then transfer into measurable differences in managerial quality of development aid projects. In this work, project management quality is seen as an important determinant of the overall project success. For data-gathering purposes, the German development agency, the Gesellschaft f{\"u}r Internationale Zusammenarbeit (GIZ), is used due to its unique way of work. 
Whereas the majority of projects receive funds via the direct-award mechanism, there is a commercial department, GIZ International Services (GIZ IS), that has to compete for project funds. The data concerning project management practices on the GIZ and GIZ IS projects was gathered via a web-based, self-administered survey of project team leaders. Principal component analysis was applied to reduce the dimensionality of the independent variable to a total of five components of project management. Furthermore, multiple regression analysis identified the differences between the separate components on these two project types. Enriched by qualitative data gathered via interviews, this thesis offers insights into everyday managerial practices in development cooperation and identifies the advantages and disadvantages of the two allocation mechanisms. The thesis first reiterates the responsibility of donors and implementers for overall aid effectiveness. It shows that the mechanism of competitive contracting out leads to better oversight and control of implementers, fosters deeper cooperation between the implementers and beneficiaries, and has a potential to strengthen ownership of recipient countries. On the other hand, it shows that the evaluation quality does not tremendously benefit from the competitive allocation mechanism and that the quality of the component knowledge management and learning is better when direct-award mechanisms are used. This raises questions about the limited possibilities of actors in the field to learn from past mistakes and incorporate the findings into future interventions, which is one of the fundamental issues of aid effectiveness. Finally, the findings show immense deficiencies in regard to oversight and control of individual projects in German development cooperation.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Thomas}, title = {Model-driven engineering of self-adaptive software}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409755}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 357}, year = {2018}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls the underlying adaptable software by a feedback loop. State-of-the-art approaches prescribe the feedback loop in terms of the number of feedback loops, how the activities (e.g., monitor, analyze, plan, and execute (MAPE)) and the knowledge are structured into a feedback loop, and the type of knowledge. Moreover, the feedback loop is usually hidden in the implementation or framework and therefore not visible in the architectural design. Additionally, an adaptation engine often employs runtime models that either represent the adaptable software or capture strategic knowledge such as reconfiguration strategies. State-of-the-art approaches do not systematically address the interplay of such runtime models, which would otherwise allow developers to freely design the entire feedback loop. This thesis presents ExecUtable RuntimE MegAmodels (EUREMA), an integrated model-driven engineering (MDE) solution that rigorously uses models for engineering feedback loops. EUREMA provides a domain-specific modeling language to specify and an interpreter to execute feedback loops. The language allows developers to freely design a feedback loop concerning the activities and runtime models (knowledge) as well as the number of feedback loops. It further supports structuring the feedback loops in the adaptation engine that follows a layered architectural style. 
Thus, EUREMA makes the feedback loops explicit in the design and enables developers to reason about design decisions. To address the interplay of runtime models, we propose the concept of a runtime megamodel, which is a runtime model that contains other runtime models as well as activities (e.g., MAPE) working on the contained models. This concept is the underlying principle of EUREMA. The resulting EUREMA (mega)models are kept alive at runtime and they are directly executed by the EUREMA interpreter to run the feedback loops. Interpretation provides the flexibility to dynamically adapt a feedback loop. In this context, EUREMA supports engineering self-adaptive software in which feedback loops run independently or in a coordinated fashion within the same layer as well as on top of each other in different layers of the adaptation engine. Moreover, we consider preliminary means to evolve self-adaptive software by providing a maintenance interface to the adaptation engine. This thesis discusses EUREMA in detail by applying it to different scenarios such as single, multiple, and stacked feedback loops for self-repairing and self-optimizing the mRUBiS application. Moreover, it investigates the design and expressiveness of EUREMA, reports on experiments with a running system (mRUBiS) and with alternative solutions, and assesses EUREMA with respect to quality attributes such as performance and scalability. The conducted evaluation provides evidence that EUREMA as an integrated and open MDE approach for engineering self-adaptive software seamlessly integrates the development and runtime environments using the same formalism to specify and execute feedback loops, supports the dynamic adaptation of feedback loops in layered architectures, and achieves an efficient execution of feedback loops by leveraging incrementality.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Stefanie}, title = {Sequence dependency of photon and electron induced DNA strand breaks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419669}, school = {Universit{\"a}t Potsdam}, pages = {xii, 117}, year = {2018}, abstract = {Deoxyribonucleic acid (DNA) is the carrier of human genetic information and is exposed to environmental influences such as the ultraviolet (UV) fraction of sunlight every day. The photostability of the DNA against UV light is astonishing. Even though the DNA bases have a strong absorption maximum at around 260 nm/4.77 eV, their quantum yield of photoproducts remains very low [1]. If the photon energies exceed the ionization energy (IE) of the nucleobases (~8-9 eV) [2], the DNA can be severely damaged. Photoexcitation and -ionization reactions occur, which can induce strand breaks in the DNA. The efficiency of the excitation- and ionization-induced strand breaks in the target DNA sequences is represented by cross sections. If Si is used as a substrate material in the VUV irradiation experiments, secondary electrons with an energy below 3.6 eV are generated from the substrate. These low energy electrons (LEEs) are known to induce dissociative electron attachment (DEA) in DNA and thereby DNA strand breakage very efficiently. LEEs play an important role in cancer radiation therapy, since they are generated secondarily along the radiation track of ionizing radiation. In the framework of this thesis, different single stranded DNA sequences were irradiated with 8.44 eV vacuum UV (VUV) light and cross sections for single strand breaks (SSB) were determined. 
Several sequences were also exposed to secondary LEEs, which additionally contributed to the SSBs. First, the cross sections for SSBs depending on the type of nucleobases were determined. Both types of DNA sequences, mono-nucleobase and mixed sequences showed very similar results upon VUV radiation. The additional influence of secondarily generated LEEs resulted in contrast in a clear trend for the SSB cross sections. In this, the polythymine sequence had the highest cross section for SSBs, which can be explained by strong anionic resonances in this energy range. Furthermore, SSB cross sections were determined as a function of sequence length. This resulted in an increase in the strand breaks to the same extent as the increase in the geometrical cross section. The longest DNA sequence (20 nucleotides) investigated in this series, however, showed smaller cross section values for SSBs, which can be explained by conformational changes in the DNA. Moreover, several DNA sequences that included the radiosensitizers 5-Bromouracil (5BrU) and 8-Bromoadenine (8BrA) were investigated and the corresponding SSB cross sections were determined. It was shown that 5BrU reacts very strongly to VUV radiation leading to high strand break yields, which showed in turn a strong sequence-dependency. 8BrA, on the other hand, showed no sensitization to the applied VUV radiation, since almost no increase in strand breakage yield was observed in comparison to non-modified DNA sequences. In order to be able to identify the mechanisms of radiation damage by photons, the IEs of certain DNA sequences were further explored using photoionization tandem mass spectrometry. By varying the DNA sequence, both the IEs depending on the type of nucleobase as well as on the DNA strand length could be identified and correlated to the SSB cross sections. The influence of the IE on the photoinduced reaction in the brominated DNA sequences could be excluded.}, language = {en} } @phdthesis{Vogel2013, author = {Vogel, Kristin}, title = {Applications of Bayesian networks in natural hazard assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69777}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Even though quite different in occurrence and consequences, from a modeling perspective many natural hazards share similar properties and challenges. Their complex nature as well as lacking knowledge about their driving forces and potential effects make their analysis demanding: uncertainty about the modeling framework, inaccurate or incomplete event observations and the intrinsic randomness of the natural phenomenon add up to different interacting layers of uncertainty, which require a careful handling. Nevertheless deterministic approaches are still widely used in natural hazard assessments, holding the risk of underestimating the hazard with disastrous effects. The all-round probabilistic framework of Bayesian networks constitutes an attractive alternative. In contrast to deterministic proceedings, it treats response variables as well as explanatory variables as random variables making no difference between input and output variables. Using a graphical representation Bayesian networks encode the dependency relations between the variables in a directed acyclic graph: variables are represented as nodes and (in-)dependencies between variables as (missing) edges between the nodes. 
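In the generic textbook notation, writing Pa(X_i) for the parents of a variable X_i in the graph (a shorthand not taken from the thesis itself), this graphical encoding corresponds to the factorisation \[ P(X_1,\ldots,X_n) = \prod_{i=1}^{n} P\bigl(X_i \mid \mathrm{Pa}(X_i)\bigr). \]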
The joint distribution of all variables can thus be described by decomposing it, according to the depicted independences, into a product of local conditional probability distributions, which are defined by the parameters of the Bayesian network. In the framework of this thesis, the Bayesian network approach is applied to different natural hazard domains (i.e. seismic hazard, flood damage and landslide assessments). Learning the network structure and parameters from data, Bayesian networks reveal relevant dependency relations between the included variables and help to gain knowledge about the underlying processes. The problem of Bayesian network learning is cast in a Bayesian framework, considering the network structure and parameters as random variables themselves and searching for the most likely combination of both, which corresponds to the maximum a posteriori (MAP score) of their joint distribution given the observed data. Although well studied in theory, the learning of Bayesian networks from real-world data is usually not straightforward and requires an adaptation of existing algorithms. Typical problems are the handling of continuous variables, incomplete observations and the interaction of both. Working with continuous distributions requires assumptions about the allowed families of distributions. To "let the data speak" and avoid wrong assumptions, continuous variables are instead discretized here, thus allowing for a completely data-driven and distribution-free learning. An extension of the MAP score, considering the discretization as a random variable as well, is developed for an automatic multivariate discretization that takes interactions between the variables into account. The discretization process is nested into the network learning and requires several iterations. When incomplete observations have to be handled on top of this, the computational burden grows further. Iterative procedures for missing value estimation quickly become infeasible. A more efficient albeit approximate method is used instead, estimating the missing values based only on the observations of variables directly interacting with the missing variable. Moreover, natural hazard assessments often have a primary interest in a certain target variable. The discretization learned for this variable does not always have the required resolution for a good prediction performance. Finer resolutions for (conditional) continuous distributions are achieved with continuous approximations subsequent to the Bayesian network learning, using kernel density estimations or mixtures of truncated exponential functions. All our procedures are completely data-driven. We thus avoid assumptions that require expert knowledge and instead provide domain-independent solutions that are applicable not only in other natural hazard assessments, but in a variety of domains struggling with uncertainties.}, language = {en} } @phdthesis{Vockenberg2006, author = {Vockenberg, Kerstin}, title = {Updating of representations in working memory}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-11767}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The limited capacity of working memory forces people to update its contents continuously. Two aspects of the updating process were investigated in the present experimental series. The first series concerned the question of whether it is possible to update several representations in parallel. 
Similar results were obtained for the updating of object features as well as for the updating of whole objects: participants were able to update representations in parallel. The second experimental series addressed the question of whether working memory representations that are replaced during updating disappear immediately or interfere with the new representations. Evidence for the existence of old representations was found under working memory conditions and under conditions exceeding working memory capacity. These results contradict the hypothesis that working memory contents are protected from proactive interference from long-term memory contents.}, subject = {Aktualisierung}, language = {en} } @phdthesis{Vlasov2015, author = {Vlasov, Vladimir}, title = {Synchronization of oscillatory networks in terms of global variables}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78182}, school = {Universit{\"a}t Potsdam}, pages = {82}, year = {2015}, abstract = {Synchronization of large ensembles of oscillators is an omnipresent phenomenon observed in different fields of science like physics, engineering, life sciences, etc. The simplest setup is that of globally coupled phase oscillators, where all the oscillators contribute to a global field which acts on all oscillators. This formulation of the problem was pioneered by Winfree and Kuramoto. Such a setup makes it possible to analyse these systems in terms of global variables. In this work we describe nontrivial collective dynamics in oscillator populations coupled via mean fields in terms of global variables. We consider problems which cannot be directly reduced to the standard Kuramoto and Winfree models. In the first part of the thesis we adopt a method introduced by Watanabe and Strogatz. The main idea is that a system of identical oscillators of a particular type can be described by a low-dimensional system of global equations. This approach enables us to perform a complete analytical treatment for a special but vast set of initial conditions. Furthermore, we show how the approach can be extended to some nonidentical systems. We apply the Watanabe-Strogatz approach to arrays of Josephson junctions and systems of identical phase oscillators with leader-type coupling. In the next parts of the thesis we consider the self-consistent mean-field theory method that can be applied to general nonidentical globally coupled systems of oscillators both with and without noise. For the systems considered, the regime in which the global field rotates uniformly is the most important one. With the help of this approach, such solutions of the self-consistency equation can be found analytically in parametric form for an arbitrary distribution of frequencies and coupling parameters, both in the noise-free and in the noisy case. We apply this method to a deterministic Kuramoto-type model with generic coupling and to an ensemble of spatially distributed oscillators with leader-type coupling. Furthermore, with the proposed self-consistent approach we fully characterize rotating wave solutions of the noisy Kuramoto-type model with generic coupling and of an ensemble of noisy oscillators with bi-harmonic coupling. 
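As a point of reference, and in generic notation rather than that of the thesis, the standard Kuramoto model that these generalisations extend can be written as \[ \dot{\theta}_k = \omega_k + \frac{K}{N}\sum_{j=1}^{N}\sin(\theta_j - \theta_k) = \omega_k + K r \sin(\Psi - \theta_k), \qquad r e^{i\Psi} = \frac{1}{N}\sum_{j=1}^{N} e^{i\theta_j}, \] so that each oscillator is driven by the others only through the complex mean field (r, \Psi).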
Whenever possible, a complete analysis of global dynamics is performed and compared with direct numerical simulations of large populations.}, language = {en} } @phdthesis{Vitagliano2024, author = {Vitagliano, Gerardo}, title = {Modeling the structure of tabular files for data preparation}, doi = {10.25932/publishup-62435}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-624351}, school = {Universit{\"a}t Potsdam}, pages = {ii, 114}, year = {2024}, abstract = {To manage tabular data files and leverage their content in a given downstream task, practitioners often design and execute complex transformation pipelines to prepare them. The complexity of such pipelines stems from different factors, including the nature of the preparation tasks, often exploratory or ad-hoc to specific datasets; the large repertory of tools, algorithms, and frameworks that practitioners need to master; and the volume, variety, and velocity of the files to be prepared. Metadata plays a fundamental role in reducing this complexity: characterizing a file assists end users in the design of data preprocessing pipelines, and furthermore paves the way for suggestion, automation, and optimization of data preparation tasks. Previous research in the areas of data profiling, data integration, and data cleaning, has focused on extracting and characterizing metadata regarding the content of tabular data files, i.e., about the records and attributes of tables. Content metadata are useful for the latter stages of a preprocessing pipeline, e.g., error correction, duplicate detection, or value normalization, but they require a properly formed tabular input. Therefore, these metadata are not relevant for the early stages of a preparation pipeline, i.e., to correctly parse tables out of files. In this dissertation, we turn our focus to what we call the structure of a tabular data file, i.e., the set of characters within a file that do not represent data values but are required to parse and understand the content of the file. We provide three different approaches to represent file structure, an explicit representation based on context-free grammars; an implicit representation based on file-wise similarity; and a learned representation based on machine learning. In our first contribution, we use the grammar-based representation to characterize a set of over 3000 real-world csv files and identify multiple structural issues that let files deviate from the csv standard, e.g., by having inconsistent delimiters or containing multiple tables. We leverage our learnings about real-world files and propose Pollock, a benchmark to test how well systems parse csv files that have a non-standard structure, without any previous preparation. We report on our experiments on using Pollock to evaluate the performance of 16 real-world data management systems. Following, we characterize the structure of files implicitly, by defining a measure of structural similarity for file pairs. We design a novel algorithm to compute this measure, which is based on a graph representation of the files' content. We leverage this algorithm and propose Mondrian, a graphical system to assist users in identifying layout templates in a dataset, classes of files that have the same structure, and therefore can be prepared by applying the same preparation pipeline. 
Finally, we introduce MaGRiTTE, a novel architecture that uses self-supervised learning to automatically learn structural representations of files in the form of vectorial embeddings at three different levels: cell level, row level, and file level. We experiment with the application of structural embeddings for several tasks, namely dialect detection, row classification, and data preparation efforts estimation. Our experimental results show that structural metadata, either identified explicitly on parsing grammars, derived implicitly as file-wise similarity, or learned with the help of machine learning architectures, is fundamental to automate several tasks, to scale up preparation to large quantities of files, and to provide repeatable preparation pipelines.}, language = {en} } @phdthesis{VillatoroLeal2018, author = {Villatoro Leal, Jos{\´e} Andr{\´e}s}, title = {A combined approach for the analysis of biomolecules using IR-MALDI ion mobility spectrometry and molecular dynamics simulations of peptide ions in the gas phase}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419723}, school = {Universit{\"a}t Potsdam}, pages = {133}, year = {2018}, abstract = {The aim of this doctoral thesis was to establish a technique for the analysis of biomolecules with infrared matrix-assisted laser dispersion (IR-MALDI) ion mobility (IM) spectrometry. The main components of the work were the characterization of the IR-MALDI process, the development and characterization of different ion mobility spectrometers, the use of IR-MALDI-IM spectrometry as a robust, standalone spectrometer and the development of a collision cross-section estimation approach for peptides based on molecular dynamics and thermodynamic reweighting. First, the IR-MALDI source was studied with atmospheric pressure ion mobility spectrometry and shadowgraphy. It consisted of a metal capillary, at the tip of which a self-renewing droplet of analyte solution was met by an IR laser beam. A relationship between peak shape, ion desolvation, diffusion and extraction pulse delay time (pulse delay) was established. First order desolvation kinetics were observed and related to peak broadening by diffusion, both influenced by the pulse delay. The transport mechanisms in IR-MALDI were then studied by relating different laser impact positions on the droplet surface to the corresponding ion mobility spectra. Two different transport mechanisms were determined: phase explosion due to the laser pulse and electrical transport due to delayed ion extraction. The velocity of the ions stemming from the phase explosion was then measured by ion mobility and shadowgraphy at different time scales and distances from the source capillary, showing an initially very high but rapidly decaying velocity. Finally, the anatomy of the dispersion plume was observed in detail with shadowgraphy and general conclusions over the process were drawn. Understanding the IR-MALDI process enabled the optimization of the different IM spectrometers at atmospheric and reduced pressure (AP and RP, respectively). At reduced pressure, both an AP and an RP IR-MALDI source were used. The influence of the pulsed ion extraction parameters (pulse delay, width and amplitude) on peak shape, resolution and area was systematically studied in both AP and RP IM spectrometers and discussed in the context of the IR-MALDI process. Under RP conditions, the influence of the closing field and of the pressure was also examined for both AP and RP sources. 
For the AP ionization RP IM spectrometer, the influence of the inlet field (IF) in the source region was also examined. All of these studies led to the determination of the optimal analytical parameters as well as to a better understanding of the initial ion cloud anatomy. The analytical performance of the spectrometer was then studied. Limits of detection (LOD) and linear ranges were determined under static and pulsed ion injection conditions and interpreted in the context of the IR-MALDI mechanism. Applications in the separation of simple mixtures were also illustrated, demonstrating good isomer separation capabilities and the advantages of singly charged peaks. The possibility to couple high performance liquid chromatography (HPLC) to IR-MALDI-IM spectrometry was also demonstrated. Finally, the reduced pressure spectrometer was used to study the effect of high reduced field strength on the mobility of polyatomic ions in polyatomic gases. The last focus point was on the study of peptide ions. A dataset obtained with electrospray IM spectrometry was characterized and used for the calibration of a collision cross-section (CCS) determination method based on molecular dynamics (MD) simulations at high temperature. Instead of producing candidate structures which are evaluated one by one, this semi-automated method uses the simulation as a whole to determine a single average collision cross-section value by reweighting the CCS of a few representative structures. The method was compared to the intrinsic size parameter (ISP) method and to experimental results. Additional MD data obtained from the simulations was also used to further analyze the peptides and understand the experimental results, an advantage with regard to the ISP method. Finally, the CCS of peptide ions analyzed by IR-MALDI were also evaluated with both ISP and MD methods and the results compared to experiment, resulting in a first validation of the MD method. Thus, this thesis brings together the soft ionization technique that is IR-MALDI, which produces mostly singly charged peaks, with ion mobility spectrometry, which can distinguish between isomers, and a collision cross-section determination method which also provides structural information on the analyte at hand.}, language = {en} } @phdthesis{Vijayakrishnan2008, author = {Vijayakrishnan, Balakumar}, title = {Solution and solid phase synthesis of N,N'-diacetyl chitotetraoses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-18830}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {The three major biopolymers, proteins, nucleic acids and glycoconjugates are mainly responsible for the information transfer, which is a fundamental process of life. The biological importance of proteins and nucleic acids are well explored and oligosaccharides in the form of glycoconjugates have gained importance recently. The β-(1→4) linked N-acetylglucosamine (GlcNAc) moiety is a frequently occurring structural unit in various naturally and biologically important oligosaccharides and related conjugates. Chitin which is the most abundant polymer of GlcNAc is widely distributed in nature whereas the related polysaccharide chitosan (polymer of GlcN and GlcNAc) occurs in certain fungi. Chitooligosaccharides of mixed acetylation patterns are of interest for the determination of the substrate specificities and mechanism of chitinases. 
In this report, we describe the chemical synthesis of three chitotetraoses namely GlcNAc-GlcN-GlcNAc-GlcN, GlcN-GlcNAc-GlcNAc-GlcN and GlcN-GlcN-GlcNAc-GlcNAc. Benzyloxycarbonyl (Z) and p-nitrobenzyloxycarbonyl (PNZ) were used for the amino functionality due to their ability to form the β-linkage during the glycosylation reactions through neighboring group participation and the trichloroacetimidate approach was utilized for the donor. Monomeric, dimeric acceptors and donors have been prepared by utilizing the Z and PNZ groups and coupling between the appropriate donor and acceptors in the presence of Lewis acid yielded the protected tetrasaccharides. Finally cleavage of PNZ followed by reacetylation and the deblocking of other protecting groups afforded the N,N'-diacetyl chitotetraoses in good yield. Successful syntheses for the protected diacetyl chitotetraoses by solid phase synthesis have also been described.}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology. Thirdly, we present a software providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @phdthesis{Verch2023, author = {Verch, Ronald}, title = {Whole-body electrical muscle stimulation superimposed walking as training tool in the management of type 2 diabetes mellitus}, doi = {10.25932/publishup-63424}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-634240}, school = {Universit{\"a}t Potsdam}, pages = {IX, 78}, year = {2023}, abstract = {Background: The worldwide prevalence of diabetes has been increasing in recent years, with a projected prevalence of 700 million patients by 2045, leading to economic burdens on societies. 
Type 2 diabetes mellitus (T2DM), representing more than 95\% of all diabetes cases, is a multifactorial metabolic disorder characterized by insulin resistance leading to an imbalance between insulin requirements and supply. Overweight and obesity are the main risk factors for developing type 2 diabetes mellitus. Lifestyle modification, i.e., following a healthy diet and being physically active, is the primary successful treatment and prevention method for type 2 diabetes mellitus. A problem, however, is that many patients do not achieve the recommended levels of physical activity. Electrical muscle stimulation (EMS) is an increasingly popular training method and has come into the focus of research in recent years. It involves the external application of an electric field to muscles, which can lead to muscle contraction. Positive effects of EMS training have been found in healthy individuals as well as in various patient groups. New EMS devices offer a wide range of mobile applications for whole-body electrical muscle stimulation (WB-EMS) training, e.g., the intensification of dynamic low-intensity endurance exercises through WB-EMS. This dissertation project aims to investigate whether WB-EMS is suitable for intensifying low-intensity dynamic exercises such as walking and Nordic walking. Methods: Two independent studies were conducted. The first study aimed to investigate the reliability of exercise parameters during the 10-meter Incremental Shuttle Walk Test (10MISWT) using superimposed WB-EMS (research question 1, sub-question a) and the difference in exercise intensity compared to conventional walking (CON-W, research question 1, sub-question b). The second study aimed to compare differences in exercise parameters between WB-EMS superimposed walking (WB-EMS-W) and conventional walking (CON-W), as well as between WB-EMS superimposed Nordic walking (WB-EMS-NW) and conventional Nordic walking (CON-NW) on a treadmill (research question 2). Both studies took place in participant groups of healthy, moderately active men aged 35-70 years. During all measurements, the Easy Motion Skin® WB-EMS low frequency stimulation device with adjustable intensities for eight muscle groups was used. The current intensity was individually adjusted for each participant at each trial to ensure safety, avoiding pain and muscle cramps. In study 1, thirteen individuals were included for each sub-question. A randomized cross-over design with three measurement appointments was used to avoid confounding factors such as delayed onset muscle soreness. The 10MISWT was performed until the participants no longer met the criteria of the test, and five outcome measures were recorded: peak oxygen uptake (VO2peak), relative VO2peak (rel.VO2peak), maximum walk distance (MWD), blood lactate concentration, and the rate of perceived exertion (RPE). Eleven participants were included in study 2. A randomized cross-over design with four measurement appointments was used to avoid confounding factors. A treadmill test protocol at constant velocity (6.5 m/s) was developed to compare exercise intensities. Oxygen uptake (VO2), relative VO2 (rel.VO2), blood lactate, and the RPE were used as outcome variables. Test-retest reliability between measurements was determined using a compilation of absolute and relative measures of reliability. Outcome measures in study 2 were analysed using multifactorial analyses of variance. 
Results: Reliability analysis showed good reliability for VO2peak, rel.VO2peak, MWD and RPE, with no statistically significant difference for WB-EMS-W during the 10MISWT. However, no differences in the outcome variables compared to conventional walking were found. The analysis of the treadmill tests showed significant effects of the factors CON/WB-EMS and W/NW for the outcome variables VO2, rel.VO2 and lactate, with both factors leading to higher results. However, the difference in VO2 and relative VO2 is within the range of biological variability of ± 12\%. The factor combination EMS∗W/NW is statistically non-significant for all three variables. WB-EMS resulted in higher RPE values; RPE differences for W/NW and EMS∗W/NW were not significant. Discussion: The present project found good reliability for measuring VO2peak, rel.VO2peak, MWD and RPE during the 10MISWT with WB-EMS-W, confirming prior research on the test. In healthy, moderately active men, the test appears to be limited technically rather than physiologically. However, it is unsuitable for investigating differences in exercise intensities using WB-EMS-W compared to CON-W due to different perceptions of current intensity between exercise and rest. A treadmill test with constant walking speed was conducted to adjust the individual maximum tolerable current intensity for the second part of the project. The treadmill test showed a significant increase in metabolic demands during WB-EMS-W and WB-EMS-NW, as indicated by increased VO2 and blood lactate concentrations. However, the clinical relevance of these findings remains debatable. The study also found that WB-EMS superimposed exercises are perceived as more strenuous than conventional exercise. While partly comparable studies led to higher VO2 values, our results are in line with those of other studies using the same frequency. Due to the minor clinical relevance, the use of WB-EMS as an exercise intensification tool during walking and Nordic walking is limited. The high device costs should also be considered. Habituation to WB-EMS could increase current intensity tolerance and VO2 and make it a meaningful method in the treatment of T2DM. Recent figures show that WB-EMS is used in obese people to achieve health and weight goals. The supposed benefit should be further investigated scientifically.}, language = {en} } @phdthesis{VenturaBort2020, author = {Ventura-Bort, Carlos}, title = {Temporo-spatial dynamics of the impact of emotional contexts on visual processing and memory}, doi = {10.25932/publishup-55023}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550236}, school = {Universit{\"a}t Potsdam}, pages = {208}, year = {2020}, abstract = {It has frequently been observed that single emotional events are not only more efficiently processed, but also better remembered, and form longer-lasting memory traces than neutral material. However, when emotional information is perceived as a part of a complex event, such as in the context of or in relation to other events and/or source details, the modulatory effects of emotion are less clear. The present work aims to investigate how emotional, contextual source information modulates the initial encoding and subsequent long-term retrieval of associated neutral material (item memory) and contextual source details (contextual source memory). 
To do so, a two-task experiment was used, consisting of an incidental encoding task in which neutral objects were displayed over different contextual background scenes which varied in emotional content (unpleasant, pleasant, and neutral), and a delayed retrieval task (1 week), in which previously-encoded objects and new ones were presented. In a series of studies, behavioral indices (Studies 2, 3, and 5), event-related potentials (ERPs; Studies 1-4), and functional magnetic resonance imaging (Study 5) were used to investigate whether emotional contexts can rapidly tune the visual processing of associated neutral information (Study 1) and modulate long-term item memory (Study 2), how different recognition memory processes (familiarity vs. recollection) contribute to these emotion effects on item and contextual source memory (Study 3), whether the emotional effects on item memory can also be observed during spontaneous retrieval (Study 4), and which brain regions underpin the modulatory effects of emotional contexts on item and contextual source memory (Study 5). In Study 1, it was observed that emotional contexts, by means of emotional associative learning, can rapidly alter the processing of associated neutral information. Neutral items associated with emotional contexts (i.e., emotional associates), compared to neutral ones, showed enhanced perceptual and more elaborate processing after one single pairing, as indexed by larger amplitudes of the P100 and LPP components, respectively. Study 2 showed that emotional contexts produce longer-lasting memory effects, as evidenced by better item memory performance and larger ERP Old/New differences for emotional associates. In Study 3, a mnemonic differentiation was observed between item and contextual source memory, which was modulated by emotion. Item memory was driven by familiarity, independently of emotional contexts during encoding, whereas contextual source memory was driven by recollection and was better for emotional material. As in Study 2, enhancing effects of emotional contexts on item memory were observed in ERPs associated with recollection processes. Likewise, for contextual source memory, a pronounced recollection-related ERP enhancement was observed exclusively for emotional contexts. Study 4 showed that the long-term recollection enhancement of emotional contexts on item memory can be observed even when retrieval is not explicitly attempted, as measured with ERPs, suggesting that the emotion-enhancing effects on memory are not related to the task embedded during recognition, but to the motivational relevance of the triggering event. In Study 5, it was observed that the enhancing effects of emotional contexts on item and contextual source memory involve stronger engagement of brain regions associated with memory recollection, including areas of the medial temporal lobe, posterior parietal cortex, and prefrontal cortex. Taken together, these findings suggest that emotional contexts rapidly modulate the initial processing of associated neutral information and the subsequent, long-term item and contextual source memories. The enhanced memory effects of emotional contexts are strongly supported by recollection rather than familiarity processes, and are shown to be triggered when retrieval is both explicitly and spontaneously attempted. These results provide new insights into the modulatory role of emotional information on the visual processing and the long-term recognition memory of complex events. 
The present findings are integrated into the current theoretical models and future ventures are discussed.}, language = {en} } @phdthesis{Venevskaia2004, author = {Venevskaia, Irina}, title = {Modeling of vegetation diversity and a national conservation planning: example of Russia}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001863}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {The overarching aim of my study is to develop quantitative methods for national conservation planning that are consistent with the international approach. This aim requires solving the following problems: 1) How can vegetation diversity be estimated at coarse resolution on the basis of abiotic factors? 2) How can the 'global hotspots' approach be adapted for delineating national biodiversity hotspots? 3) How can quantitative conservation targets be chosen that account for the differences between national hotspots in environmental conditions and human threat? 4) How can a large-scale national conservation plan be designed that reflects the hierarchical nature of species diversity? The case study for national conservation planning is Russia. The following theoretical conclusions were drawn: · Large-scale vegetation diversity is largely predictable from climate-driven latent heat of evaporation and the topographic structure of the landscape, described as elevation difference. The climate-based model reproduces the observed vascular plant species numbers for different regions of the world with an average error of 15\% · National biodiversity hotspots can be mapped on the basis of biotic or abiotic data by using the quantitative criteria for plant endemism and land use of the 'global hotspots' approach, corrected for the country in question · Quantitative conservation targets that account for the differences between national biodiversity hotspots in environmental conditions and human threat can be set with national data on Red List species · A large-scale national conservation plan that respects the hierarchical nature of species diversity can be designed by combining an abiotic method at the national scale (identification of large-scale hotspots) with a biotic method at the regional scale (analysis of Red List species data)}, language = {en} } @phdthesis{Veh2019, author = {Veh, Georg}, title = {Outburst floods from moraine-dammed lakes in the Himalayas}, doi = {10.25932/publishup-43607}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436071}, school = {Universit{\"a}t Potsdam}, pages = {124}, year = {2019}, abstract = {The Himalayas are a region that is highly dependent on, but also frequently threatened by, changing meltwater resources. This mountain belt hosts the highest mountain peaks on earth, has the largest reserve of ice outside the polar regions, and has been home to a rapidly growing population in recent decades. One source of hazard has attracted particular scientific attention in the past two decades: glacial lake outburst floods (GLOFs) have occurred rarely, but mostly with fatal and catastrophic consequences for downstream communities and infrastructure. 
Such GLOFs can suddenly release several million cubic meters of water from naturally impounded meltwater lakes. Glacial lakes have grown in number and size by ongoing glacial mass losses in the Himalayas. Theory holds that enhanced meltwater production may increase GLOF frequency, but has never been tested so far. The key challenge to test this notion are the high altitudes of >4000 m, at which lakes occur, making field work impractical. Moreover, flood waves can attenuate rapidly in mountain channels downstream, so that many GLOFs have likely gone unnoticed in past decades. Our knowledge on GLOFs is hence likely biased towards larger, destructive cases, which challenges a detailed quantification of their frequency and their response to atmospheric warming. Robustly quantifying the magnitude and frequency of GLOFs is essential for risk assessment and management along mountain rivers, not least to implement their return periods in building design codes. Motivated by this limited knowledge of GLOF frequency and hazard, I developed an algorithm that efficiently detects GLOFs from satellite images. In essence, this algorithm classifies land cover in 30 years (~1988-2017) of continuously recorded Landsat images over the Himalayas, and calculates likelihoods for rapidly shrinking water bodies in the stack of land cover images. I visually assessed such detected tell-tale sites for sediment fans in the river channel downstream, a second key diagnostic of GLOFs. Rigorous tests and validation with known cases from roughly 10\% of the Himalayas suggested that this algorithm is robust against frequent image noise, and hence capable to identify previously unknown GLOFs. Extending the search radius to the entire Himalayan mountain range revealed some 22 newly detected GLOFs. I thus more than doubled the existing GLOF count from 16 previously known cases since 1988, and found a dominant cluster of GLOFs in the Central and Eastern Himalayas (Bhutan and Eastern Nepal), compared to the rarer affected ranges in the North. Yet, the total of 38 GLOFs showed no change in the annual frequency, so that the activity of GLOFs per unit glacial lake area has decreased in the past 30 years. I discussed possible drivers for this finding, but left a further attribution to distinct GLOF-triggering mechanisms open to future research. This updated GLOF frequency was the key input for assessing GLOF hazard for the entire Himalayan mountain belt and several subregions. I used standard definitions in flood hydrology, describing hazard as the annual exceedance probability of a given flood peak discharge [m3 s-1] or larger at the breach location. I coupled the empirical frequency of GLOFs per region to simulations of physically plausible peak discharges from all existing ~5,000 lakes in the Himalayas. Using an extreme-value model, I could hence calculate flood return periods. I found that the contemporary 100-year GLOF discharge (the flood level that is reached or exceeded on average once in 100 years) is 20,600+2,200/-2,300 m3 s-1 for the entire Himalayas. Given the spatial and temporal distribution of historic GLOFs, contemporary GLOF hazard is highest in the Eastern Himalayas, and lower for regions with rarer GLOF abundance. I also calculated GLOF hazard for some 9,500 overdeepenings, which could expose and fill with water, if all Himalayan glaciers have melted eventually. 
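To make the return-period estimate described above more concrete, the following sketch shows a generic extreme-value calculation of a 100-year peak discharge. It is an illustration only: the input values, variable names, and the choice of a single GEV fit to annual maxima are assumptions for this example and do not reproduce the regional frequency coupling used in the thesis.

```python
# Illustrative only: generic 100-year return level from annual maximum discharges.
import numpy as np
from scipy.stats import genextreme

# hypothetical annual maximum GLOF peak discharges [m^3/s]
annual_max_q = np.array([1200., 3400., 800., 15000., 2100., 9800., 560., 4300., 7200., 1900.])

# fit a generalized extreme value (GEV) distribution to the annual maxima
shape, loc, scale = genextreme.fit(annual_max_q)

# the T-year return level is the (1 - 1/T) quantile of the fitted distribution
T = 100
q_T = genextreme.isf(1.0 / T, shape, loc=loc, scale=scale)
print(f"estimated {T}-year peak discharge: {q_T:.0f} m^3/s")
```

With longer records or simulated discharges, the same fit-and-quantile step gives return levels for any desired return period.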
Assuming that the current GLOF rate remains unchanged, the 100-year GLOF discharge could double (41,700+5,500/-4,700 m3 s-1), while the regional GLOF hazard may increase largest in the Karakoram. To conclude, these three stages-from GLOF detection, to analysing their frequency and estimating regional GLOF hazard-provide a framework for modern GLOF hazard assessment. Given the rapidly growing population, infrastructure, and hydropower projects in the Himalayas, this thesis assists in quantifying the purely climate-driven contribution to hazard and risk from GLOFs.}, language = {en} } @phdthesis{Vazhappilly2008, author = {Vazhappilly, Tijo Joseph}, title = {Vibrationally enhanced associative photodesorption of H2 (D2) from Ru(0001) : quantum and classical approaches}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-19056}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Nowadays, reactions on surfaces are attaining great scientific interest because of their diverse applications. Some well known examples are production of ammonia on metal surfaces for fertilizers and reduction of poisonous gases from automobiles using catalytic converters. More recently, also photoinduced reactions at surfaces, useful, \textit{e.g.}, for photocatalysis, were studied in detail. Often, very short laser pulses are used for this purpose. Some of these reactions are occurring on femtosecond (1 fs=\$10^{-15}\$ s) time scales since the motion of atoms (which leads to bond breaking and new bond formation) belongs to this time range. This thesis investigates the femtosecond laser induced associative photodesorption of hydrogen, H\$_2\$, and deuterium, D\$_2\$, from a ruthenium metal surface. Many interesting features of this reaction were explored by experimentalists: (i) a huge isotope effect in the desorption probability of H\$_2\$ and D\$_2\$, (ii) the desorption yield increases non-linearly with the applied visible (vis) laser fluence, and (iii) unequal energy partitioning to different degrees of freedom. These peculiarities are due to the fact that an ultrashort vis pulse creates hot electrons in the metal. These hot electrons then transfer energy to adsorbate vibrations which leads to desorption. In fact, adsorbate vibrations are strongly coupled to metal electrons, \textit{i.e.}, through non-adiabatic couplings. This means that, surfaces introduce additional channels for energy exchange which makes the control of surface reactions more difficult than the control of reactions in the gas phase. In fact, the quantum yield of surface photochemical reactions is often notoriously small. One of the goals of the present thesis is to suggest, on the basis of theoretical simulations, strategies to control/enhance the photodesorption yield of H\$_2\$ and D\$_2\$ from Ru(0001). For this purpose, we suggest a \textit{hybrid scheme} to control the reaction, where the adsorbate vibrations are initially excited by an infrared (IR) pulse, prior to the vis pulse. Both \textit{adiabatic} and \textit{non-adiabatic} representations for photoinduced desorption problems are employed here. The \textit{adiabatic} representation is realized within the classical picture using Molecular Dynamics (MD) with electronic frictions. In a quantum mechanical description, \textit{non-adiabatic} representations are employed within open-system density matrix theory. 
The time evolution of the desorption process is studied using a two-mode reduced dimensionality model with one vibrational coordinate and one translational coordinate of the adsorbate. The ground and excited electronic state potentials, and dipole function for the IR excitation are taken from first principles. The IR driven vibrational excitation of adsorbate modes with moderate efficiency is achieved by (modified) \$\pi\$-pulses or/and optimal control theory. The fluence dependence of the desorption reaction is computed by including the electronic temperature of the metal calculated from the two-temperature model. Here, our theoretical results show a good agreement with experimental and previous theoretical findings. We then employed the IR+vis strategy in both models. Here, we found that vibrational excitation indeed promotes the desorption of hydrogen and deuterium. To summarize, we conclude that photocontrol of this surface reaction can be achieved by our IR+vis scheme.}, language = {en} } @phdthesis{Varykhalov2005, author = {Varykhalov, Andrei}, title = {Quantum-size effects in the electronic structure of novel self-organized systems with reduced dimensionality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5784}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {The Thesis is focused on the properties of self-organized nanostructures. Atomic and electronic properties of different systems have been investigated using methods of electron diffraction, scanning tunneling microscopy and photoelectron spectroscopy. Implementation of the STM technique (including design, construction, and tuning of the UHV experimental set-up) has been done in the framework of present work. This time-consuming work is reported to greater detail in the experimental part of this Thesis. The scientific part starts from the study of quantum-size effects in the electronic structure of a two-dimensional Ag film on the supporting substrate Ni(111). Distinct quantum well states in the sp-band of Ag were observed in photoelectron spectra. Analysis of thickness- and angle-dependent photoemission supplies novel information on the properties of the interface. For the first time the Ni(111) relative band gap was indirectly probed in the ground-state through the electronic structure of quantum well states in the adlayer. This is particularly important for Ni where valence electrons are strongly correlated. Comparison of the experiment with calculations performed in the formalism of the extended phase accumulation model gives the substrate gap which is fully consistent with the one obtained by ab-initio LDA calculations. It is, however, in controversy to the band structure of Ni measured directly by photoemission. These results lend credit to the simplest view of photoemission from Ni, assigning early observed contradictions between theory and experiments to electron correlation effects in the final state of photoemission. Further, nanosystems of lower dimensionality have been studied. Stepped surfaces W(331) and W(551) were used as one-dimensional model systems and as templates for self-organization of Au nanoclusters. Photon energy dependent photoemission revealed a surface resonance which was never observed before on W(110) which is the base plane of the terrace microsurfaces. The dispersion E(k) of this state measured on stepped W(331) and W(551) with angle-resolved photoelectron spectroscopy is modified by a strong umklapp effect. 
It appears as two parabolas shifted symmetrically relative to the microsurface normal by half of the Brillouin zone of the step superlattice. The reported results are very important for understanding of the electronic properties of low-dimensional nanostructures. It was also established that W(331) and W(551) can serve as templates for self-organization of metallic nanostructures. A combined study of electronic and atomic properties of sub-monolayer amounts of gold deposited on these templates have shown that if the substrate is slightly pre-oxidized and the temperature is elevated, then Au can alloy with the first monolayer of W. As a result, a nanostructure of uniform clusters of a surface alloy is produced all over the steps. Such clusters feature a novel sp-band in the vicinity of the Fermi level, which appears split into constant energy levels due to effects of lateral quantization. The last and main part of this work is devoted to large-scale reconstructions on surfaces and nanostructures self-assembled on top. The two-dimensional surface carbide W(110)/C-R(15x3) has been extensively investigated. Photoemission studies of quantum size effects in the electronic structure of this reconstruction, combined with an investigation of its surface geometry, lead to an advanced structural model of the carbide overlayer. It was discovered that W(110)/C-R(15x3) can control self-organization of adlayers into nanostructures with extremely different electronic and structural properties. Thus, it was established that at elevated temperature the R(15x3) superstructure controls the self-assembly of sub-monolayer amounts of Au into nm-wide nanostripes. Based on the results of core level photoemission, the R(15x3)-induced surface alloying which takes place between Au and W can be claimed as driving force of self-organization. The observed stripes exhibit a characteristic one-dimensional electronic structure with laterally quantized d-bands. Obviously, these are very important for applications, since dimensions of electronic devices have already stepped into the nm-range, where quantum-size phenomena must undoubtedly be considered. Moreover, formation of perfectly uniform molecular clusters of C60 was demonstrated and described in terms of the van der Waals formalism. It is the first experimental observation of two-dimensional fullerene nanoclusters with "magic numbers". Calculations of the cluster potentials using the static approach have revealed characteristic minima in the interaction energy. They are achieved for 4 and 7 molecules per cluster. The obtained "magic numbers" and the corresponding cluster structures are fully consistent with the results of the STM measurements.}, subject = {Nanostruktur}, language = {en} } @phdthesis{vanderVeen2021, author = {van der Veen, Iris}, title = {Defining moisture sources and (palaeo)environmental conditions using isotope geochemistry in the NW Himalaya}, doi = {10.25932/publishup-51439}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-514397}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2021}, abstract = {Anthropogenic climate change alters the hydrological cycle. While certain areas experience more intense precipitation events, others will experience droughts and increased evaporation, affecting water storage in long-term reservoirs, groundwater, snow, and glaciers. High elevation environments are especially vulnerable to climate change, which will impact the water supply for people living downstream. 
The Himalaya has been identified as a particularly vulnerable system, with nearly one billion people depending on the runoff in this system as their main water resource. As such, a more refined understanding of spatial and temporal changes in the water cycle in high altitude systems is essential to assess variations in water budgets under different climate change scenarios. However, anthropogenic influences are not the only factor: changes to the hydrological cycle also occur over geological timescales, connected to the interplay between orogenic uplift and climate change, and their temporal evolution and causes are often difficult to constrain. Using proxies that reflect hydrological changes with an increase in elevation, we can unravel the history of orogenic uplift in mountain ranges and its effect on the climate. In this thesis, stable isotope ratios (expressed as δ2H and δ18O values) of meteoric waters and organic material are combined with remote sensing products as tracers of atmospheric and hydrologic processes to better understand water sources in the Himalayas. In addition, compound-specific stable isotopes of leaf waxes (δ2Hwax) and brGDGTs (branched glycerol dialkyl glycerol tetraethers) in modern soils from four Himalayan river catchments were assessed as recorders of modern climatological conditions and thus as proxies of paleoclimate and (paleo-)elevation. Ultimately, hydrological variations over geological timescales were examined using δ13C and δ18O values of soil carbonates and bulk organic matter originating from sedimentological sections of the pre-Siwalik and Siwalik groups to track the response of vegetation and monsoon intensity and seasonality on a timescale of 20 Myr. I find that Rayleigh distillation, with an ISM moisture source, mainly controls the isotopic composition of surface waters in the studied Himalayan catchments. An increase in d-excess in the spring, verified by remote sensing data products, shows the significant impact of runoff from snow-covered and glaciated areas on the surface water isotopic values in the timeseries. In addition, I show that biomarker records such as brGDGTs and δ2Hwax have the potential to record (paleo-)elevation by yielding a significant correlation with temperature and surface water δ2H values, respectively, as well as with elevation. Comparing the elevations inferred from brGDGTs and δ2Hwax, large differences were found in arid sections of the elevation transects due to an additional effect of evapotranspiration on δ2Hwax. A combined study of these proxies can improve paleoelevation estimates, and recommendations are provided based on the results found in this study. Ultimately, I infer that the expansion of C4 vegetation between 20 and 1 Myr was not solely dependent on atmospheric pCO2, but also on regional changes in aridity and seasonality, as inferred from the stable isotopic signature of the two sedimentary sections in the Himalaya (east and west). This thesis shows that the stable isotope chemistry of surface waters can be applied as a tool to monitor the changing Himalayan water budget under projected increasing temperatures. Ways of minimizing the uncertainties associated with the paleo-elevation reconstructions were assessed through the combination of organic proxies (δ2Hwax and brGDGTs) in Himalayan soil. 
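For readers unfamiliar with the Rayleigh distillation invoked above, the standard closed-system Rayleigh relation (given here in its textbook form as background, not as a formula quoted from the thesis) links the isotope ratio R of the remaining vapour to the fraction f of vapour left and the equilibrium fractionation factor α:

\[ R = R_0\, f^{\,\alpha-1} \qquad\Longleftrightarrow\qquad \delta \approx \delta_0 + \varepsilon \ln f, \quad \varepsilon = (\alpha - 1)\times 10^{3}, \]

so that progressive rain-out along a moisture trajectory (decreasing f), for example with increasing elevation, drives the δ2H and δ18O values of precipitation to progressively lower values.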
Stable isotope ratios in bulk soil and soil carbonates showed the evolution of vegetation influenced by the monsoon during the late Miocene, demonstrating that these proxies can be used to record monsoon intensity, seasonality, and the response of vegetation. In conclusion, the use of organic proxies and stable isotope chemistry in the Himalayas has proven to successfully record changes in climate with increasing elevation. The combination of δ2Hwax and brGDGTs as a new proxy provides a more refined understanding of (paleo-)elevation and the influence of climate.}, language = {en} } @phdthesis{ValverdeSerrano2011, author = {Valverde Serrano, Clara}, title = {Self-assembly behavior in hydrophilic block copolymers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54163}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Block copolymers are receiving increasing attention in the literature. Reports on amphiphilic block copolymers have now established the basis of their self-assembly behavior: aggregate sizes, morphologies and stability can be explained from the absolute and relative block lengths, the nature of the blocks, the architecture and also solvent selectivity. In water, self-assembly of amphiphilic block copolymers is assumed to be driven by the hydrophobic block. The motivation of this thesis is to study how the self-assembly in water of A-b-B type block copolymers (with A hydrophilic) is influenced by varying the hydrophilicity of B from insoluble (hydrophobic) to fully soluble (hydrophilic). Glucose-modified polybutadiene-block-poly(N-isopropylacrylamide) copolymers were prepared and their self-assembly behavior in water studied. The copolymers formed vesicles with an asymmetric membrane with a glycosylated exterior and poly(N-isopropylacrylamide) on the inside. Above the lower critical solution temperature (LCST) of poly(N-isopropylacrylamide), the structure collapsed into micelles with a hydrophobic PNIPAM core and glycosylated exterior. This collapse was found to be reversible. As a result, the structures showed a temperature-dependent interaction with L-lectin proteins and were shown to be able to encapsulate organic molecules. Several families of double hydrophilic block copolymers (DHBC) were prepared. The blocks of these copolymers were biopolymers or polymer chimeras used in aqueous two-phase partition systems. Copolymers based on dextran and poly(ethylene glycol) blocks were able to form aggregates in water. Dex6500-b-PEG5500 copolymer spontaneously formed vesicles with PEG as the "less hydrophilic" barrier and dextran as the solubilizing block. The aggregates were found to be insensitive to the polymer's architecture and concentration (in the dilute range) and only mildly sensitive to temperature. Variation of the block length yielded different morphologies. A longer PEG chain seemed to promote more curved aggregates, following the inverse of the trend usually observed in amphiphilic block copolymers. A shorter dextran promoted vesicular structures, as usually observed for the amphiphilic counterparts. The linking function was shown to have an influence on the morphology but not on the self-assembly capability itself. The vesicles formed by dex6500-b-PEG5500 showed slow kinetics of clustering in the presence of Con A lectin. In addition, both dex6500-b-PEG5500 and its crosslinked derivative were able to encapsulate fluorescent dyes. 
Two additional dextran-based copolymers were synthesized, dextran-b-poly(vinyl alcohol) and dextran-b-poly(vinyl pyrrolidone). The study of their self-assembly allowed to conclude that aqueous two-phase systems (ATPS) is a valid source of inspiration to conceive DHBCs capable of self-assembling. In the second part the principle was extended to polypeptide systems with the synthesis of a poly(N-hydroxyethylglutamine)-block-poly(ethylene glycol) copolymer. The copolymer that had been previously reported to have emulsifying properties was able to form vesicles by direct dissolution of the solid in water. Last, a series of thermoresponsive copolymers were prepared, dextran-b-PNIPAMm. These polymers formed aggregates below the LCST. Their structure could not be unambiguously elucidated but seemed to correspond to vesicles. Above the LCST, the collapse of the PNIPAM chains induced the formation of stable objects of several hundreds of nanometers in radius that evolved with increasing temperature. The cooling of these solution below LCST restored the initial aggregates. This self-assembly of DHBC outside any stimuli of pH, ionic strength, or temperature has only rarely been described in the literature. This work constituted the first formal attempt to frame the phenomenon. Two reasons were accounted for the self-assembly of such systems: incompatibility of the polymer pairs forming the two blocks (enthalpic) and a considerable solubility difference (enthalpic and entropic). The entropic contribution to the positive Gibbs free energy of mixing is believed to arise from the same loss of conformational entropy that is responsible for "the hydrophobic effect" but driven by a competition for water of the two blocks. In that sense this phenomenon should be described as the "hydrophilic effect".}, language = {en} } @phdthesis{Valliappan2018, author = {Valliappan, Senthamizh Pavai}, title = {Solar Activity Reconstruction from Historical Observations of Sunspots}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-413600}, school = {Universit{\"a}t Potsdam}, pages = {115}, year = {2018}, abstract = {The solar activity and its consequences affect space weather and Earth's climate. The solar activity exhibits a cyclic behaviour with a period of about 11 years. The solar cycle properties are governed by the dynamo taking place in the interior of the Sun, and they are distinctive. Extending the knowledge about solar cycle properties into the past is essential for understanding the solar dynamo and forecasting space weather. It can be acquired through the analysis of historical sunspot drawings. Sunspots are the dark areas, which are associated with strong magnetic fields, on the solar surface. Sunspots are the oldest and longest available observed features of solar activity. One of the longest available records of sunspot drawings is the collection by Samuel Heinrich Schwabe during 1825-1867. The sunspot sizes measured from digitized Schwabe drawings are not to scale and need to be converted into physical sunspot areas. We employed a statistical approach assuming that the area distribution of sunspots was the same in the 19th century as it was in the 20th century. Umbral areas for about 130 000 sunspots observed by Schwabe were obtained. The annually averaged sunspot areas correlate reasonably well with the sunspot number. Tilt angles and polarity separations of sunspot groups were calculated assuming them to be bipolar. There is, of course, no polarity information in the observations. 
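As an aside on how such tilt angles are typically computed from measured positions, one common convention takes the angle between the line connecting the two parts of a bipolar group and the local east-west direction. Conventions differ between studies, and the sketch below is an assumed, simplified illustration rather than the procedure used in the thesis.

```python
import numpy as np

def tilt_angle_deg(lat_lead, lon_lead, lat_follow, lon_follow):
    """Tilt of a bipolar sunspot group in degrees, measured against the east-west direction.

    lat/lon are heliographic coordinates in degrees; the cos(latitude) factor converts the
    longitude difference into an east-west distance. Sign conventions (e.g. mirroring for
    the southern hemisphere) vary between studies and are omitted here for simplicity."""
    mean_lat = np.radians(0.5 * (lat_lead + lat_follow))
    d_lat = lat_follow - lat_lead
    d_ew = (lon_lead - lon_follow) * np.cos(mean_lat)
    return np.degrees(np.arctan2(d_lat, d_ew))
```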
We derived an average tilt angle by attempting to exclude unipolar groups with a minimum separation of the two surmised polarities and an outlier rejection method, which follows the evolution of each group and detects the moment, when it turns unipolar as it decays. As a result, the tilt angles, although displaying considerable natural scatter, are on average 5.85° ± 0.25°, with the leading polarity located closer to the equator, in good agreement with tilt angles obtained from 20th century data sets. Sources of uncertainties in the tilt angle determination are discussed and need to be addressed whenever different data sets are combined. Digital images of observations printed in the books Rosa Ursina and Prodromus pro sole mobili by Christoph Scheiner, as well as the drawings from Scheiner's letters to Marcus Welser, are analyzed to obtain information on the positions and sizes of sunspots that appeared before the Maunder minimum. In most cases, the given orientation of the ecliptic is used to set up the heliographic coordinate system for the drawings. Positions and sizes are measured manually displaying the drawings on a computer screen. Very early drawings have no indication of the solar orientation. A rotational matching using common spots of adjacent days is used in some cases, while in other cases, the assumption that images were aligned with a zenith-horizon coordinate system appeared to be the most likely. In total, 8167 sunspots were measured. A distribution of sunspot latitudes versus time (butterfly diagram) is obtained for Scheiner's observations. The observations of 1611 are very inaccurate, but the drawings of 1612 have at least an indication of the solar orientation, while the remaining part of the spot positions from 1618-1631 have good to very good accuracy. We also computed 697 tilt angles of apparent bipolar sunspot groups, which were observed in the period 1618-1631. We find that the average tilt angle of nearly 4° does not significantly differ from the 20th century values. The solar cycle properties seem to be related to the tilt angles of sunspot groups, and it is an important parameter in the surface flux transport models. The tilt angles of bipolar sunspot groups from various historical sets of solar drawings including from Schwabe and Scheiner are analyzed. Data by Scheiner, Hevelius, Staudacher, Zucconi, Schwabe, and Spörer deliver a series of average tilt angles spanning a period of 270 years, in addition to previously found values for 20th-century data obtained by other authors. We find that the average tilt angles before the Maunder minimum were not significantly different from modern values. However, the average tilt angles of a period 50 years after the Maunder minimum, namely for cycles 0 and 1, were much lower and near zero. The typical tilt angles before the Maunder minimum suggest that abnormally low tilt angles were not responsible for driving the solar cycle into a grand minimum. With the Schwabe (1826-1867) and Spörer (1866-1880) sunspot data, the butterfly diagram of sunspot groups extends back till 1826. A recently developed method, which separates the wings of the butterfly diagram based on the long gaps present in sunspot group occurrences at different latitudinal bands, is used to separate the wings of the butterfly diagram. The cycle-to-cycle variation in the start (F), end (L), and highest (H) latitudes of the wings with respect to the strength of the wings are analyzed. 
On the whole, the wings of the stronger cycles tend to start at higher latitudes and have a greater extent. The time spans of the wings and the time difference between the wings in the northern hemisphere display a quasi-periodicity of 5-6 cycles. The average wing overlap is zero in the southern hemisphere, whereas it is 2-3 months in the north. A marginally significant oscillation of about 10 solar cycles is found in the asymmetry of the L latitudes. This latest, extended database of butterfly wings provides new observational constraints to solar dynamo models regarding the spatio-temporal distribution of sunspot occurrences over the solar cycle.}, language = {en} } @phdthesis{Vacogne2016, author = {Vacogne, Charlotte D.}, title = {New synthetic routes towards well-defined polypeptides, morphologies and hydrogels}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396366}, school = {Universit{\"a}t Potsdam}, pages = {xii, 175}, year = {2016}, abstract = {Proteins are natural polypeptides produced by cells; they can be found in both animals and plants, and possess a variety of functions. One of these functions is to provide structural support to the surrounding cells and tissues. For example, collagen (which is found in skin, cartilage, tendons and bones) and keratin (which is found in hair and nails) are structural proteins. When a tissue is damaged, however, the supporting matrix formed by structural proteins cannot always spontaneously regenerate. Tailor-made synthetic polypeptides can be used to help heal and restore tissue formation. Synthetic polypeptides are typically synthesized by the so-called ring opening polymerization (ROP) of α-amino acid N-carboxyanhydrides (NCA). Such synthetic polypeptides are generally non-sequence-controlled and thus less complex than proteins. As such, synthetic polypeptides are rarely as efficient as proteins in their ability to self-assemble and form hierarchical or structural supramolecular assemblies in water, and thus often require rational design. In this doctoral work, two types of amino acids, γ-benzyl-L/D-glutamate (BLG / BDG) and allylglycine (AG), were selected to synthesize a series of (co)polypeptides of different compositions and molar masses. A new and versatile synthetic route to prepare polypeptides was developed, and its mechanism and kinetics were investigated. The polypeptide properties were thoroughly studied and new materials were developed from them. In particular, these polypeptides were able to aggregate (or self-assemble) in solution into microscopic fibres, very similar to those formed by collagen. By doing so, they formed robust physical networks and organogels which could be processed into high water-content, pH-responsive hydrogels. Particles with highly regular and chiral spiral morphologies were also obtained by emulsifying these polypeptides. Such polypeptides and the materials derived from them are, therefore, promising candidates for biomedical applications.}, language = {en} } @phdthesis{Uyaver2004, author = {Uyaver, Sahin}, title = {Simulation of annealed polyelectrolytes in poor solvents}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001488}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Polymers are long chain-like molecules. They consist of many elementary chemical units, the monomers, which are linked together by covalent bonds. Polyelectrolytes are polymers that contain ionizable monomers. 
Owing to their special properties, polyelectrolytes are of great importance in molecular and cell biology and are also relevant for large-scale industrial chemistry. Compared with uncharged polymers, polyelectrolytes are still poorly understood theoretically. This applies in particular to polyelectrolytes in so-called poor solvents. A poor solvent causes an effective attraction between the monomers, so that for polyelectrolytes in poor solvents a competition arises between this attraction and the electrostatic repulsion. In terms of their chemical classification, charged polymers are divided into strong and weak polyelectrolytes. The former dissociate completely, independently of the pH of the solution, and the positions of the charges on the chain are fixed once and for all during the polymer synthesis; in physics one therefore speaks of polyelectrolytes with a quenched charge distribution (quenched polyelectrolytes). In the case of weak polyelectrolytes, the charge density on the chain is not constant but is controlled by the pH of the solution. Owing to recombination and dissociation processes, the charges are mobile along the chain, and in general an inhomogeneous equilibrium distribution is established that is coupled to the conformation of the chain. These polymers are therefore also called polyelectrolytes with an equilibrium charge distribution (annealed polyelectrolytes). Because of this additional degree of freedom in the charge distribution, a number of unusual properties have been predicted theoretically. Using simulations it has been possible to show for the first time that annealed polyelectrolytes in a relatively poor solvent undergo a discontinuous phase transition when a critical pH of the solution is exceeded. At this phase transition the polyelectrolytes pass from a weakly charged, compact globular structure to a highly charged, stretched configuration. From theoretical predictions it is expected that the globular structure becomes unstable in a less poor solvent and a pearl-necklace configuration develops. This prediction could be confirmed for annealed polyelectrolytes for the first time by the simulations performed, and has meanwhile also been supported by first experimental results. Finally, the simulations also show that, at a critical salt concentration in the solution, annealed polyelectrolytes exhibit a sharp transition between a highly charged stretched state and a weakly charged globular state, again in agreement with theoretical expectations.}, language = {en} } @phdthesis{Usadel2004, author = {Usadel, Bj{\"o}rn}, title = {Untersuchungen zur Biosynthese der pflanzlichen Zellwand = [Identification and characterization of genes involved in plant cell wall synthesis]}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-2947}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Even though the structure of the plant cell wall is by and large quite well characterized, its synthesis and regulation remain largely obscure. However, it is accepted that the building blocks of the polysaccharidic part of the plant cell wall are nucleotide sugars. 
Thus, to gain more insight into cell wall biosynthesis, in the first part of this thesis plant genes possibly involved in the nucleotide sugar interconversion pathway were identified using a bioinformatics approach and characterized in plants, mainly in Arabidopsis. For the computational identification, profile hidden Markov models were extracted from the Pfam and TIGR databases. Mainly with these, plant genes were identified using the "hmmer" program. Several gene families were identified and three were further characterized, the UDP-rhamnose synthase (RHM), UDP-glucuronic acid epimerase (GAE) and the myo-inositol oxygenase (MIOX) families. For the three-membered RHM family, relatively ubiquitous expression was shown using various methods. For one of these genes, RHM2, T-DNA lines could be obtained. Moreover, the transcription of the whole family was downregulated using an RNAi approach. In both cases an alteration of typical cell wall polysaccharides and developmental changes could be shown. In the case of the rhm2 mutant these were restricted to the seed or the seed mucilage, whereas the RNAi plants showed profound changes in the whole plant. In the case of the six-membered GAE family, the gene expressed to the highest level (GAE6) was cloned, expressed heterologously and its function was characterized. Thus, it could be shown that GAE6 encodes an enzyme responsible for the conversion of UDP-glucuronic acid to UDP-galacturonic acid. However, a change in the transcript level of various GAE family members achieved by T-DNA insertions (gae2, gae5, gae6), overexpression (GAE6) or an RNAi approach targeting the whole family did not reveal any robust changes in the cell wall. In contrast to the other two families, the MIOX gene family had to be identified using a BLAST-based approach due to the lack of enough suitable candidate genes for building a hidden Markov model. An initial bioinformatic characterization was performed which will lead to further insights into this pathway. In total it was possible to identify the two gene families which are involved in the synthesis of the two pectin backbone sugars galacturonic acid and rhamnose. Moreover, with the identification of the MIOX genes, a gene family important for the supply of nucleotide sugar precursors was identified. In the second part of this thesis, publicly available microarray datasets were analyzed with respect to the co-responsive behavior of transcripts on a global basis using nearly 10,000 genes. The data have been made available to the community in the form of a database providing additional statistical and visualization tools (http://csbdb.mpimp-golm.mpg.de). Using the framework of the database to identify nucleotide sugar converting genes indicated that co-response might be used for the identification of novel genes involved in cell wall synthesis based on already known genes.}, subject = {Zellwand}, language = {en} } @phdthesis{Ullner2004, author = {Ullner, Ekkehard}, title = {Noise-induced phenomena of signal transmission in excitable neural models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001522}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {My dissertation deals with several new noise-induced phenomena in excitable neural models, in particular models with FitzHugh-Nagumo dynamics. I describe the occurrence of vibrational resonance in excitable systems. 
Both in an excitable electronic circuit and in the FitzHugh-Nagumo model I show that an optimal amplitude of a high-frequency external force improves the signal response with respect to a low-frequency signal. In addition, the influence of additive noise on the interplay of stochastic and vibrational resonance is investigated. Furthermore, I study systems that possess both oscillatory and excitable properties and therefore exhibit two internal frequencies. I show that in such systems the effect of stochastic resonance can be enhanced considerably if an additional high-frequency force in resonance with the small subthreshold oscillations is applied. It is noteworthy that this enhancement of stochastic resonance requires a lower noise intensity to reach the optimum than standard stochastic resonance in excitable systems. I also investigate frequency selectivity in the noise-induced processing of subthreshold signals in systems with many noise-supported stochastic attractors. These new attractors, with differing mean periods, also exhibit different phase relations between the individual elements. I show that the signal response of the coupled system under various noise conditions can be considerably improved, or also reduced, by driving individual elements into resonance with these new resonance frequencies, which correspond to suitable phase relations. Furthermore, I demonstrate a noise-induced phase transition from a self-oscillatory to an excitable system. This transition occurs through a noise-induced stabilization of a deterministically unstable fixed point of the local dynamics, while the overall phase space structure of the system is preserved. The joint action of coupling and noise leads to a new type of phase transition and brings about a stabilization of the system. The resulting noise-induced excitable regime shows characteristic properties of classically excitable systems, such as stochastic resonance and wave propagation. This noise-induced phase transition thereby enables the transmission of signals through otherwise globally oscillating systems and the control of signal transmission by varying the noise intensity. In particular, these theoretical results suggest a possible mechanism for suppressing undesired global oscillations in neuronal networks, which are characteristic of abnormal medical conditions such as Parkinson's disease or epilepsy. The effect of noise would then restore the excitability that represents the normal state of the affected neurons.}, language = {en} } @phdthesis{Ullmann2018, author = {Ullmann, Wiebke}, title = {Understanding animal movement behaviour in dynamic agricultural landscapes}, doi = {10.25932/publishup-42715}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427153}, school = {Universit{\"a}t Potsdam}, pages = {vii, 183}, year = {2018}, abstract = {The movement of organisms has formed our planet like few other processes. 
Movements shape populations, communities, entire ecosystems, and guarantee fundamental ecosystem functions and services, like seed dispersal and pollination. Global, regional and local anthropogenic impacts influence animal movements across ecosystems all around the world. In particular, land-use modification, like habitat loss and fragmentation disrupt movements between habitats with profound consequences, from increased disease transmissions to reduced species richness and abundance. However, neither the influence of anthropogenic change on animal movement processes nor the resulting effects on ecosystems are well understood. Therefore, we need a coherent understanding of organismal movement processes and their underlying mechanisms to predict and prevent altered animal movements and their consequences for ecosystem functions. In this thesis I aim at understanding the influence of anthropogenically caused land-use change on animal movement processes and their underlying mechanisms. In particular, I am interested in the synergistic influence of large-scale landscape structure and fine-scale habitat features on basic-level movement behaviours (e.g. the daily amount of time spend running, foraging, and resting) and their emerging higher-level movements (home range formation). Based on my findings, I identify the likely consequences of altered animal movements that lead to the loss of species richness and abundances. The study system of my thesis are hares in agricultural landscapes. European brown hares (Lepus europaeus) are perfectly suited to study animal movements in agricultural landscapes, as hares are hermerophiles and prefer open habitats. They have historically thrived in agricultural landscapes, but their numbers are in decline. Agricultural areas are undergoing strong land-use changes due to increasing food demand and fast developing agricultural technologies. They are already the largest land-use class, covering 38\% of the world's terrestrial surface. To consider the relevance of a given landscape structure for animal movement behaviour I selected two differently structured agricultural landscapes - a simple landscape in Northern Germany with large fields and few landscape elements (e.g. hedges and tree stands), and a complex landscape in Southern Germany with small fields and many landscape elements. I applied GPS devices (hourly fixes) with internal high-resolution accelerometers (4 min samples) to track hares, receiving an almost continuous observation of the animals' behaviours via acceleration analyses. I used the spatial and behavioural information in combination with remote sensing data (normalized difference vegetation index, or NDVI, a proxy for resource availability), generating an almost complete idea of what the animal was doing when, why and where. Apart from landscape structure (represented by the two differently structured study areas), I specifically tested whether the following fine-scale habitat features influence animal movements: resource, agricultural management events, habitat diversity, and habitat structure. My results show that, irrespective of the movement process or mechanism and the type of fine-scale habitat features, landscape structure was the overarching variable influencing hare movement behaviour. High resource variability forces hares to enlarge their home ranges, but only in the simple and not in the complex landscape. 
Agricultural management events result in home range shifts in both landscapes, but force hares to increase their home ranges only in the simple landscape. Also the preference of habitat patches with low vegetation and the avoidance of high vegetation, was stronger in the simple landscape. High and dense crop fields restricted hare movements temporarily to very local and small habitat patch remnants. Such insuperable barriers can separate habitat patches that were previously connected by mobile links. Hence, the transport of nutrients and genetic material is temporarily disrupted. This mechanism is also working on a global scale, as human induced changes from habitat loss and fragmentation to expanding monocultures cause a reduction in animal movements worldwide. The mechanisms behind those findings show that higher-level movements, like increasing home ranges, emerge from underlying basic-level movements, like the behavioural modes. An increasing landscape simplicity first acts on the behavioural modes, i.e. hares run and forage more, but have less time to rest. Hence, the emergence of increased home range sizes in simple landscapes is based on an increased proportion of time running and foraging, largely due to longer travelling times between distant habitats and scarce resource items in the landscape. This relationship was especially strong during the reproductive phase, demonstrating the importance of high-quality habitat for reproduction and the need to keep up self-maintenance first, in low quality areas. These changes in movement behaviour may release a cascade of processes that start with more time being allocated to running and foraging, resulting into an increased energy expenditure and may lead to a decline in individual fitness. A decrease in individual fitness and reproductive output will ultimately affect population viability leading to local extinctions. In conclusion, I show that landscape structure has one of the most important effects on hare movement behaviour. Synergistic effects of landscape structure, and fine-scale habitat features, first affect and modify basic-level movement behaviours, that can scales up to altered higher-level movements and may even lead to the decline of species richness and abundances, and the disruption of ecosystem functions. Understanding the connection between movement mechanisms and processes can help to predict and prevent anthropogenically induced changes in movement behaviour. With regard to the paramount importance of landscape structure, I strongly recommend to decrease the size of agricultural fields and increase crop diversity. On the small-scale, conservation policies should assure the year round provision of areas with low vegetation height and high quality forage. This could be done by generating wildflower strips and additional (semi-) natural habitat patches. 
This will not only help to increase the populations of European brown hares and other farmland species, but also ensure and protects the continuity of mobile links and their intrinsic value for sustaining important ecosystem functions and services.}, language = {en} } @phdthesis{Ulaganathan2016, author = {Ulaganathan, Vamseekrishna}, title = {Molecular fundamentals of foam fractionation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94263}, school = {Universit{\"a}t Potsdam}, pages = {ix, 136}, year = {2016}, abstract = {Foam fractionation of surfactant and protein solutions is a process dedicated to separate surface active molecules from each other due to their differences in surface activities. The process is based on forming bubbles in a certain mixed solution followed by detachment and rising of bubbles through a certain volume of this solution, and consequently on the formation of a foam layer on top of the solution column. Therefore, systematic analysis of this whole process comprises of at first investigations dedicated to the formation and growth of single bubbles in solutions, which is equivalent to the main principles of the well-known bubble pressure tensiometry. The second stage of the fractionation process includes the detachment of a single bubble from a pore or capillary tip and its rising in a respective aqueous solution. The third and final stage of the process is the formation and stabilization of the foam created by these bubbles, which contains the adsorption layers formed at the growing bubble surface, carried up and gets modified during the bubble rising and finally ends up as part of the foam layer. Bubble pressure tensiometry and bubble profile analysis tensiometry experiments were performed with protein solutions at different bulk concentrations, solution pH and ionic strength in order to describe the process of accumulation of protein and surfactant molecules at the bubble surface. The results obtained from the two complementary methods allow understanding the mechanism of adsorption, which is mainly governed by the diffusional transport of the adsorbing protein molecules to the bubble surface. This mechanism is the same as generally discussed for surfactant molecules. However, interesting peculiarities have been observed for protein adsorption kinetics at sufficiently short adsorption times. First of all, at short adsorption times the surface tension remains constant for a while before it decreases as expected due to the adsorption of proteins at the surface. This time interval is called induction time and it becomes shorter with increasing protein bulk concentration. Moreover, under special conditions, the surface tension does not stay constant but even increases over a certain period of time. This so-called negative surface pressure was observed for BCS and BLG and discussed for the first time in terms of changes in the surface conformation of the adsorbing protein molecules. Usually, a negative surface pressure would correspond to a negative adsorption, which is of course impossible for the studied protein solutions. The phenomenon, which amounts to some mN/m, was rather explained by simultaneous changes in the molar area required by the adsorbed proteins and the non-ideality of entropy of the interfacial layer. It is a transient phenomenon and exists only under dynamic conditions. The experiments dedicated to the local velocity of rising air bubbles in solutions were performed in a broad range of BLG concentration, pH and ionic strength. 
Additionally, rising bubble experiments were done for surfactant solutions in order to validate the functionality of the instrument. It turns out that the velocity of a rising bubble is much more sensitive to adsorbing molecules than classical dynamic surface tension measurements. At very low BLG or surfactant concentrations, for example, the measured local velocity profile of an air bubble is changing dramatically in time scales of seconds while dynamic surface tensions still do not show any measurable changes at this time scale. The solution's pH and ionic strength are important parameters that govern the measured rising velocity for protein solutions. A general theoretical description of rising bubbles in surfactant and protein solutions is not available at present due to the complex situation of the adsorption process at a bubble surface in a liquid flow field with simultaneous Marangoni effects. However, instead of modelling the complete velocity profile, new theoretical work has been started to evaluate the maximum values in the profile as characteristic parameter for dynamic adsorption layers at the bubble surface more quantitatively. The studies with protein-surfactant mixtures demonstrate in an impressive way that the complexes formed by the two compounds change the surface activity as compared to the original native protein molecules and therefore lead to a completely different retardation behavior of rising bubbles. Changes in the velocity profile can be interpreted qualitatively in terms of increased or decreased surface activity of the formed protein-surfactant complexes. It was also observed that the pH and ionic strength of a protein solution have strong effects on the surface activity of the protein molecules, which however, could be different on the rising bubble velocity and the equilibrium adsorption isotherms. These differences are not fully understood yet but give rise to discussions about the structure of protein adsorption layer under dynamic conditions or in the equilibrium state. The third main stage of the discussed process of fractionation is the formation and characterization of protein foams from BLG solutions at different pH and ionic strength. Of course a minimum BLG concentration is required to form foams. This minimum protein concentration is a function again of solution pH and ionic strength, i.e. of the surface activity of the protein molecules. Although at the isoelectric point, at about pH 5 for BLG, the hydrophobicity and hence the surface activity should be the highest, the concentration and ionic strength effects on the rising velocity profile as well as on the foamability and foam stability do not show a maximum. This is another remarkable argument for the fact that the interfacial structure and behavior of BLG layers under dynamic conditions and at equilibrium are rather different. These differences are probably caused by the time required for BLG molecules to adapt respective conformations once they are adsorbed at the surface. All bubble studies described in this work refer to stages of the foam fractionation process. Experiments with different systems, mainly surfactant and protein solutions, were performed in order to form foams and finally recover a solution representing the foamed material. As foam consists to a large extent of foam lamella - two adsorption layers with a liquid core - the concentration in a foamate taken from foaming experiments should be enriched in the stabilizing molecules. 
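For orientation, the retardation effect on rising bubbles discussed above can be bracketed by two textbook limits for a small spherical bubble in creeping flow; these limits are general background and not results of the thesis. A surfactant-free bubble with a fully mobile surface rises with the Hadamard-Rybczynski velocity, while a bubble whose surface is fully immobilized by adsorbed species behaves like a rigid sphere and follows the Stokes result,

\[ U_{\text{clean}} = \frac{g\,\Delta\rho\, a^{2}}{3\,\mu}, \qquad U_{\text{rigid}} = \frac{2\,g\,\Delta\rho\, a^{2}}{9\,\mu}, \]

with bubble radius a, density difference Δρ and liquid viscosity μ. Adsorption of surfactants or proteins therefore slows a small bubble by up to a factor of 3/2, which is why the local rise velocity is such a sensitive probe of dynamic adsorption layers; real millimetre-sized bubbles lie beyond the creeping-flow regime, so these formulas indicate only the trend.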
For determining the concentration of the foamate, again the very sensitive bubble rising velocity profile method was applied, which works for any type of surface active materials. This also includes technical surfactants or protein isolates for which an accurate composition is unknown.}, language = {en} } @phdthesis{Uhlemann2013, author = {Uhlemann, Steffi}, title = {Understanding trans-basin floods in Germany : data, information and knowledge}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68868}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Large Central European flood events of the past have demonstrated that flooding can affect several river basins at the same time leading to catastrophic economic and humanitarian losses that can stretch emergency resources beyond planned levels of service. For Germany, the spatial coherence of flooding, the contributing processes and the role of trans-basin floods for a national risk assessment is largely unknown and analysis is limited by a lack of systematic data, information and knowledge on past events. This study investigates the frequency and intensity of trans-basin flood events in Germany. It evaluates the data and information basis on which knowledge about trans-basin floods can be generated in order to improve any future flood risk assessment. In particular, the study assesses whether flood documentations and related reports can provide a valuable data source for understanding trans-basin floods. An adaptive algorithm was developed that systematically captures trans-basin floods using series of mean daily discharge at a large number of sites of even time series length (1952-2002). It identifies the simultaneous occurrence of flood peaks based on the exceedance of an initial threshold of a 10 year flood at one location and consecutively pools all causally related, spatially and temporally lagged peak recordings at the other locations. A weighted cumulative index was developed that accounts for the spatial extent and the individual flood magnitudes within an event and allows quantifying the overall event severity. The parameters of the method were tested in a sensitivity analysis. An intensive study on sources and ways of information dissemination of flood-relevant publications in Germany was conducted. Based on the method of systematic reviews a strategic search approach was developed to identify relevant documentations for each of the 40 strongest trans-basin flood events. A novel framework for assessing the quality of event specific flood reports from a user's perspective was developed and validated by independent peers. The framework was designed to be generally applicable for any natural hazard type and assesses the quality of a document addressing accessibility as well as representational, contextual, and intrinsic dimensions of quality. The analysis of time-series of mean daily discharge resulted in the identification of 80 trans-basin flood events within the period 1952-2002 in Germany. The set is dominated by events that were recorded in the hydrological winter (64\%); 36\% occurred during the summer months. The occurrence of floods is characterised by a distinct clustering in time. Dividing the study period into two sub-periods, we find an increase in the percentage of winter events from 58\% in the first to 70.5\% in the second sub-period. Accordingly, we find a significant increase in the number of extreme trans-basin floods in the second sub-period. 
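The peak-pooling and severity-index logic described above can be sketched in a few lines of Python. The sketch is purely illustrative: the function names, the toy gauge data and the simple lag-based pooling rule are assumptions, not code or data from the thesis, which additionally checks causal relatedness and spatial lags between sites.

    from datetime import date

    def pool_trans_basin_events(peaks, max_lag_days=3):
        """Group flood peaks recorded at different gauges into trans-basin events.
        peaks: list of (gauge_id, peak_date, magnitude_ratio) tuples, where
        magnitude_ratio is the peak discharge divided by the gauge's 10-year
        flood, so only peaks with ratio >= 1.0 are passed in."""
        events = []
        for gauge, day, ratio in sorted(peaks, key=lambda p: p[1]):
            for ev in events:
                # pool peaks that lag the event by at most max_lag_days
                if abs((day - ev["end"]).days) <= max_lag_days and gauge not in ev["gauges"]:
                    ev["gauges"][gauge] = ratio
                    ev["end"] = max(ev["end"], day)
                    break
            else:
                events.append({"start": day, "end": day, "gauges": {gauge: ratio}})
        return events

    def severity_index(event, n_gauges_total):
        """Weighted cumulative index: spatial extent times summed peak magnitudes."""
        return len(event["gauges"]) / n_gauges_total * sum(event["gauges"].values())

    # toy example: three gauges, one isolated winter peak and one August event
    peaks = [("A", date(2002, 1, 3), 1.2), ("A", date(2002, 8, 12), 1.4),
             ("B", date(2002, 8, 13), 1.1), ("C", date(2002, 8, 15), 2.0)]
    for ev in pool_trans_basin_events(peaks):
        print(ev["start"], ev["end"], round(severity_index(ev, n_gauges_total=3), 2))

The for/else pattern starts a new event whenever a peak cannot be attached to any existing event within the allowed lag.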
A large body of 186 flood-relevant documentations was identified. For 87.5\% of the 40 strongest trans-basin floods in Germany at least one report has been found and for the most severe floods a substantial amount of documentation could be obtained. 80\% of the material can be considered grey literature (i.e. literature not controlled by commercial publishers). The results of the quality assessment show that the majority of flood event specific reports are of a good quality, i.e. they are well enough drafted, largely accurate and objective, and contain a substantial amount of information on the sources, pathways and receptors/consequences of the floods. The inclusion of this information in the process of knowledge building for flood risk assessment is recommended. Both the results and the data produced in this study are openly accessible and can be used for further research. The results of this study contribute to an improved spatial risk assessment in Germany. The identified set of trans-basin floods provides the basis for an assessment of the chance that flooding occurs simultaneously at a number of sites. The information obtained from flood event documentation can usefully supplement the analysis of the processes that govern flood risk.}, language = {en} } @phdthesis{Toenjes2007, author = {T{\"o}njes, Ralf}, title = {Pattern formation through synchronization in systems of nonidentical autonomous oscillators}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15973}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This work is concerned with the spatio-temporal structures that emerge when non-identical, diffusively coupled oscillators synchronize. It contains analytical results and their confirmation through extensive computer simulations. We use the Kuramoto model which reduces general oscillatory systems to phase dynamics. The symmetry of the coupling plays an important role for the formation of patterns. We have studied the ordering influence of an asymmetry (non-isochronicity) in the phase coupling function on the phase profile in synchronization and the intricate interplay between this asymmetry and the frequency heterogeneity in the system. The thesis is divided into three main parts. Chapters 2 and 3 introduce the basic model of Kuramoto and conditions for stable synchronization. In Chapter 4 we characterize the phase profiles in synchronization for various special cases and in an exponential approximation of the phase coupling function, which allows for an analytical treatment. Finally, in the third part (Chapter 5) we study the influence of non-isochronicity on the synchronization frequency in continuous, reaction diffusion systems and discrete networks of oscillators.}, language = {en} } @phdthesis{TzonevaVelinova2003, author = {Tzoneva-Velinova, Rumiana}, title = {The wettability of biomaterials determines the protein adsorption and the cellular responses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001103}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {During the past several decades polymer materials have become widely used as components of medical devices and implants such as hemodialysers, bioartificial organs as well as vascular and recombinant surgery. Most of the devices cannot avoid blood contact during use. When the polymer materials come in contact with blood they can cause different undesired host responses like thrombosis, inflammatory reactions and infections. 
Thus the materials must be hemocompatible in order to minimize these undesired body responses. The earliest and one of the main problems in the use of blood-contacting biomaterials is the surface induced thrombosis. The sequence of the thrombus formation on the artificial surfaces has been well established. The first event which occurs after exposure of biomaterials to blood is the adsorption of blood proteins. Surface physicochemical properties of the materials such as wettability greatly influence the amount and conformational changes of adsorbed proteins. In turn the type, amount and conformational state of the adsorbed protein layer determine whether platelets will adhere and become activated or not on the artificial surface and thus complete the thrombus formation. The adsorption of fibrinogen (FNG), which is present in plasma, has been shown to be closely related to surface induced thrombosis by participating in all processes of the thrombus formation such as fibrin formation, platelet adhesion and aggregation. Therefore, studying FNG adsorption to artificial surfaces could contribute to a better understanding of the mechanisms of platelet adhesion and activation and thus to controlling the surface induced thrombosis. Endothelization of the polymer surfaces is one of the strategies for improving the materials' hemocompatibility, which is believed to be the most ideal solution for making truly blood-compatible materials. Since at physiological conditions proteins such as FNG and fibronectin (FN) are the usual extracellular matrix (ECM) for endothelial cell (EC) adhesion, precoating of the materials with these proteins has been shown to improve EC adhesion and growth in vitro. ECM proteins play an essential role not only as a structural support for cell adhesion and spreading, but also as an important factor in transmitting signals for different cell functions. The ability of cells to remodel plasma proteins such as FNG and FN into matrix-like structures together with the classical cell parameters such as actin cytoskeleton and focal adhesion formation could be used as criteria for proper cell functioning. The establishment and maintenance of a delicate balance between cell-cell and cell-substrate contacts is another important factor for better EC colonization of the implants. The functionality of the newly established endothelium in producing antithrombotic substances should always be considered when EC seeding is used for improving the hemocompatibility of the polymer materials. Controlling the polymer surface properties such as surface wettability represents a versatile approach to manipulate the above cellular responses and therefore can be used in biomaterial and tissue engineering applications for producing better hemocompatible materials.}, language = {en} } @phdthesis{Tyree2017, author = {Tyree, Susan}, title = {Arc expression in the parabrachial nucleus following taste stimulation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396600}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 109}, year = {2017}, abstract = {Researchers have made many approaches to study the complexities of the mammalian taste system; however, molecular mechanisms of taste processing in the early structures of the central taste pathway remain unclear. 
More recently the Arc catFISH (cellular compartment analysis of temporal activity by fluorescent in situ hybridisation) method has been used in our lab to study neural activation following taste stimulation in the first central structure in the taste pathway, the nucleus of the solitary tract. This method uses the immediate early gene Arc as a neural activity marker to identify taste-responsive neurons. Arc plays a critical role in memory formation and is necessary for conditioned taste aversion memory formation. In the nucleus of the solitary tract, only bitter taste stimulation resulted in increased Arc expression; this did not occur following stimulation with tastants of any other taste quality. The primary target for gustatory NTS neurons is the parabrachial nucleus (PbN) and, like Arc, the PbN plays an important role in conditioned taste aversion learning. The aim of this thesis is to investigate Arc expression in the PbN following taste stimulation to elucidate the molecular identity and function of Arc expressing, taste-responsive neurons. Naïve and taste-conditioned mice were stimulated with tastants from each of the five basic taste qualities (sweet, salty, sour, umami, and bitter), with additional bitter compounds included for comparison. The expression patterns of Arc and marker genes were analysed using in situ hybridisation (ISH). The Arc catFISH method was used to observe taste-responsive neurons following each taste stimulation. A double fluorescent in situ hybridisation protocol was then established to investigate possible neuropeptide genes involved in neural responses to taste stimulation. The results showed that bitter taste stimulation induces increased Arc expression in the PbN in naïve mice. This was not true for other taste qualities. In mice conditioned to find an umami tastant aversive, subsequent umami taste stimulation resulted in an increase in Arc expression similar to that seen in bitter-stimulated mice. Taste-responsive Arc expression was denser in the lateral PbN than the medial PbN. In mice that received two temporally separated taste stimulations, each stimulation time-point showed a distinct population of Arc-expressing neurons, with only a small population (10 - 18 \%) of neurons responding to both stimulations. This suggests that either each stimulation event activates a different population of neurons, or that Arc is marking something other than simple cellular activation, such as long-term cellular changes that do not occur twice within a 25 minute time frame. Investigation using the newly established double-FISH protocol revealed that, of the bitter-responsive Arc expressing neuron population: 16 \% co-expressed calcitonin RNA; 17 \% co-expressed glucagon-like peptide 1 receptor RNA; 17 \% co-expressed hypocretin receptor 1 RNA; 9 \% co-expressed gastrin-releasing peptide RNA; and 20 \% co-expressed neurotensin RNA. This co-expression with multiple different neuropeptides suggests that bitter-activated Arc expression mediates multiple neural responses to the taste event, such as taste aversion learning, suppression of food intake, increased heart rate, and involves multiple brain structures such as the lateral hypothalamus, amygdala, bed nucleus of the stria terminalis, and the thalamus. 
The increase in Arc-expression suggests that bitter taste stimulation, and umami taste stimulation in umami-averse animals, may result in an enhanced state of Arc-dependent synaptic plasticity in the PbN, allowing animals to form taste-relevant memories to these aversive compounds more readily. The results investigating neuropeptide RNA co-expression suggest the amygdala, bed nucleus of the stria terminalis, and thalamus as possible targets for bitter-responsive Arc-expressing PbN neurons.}, language = {en} } @phdthesis{Tunn2020, author = {Tunn, Isabell}, title = {From single molecules to bulk materials: tuning the viscoelastic properties of coiled coil cross-linked hydrogels}, doi = {10.25932/publishup-47595}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-475955}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 140}, year = {2020}, abstract = {The development of bioinspired self-assembling materials, such as hydrogels, with promising applications in cell culture, tissue engineering and drug delivery is a current focus in material science. Biogenic or bioinspired proteins and peptides are frequently used as versatile building blocks for extracellular matrix (ECM) mimicking hydrogels. However, precisely controlling and reversibly tuning the properties of these building blocks and the resulting hydrogels remains challenging. Precise control over the viscoelastic properties and self-healing abilities of hydrogels are key factors for developing intelligent materials to investigate cell matrix interactions. Thus, there is a need to develop building blocks that are self-healing, tunable and self-reporting. This thesis aims at the development of α-helical peptide building blocks, called coiled coils (CCs), which integrate these desired properties. Self-healing is a direct result of the fast self-assembly of these building blocks when used as material cross-links. Tunability is realized by means of reversible histidine (His)-metal coordination bonds. Lastly, by implementing a fluorescent readout that indicates the CC assembly state, self-reporting hydrogels are obtained. Coiled coils are abundant protein folding motifs in Nature, which often have mechanical function, such as in myosin or fibrin. Coiled coils are superhelices made up of two or more α-helices wound around each other. The assembly of CCs is based on their repetitive sequence of seven amino acids, so-called heptads (abcdefg). Hydrophobic amino acids in the a and d position of each heptad form the core of the CC, while charged amino acids in the e and g position form ionic interactions. The solvent-exposed positions b, c and f are excellent targets for modifications since they are more variable. His-metal coordination bonds are strong, yet reversible interactions formed between the amino acid histidine and transition metal ions (e.g. Ni2+, Cu2+ or Zn2+). His-metal coordination bonds essentially contribute to the mechanical stability of various high-performance proteinaceous materials, such as spider fangs, Nereis worm jaws and mussel byssal threads. Therefore, I bioengineered reversible His-metal coordination sites into a well-characterized heterodimeric CC that served as a tunable material cross-link. Specifically, I took two distinct approaches facilitating either intramolecular (Chapter 4.2) and/or intermolecular (Chapter 4.3) His-metal coordination. Previous research suggested that force-induced CC unfolding in shear geometry starts from the points of force application. 
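The heptad register described above can be made concrete with a small sketch. The repeat sequence and the helper name heptad_register are illustrative assumptions, not the CC actually characterized in the thesis.

    HYDROPHOBIC = set("AVLIMFWY")

    def heptad_register(sequence, start="a"):
        """Assign heptad positions (a-g) to a coiled-coil sequence and sort the
        residues into core (a/d), ionic (e/g) and solvent-exposed (b/c/f) sites."""
        positions = "abcdefg"
        offset = positions.index(start)
        assignment = [(res, positions[(offset + i) % 7]) for i, res in enumerate(sequence)]
        core = [res for res, pos in assignment if pos in "ad"]
        ionic = [res for res, pos in assignment if pos in "eg"]
        exposed = [res for res, pos in assignment if pos in "bcf"]
        return core, ionic, exposed

    # illustrative heptad repeat, not the sequence used in the thesis
    core, ionic, exposed = heptad_register("LEALEKK" * 3)
    print("core a/d sites:", core)          # ideally hydrophobic residues
    print("ionic e/g sites:", ionic)        # ideally charged residues
    print("exposed b/c/f sites:", exposed)  # variable; targets for His insertion
    print("core hydrophobic fraction:", sum(r in HYDROPHOBIC for r in core) / len(core))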
In order to tune the stability of a heterodimeric CC in shear geometry, I inserted His in the b and f position at the termini of force application (Chapter 4.2). The spacing of His is such that intra-CC His-metal coordination bonds can form to bridge one helical turn within the same helix, but also inter-CC coordination bonds are not generally excluded. Starting with Ni2+ ions, Raman spectroscopy showed that the CC maintained its helical structure and the His residues were able to coordinate Ni2+. Circular dichroism (CD) spectroscopy revealed that the melting temperature of the CC increased by 4 °C in the presence of Ni2+. Using atomic force microscope (AFM)-based single molecule force spectroscopy, the energy landscape parameters of the CC were characterized in the absence and the presence of Ni2+. His-Ni2+ coordination increased the rupture force by ~10 pN, accompanied by a decrease of the dissociation rate constant. To test if this stabilizing effect can be transferred from the single molecule level to the bulk viscoelastic material properties, the CC building block was used as a non-covalent cross-link for star-shaped poly(ethylene glycol) (star-PEG) hydrogels. Shear rheology revealed a 3-fold higher relaxation time in His-Ni2+ coordinating hydrogels compared to the hydrogel without metal ions. This stabilizing effect was fully reversible when using an excess of the metal chelator ethylenediaminetetraacetate (EDTA). The hydrogel properties were further investigated using different metal ions, i.e. Cu2+, Co2+ and Zn2+. Overall, these results suggest that Ni2+, Cu2+ and Co2+ primarily form intra-CC coordination bonds while Zn2+ also participates in inter-CC coordination bonds. This may be a direct result of its different coordination geometry. Intermolecular His-metal coordination bonds in the terminal regions of the protein building blocks of mussel byssal threads are primarily formed by Zn2+ and were found to be intimately linked to higher-order assembly and self-healing of the thread. In the above example, the contribution of intra-CC and inter-CC His-Zn2+ cannot be disentangled. In Chapter 4.3, I redesigned the CC to prohibit the formation of intra-CC His-Zn2+ coordination bonds, focusing only on inter-CC interactions. Specifically, I inserted His in the solvent-exposed f positions of the CC to focus on the effect of metal-induced higher-order assembly of CC cross-links. Raman and CD spectroscopy revealed that this CC building block forms α-helical Zn2+ cross-linked aggregates. Using this CC as a cross-link for star-PEG hydrogels, I showed that the material properties can be switched from viscoelastic in the absence of Zn2+ to elastic-like in the presence of Zn2+. Moreover, the relaxation time of the hydrogel was tunable over three orders of magnitude when using different Zn2+:His ratios. This tunability is attributed to a progressive transformation of single CC cross-links into His-Zn2+ cross-linked aggregates, with inter-CC His-Zn2+ coordination bonds serving as an additional, cross-linking mode. Rheological characterization of the hydrogels with inter-CC His-Zn2+ coordination raised the question whether the His-Zn2+ coordination bonds between CCs or also the CCs themselves rupture when shear strain is applied. In general, the amount of CC cross-links initially formed in the hydrogel as well as the amount of CC cross-links breaking under force remains to be elucidated. 
In order to more deeply probe these questions and monitor the state of the CC cross-links when force is applied, a fluorescent reporter system based on F{\"o}rster resonance energy transfer (FRET) was introduced into the CC (Chapter 4.4). For this purpose, the donor-acceptor pair carboxyfluorescein and tetramethylrhodamine was used. The resulting self-reporting CC showed a FRET efficiency of 77 \% in solution. Using this fluorescently labeled CC as a self-reporting, reversible cross-link in an otherwise covalently cross-linked star-PEG hydrogel enabled the detection of the FRET efficiency change under compression force. This proof-of-principle result sets the stage for implementing the fluorescently labeled CCs as molecular force sensors in non-covalently cross-linked hydrogels. In summary, this thesis highlights that rationally designed CCs are excellent reversibly tunable, self-healing and self-reporting hydrogel cross-links with high application potential in bioengineering and biomedicine. For the first time, I demonstrated that His-metal coordination-based stabilization can be transferred from the single CC level to the bulk material with clear viscoelastic consequences. Insertion of His in specific sequence positions was used to implement a second non-covalent cross-linking mode via intermolecular His-metal coordination. This His-metal binding induced aggregation of the CCs enabled for reversibly tuning the hydrogel properties from viscoelastic to elastic-like. As a proof-of-principle to establish self-reporting CCs as material cross-links, I labeled a CC with a FRET pair. The fluorescently labelled CC acts as a molecular force sensor and first preliminary results suggest that the CC enables the detection of hydrogel cross-link failure under compression force. In the future, fluorescently labeled CC force sensors will likely not only be used as intelligent cross-links to study the failure of hydrogels but also to investigate cell-matrix interactions in 3D down to the single molecule level.}, language = {en} } @phdthesis{Tukhlina2008, author = {Tukhlina, Natalia}, title = {Feedback control of complex oscillatory systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-18546}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {In the present dissertation paper an approach which ensures an efficient control of such diverse systems as noisy or chaotic oscillators and neural ensembles is developed. This approach is implemented by a simple linear feedback loop. The dissertation paper consists of two main parts. One part of the work is dedicated to the application of the suggested technique to a population of neurons with a goal to suppress their synchronous collective dynamics. The other part is aimed at investigating linear feedback control of coherence of a noisy or chaotic self-sustained oscillator. First we start with a problem of suppressing synchronization in a large population of interacting neurons. The importance of this task is based on the hypothesis that emergence of pathological brain activity in the case of Parkinson's disease and other neurological disorders is caused by synchrony of many thousands of neurons. The established therapy for the patients with such disorders is a permanent high-frequency electrical stimulation via the depth microelectrodes, called Deep Brain Stimulation (DBS). In spite of efficiency of such stimulation, it has several side effects and mechanisms underlying DBS remain unclear. 
In the present work an efficient and simple control technique is suggested. It is designed to ensure suppression of synchrony in a neural ensemble by a minimized stimulation that vanishes as soon as the tremor is suppressed. This vanishing-stimulation technique would be a useful tool of experimental neuroscience; on the other hand, control of collective dynamics in a large population of units represents an interesting physical problem. The main idea of suggested approach is related to the classical problem of oscillation theory, namely the interaction between a self-sustained (active) oscillator and a passive load (resonator). It is known that under certain conditions the passive oscillator can suppress the oscillations of an active one. In this thesis a much more complicated case of active medium, which itself consists of thousands of oscillators is considered. Coupling this medium to a specially designed passive oscillator, one can control the collective motion of the ensemble, specifically can enhance or suppress it. Having in mind a possible application in neuroscience, the problem of suppression is concentrated upon. Second, the efficiency of suggested suppression scheme is illustrated by considering more complex case, i.e. when the population of neurons generating the undesired rhythm consists of two non-overlapping subpopulations: the first one is affected by the stimulation, while the collective activity is registered from the second one. Generally speaking, the second population can be by itself both active and passive; both cases are considered here. The possible applications of suggested technique are discussed. Third, the influence of the external linear feedback on coherence of a noisy or chaotic self-sustained oscillator is considered. Coherence is one of the main properties of self-oscillating systems and plays a key role in the construction of clocks, electronic generators, lasers, etc. The coherence of a noisy limit cycle oscillator in the context of phase dynamics is evaluated by the phase diffusion constant, which is in its turn proportional to the width of the spectral peak of oscillations. Many chaotic oscillators can be described within the framework of phase dynamics, and, therefore, their coherence can be also quantified by the way of the phase diffusion constant. The analytical theory for a general linear feedback, considering noisy systems in the linear and Gaussian approximation is developed and validated by numerical results.}, language = {en} } @phdthesis{Tschoepe2007, author = {Tsch{\"o}pe, Okka}, title = {Managing open habitats for species conservation : the role of wild ungulate grazing, small-scale disturbances, and scale}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13218}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {During the last decades, the global change of the environment has caused a dramatic loss of habitats and species. In Central Europe, open habitats are particularly affected. The main objective of this thesis was to experimentally test the suitability of wild megaherbivore grazing as a conservation tool to manage open habitats. We studied the effect of wild ungulates in a 160 ha game preserve in NE Germany in three successional stages (i) Corynephorus canescens-dominated grassland, (ii) ruderal tall forb vegetation dominated by Tanacetum vulgare and (iii) Pinus sylvestris-pioneer forest over three years. 
Our results demonstrate that wild megaherbivores considerably affected species composition and delayed successional pathways in open habitats. Grazing effects differed considerably between successional stages: species richness was higher in grazed ruderal and pioneer forest plots, but not in the Corynephorus sites. Species composition changed significantly in the Corynephorus and ruderal sites. Grazed ruderal sites had turned into sites with very short vegetation dominated by Agrostis spp. and the moss Brachythecium albicans, most species did not flower. Woody plant cover was significantly affected only in the pioneer forest sites. Young pine trees were severely damaged and tree height was considerably reduced, leading to a "Pinus-macchie"-appearance. Ecological patterns and processes are known to vary with spatial scale. Since grazing by megaherbivores has a strong spatial component, the scale of monitoring success of grazing may largely differ among and within different systems. Thus, the second aim of this thesis was to test whether grazing effects are consistent over different spatial scales, and to give recommendations for appropriate monitoring scales. For this purpose, we studied grazing effects on plant community structure using multi-scale plots that included three nested spatial scales (0.25 m2, 4 m2, and 40 m2). Over all vegetation types, the scale of observation directly affected grazing effects on woody plant cover and on floristic similarity, but not on the proportion of open soil and species richness. Grazing effects manifested at small scales regarding floristic similarity in pioneer forest and ruderal sites and regarding species richness in ruderal sites. The direction of scale-effects on similarity differed between vegetation types: Grazing effects on floristic similarity in the Corynephorus sites were significantly higher at the medium and large scale, while in the pioneer forest sites they were significantly higher at the smallest scale. Disturbances initiate vegetation changes by creating gaps and affecting colonization and extinction rates. The third intention of the thesis was to investigate the effect of small-scale disturbances on the species-level. In a sowing experiment, we studied early establishment probabilities of Corynephorus canescens, a key species of open sandy habitats. Applying two different regimes of mechanical ground disturbance (disturbed and undisturbed) in the three successional stages mentioned above, we focused on the interactive effects of small-scale disturbances, successional stage and year-to-year variation. Disturbance led to higher emergence in a humid and to lower emergence in a very dry year. Apparently, when soil moisture was sufficient, the main factor limiting C. canescens establishment was competition, while in the dry year water became the limiting factor. Survival rates were not affected by disturbance. In humid years, C. canescens emerged in higher numbers in open successional stages while in the dry year, emergence rates were higher in late stages, suggesting an important role of late successional stages for the persistence of C. canescens. We conclude that wild ungulate grazing is a useful tool to slow down succession and to preserve a species-rich, open landscape, because it does not only create disturbances, thereby supporting early successional stages, but at the same time efficiently controls woody plant cover. However, wild ungulate grazing considerably changed the overall appearance of the landscape. 
Additional measures like shifting exclosures might be necessary to allow vulnerable species to flower and reproduce. We further conclude that studying grazing impacts on a range of scales is crucial, since different parameters are affected at different spatial scales. Larger scales are suitable for assessing grazing impact on structural parameters like the proportion of open soil or woody plant cover, whereas species richness and floristic similarity are affected at smaller scales. Our results further indicate that the optimal strategy for promoting C. canescens is to apply disturbances just before seed dispersal and not during dry years. Further, at the landscape scale, facilitation by late successional species may be an important mechanism for the persistence of protected pioneer species.}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: It provides - the preferably automated - means to extract and evaluate information from their artifacts such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide its users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization. This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task, due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow related aspects of software traces from shared-memory software systems: A trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level. The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. 
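A minimal sketch of how a function-level trace can be turned into the nested rectangles of an icicle plot, as discussed above. The event format, function names and timings are assumptions for illustration, not the thesis' implementation.

    def icicle_layout(events):
        """Convert a single-threaded trace of ("enter"/"exit", function, time)
        events into icicle-plot rectangles: x and width in trace time, depth
        equal to the call-stack depth at the time of the call."""
        stack, rects = [], []
        for kind, func, t in events:
            if kind == "enter":
                stack.append((func, t, len(stack)))
            else:  # "exit"
                name, t_enter, depth = stack.pop()
                assert name == func, "unbalanced trace"
                rects.append({"function": name, "x": t_enter,
                              "width": t - t_enter, "depth": depth})
        return sorted(rects, key=lambda r: (r["depth"], r["x"]))

    # toy trace: main() calls parse() and solve(); solve() calls step() twice
    trace = [("enter", "main", 0.0), ("enter", "parse", 0.1), ("exit", "parse", 0.4),
             ("enter", "solve", 0.5), ("enter", "step", 0.6), ("exit", "step", 0.9),
             ("enter", "step", 1.0), ("exit", "step", 1.4), ("exit", "solve", 1.5),
             ("exit", "main", 1.6)]
    for r in icicle_layout(trace):
        print(f'depth {r["depth"]}: {r["function"]:<6} x={r["x"]:.1f} width={r["width"]:.1f}')

Stacking the rectangles by depth, with children drawn below their caller and sharing its time extent, yields the icicle view; multi-threaded traces would need one such layout per thread.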
Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: An extension to 3-dimensional space allows for the use of additional variables simultaneously; interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @phdthesis{Trukenbrod2012, author = {Trukenbrod, Hans Arne}, title = {Temporal and spatial aspects of eye-movement control : from reading to scanning}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70206}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Eye movements are a powerful tool to examine cognitive processes. However, in most paradigms little is known about the dynamics present in sequences of saccades and fixations. In particular, the control of fixation durations has been widely neglected in most tasks. As a notable exception, both spatial and temporal aspects of eye-movement control have been thoroughly investigated during reading. There, the scientific discourse was dominated by three controversies: (i) the role of oculomotor vs. cognitive processing in eye-movement control, (ii) the serial vs. parallel processing of words, and (iii) the control of fixation durations. The main purpose of this thesis was to investigate eye movements in tasks that require sequences of fixations and saccades. While reading phenomena served as a starting point, we examined eye guidance in non-reading tasks with the aim of identifying general principles of eye-movement control. In addition, the investigation of eye movements in non-reading tasks helped refine our knowledge about eye-movement control during reading. Our approach included the investigation of eye movements in non-reading experiments as well as the evaluation and development of computational models. I present three main results: First, oculomotor phenomena during reading can also be observed in non-reading tasks (Chapters 2 \& 4). Oculomotor processes determine the fixation position within an object. The fixation position, in turn, modulates both the next saccade target and the current fixation duration. Second, predictions of eye-movement models based on sequential attention shifts were falsified (Chapter 3). In fact, our results suggest that distributed processing of multiple objects forms the basis of eye-movement control. Third, fixation durations are under asymmetric control (Chapter 4). While increasing processing demands immediately prolong fixation durations, decreasing processing demands reduce fixation durations only with a temporal delay. We propose a computational model ICAT to account for asymmetric control. In this model, an autonomous timer initiates saccades after random time intervals independent of ongoing processing. 
However, processing demands that are higher than expected inhibit the execution of the next saccade and, thereby, prolong the current fixation. On the other hand, lower processing demands will not affect the duration before the next saccade is executed. Since the autonomous timer adjusts to expected processing demands from fixation to fixation, a decrease in processing demands may lead to a temporally delayed reduction of fixation durations. In an extended version of ICAT, we evaluated its performance while simulating both temporal and spatial aspects of eye-movement control. The eye-movement phenomena investigated in this thesis have now been observed in a number of different tasks, which suggests that they represent general principles of eye guidance. I propose that distributed processing of the visual input forms the basis of eye-movement control, while fixation durations are controlled by the principles outlined in ICAT. In addition, oculomotor control contributes considerably to the variability observed in eye movements. Interpretations for the relation between eye movements and cognition strongly benefit from a precise understanding of this interplay.}, language = {en} } @phdthesis{Trost2014, author = {Trost, Gerda}, title = {Poly(A) Polymerase 1 (PAPS1) influences organ size and pathogen response in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72345}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Polyadenylation of pre-mRNAs is critical for efficient nuclear export, stability, and translation of the mature mRNAs, and thus for gene expression. The bulk of pre-mRNAs are processed by canonical nuclear poly(A) polymerase (PAPS). Both vertebrate and higher-plant genomes encode more than one isoform of this enzyme, and these are coexpressed in different tissues. However, in neither case is it known whether the isoforms fulfill different functions or polyadenylate distinct subsets of pre-mRNAs. This thesis shows that the three canonical nuclear PAPS isoforms in Arabidopsis are functionally specialized owing to their evolutionarily divergent C-terminal domains. A moderate loss-of-function mutant in PAPS1 leads to increase in floral organ size, whereas leaf size is reduced. A strong loss-of-function mutation causes a male gametophytic defect, whereas a weak allele leads to reduced leaf growth. By contrast, plants lacking both PAPS2 and PAPS4 function are viable with wild-type leaf growth. Polyadenylation of SMALL AUXIN UP RNA (SAUR) mRNAs depends specifically on PAPS1 function. The resulting reduction in SAUR activity in paps1 mutants contributes to their reduced leaf growth, providing a causal link between polyadenylation of specific pre-mRNAs by a particular PAPS isoform and plant growth. Additionally, opposite effects of PAPS1 on leaf and flower growth reflect the different identities of these organs. The overgrowth of paps1 mutant petals is due to increased recruitment of founder cells into early organ primordia whereas the reduced leaf size is due to an ectopic pathogen response. This constitutive immune response leads to increased resistance to the biotrophic oomycete Hyaloperonospora arabidopsidis and reflects activation of the salicylic acid-independent signalling pathway downstream of ENHANCED DISEASE SUSCEPTIBILITY1 (EDS1)/PHYTOALEXIN DEFICIENT4 (PAD4). Immune responses are accompanied by intracellular redox changes. Consistent with this, the redox-status of the chloroplast is altered in paps1-1 mutants. 
The molecular effects of the paps1-1 mutation were analysed using an RNA sequencing approach that distinguishes between long- and short-tailed mRNA. The results shown here suggest the existence of an additional layer of regulation in plants and possibly vertebrate gene expression, whereby the relative activities of canonical nuclear PAPS isoforms control de novo synthesized poly(A) tail length and hence expression of specific subsets of mRNAs.}, language = {en} } @phdthesis{Tronci2010, author = {Tronci, Giuseppe}, title = {Synthesis, characterization, and biological evaluation of gelatin-based scaffolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49727}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This work presents the development of entropy-elastic gelatin based networks in the form of films or scaffolds. The materials have good prospects for biomedical applications, especially in the context of bone regeneration. Entropy-elastic gelatin based hydrogel films with varying crosslinking densities were prepared with tailored mechanical properties. Gelatin was covalently crosslinked above its sol gel transition, which suppressed the gelatin chain helicity. Hexamethylene diisocyanate (HDI) or ethyl ester lysine diisocyanate (LDI) were applied as chemical crosslinkers, and the reaction was conducted either in dimethyl sulfoxide (DMSO) or water. Amorphous films were prepared as measured by Wide Angle X-ray Scattering (WAXS), with tailorable degrees of swelling (Q: 300-800 vol. \%) and wet state Young's modulus (E: 70-740 kPa). Model reactions showed that the crosslinking reaction resulted in a combination of direct crosslinks (3-13 mol.-\%), grafting (5-40 mol.-\%), and blending of oligoureas (16-67 mol.-\%). The knowledge gained with this bulk material was transferred to the integrated process of foaming and crosslinking to obtain porous 3-D gelatin-based scaffolds. For this purpose, a gelatin solution was foamed in the presence of a surfactant, Saponin, and the resulting foam was fixed by chemical crosslinking with a diisocyanate. The amorphous crosslinked scaffolds were synthesized with varied gelatin and HDI concentrations, and analyzed in the dry state by micro computed tomography (µCT, porosity: 65±11-73±14 vol.-\%), and scanning electron microscopy (SEM, pore size: 117±28-166±32 µm). Subsequently, the work focused on the characterization of the gelatin scaffolds in conditions relevant to biomedical applications. Scaffolds showed high water uptake (H: 630-1680 wt.-\%) with minimal changes in outer dimension. Since a decreased scaffold pore size (115±47-130±49 µm) was revealed using confocal laser scanning microscopy (CLSM) upon wetting, the form stability could be explained. Shape recoverability was observed after removal of stress when compressing wet scaffolds, while dry scaffolds maintained the compressed shape. This was explained by a reduction of the glass transition temperature upon equilibration with water (dynamic mechanical analysis at varied temperature (DMTA)). The composition dependent compression moduli (Ec: 10-50 kPa) were comparable to the bulk micromechanical Young's moduli, which were measured by atomic force microscopy (AFM). The hydrolytic degradation profile could be adjusted, and a controlled decrease of mechanical properties was observed. Partially-degraded scaffolds displayed an increase of pore size. This was likely due to the pore wall disintegration during degradation, which caused the pores to merge. 
The scaffold cytotoxicity and immunologic responses were analyzed. The porous scaffolds enabled proliferation of human dermal fibroblasts within the implants (up to 90 µm depth). Furthermore, indirect eluate tests were carried out with L929 cells to quantify the material cytotoxic response. Here, the effects of the sterilization method (ethylene oxide sterilization), crosslinker, and surfactant were analyzed. Fully cytocompatible scaffolds were obtained by using LDI as crosslinker and PEO40-PPO20-PEO40 as surfactant. These investigations were accompanied by a study of the endotoxin material contamination. Medical-grade materials (<0.5 EU/mL) were successfully obtained by using low-endotoxin gelatin and performing all synthetic steps in a laminar flow hood.}, language = {en} } @phdthesis{Trompelt2010, author = {Trompelt, Helena}, title = {Production of regular and non-regular verbs : evidence for a lexical entry complexity account}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-061-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42120}, school = {Universit{\"a}t Potsdam}, pages = {x, 168}, year = {2010}, abstract = {The incredible productivity and creativity of language depend on two fundamental resources: a mental lexicon and a mental grammar. Rules of grammar enable us to produce and understand complex phrases we have not encountered before and at the same time constrain the computation of complex expressions. The concepts of the mental lexicon and mental grammar have been thoroughly tested by comparing the use of regular versus non-regular word forms. Regular verbs (e.g. walk-walked) are computed using a suffixation rule in a neural system for grammatical processing; non-regular verbs (run-ran) are retrieved from associative memory. The role of regularity has only been explored for the past tense, where regularity is overtly visible. To explore the representation and encoding of regularity as well as the inflectional processes involved in the production of regular and non-regular verbs, this dissertation investigated three groups of German verbs: regular, irregular and hybrid verbs. Hybrid verbs in German have completely regular conjugation in the present tense and irregular conjugation in the past tense. Articulation latencies were measured while participants named pictures of actions, producing the 3rd person singular of regular, hybrid, and irregular verbs in present and past tense. Studying the production of German verbs in past and present tense, this dissertation explored the complexity of lexical entries as a decisive factor in the production of verbs.}, language = {en} } @phdthesis{Trommer2001, author = {Trommer, Jochen}, title = {Distributed optimality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001377}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {In this dissertation I propose a synthesis (Distributed Optimality, DO) of Optimality Theory and a derivational, morphological approach, Distributed Morphology (DM; Halle \& Marantz, 1993). Integrating OT into DM makes it possible to attribute phenomena that DM captures through language-specific rules or features of lexical entries to the interaction of violable, universal constraints. Conversely, DM also makes two substantial contributions to DO: locality and impoverishment. 
Locality allows a formally simple interpretation of DO, while impoverishment proves indispensable for an adequate description of agreement morphology. The empirical basis of the work is the complex agreement systems of genetically distinct languages. The theoretical focus lies in two areas: first, so-called direct/inverse marking, for which it is shown that a treatment by constraints on feature realization is the most appropriate; second, the effects of ordering constraints that govern the status of affixes as prefixes or suffixes as well as their relative order. A concrete typology for the ordering of agreement affixes based on OT constraints is proposed.}, language = {en} } @phdthesis{Treplin2006, author = {Treplin, Simone}, title = {Inference of phylogenetic relationships in passerine birds (Aves: Passeriformes) using new molecular markers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-11230}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The aim of this study was to provide deeper insights into passerine phylogenetic relationships using new molecular markers. The monophyly of the largest avian order Passeriformes (~59\% of all living birds) and the division into its suborders suboscines and oscines are well established. Phylogenetic relationships within the group have been extremely puzzling, as most of the evolutionary lineages originated through rapid radiation. Numerous studies have hypothesised conflicting passerine phylogenies and have repeatedly stimulated further research with new markers. In the present study, I used three different approaches to contribute to the ongoing phylogenetic debate in Passeriformes. I investigated the recently introduced gene ZENK for its phylogenetic utility for passerine systematics in combination and comparison to three already established nuclear markers. My phylogenetic analyses of a comprehensive data set yielded highly resolved, consistent and strongly supported trees. I was able to show the high utility of ZENK for elucidating phylogenetic relationships within Passeriformes. For the second and third approach, I used chicken repeat 1 (CR1) retrotransposons as phylogenetic markers. I presented two specific CR1 insertions as apomorphic characters, whose presence/absence pattern significantly contributed to the resolution of a particular phylogenetic uncertainty, namely the position of the rockfowl species Picathartes spp. in the passerine tree. Based on my results, I suggest a closer relationship of these birds to crows, ravens, jays, and allies. For the third approach, I showed that CR1 sequences contain phylogenetic signal and investigated their applicability in more detail. In this context, I screened for CR1 elements in different passerine birds, used sequences of several loci to construct phylogenetic trees, and evaluated their reliability. I was able to corroborate existing hypotheses and provide strong evidence for some new hypotheses, e.g. I suggest a revision of the taxa Corvidae and Corvinae as vireos are more closely related to crows, ravens, and allies. The subdivision of the Passerida into three superfamilies, Sylvioidea, Passeroidea, and Muscicapoidea was strongly supported. I found evidence for a split within Sylvioidea into two clades, one consisting of tits and the other comprising warblers, bulbuls, laughingthrushes, whitethroats, and allies. 
Whereas Passeridae appear to be paraphyletic, monophyly of weavers and estrild finches as a separate clade was strongly supported. The sister taxon relationship of dippers and the thrushes/flycatcher/chat assemblage was corroborated and I suggest a closer relationship of waxwings and kinglets to wrens, tree-creepers, and nuthatches.}, language = {en} } @phdthesis{Trautwein2019, author = {Trautwein, Jutta}, title = {The Mental lexicon in acquisition}, doi = {10.25932/publishup-43431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434314}, school = {Universit{\"a}t Potsdam}, pages = {IV, 177}, year = {2019}, abstract = {The individual's mental lexicon comprises all known words as well as related information on semantics, orthography and phonology. Moreover, entries connect due to similarities in these language domains, building a large network structure. The access to lexical information is crucial for processing of words and sentences. Thus, a lack of information inhibits the retrieval and can cause language processing difficulties. Hence, the composition of the mental lexicon is essential for language skills and its assessment is a central topic of linguistic and educational research. In early childhood, measurement of the mental lexicon is uncomplicated, for example through parental questionnaires or the analysis of speech samples. However, with growing content the measurement becomes more challenging: With more and more words in the mental lexicon, the inclusion of all possible known words into a test or questionnaire becomes impossible. That is why there is a lack of methods to assess the mental lexicon for school children and adults. For the same reason, there are only a few findings on the courses of lexical development during school years as well as its specific effect on other language skills. This dissertation aims to close this gap by pursuing two major goals: First, I wanted to develop a method to assess lexical features, namely lexicon size and lexical structure, for children of different age groups. Second, I aimed to describe the results of this method in terms of lexical development of size and structure. Findings were intended to help understand mechanisms of lexical acquisition and inform theories on vocabulary growth. The approach is based on the dictionary method where a sample of words out of a dictionary is tested and results are projected on the whole dictionary to determine an individual's lexicon size. In the present study, the childLex corpus, a written language corpus for children in German, served as the basis for lexicon size estimation. The corpus is assumed to comprise all words children attending primary school could know. Testing a sample of words out of the corpus enables projection of the results on the whole corpus. For this purpose, a vocabulary test based on the corpus was developed. Afterwards, test performance of virtual participants was simulated by drawing different lexicon sizes from the corpus and comparing whether the test items were included in the lexicon or not. This allowed determination of the relation between test performance and total lexicon size and thus could be transferred to a sample of real participants. Besides lexicon size, lexical content could be approximated with this approach and analyzed in terms of lexical structure. To pursue the presented aims and establish the sampling method, I conducted three consecutive studies. Study 1 includes the development of a vocabulary test based on the childLex corpus. 
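The virtual-participant simulation described above can be sketched as follows. The toy corpus, the test items and the uniform sampling of known words are assumptions for illustration only, not the childLex data or the exact procedure used in the thesis.

    import random

    def simulate_test_scores(corpus, test_items, lexicon_sizes, n_virtual=200, seed=1):
        """Relate yes/no vocabulary-test performance to total lexicon size by
        simulating virtual participants who know a random subset of the corpus."""
        rng = random.Random(seed)
        curve = {}
        for size in lexicon_sizes:
            scores = []
            for _ in range(n_virtual):
                lexicon = set(rng.sample(corpus, size))
                scores.append(sum(item in lexicon for item in test_items) / len(test_items))
            curve[size] = sum(scores) / len(scores)   # mean score per lexicon size
        return curve

    def estimate_lexicon_size(observed_score, curve):
        """Map an observed test score onto the simulated curve (nearest neighbour)."""
        return min(curve, key=lambda size: abs(curve[size] - observed_score))

    # toy corpus of 1,000 'words' and a 40-item yes/no test sampled from it
    corpus = [f"word{i}" for i in range(1000)]
    test_items = corpus[::25]
    curve = simulate_test_scores(corpus, test_items, lexicon_sizes=range(100, 1001, 100))
    print(estimate_lexicon_size(0.62, curve))

In this toy setup the expected score grows roughly linearly with lexicon size, so an observed score of 0.62 maps to about 600 known words; with a realistic corpus and test the curve, not a linear rule, carries the projection.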
The testing was based on the yes/no format and included three versions for different age groups. The validation, grounded on the Rasch Model, shows that it is a valid instrument to measure vocabulary for primary school children in German. In Study 2, I established the method to estimate lexicon sizes and present results on lexical development during primary school. Plausible results demonstrate that lexical growth follows a quadratic function starting with about 6,000 words at the beginning of school and about 73,000 words on average for young adults. Moreover, the study revealed large interindividual differences. Study 3 focused on the analysis of network structures and their development in the mental lexicon due to orthographic similarities. It demonstrates that networks possess small-world characteristics and decrease in interconnectivity with age. Taken together, this dissertation provides an innovative approach for the assessment and description of the development of the mental lexicon from primary school onwards. The studies provide recent results on lexical acquisition in different age groups that were missing before. They impressively show the importance of this period and display the existence of extensive interindividual differences in lexical development. One central aim of future research is to address the causes and prevention of these differences. In addition, the application of the method for further research (e.g. the adaptation for other target groups) and teaching purposes (e.g. adaptation of texts for different target groups) appears to be promising.}, language = {en} } @phdthesis{Trautmann2022, author = {Trautmann, Tina}, title = {Understanding global water storage variations using model-data integration}, doi = {10.25932/publishup-56595}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-565954}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 141}, year = {2022}, abstract = {Climate change is one of the greatest challenges to humanity in this century, and most noticeable consequences are expected to be impacts on the water cycle - in particular the distribution and availability of water, which is fundamental for all life on Earth. In this context, it is essential to better understand where and when water is available and what processes influence variations in water storages. While estimates of the overall terrestrial water storage (TWS) variations are available from the GRACE satellites, these represent the vertically integrated signal over all water stored in ice, snow, soil moisture, groundwater and surface water bodies. Therefore, complementary observational data and hydrological models are still required to determine the partitioning of the measured signal among different water storages and to understand the underlying processes. However, the application of large-scale observational data is limited by their specific uncertainties and the incapacity to measure certain water fluxes and storages. Hydrological models, on the other hand, vary widely in their structure and process-representation, and rarely incorporate additional observational data to minimize uncertainties that arise from their simplified representation of the complex hydrologic cycle. In this context, this thesis aims to contribute to improving the understanding of global water storage variability by combining simple hydrological models with a variety of complementary Earth observation-based data. 
To this end, a model-data integration approach is developed, in which the parameters of a parsimonious hydrological model are calibrated against several observational constraints, including GRACE TWS, simultaneously, while taking into account each data set's specific strengths and uncertainties. This approach is used to investigate three specific aspects that are relevant for modelling and understanding the composition of large-scale TWS variations. The first study focuses on Northern latitudes, where snow and cold-region processes define the hydrological cycle. While the study confirms previous findings that seasonal dynamics of TWS are dominated by the cyclic accumulation and melt of snow, it reveals that inter-annual TWS variations, in contrast, are determined by variations in liquid water storages. Additionally, it is found to be important to consider the impact of compensatory effects of spatially heterogeneous hydrological variables when aggregating the contribution of different storage components over large areas. Hence, the determinants of TWS variations are scale-dependent, and the underlying driving mechanisms cannot simply be transferred between spatial and temporal scales. These findings are supported by the second study for the global land areas beyond the Northern latitudes as well. This second study further identifies the considerable impact of how vegetation is represented in hydrological models on the partitioning of TWS variations. Using spatio-temporally varying fields of Earth observation-based data to parameterize vegetation activity not only significantly improves model performance, but also reduces parameter equifinality and process uncertainties. Moreover, the representation of vegetation drastically changes the contribution of different water storages to overall TWS variability, emphasizing the key role of vegetation for water allocation, especially between sub-surface and delayed water storages. However, the study also identifies parameter equifinality regarding the decay of sub-surface and delayed water storages by either evapotranspiration or runoff, and thus emphasizes the need for further constraints on these processes. The third study focuses on the role of river water storage, in particular whether it is necessary to include computationally expensive river routing for model calibration and validation against the integrated GRACE TWS. The results suggest that river routing is not required for model calibration in such a global model-data integration approach, due to the larger influence of other observational constraints; instead, the determinability of certain model parameters and associated processes is identified as an issue of greater relevance. In contrast to model calibration, considering river water storage derived from routing schemes can already significantly improve modelled TWS compared to GRACE observations, and thus should be considered for model evaluation against GRACE data. Beyond these specific findings that contribute to improved understanding and modelling of large-scale TWS variations, this thesis demonstrates the potential of combining simple modeling approaches with diverse Earth observational data to improve model simulations, overcome inconsistencies of different observational data sets, and identify areas that require further research.
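As an illustration of what calibrating one model simultaneously against several uncertainty-weighted observational constraints can look like, a minimal sketch follows (the cost function, the toy model and all parameter values are assumptions chosen for illustration, not the formulation used in the thesis):

import numpy as np

def multi_constraint_cost(params, model, constraints):
    """Sum of uncertainty-normalized mean squared errors over all observational constraints.
    `constraints` maps a variable name to a tuple (observations, uncertainty, weight)."""
    cost = 0.0
    for name, (obs, sigma, weight) in constraints.items():
        sim = model(params, name)                      # simulated time series for this variable
        cost += weight * np.nanmean(((sim - obs) / sigma) ** 2)
    return cost

# Toy two-parameter model and two constraints (e.g., a TWS-like and a snow-like signal).
def toy_model(params, name):
    t = np.arange(120)
    amplitude = params[0] if name == "tws" else params[1]
    return amplitude * np.sin(2 * np.pi * t / 12)

obs_tws = 2.0 * np.sin(2 * np.pi * np.arange(120) / 12)
obs_swe = 0.5 * np.sin(2 * np.pi * np.arange(120) / 12)
constraints = {"tws": (obs_tws, 1.0, 1.0), "swe": (obs_swe, 0.2, 1.0)}
print(multi_constraint_cost([2.0, 0.5], toy_model, constraints))   # ~0 for the true parameters

Dividing each misfit by the data uncertainty is one simple way to let more reliable observations weigh more heavily in the calibration.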
These findings encourage future efforts to take advantage of the increasing number of diverse global observational data.}, language = {en} } @phdthesis{Trauth2015, author = {Trauth, Nico}, title = {Flow and reactive transport modeling at the stream-groundwater interface}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82748}, school = {Universit{\"a}t Potsdam}, pages = {xv, 103}, year = {2015}, abstract = {Stream water and groundwater are important fresh water resources but their water quality is deteriorated by harmful solutes introduced by human activities. The interface between stream water and the subsurface water is an important zone for retention, transformation and attenuation of these solutes. Streambed structures enhance these processes by increased water and solute exchange across this interface, denoted as hyporheic exchange. This thesis investigates the influence of hydrological and morphological factors on hyporheic water and solute exchange as well as redox-reactions in fluvial streambed structures on the intermediate scale (10-30m). For this purpose, a three-dimensional numerical modeling approach for coupling stream water flow with porous media flow is used. Multiple steady state stream water flow scenarios over different generic pool-riffle morphologies and a natural in-stream gravel bar are simulated by a computational fluid dynamics code that provides the hydraulic head distribution at the streambed. These heads are subsequently used as the top boundary condition of a reactive transport groundwater model of the subsurface beneath the streambed. Ambient groundwater that naturally interacts with the stream water is considered in scenarios of different magnitudes of downwelling stream water (losing case) and upwelling groundwater (gaining case). Also, the neutral case, where stream stage and groundwater levels are balanced is considered. Transport of oxygen, nitrate and dissolved organic carbon and their reaction by aerobic respiration and denitrification are modeled. The results show that stream stage and discharge primarily induce hyporheic exchange flux and solute transport with implications for specific residence times and reactions at both the fully and partially submerged structures. Gaining and losing conditions significantly diminish the extent of the hyporheic zone, the water exchange flux, and shorten residence times for both the fully and partially submerged structures. With increasing magnitude of gaining or losing conditions, these metrics exponentially decrease. Stream water solutes are transported mainly advectively into the hyporheic zone and hence their influx corresponds directly to the infiltrating water flux. Aerobic respiration takes place in the shallow streambed sediments, coinciding to large parts with the extent of the hyporheic exchange flow. Denitrification occurs mainly as a "reactive fringe" surrounding the aerobic zone, where oxygen concentration is low and still a sufficient amount of stream water carbon source is available. The solute consumption rates and the efficiency of the aerobic and anaerobic reactions depend primarily on the available reactive areas and the residence times, which are both controlled by the interplay between hydraulic head distribution at the streambed and the gradients between stream stage and ambient groundwater. Highest solute consumption rates can be expected under neutral conditions, where highest solute flux, longest residence times and largest extent of the hyporheic exchange occur. 
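A minimal sketch of the kind of kinetics that produce such a "reactive fringe", with denitrification inhibited while oxygen is abundant, is given below (the rate constants, oxygen threshold and initial concentrations are illustrative assumptions, not the parameterization used in the thesis):

import numpy as np

def react(o2, no3, doc, dt=0.1, k_aer=0.5, k_den=0.2, o2_limit=0.2):
    """One explicit time step of simplified aerobic respiration and denitrification.
    Denitrification is switched off while dissolved oxygen exceeds `o2_limit`."""
    r_aer = k_aer * min(o2, doc)                               # aerobic respiration consumes O2 and DOC
    r_den = k_den * min(no3, doc) if o2 < o2_limit else 0.0    # denitrification only at low O2
    o2 = max(o2 - r_aer * dt, 0.0)
    no3 = max(no3 - r_den * dt, 0.0)
    doc = max(doc - (r_aer + r_den) * dt, 0.0)
    return o2, no3, doc

state = (0.3, 1.0, 2.0)   # assumed inflow concentrations of O2, NO3 and DOC (arbitrary units)
for _ in range(100):
    state = react(*state)
print(state)   # oxygen is depleted first; nitrate only declines once O2 falls below the threshold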
The results of this thesis show that streambed structures on the intermediate scale have a significant potential to contribute to a net solute turnover that can support a healthy status of the aquatic ecosystem.}, language = {en} } @phdthesis{Trappmann2007, author = {Trappmann, Henryk}, title = {Arborescent numbers : higher arithmetic operations and division trees}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15247}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {The overall program "arborescent numbers" is to perform the analogues of the constructions from the natural numbers (N) to the positive fractional numbers (Q+) to the positive real numbers (R+), but beginning with (specific) binary trees instead of natural numbers. N can be regarded as the associative binary trees. The binary trees B and the left-commutative binary trees P allow the hassle-free definition of arbitrarily high arithmetic operations (hyper ... hyperpowers). To construct the division trees, the algebraic structure "coppice" is introduced, which is a group with an addition over which the multiplication is right-distributive. Q+ is the initial associative coppice. The present work accomplishes one step in the program "arborescent numbers": the construction of the arborescent equivalent(s) of the positive fractional numbers. These equivalents are the "division binary trees" and the "fractional trees". A representation with decidable word problem for each of them is given. The set of functions f:R1->R1 generated from identity by taking powers is isomorphic to P and can be embedded into a coppice by taking inverses.}, language = {en} } @phdthesis{Trapp2013, author = {Trapp, Matthias}, title = {Interactive rendering techniques for focus+context visualization of 3D geovirtual environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66824}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments. In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: • The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in a single image in real time.
The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. • The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. • The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. • The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage of the available screen space, and reduces the overload of image contents. • The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for the design and development of new applications and systems in the field of 3D geovirtual environments.}, language = {en} } @phdthesis{Tranter2022, author = {Tranter, Morgan Alan}, title = {Numerical quantification of barite reservoir scaling and the resulting injectivity loss in geothermal systems}, doi = {10.25932/publishup-56113}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-561139}, school = {Universit{\"a}t Potsdam}, pages = {131}, year = {2022}, abstract = {Due to the major role of greenhouse gas emissions in global climate change, the development of non-fossil energy technologies is essential. Deep geothermal energy represents such an alternative, which offers promising properties such as a high base load capability and a large untapped potential. The present work addresses barite precipitation within geothermal systems and the associated reduction in rock permeability, which is a major obstacle to maintaining high efficiency. In this context, hydro-geochemical models are essential to quantify and predict the effects of precipitation on the efficiency of a system. The objective of the present work is to quantify the induced injectivity loss using numerical and analytical reactive transport simulations. For the calculations, the fractured-porous reservoirs of the German geothermal regions North German Basin (NGB) and Upper Rhine Graben (URG) are considered. Similar depth-dependent precipitation potentials could be determined for both investigated regions (2.8-20.2 g/m3 fluid).
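To put such a precipitation potential into perspective, a back-of-the-envelope calculation is sketched below (the circulation rate and operating hours are assumed example values, not figures from the thesis):

# Mass of barite precipitated per year for a given precipitation potential and circulation rate.
potential_g_per_m3 = 20.2      # upper end of the reported range (g barite per m^3 of produced fluid)
flow_m3_per_h = 100.0          # assumed circulation rate of a geothermal doublet
hours_per_year = 8000.0        # assumed operating hours per year

barite_kg_per_year = potential_g_per_m3 * flow_m3_per_h * hours_per_year / 1000.0
print(f"{barite_kg_per_year:.0f} kg of barite per year")   # about 16,160 kg/yr under these assumptions

How strongly such a mass reduces injectivity then depends on where it accumulates, which is exactly the point made in the comparison of the two regions below.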
However, the reservoir simulations indicate that the injectivity loss due to barite deposition in the NGB is significant (1.8\%-6.4\% per year) and the longevity of the system is affected as a result; this is especially true for deeper reservoirs (3000 m). In contrast, simulations of URG sites indicate a minor role of barite (< 0.1\%-1.2\% injectivity loss per year). The key differences between the investigated regions are reservoir thicknesses and the presence of fractures in the rock, as well as the ionic strength of the fluids. The URG generally has fractured-porous reservoirs with much higher thicknesses, resulting in a greater distribution of precipitates in the subsurface. Furthermore, ionic strengths are higher in the NGB, which accelerates barite precipitation, causing it to occur more concentrated around the wellbore. The more concentrated the precipitates occur around the wellbore, the higher the injectivity loss. In this work, a workflow was developed within which numerical and analytical models can be used to estimate and quantify the risk of barite precipitation within the reservoir of geothermal systems. A key element is a newly developed analytical scaling score that provides a reliable estimate of induced injectivity loss. The key advantage of the presented approach compared to fully coupled reservoir simulations is its simplicity, which makes it more accessible to plant operators and decision makers. Thus, in particular, the scaling score can find wide application within geothermal energy, e.g., in the search for potential plant sites and the estimation of long-term efficiency.}, language = {en} } @phdthesis{Traifeh2023, author = {Traifeh, Hanadi}, title = {Design Thinking in the Arab world}, doi = {10.25932/publishup-59891}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-598911}, school = {Universit{\"a}t Potsdam}, pages = {ix, 196}, year = {2023}, abstract = {Design Thinking is a human-centered approach to innovation that has become increasingly popular globally over the last decade. While the spread of Design Thinking is well understood and documented in the Western cultural contexts, particularly in Europe and the US due to the popularity of the Stanford-Potsdam Design Thinking education model, this is not the case when it comes to non-Western cultural contexts. This thesis fills a gap identified in the literature regarding how Design Thinking emerged, was perceived, adopted, and practiced in the Arab world. The culture in that part of the world differs from that of the Western context, which impacts the mindset of people and how they interact with Design Thinking tools and methods. A mixed-methods research approach was followed in which both quantitative and qualitative methods were employed. First, two methods were used in the quantitative phase: a social media analysis using Twitter as a source of data, and an online questionnaire. The results and analysis of the quantitative data informed the design of the qualitative phase in which two methods were employed: ten semi-structured interviews, and participant observation of seven Design Thinking training events. According to the analyzed data, the Arab world appears to have had an early, though relatively weak, and slow, adoption of Design Thinking since 2006. Increasing adoption, however, has been witnessed over the last decade, especially in Saudi Arabia, the United Arab Emirates and Egypt. 
The results also show that despite its limited spread, Design Thinking has been practiced the most in education, information technology and communication, administrative services, and the non-profit sectors. The way it is being practiced, though, is not fully aligned with how it is being practiced and taught in the US and Europe, as most people in the region do not necessarily believe in all mindset attributes introduced by the Stanford-Potsdam tradition. Practitioners in the Arab world also seem to shy away from the 'wild side' of Design Thinking in particular, and do not fully appreciate the connection between art-design and science-engineering. This questions the role of the educational institutions in the region since, according to the findings, they appear to be leading the movement in promoting and developing Design Thinking in the Arab world. Nonetheless, it is notable that people seem to be aware of the positive impact of applying Design Thinking in the region, and its potential to bring meaningful transformation. However, they also seem to be concerned about the current cultural, social, political, and economic challenges that may hinder this transformation. Therefore, they call for more awareness and demand the creation of Arabic, culturally appropriate programs that respond to local needs. On another note, the lack of Arabic content and of local case studies on Design Thinking was identified by several interviewees, and also confirmed by the participant observation, as a major challenge that is slowing down the spread of Design Thinking or sometimes hampering capacity building in the region. Other challenges that were revealed by the study are: changing the mindset of people, the lack of dedicated Design Thinking spaces, and the need for clear instructions on how to apply Design Thinking methods and activities. The concept of time and how Arabs deal with it, gender management during trainings, and hierarchy and power dynamics among training participants are also among the identified challenges. Another key finding revealed by the study is the confirmation of التفكير التصميمي as the most widely adopted Arabic term in the region to refer to Design Thinking, given that four other Arabic terms were also found to be associated with it. Based on the findings of the study, the thesis concludes by presenting a list of recommendations on how to overcome the mentioned challenges and what factors should be considered when designing and implementing culturally customized Design Thinking training in the Arab region.}, language = {en} } @phdthesis{Trabant2014, author = {Trabant, Christoph}, title = {Ultrafast photoinduced phase transitions in complex materials probed by time-resolved resonant soft x-ray diffraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71377}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In processing and data storage, mainly ferromagnetic (FM) materials are used. Approaching physical limits, new concepts have to be found for faster, smaller switches, for higher data densities and more energy efficiency. Some of the discussed new concepts involve the material classes of correlated oxides and materials with antiferromagnetic coupling. Their applicability depends critically on their switching behavior, i.e., how fast and how energy-efficiently material properties can be manipulated. This thesis presents investigations of ultrafast non-equilibrium phase transitions in such new materials.
In transition metal oxides (TMOs), the coupling of different degrees of freedom and the resulting low-energy excitation spectrum often result in spectacular changes of macroscopic properties (colossal magnetoresistance, superconductivity, metal-to-insulator transitions), often accompanied by nanoscale order of spins, charges and orbital occupation and by lattice distortions, which make these materials attractive. Magnetite served as a prototype for functional TMOs showing a metal-to-insulator transition (MIT) at T = 123 K. By probing the charge and orbital order as well as the structure after an optical excitation, we found that the electronic order and the structural distortion, characteristics of the insulating phase in thermal equilibrium, are destroyed within the experimental resolution of 300 fs. The MIT itself occurs on a 1.5 ps timescale. It shows that MITs in functional materials are several thousand times faster than switching processes in semiconductors. Recently, ferrimagnetic and antiferromagnetic (AFM) materials have become interesting. It was shown in ferrimagnetic GdFeCo that the transfer of angular momentum between two opposed FM subsystems with different time constants leads to a switching of the magnetization after laser pulse excitation. In addition, it was theoretically predicted that demagnetization dynamics in AFM materials should occur faster than in FM materials, as no net angular momentum has to be transferred out of the spin system. We investigated two different AFM materials in order to learn more about their ultrafast dynamics. In Ho, a metallic AFM below T ≈ 130 K, we found that the AFM order can be destroyed not only faster but also ten times more energy-efficiently than the FM order in comparable metals. In EuTe, an AFM semiconductor below T ≈ 10 K, we compared the loss of magnetization and the laser-induced structural distortion in one and the same experiment. Our experiment shows that they are effectively disentangled. An exception is an ultrafast release of lattice dynamics, which we assign to the release of magnetostriction. The results presented here were obtained with time-resolved resonant soft x-ray diffraction at the Femtoslicing source of the Helmholtz-Zentrum Berlin and at the free-electron laser in Stanford (LCLS). In addition, the development and setup of a new UHV-diffractometer for these experiments will be reported.}, language = {en} } @phdthesis{TorresAcosta2015, author = {Torres Acosta, Ver{\´o}nica}, title = {Denudation processes in a tectonically active rift on different time scales}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-84534}, school = {Universit{\"a}t Potsdam}, pages = {xv, ix, 183}, year = {2015}, abstract = {Continental rifts are excellent regions where the interplay between extension, the build-up of topography, erosion and sedimentation can be evaluated in the context of landscape evolution. Rift basins also constitute important archives that potentially record the evolution and migration of species and the change of sedimentary conditions as a result of climatic change. Finally, rifts have increasingly become targets of resource exploration, such as hydrocarbons or geothermal systems. The study of extensional processes and the factors that further modify the mainly climate-driven surface process regime helps to identify changes in past and present tectonic and geomorphic processes that are ultimately recorded in rift landscapes.
The Cenozoic East African Rift System (EARS) is an exemplary continental rift system and an ideal natural laboratory to observe such interactions. The eastern and western branches of the EARS constitute first-order tectonic and topographic features in East Africa, which exert a profound influence on the evolution of topography, the distribution and amount of rainfall, and thus the efficiency of surface processes. The Kenya Rift is an integral part of the eastern branch of the EARS and is characterized by high-relief rift escarpments bounded by normal faults, gently tilted rift shoulders, and volcanic centers along the rift axis. Considering the Cenozoic tectonic processes in the Kenya Rift (the tectonically controlled cooling history of the rift shoulders, the subsidence history of the rift basins, and the sedimentation along and across the rift) may help to elucidate the morphotectonic evolution of this extensional province. While tectonic forcing of surface processes may play a minor role in the low-strain rift on centennial to millennial timescales, it may be hypothesized that erosion and sedimentation processes impacted by climate shifts associated with pronounced changes in the availability of moisture may have left important imprints in the landscape. In this thesis, I combined thermochronological data, geomorphic field observations, and morphometry of digital elevation models to reconstruct exhumation processes and erosion rates, as well as the effects of climate on the erosion processes in different sectors of the rift. I present three sets of results: (1) new thermochronological data from the northern and central parts of the rift to quantitatively constrain the Tertiary exhumation and thermal evolution of the Kenya Rift; (2) 10Be-derived catchment-wide mean denudation rates from the northern, central and southern rift that characterize erosional processes on millennial to present-day timescales; and (3) paleo-denudation rates in the northern rift to constrain climatically controlled shifts in paleoenvironmental conditions during the early Holocene (African Humid Period). Taken together, my studies show that time-temperature histories derived from apatite fission track (AFT) analysis, zircon (U-Th)/He dating, and thermal modeling bracket the onset of rifting in the Kenya Rift between 65 and 50 Ma, with a second phase from about 15 Ma to the present. These two episodes are marked by rapid exhumation and uplift of the rift shoulders. Between 45 and 15 Ma, the margins of the rift experienced very slow erosion/exhumation, with the accommodation of sediments in the rift basin. In addition, I determined that present-day denudation rates in sparsely vegetated parts of the Kenya Rift amount to 0.13 mm/yr, whereas denudation rates in humid and more densely vegetated sectors of the rift flanks reach a maximum of 0.08 mm/yr, despite steeper hillslopes. I inferred that hillslope gradient and vegetation cover control most of the variation in denudation rates across the Kenya Rift today. Importantly, my results support the notion that vegetation cover plays a fundamental role in determining the voracity of erosion of hillslopes through its stabilizing effects on the land surface. Finally, in a pilot study I highlighted how paleo-denudation rates in climatic threshold areas changed significantly during times of transient hydrologic conditions and involved a sixfold increase in erosion rates during increased humidity.
This assessment is based on cosmogenic nuclide (10Be) dating of quartzitic deltaic sands that were deposited in the northern Kenya Rift during a highstand of Lake Suguta, which was associated with the Holocene African Humid Period. Taken together, my new results document the role of climate variability in erosion processes that impact climatic threshold environments, which may provide a template for potential future impacts of climate-driven changes in surface processes in the course of Global Change.}, language = {en} } @phdthesis{TorcatoMordido2021, author = {Torcato Mordido, Gon{\c{c}}alo Filipe}, title = {Diversification, compression, and evaluation methods for generative adversarial networks}, doi = {10.25932/publishup-53546}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-535460}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 148}, year = {2021}, abstract = {Generative adversarial networks (GANs) have been broadly applied to a wide range of application domains since their proposal. In this thesis, we propose several methods that aim to tackle different existing problems in GANs. Particularly, even though GANs are generally able to generate high-quality samples, the diversity of the generated set is often sub-optimal. Moreover, the common increase of the number of models in the original GANs framework, as well as their architectural sizes, introduces additional costs. Additionally, even though challenging, the proper evaluation of a generated set is an important direction to ultimately improve the generation process in GANs. We start by introducing two diversification methods that extend the original GANs framework to multiple adversaries to stimulate sample diversity in a generated set. Then, we introduce a new post-training compression method based on Monte Carlo methods and importance sampling to quantize and prune the weights and activations of pre-trained neural networks without any additional training. The previous method may be used to reduce the memory and computational costs introduced by increasing the number of models in the original GANs framework. Moreover, we use a similar procedure to quantize and prune gradients during training, which also reduces the communication costs between different workers in a distributed training setting. We introduce several topology-based evaluation methods to assess data generation in different settings, namely image generation and language generation. Our methods retrieve both single-valued and double-valued metrics, which, given a real set, may be used to broadly assess a generated set or separately evaluate sample quality and sample diversity, respectively. Moreover, two of our metrics use locality-sensitive hashing to accurately assess the generated sets of highly compressed GANs. The analysis of the compression effects in GANs paves the way for their efficient employment in real-world applications. Given their general applicability, the methods proposed in this thesis may be extended beyond the context of GANs. Hence, they may be generally applied to enhance existing neural networks and, in particular, generative frameworks.}, language = {en} } @phdthesis{Topaj2001, author = {Topaj, Dmitri}, title = {Synchronization transitions in complex systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000367}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {The subject of this work is the investigation of generic synchronization phenomena in interacting complex systems.
Diese Ph{\"a}nomene werden u.a. in gekoppelten deterministischen chaotischen Systemen beobachtet. Bei sehr schwachen Interaktionen zwischen individuellen Systemen kann ein {\"U}bergang zum schwach koh{\"a}renten Verhalten der Systeme stattfinden. In gekoppelten zeitkontinuierlichen chaotischen Systemen manifestiert sich dieser {\"U}bergang durch den Effekt der Phasensynchronisierung, in gekoppelten chaotischen zeitdiskreten Systemen durch den Effekt eines nichtverschwindenden makroskopischen Feldes. Der {\"U}bergang zur Koh{\"a}renz in einer Kette lokal gekoppelter Oszillatoren, beschrieben durch Phasengleichungen, wird im Bezug auf die Symmetrien des Systems untersucht. Es wird gezeigt, daß die durch die Symmetrien verursachte Reversibilit{\"a}t des Systems nichttriviale topologische Eigenschaften der Trajektorien bedingt, so daß das als dissipativ konstruierte System in einem ganzen Parameterbereich quasi-Hamiltonische Z{\"u}ge aufweist, d.h. das Phasenvolumen ist im Schnitt erhalten, und die Lyapunov-Exponenten sind paarweise symmetrisch. Der {\"U}bergang zur Koh{\"a}renz in einem Ensemble global gekoppelter chaotischer Abbildungen wird durch den Verlust der Stabilit{\"a}t des entkoppelten Zustandes beschrieben. Die entwickelte Methode besteht darin, die Selbstkonsistenz des makroskopischen Feldes aufzuheben, und das Ensemble in Analogie mit einem Verst{\"a}rkerschaltkreis mit R{\"u}ckkopplung durch eine komplexe lineare {\"U}bertragungssfunktion zu charakterisieren. Diese Theorie wird anschließend f{\"u}r einige theoretisch interessanten F{\"a}lle verallgemeinert.}, language = {en} } @phdthesis{Tofelde2018, author = {Tofelde, Stefanie}, title = {Signals stored in sediment}, doi = {10.25932/publishup-42716}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427168}, school = {Universit{\"a}t Potsdam}, pages = {XVII, 172}, year = {2018}, abstract = {Tectonic and climatic boundary conditions determine the amount and the characteristics (size distribution and composition) of sediment that is generated and exported from mountain regions. On millennial timescales, rivers adjust their morphology such that the incoming sediment (Qs,in) can be transported downstream by the available water discharge (Qw). Changes in climatic and tectonic boundary conditions thus trigger an adjustment of the downstream river morphology. Understanding the sensitivity of river morphology to perturbations in boundary conditions is therefore of major importance, for example, for flood assessments, infrastructure and habitats. Although we have a general understanding of how rivers evolve over longer timescales, the prediction of channel response to changes in boundary conditions on a more local scale and over shorter timescales remains a major challenge. To better predict morphological channel evolution, we need to test (i) how channels respond to perturbations in boundary conditions and (ii) how signals reflecting the persisting conditions are preserved in sediment characteristics. This information can then be applied to reconstruct how local river systems have evolved over time. In this thesis, I address those questions by combining targeted field data collection in the Quebrada del Toro (Southern Central Andes of NW Argentina) with cosmogenic nuclide analysis and remote sensing data. In particular, I (1) investigate how information on hillslope processes is preserved in the 10Be concentration (geochemical composition) of fluvial sediments and how those signals are altered during downstream transport. 
I complement the field-based approach with physical experiments in the laboratory, in which I (2) explore how changes in sediment supply (Qs,in) or water discharge (Qw) generate distinct signals in the amount of sediment discharge at the basin outlet (Qs,out). With the same set of experiments, I (3) study the adjustments of alluvial channel morphology to changes in Qw and Qs,in, with a particular focus in fill-terrace formation. I transfer the findings from the experiments to the field to (4) reconstruct the evolution of a several-hundred meter thick fluvial fill-terrace sequence in the Quebrada del Toro. I create a detailed terrace chronology and perform reconstructions of paleo-Qs and Qw from the terrace deposits. In the following paragraphs, I summarize my findings on each of these four topics. First, I sampled detrital sediment at the outlet of tributaries and along the main stem in the Quebrada del Toro, analyzed their 10Be concentration ([10Be]) and compared the data to a detailed hillslope-process inventory. The often observed non-linear increase in catchment-mean denudation rate (inferred from [10Be] in fluvial sediment) with catchment-median slope, which has commonly been explained by an adjustment in landslide-frequency, coincided with a shift in the main type of hillslope processes. In addition, the [10Be] in fluvial sediments varied with grain-size. I defined the normalized sand-gravel-index (NSGI) as the 10Be-concentration difference between sand and gravel fractions divided by their summed concentrations. The NSGI increased with median catchment slope and coincided with a shift in the prevailing hillslope processes active in the catchments, thus making the NSGI a potential proxy for the evolution of hillslope processes over time from sedimentary deposits. However, the NSGI recorded hillslope-processes less well in regions of reduced hillslope-channel connectivity and, in addition, has the potential to be altered during downstream transport due to lateral sediment input, size-selective sediment transport and abrasion. Second, my physical experiments revealed that sediment discharge at the basin outlet (Qs,out) varied in response to changes in Qs,in or Qw. While changes in Qw caused a distinct signal in Qs,out during the transient adjustment phase of the channel to new boundary conditions, signals related to changes in Qs,in were buffered during the transient phase and likely only become apparent once the channel is adjusted to the new conditions. The temporal buffering is related to the negative feedback between Qs,in and channel-slope adjustments. In addition, I inferred from this result that signals extracted from the geochemical composition of sediments (e.g., [10Be]) are more likely to represent modern-day conditions during times of aggradation, whereas the signal will be temporally buffered due to mixing with older, remobilized sediment during times of channel incision. Third, the same set of experiments revealed that river incision, channel-width narrowing and terrace cutting were initiated by either an increase in Qw, a decrease in Qs,in or a drop in base level. The lag-time between the external perturbation and the terrace cutting determined (1) how well terrace surfaces preserved the channel profile prior to perturbation and (2) the degree of reworking of terrace-surface material. Short lag-times and well preserved profiles occurred in cases with a rapid onset of incision. 
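Restating the NSGI defined earlier in this abstract in compact form (my restatement; the sand-minus-gravel sign convention follows the wording of the verbal definition):

\[ \mathrm{NSGI} = \frac{[^{10}\mathrm{Be}]_{\mathrm{sand}} - [^{10}\mathrm{Be}]_{\mathrm{gravel}}}{[^{10}\mathrm{Be}]_{\mathrm{sand}} + [^{10}\mathrm{Be}]_{\mathrm{gravel}}} \]

so the index lies between -1 and 1 and grows when the sand fraction carries a higher 10Be concentration than the gravel fraction, consistent with its reported increase with median catchment slope.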
Also, lag-times were synchronous along the entire channel after upstream perturbations (Qw, Qs,in), whereas base-level fall triggered an upstream migrating knickzone, such that lag-times increased with distance upstream. Terraces formed after upstream perturbations (Qw, Qs,in) were always steeper when compared to the active channel in new equilibrium conditions. In the base-level fall experiment, the slope of the terrace-surfaces and the modern channel were similar. Hence, slope comparisons between the terrace surface and the modern channel can give insights into the mechanism of terrace formation. Fourth, my detailed terrace-formation chronology indicated that cut-and-fill episodes in the Quebrada del Toro followed a ~100-kyr cyclicity, with the oldest terraces ~ 500 kyr old. The terraces were formed due to variability in upstream Qw and Qs. Reconstructions of paleo-Qs over the last 500 kyr, which were restricted to times of sediment deposition, indicated only minor (up to four-fold) variations in paleo-denudation rates. Reconstructions of paleo-Qw were limited to the times around the onset of river incision and revealed enhanced discharge from 10 to 85\% compared to today. Such increases in Qw are in agreement with other quantitative paleo-hydrological reconstructions from the Eastern Andes, but have the advantage of dating further back in time.}, language = {en} } @phdthesis{Todt2009, author = {Todt, Helge Tobias}, title = {Hydrogen-deficient central stars of planetary nebulae}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41047}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Central stars of planetary nebulae are low-mass stars on the brink of their final evolution towards white dwarfs. Because of their surface temperature of above 25,000 K their UV radiation ionizes the surrounding material, which was ejected in an earlier phase of their evolution. Such fluorescent circumstellar gas is called a "Planetary Nebula". About one-tenth of the Galactic central stars are hydrogen-deficient. Generally, the surface of these central stars is a mixture of helium, carbon, and oxygen resulting from partial helium burning. Moreover, most of them have a strong stellar wind, similar to massive Pop-I Wolf-Rayet stars, and are in analogy classified as [WC]. The brackets distinguish the special type from the massive WC stars. Qualitative spectral analyses of [WC] stars lead to the assumption of an evolutionary sequence from the cooler, so-called late-type [WCL] stars to the very hot, early-type [WCE] stars. Quantitative analyses of the winds of [WC] stars became possible by means of computer programs that solve the radiative transfer in the co-moving frame, together with the statistical equilibrium equations for the population numbers. First analyses employing models without iron-line blanketing resulted in systematically different abundances for [WCL] and [WCE] stars. While the mass ratio of He:C is roughly 40:50 for [WCL] stars, it is 60:30 in average for [WCE] stars. The postulated evolution from [WCL] to [WCE] however could only lead to an increase of carbon, since heavier elements are built up by nuclear fusion. In the present work, improved models are used to re-analyze the [WCE] stars and to confirm their He:C abundance ratio. Refined models, calculated with the Potsdam WR model atmosphere code (PoWR), account now for line-blanketing due to iron group elements, small scale wind inhomogeneities, and complex model atoms for He, C, O, H, P, N, and Ne. 
Referring to stellar evolutionary models for the hydrogen-deficient [WC] stars, Ne and N abundances are of particular interest. Only one out of three different evolutionary channels, the VLTP scenario, leads to a Ne and N overabundance of a few percent by mass. A VLTP, a very late thermal pulse, is a rapid increase of the energy production of the helium-burning shell, while hydrogen burning has already ceased. Subsequently, the hydrogen envelope is mixed with deeper layers and completely burnt in the presence of C, He, and O. This results in the formation of N and Ne. A sample of eleven [WCE] stars has been analyzed. For three of them, PB 6, NGC 5189, and [S71d]3, a N overabundance of 1.5\% has been found, while for three other [WCE] stars such high abundances of N can be excluded. In the case of NGC 5189, strong spectral lines of Ne can be reproduced qualitatively by our models. At present, the Ne mass fraction can only be roughly estimated from the Ne emission lines and seems to be in the order of a few percent by mass. Furthermore, using a diagnostic He-C line pair, the He:C abundance ratio of 60:30 for [WCE] stars is confirmed. Within the framework of the analysis, a new class of hydrogen-deficient central stars has been discovered, with PB 8 as its first member. Its atmospheric mixture resembles rather that of the massive WNL stars than of the [WC] stars. The determined mass fractions H:He:C:N:O are 40:55:1.3:2:1.3. As the wind of PB 8 contains significant amounts of O and C, in contrast to WN stars, a classification as [WN/WC] is suggested.}, language = {en} } @phdthesis{Titov2017, author = {Titov, Evgenii}, title = {Quantum chemistry and surface hopping dynamics of azobenzenes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394610}, school = {Universit{\"a}t Potsdam}, pages = {205}, year = {2017}, abstract = {This cumulative doctoral dissertation, based on three publications, is devoted to the investigation of several aspects of azobenzene molecular switches, with the aid of computational chemistry. In the first paper, the isomerization rates of a thermal cis → trans isomerization of azobenzenes for species formed upon an integer electron transfer, i.e., with added or removed electron, are calculated from Eyring's transition state theory and activation energy barriers, computed by means of density functional theory. The obtained results are discussed in connection with an experimental study of the thermal cis → trans isomerization of azobenzene derivatives in the presence of gold nanoparticles, which is demonstrated to be greatly accelerated in comparison to the same isomerization reaction in the absence of nanoparticles. The second paper is concerned with electronically excited states of (i) dimers, composed of two photoswitchable units placed closely side-by-side, as well as (ii) monomers and dimers adsorbed on a silicon cluster. A variety of quantum chemistry methods, capable of calculating molecular electronic absorption spectra, based on density functional and wave function theories, is employed to quantify changes in optical absorption upon dimerization and covalent grafting to a surface. Specifically, the exciton (Davydov) splitting between states of interest is determined from first-principles calculations with the help of natural transition orbital analysis, allowing for insight into the nature of excited states. 
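The rate expression behind such a transition-state-theory estimate is, in its standard form, the Eyring equation; a short numerical illustration is given below (the barrier value is an arbitrary example, not a result from the thesis, and the transmission coefficient is set to one):

import math

def eyring_rate(delta_g_kj_per_mol, temperature_k=298.15):
    """Eyring equation: k = (k_B*T/h) * exp(-dG_act / (R*T)), with transmission coefficient 1."""
    k_b = 1.380649e-23      # Boltzmann constant, J/K
    h = 6.62607015e-34      # Planck constant, J*s
    r = 8.314462618         # gas constant, J/(mol*K)
    return (k_b * temperature_k / h) * math.exp(-delta_g_kj_per_mol * 1e3 / (r * temperature_k))

# Example: a free-energy barrier of 100 kJ/mol at room temperature.
print(f"{eyring_rate(100.0):.3e} 1/s")   # on the order of 1e-5 s^-1, i.e., an hours-scale half-life

Lowering the activation barrier, for instance upon adding or removing an electron as discussed above, raises the rate exponentially, which is why charged species can isomerize dramatically faster.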
In the third paper, nonadiabatic molecular dynamics with trajectory surface hopping is applied to model the photoisomerization of azobenzene dimers, (i) for the isolated case (exhibiting the exciton coupling between two molecules) as well as (ii) for the constrained case (providing the van der Waals interaction with environment in addition to the exciton coupling between two monomers). For the latter, the additional azobenzene molecules, surrounding the dimer, are introduced, mimicking a densely packed self-assembled monolayer. From obtained results it is concluded that the isolated dimer is capable of isomerization likewise the monomer, whereas the steric hindrance considerably suppresses trans → cis photoisomerization. Furthermore, the present dissertation comprises the general introduction describing the main features of the azobenzene photoswitch and objectives of this work, theoretical basis of the employed methods, and discussion of gained findings in the light of existing literature. Also, additional results on (i) activation parameters of the thermal cis → trans isomerization of azobenzenes, (ii) an approximate scheme to account for anharmonicity of molecular vibrations in calculation of the activation entropy, as well as (iii) absorption spectra of photoswitch-silicon composites obtained from time-demanding wave function-based methods are presented.}, language = {en} } @phdthesis{Tirok2008, author = {Tirok, Katrin}, title = {Predator-prey dynamics under the influence of exogenous and endogenous regulation : a data-based modeling study on spring plankton with respect to climate change}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-24528}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Understanding the interactions of predators and their prey and their responses to environmental changes is one of the striking features of ecological research. In this thesis, spring dynamics of phytoplankton and its consumers, zooplankton, were considered in dependence on the environmental conditions in a deep lake (Lake Constance) and a shallow marine water (mesocosms from Kiel Bight), using descriptive statistics, multiple regression models, and process-oriented dynamic simulation models. The development of the spring phytoplankton bloom, representing a dominant feature in the plankton dynamics in temperate and cold oceans and lakes, may depend on temperature, light, and mixing intensity, and the success of over-wintering phyto- and zooplankton. These factors are often correlated in the field. Unexpectedly, irradiance often dominated algal net growth rather than vertical mixing even in deep Lake Constance. Algal net losses from the euphotic layer to larger depth were induced by vertical mixing, but were compensated by the input from larger depth when algae were uniformly distributed over the water column. Dynamics of small, fast-growing algae were well predicted by abiotic variables, such as surface irradiance, vertical mixing intensity, and temperature. A simulation model additionally revealed that even in late winter, grazing may represent an important loss factor of phytoplankton during calm periods when losses due to mixing are small. The importance of losses by mixing and grazing changed rapidly as it depended on the variable mixing intensity. Higher temperature, lower global irradiance and enhanced mixing generated lower algal biomass and primary production in the dynamic simulation model. 
This suggests that potential consequences of climate change may partly counteract each other. The negative effect of higher temperatures on phytoplankton biomass was due to enhanced temperature-sensitive grazing losses. Comparing the results from deep Lake Constance to those of the shallow mesocosm experiments and simulations confirmed the strong direct effect of light, in contrast to temperature, and the importance of grazing already in early spring, as soon as moderate algal biomasses developed. In Lake Constance, ciliates dominated the herbivorous zooplankton in spring. The start of ciliate net growth in spring was closely linked to that of edible algae, chlorophyll a and the vertical mixing intensity, but was independent of water temperature. The duration of ciliate dominance in spring was largely controlled by the highly variable onset of the phytoplankton bloom, and little by the less variable termination of the ciliate bloom by grazing of meta-zooplankton. During years with an extended spring bloom of algae and ciliates, they coexisted at relatively high biomasses over 15-30 generations, and internally forced species shifts were observed in both communities. Interception feeders alternated with filter feeders, and cryptomonads with non-cryptomonads, in their relative importance. These dynamics were not captured by classical 1-predator-1-prey models, which consistently predict pronounced predator-prey cycles or equilibria with either the predator or the prey dominating or suppressed. A multi-species predator-prey model with predator species differing in their food selectivity and prey species differing in their edibility reproduced the observed patterns. Food selectivity and edibility were related to the feeding and growth characteristics of the species, which represented ecological trade-offs. For example, the prey species with the highest edibility also had the highest maximum growth rate. Data and model revealed endogenously driven ongoing species alternations, which yielded a higher variability in species-specific biomasses than in total predator and prey biomass. This holds for a broad parameter space as long as the species differ functionally. A more sophisticated model approach enabled the simulation of a continuum of different functional types and of the adaptability of predator and prey communities to altered environmental conditions, while maintaining a rather low model complexity, i.e., a low number of equations and free parameters. The community compositions were described by mean functional traits --- prey edibility and predator food-selectivity --- and their variances. The latter represent the functional diversity of the communities and thus the potential for adaptation. Oscillations in the mean community trait values indicated species shifts. The community traits were related to growth and grazing characteristics representing similar trade-offs as in the multi-species model. The model reproduced the observed patterns when nonlinear relationships between edibility and capacity, and between edibility and food availability for the predator, were chosen.
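A minimal sketch of the kind of multi-species predator-prey system described above is given below (the functional forms, parameter values, and the edibility/growth-rate trade-off are illustrative assumptions, not the equations used in the thesis):

import numpy as np

# Two prey (P) differing in edibility e, two predators (Z) differing in selectivity s.
e = np.array([1.0, 0.4])                  # edibility; trade-off: the more edible prey grows faster
r = np.array([1.2, 0.7])                  # prey maximum growth rates
s = np.array([[0.8, 0.2],                 # predator 0 prefers prey 0
              [0.3, 0.7]])                # predator 1 prefers prey 1
g, m, K, dt = 1.0, 0.3, 2.0, 0.01         # grazing rate, predator mortality, carrying capacity, step

P = np.array([0.5, 0.5])
Z = np.array([0.1, 0.1])
for _ in range(50000):
    intake = s * (e * P)                                             # selectivity-weighted encounter
    graze = g * intake / (1.0 + intake.sum(axis=1, keepdims=True))   # saturating functional response
    dP = r * P * (1.0 - P.sum() / K) - (graze * Z[:, None]).sum(axis=0)
    dZ = 0.5 * graze.sum(axis=1) * Z - m * Z
    P = np.maximum(P + dP * dt, 1e-9)
    Z = np.maximum(Z + dZ * dt, 1e-9)
print(P, Z)

Because each predator prefers a different prey and the well-defended prey grows more slowly, such a system can sustain alternations in species-specific biomasses while total biomasses vary less, which is the qualitative behavior reported above.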
A constant minimum amount of variance represented ongoing species invasions and thus, preserved a diversity which allows adaptation on a realistic time-span.}, language = {en} } @phdthesis{Tinnefeld2014, author = {Tinnefeld, Christian}, title = {Building a columnar database on shared main memory-based storage}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72063}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2014}, abstract = {In the field of disk-based parallel database management systems exists a great variety of solutions based on a shared-storage or a shared-nothing architecture. In contrast, main memory-based parallel database management systems are dominated solely by the shared-nothing approach as it preserves the in-memory performance advantage by processing data locally on each server. We argue that this unilateral development is going to cease due to the combination of the following three trends: a) Nowadays network technology features remote direct memory access (RDMA) and narrows the performance gap between accessing main memory inside a server and of a remote server to and even below a single order of magnitude. b) Modern storage systems scale gracefully, are elastic, and provide high-availability. c) A modern storage system such as Stanford's RAMCloud even keeps all data resident in main memory. Exploiting these characteristics in the context of a main-memory parallel database management system is desirable. The advent of RDMA-enabled network technology makes the creation of a parallel main memory DBMS based on a shared-storage approach feasible. This thesis describes building a columnar database on shared main memory-based storage. The thesis discusses the resulting architecture (Part I), the implications on query processing (Part II), and presents an evaluation of the resulting solution in terms of performance, high-availability, and elasticity (Part III). In our architecture, we use Stanford's RAMCloud as shared-storage, and the self-designed and developed in-memory AnalyticsDB as relational query processor on top. AnalyticsDB encapsulates data access and operator execution via an interface which allows seamless switching between local and remote main memory, while RAMCloud provides not only storage capacity, but also processing power. Combining both aspects allows pushing-down the execution of database operators into the storage system. We describe how the columnar data processed by AnalyticsDB is mapped to RAMCloud's key-value data model and how the performance advantages of columnar data storage can be preserved. The combination of fast network technology and the possibility to execute database operators in the storage system opens the discussion for site selection. We construct a system model that allows the estimation of operator execution costs in terms of network transfer, data processed in memory, and wall time. This can be used for database operators that work on one relation at a time - such as a scan or materialize operation - to discuss the site selection problem (data pull vs. operator push). Since a database query translates to the execution of several database operators, it is possible that the optimal site selection varies per operator. For the execution of a database operator that works on two (or more) relations at a time, such as a join, the system model is enriched by additional factors such as the chosen algorithm (e.g. Grace- vs. Distributed Block Nested Loop Join vs. 
Cyclo-Join), the data partitioning of the respective relations, and their overlapping as well as the allowed resource allocation. We present an evaluation on a cluster with 60 nodes where all nodes are connected via RDMA-enabled network equipment. We show that query processing performance is about 2.4x slower if everything is done via the data pull operator execution strategy (i.e. RAMCloud is being used only for data access) and about 27\% slower if operator execution is also supported inside RAMCloud (in comparison to operating only on main memory inside a server without any network communication at all). The fast-crash recovery feature of RAMCloud can be leveraged to provide high availability; e.g., a server crash during query execution only delays the query response for about one second. Our solution is elastic in the sense that it can adapt to changing workloads a) within seconds, b) without interruption of the ongoing query processing, and c) without manual intervention.}, language = {en} } @phdthesis{Timpanaro2004, author = {Timpanaro, Salvatore}, title = {Conductive properties and morphology of conjugated molecular materials studied by local probe techniques}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001929}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {The presented work deals with the investigation of relationships between the structure of thin organic layers and their conductivity. It contributes to a deeper understanding of the transport properties of organic semiconductors and is thus intended to help improve organic electronic devices. It is known that the efficiency of organic light-emitting diodes (OLEDs) depends strongly on the quality of the thin films employed. It is therefore of interest to characterize the structures of technologically relevant organic films by means of scanning probe microscopy (SPM) in order to obtain a better understanding of both the surface morphology and the molecular/atomic packing. The investigation of quaterthiophene (4T), which is widely used in field-effect transistors, forms a first focus of the work. A new, previously unknown crystal structure of 4T could be identified. For this purpose, quaterthiophene films grown on potassium phthalate (KAP) were investigated. The new structure was resolved through the combined use of optical and X-ray diffraction measurements. Atomic force microscopy (AFM) was additionally applied to confirm the triclinic crystal unit cell with the parameters a = 0.721 nm, b = 0.632 nm, c = 0.956 nm and α = 91°, β = 91.4°, γ = 91°. Continuing the scanning probe measurements, the morphologies of films of four further organic materials of technological relevance were investigated: hexanethiol monolayers on gold, azobenzenethiol monolayers on gold, para-phenylenevinylene oligomer films on gold, and phenyl-oxadiazole films on gold. For this purpose, ultra-high-vacuum scanning tunneling microscopy (UHV-STM) was additionally employed. A variety of morphological features was observed, whose particularities depend both on the chosen substrates and on the chemical structure of the organic materials investigated. For example, para-phenylenevinylene oligomer films show a rod-like structure and oxadiazole films a grain-like structure on gold.
Based on this knowledge, the optically induced switching of azobenzene could be demonstrated on the molecular scale by STM investigations and by scanning tunneling spectroscopy (STS). The topography of a series of poly(3,4-ethylenedioxythiophene) (PEDOT) films and its relation to the charge transport properties of these films was a further focus of the work. PEDOT films on indium tin oxide (ITO) are used as hole injection layers in organic electronic devices. Dispersions of the polymer at different concentrations were used for film preparation: by means of AFM and STM it could be shown that different concentrations lead to different topographies. In particular, the surface of the films with a high concentration of PEDOT, i.e. that of the most conductive films, is characterized by a grain-like structure of embedded particles. STM current-distance (I-d) investigations revealed that these particles represent the "tip of an iceberg" of conductive domains. Based on this finding, a structural model for the films is proposed in which the conductive domains/particles are embedded in a less conductive matrix. By adding polyols such as sorbitol to the PEDOT dispersion, films with higher conductivity could be prepared. A clear dependence between the conductivity and the previously undescribed nano-morphology was found.}, language = {en} } @phdthesis{Timme2023, author = {Timme, Sinika}, title = {Affective responses during exercise and situated exercise-related decision-making}, doi = {10.25932/publishup-61432}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614323}, school = {Universit{\"a}t Potsdam}, pages = {V, 123}, year = {2023}, abstract = {The field of exercise psychology has established robust evidence on the health benefits of physical activity. However, interventions to promote sustained exercise behavior have often proven ineffective. This dissertation addresses challenges in the field, particularly the neglect of situated and affective processes in understanding and changing exercise behavior. Dual process models, considering both rational and affective processes, have gained recognition. The Affective Reflective Theory of Physical Inactivity and Exercise (ART) is a notable model in this context, positing that situated processes in the moment of choice influence exercise decisions and subsequent exercise behavior. The dissertation identifies current challenges within exercise psychology and proposes methodological and theoretical advancements. It emphasizes the importance of momentary affective states and situated processes, offering alternatives to self-reported measures and advocating for a more comprehensive modeling of individual variability. The focus is on affective processes during exercise, which are theorized to reappear in momentary decision-making and to shape overall exercise behavior. The first publication introduces a new method, using automated facial action analysis to measure variable affective responses during exercise. It explores how these behavioral indicators covary with self-reported measures of affective valence and perceived exertion. The second publication delves into situated processes at the moment of choice between exercise and non-exercise options, revealing that intraindividual factors play a crucial role in explaining exercise-related choices.
The third publication presents an open-source research tool, the Decisional Preferences in Exercising Test (DPEX), designed to capture repeated situated decisions and predict exercise behavior based on past experiences. The findings challenge previous assumptions and provide insights into the complex interplay of affective responses, situated processes, and exercise choices. The dissertation underscores the need for individualized interventions that manipulate affective responses during exercise and calls for systematic testing to establish causal links to automatic affective processes and subsequent exercise behavior. This dissertation highlights the necessity of methodological and conceptual refinements in understanding and promoting exercise behavior, ultimately contributing to the broader goal of combating increasing inactivity trends.}, language = {en} } @phdthesis{Thonicke2003, author = {Thonicke, Kirsten}, title = {Fire disturbance and vegetation dynamics : analysis and models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000713}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {Studies on the role of natural disturbances in vegetation and ecosystems show that natural disturbances are an essential and intrinsic element of ecosystems, contribute substantially to the vitality and structural diversity of ecosystems, and influence biogeochemical cycles at both the local and the global level. Fire, in the form of grassland, shrubland or forest fires, is a particular disturbance agent because it is caused by both biotic and abiotic environmental factors. It affects biogeochemical cycles and plays an important role for the chemical composition of the atmosphere through the release of climate-relevant trace gases and aerosols from the burning of biomass. This is underlined by the emission of about 3.9 Gt of carbon per year, which constitutes a large share of the global total. A combined model that describes the effects of and feedbacks between fire and vegetation became necessary when changes in fire regimes were observed as a consequence of changes in land use and land management. This need was further emphasized by the recognition that the amount of burnt biomass, as an important carbon flux, influences the chemical composition of the atmosphere and the climate, but also vegetation dynamics itself. The existing modelling approaches, however, are not sufficient to carry out such investigations. As one conclusion, an optimal set of factors was identified that adequately describes the occurrence and spread of fire as well as its ecosystem effects. Such a model should be able to simulate the characteristics of observed fire regimes and support analyses of the interactions between fire and vegetation dynamics, in order to identify the causes of particular changes in fire regimes. In particular, the dynamic links between vegetation, climate and fire processes are important for analysing dynamic feedbacks and the effects of individual, changing environmental factors.
This created the need to develop new fire models that allow such investigations and improve the understanding of the role of fire in global ecology. As a conclusion of the dissertation, it is found that moisture conditions, their duration over time (the length of the fire season) and the amount of litter are the most important components describing the global distribution of fire regimes. When time series of individual regions are simulated, particular ignition sources, fire-critical climate conditions and stand structure should be considered as additional determinants. Stand structure changes the level of fire occurrence and spread but has less influence on its interannual variability. That it is important to consider the complete chain of relevant fire processes and their links to vegetation dynamics becomes especially evident under climate change conditions. A lengthening, climate-dependent fire season does not automatically imply a correspondingly increasing amount of burnt biomass; this can be buffered or accelerated by changes in vegetation productivity. Both changes in stand structure and increased vegetation productivity can intensify changes in fire characteristics even further and lead to still higher fire-related emissions.}, language = {en} } @phdthesis{Thomas2022, author = {Thomas, Timon}, title = {Cosmic-ray hydrodynamics: theory, numerics, applications}, doi = {10.25932/publishup-56384}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563843}, school = {Universit{\"a}t Potsdam}, pages = {334}, year = {2022}, abstract = {Cosmic rays (CRs) are a ubiquitous and important component of astrophysical environments such as the interstellar medium (ISM) and intracluster medium (ICM). Their plasma physical interactions with electromagnetic fields strongly influence their transport properties. Effective models which incorporate the microphysics of CR transport are needed to study the effects of CRs on their surrounding macrophysical media. Developing such models is challenging because of the conceptual, length-scale, and time-scale separation between the microscales of plasma physics and the macroscales of the environment. Hydrodynamical theories of CR transport achieve this by capturing the evolution of the CR population in terms of statistical moments. In the well-established one-moment hydrodynamical model for CR transport, the dynamics of the entire CR population are described by a single statistical quantity such as the commonly used CR energy density. In this work, I develop a new hydrodynamical two-moment theory for CR transport that expands the well-established hydrodynamical model by including the CR energy flux as a second independent hydrodynamical quantity. I detail how this model accounts for the interaction between CRs and gyroresonant Alfv{\´e}n waves. The small-scale magnetic fields associated with these Alfv{\´e}n waves scatter CRs, which fundamentally alters CR transport along large-scale magnetic field lines. This leads to the effects of CR streaming and diffusion, which are both captured within the presented hydrodynamical theory.
I use an Eddington-like approximation to close the hydrodynamical equations and investigate the accuracy of this closure relation by comparing it to high-order approximations of CR transport. In addition, I develop a finite-volume scheme for the new hydrodynamical model and adapt it to the moving-mesh code Arepo. This scheme is applied in a simulation of a CR-driven galactic wind. I investigate how CRs launch the wind and perform a statistical analysis of CR transport properties inside the simulated circumgalactic medium (CGM). I show that the new hydrodynamical model can be used to explain the morphological appearance of a particular type of filamentary radio structures found inside the central molecular zone (CMZ). I argue that these harp-like features are synchrotron-radiating CRs which are injected into braided magnetic field lines by a point-like source such as the stellar wind of a massive star or a pulsar. Lastly, I present the finite-volume code Blinc, which uses adaptive mesh refinement (AMR) techniques to perform simulations of radiation and magnetohydrodynamics (MHD). The mesh of Blinc is block-structured and represented in computer memory using a graph-based approach. I describe the implementation of the mesh graph and how a diffusion process is employed to achieve load balancing in parallel computing environments. Various test problems are used to verify the accuracy and robustness of the employed numerical algorithms.}, language = {en} } @phdthesis{Thomas2013, author = {Thomas, Bj{\"o}rn Daniel}, title = {Analysis and management of low flows in small catchments of Brandenburg, Germany}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69247}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Water management and environmental protection are vulnerable to extreme low flows during streamflow droughts. During the last decades, summer runoff and low flows have decreased in most rivers of Central Europe. Discharge projections agree that a future decrease in runoff is likely for catchments in Brandenburg, Germany. Depending on the first-order controls on low flows, different adaptation measures are expected to be appropriate. Small catchments were analyzed because they are expected to be more vulnerable to a changing climate than larger rivers. They are mainly headwater catchments with smaller groundwater storage. Local characteristics are more important at this scale and can increase vulnerability. This thesis jointly evaluates potential adaptation measures to sustain minimum runoff in small catchments of Brandenburg, Germany, and the similarities of these catchments regarding low flows. The following guiding questions are addressed: (i) Which first-order controls on low flows and related time scales exist? (ii) What are the differences between small catchments regarding low flow vulnerability? (iii) Which adaptation measures to sustain minimum runoff in small catchments of Brandenburg are appropriate, considering regional low flow patterns?
Potential adaptation measures to sustain minimum runoff during periods of low flows can be classified into three categories: (i) increasing groundwater recharge and subsequent baseflow through land use change, land management and artificial groundwater recharge, (ii) increasing water storage with regulated outflow through reservoirs, lakes and wetland water management, and (iii) measures with multiple purposes (urban water management, waste water recycling and inter-basin water transfer), for which regional low flow patterns have to be considered during planning. The question remained whether water management of areas with shallow groundwater tables can efficiently sustain minimum runoff. As an example, water management scenarios of a ditch-irrigated area were evaluated using the model Hydrus-2D. Increasing antecedent water levels and stopping ditch irrigation during periods of low flows increased fluxes from the pasture to the stream, but storage was depleted faster during the summer months due to higher evapotranspiration. Fluxes from this approx. 1 km long pasture with an area of approx. 13 ha ranged from 0.3 to 0.7 l/s, depending on the scenario. This demonstrates that numerous such small decentralized measures are necessary to sustain minimum runoff in meso-scale catchments. Differences in the low flow risk of catchments and meteorological low flow predictors were analyzed. A principal component analysis was applied to the daily discharge of 37 catchments between 1991 and 2006. Flows decreased more in southeast Brandenburg, in accordance with meteorological forcing. Low flow risk was highest in a region east of Berlin because of the intersection of a more continental climate and the specific geohydrology. In these catchments, flows decreased faster during summer and the low flow period was prolonged. A non-linear support vector machine regression was applied to iteratively select meteorological predictors for annual 30-day minimum runoff in 16 catchments between 1965 and 2006. The potential evapotranspiration sum of the previous 48 months was the most important predictor (r²=0.28). The potential evapotranspiration of the previous 3 months and the precipitation of the previous 3 months and of the previous year increased model performance (r²=0.49, including all four predictors). Model performance was higher for catchments with low yield and more damped runoff. In catchments with high low flow risk, the explanatory power of long-term potential evapotranspiration was high. Catchments with a high low flow risk as well as catchments with a considerable decrease in flows in southeast Brandenburg have the highest demand for adaptation. Measures increasing groundwater recharge are to be preferred. Catchments with high low flow risk showed relatively deep and decreasing groundwater heads, allowing increased groundwater recharge at recharge areas at higher altitude away from the streams. Low flows are expected to stay low or decrease even further, because long-term potential evapotranspiration was the most important low flow predictor and is projected to increase under climate change.
Differences in low flow risk and runoff dynamics between catchments have to be considered in the management and planning of measures whose purpose is not only to sustain minimum runoff.}, language = {en} } @phdthesis{ThielemannKuehn2017, author = {Thielemann-K{\"u}hn, Nele}, title = {Optically induced ferro- and antiferromagnetic dynamics in the rare-earth metal dysprosium}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402994}, school = {Universit{\"a}t Potsdam}, pages = {iv, 121}, year = {2017}, abstract = {Approaching the physical limits in speed and size of today's magnetic storage and processing technologies demands new concepts for controlling magnetization and motivates research on optically induced magnetic dynamics. Studies on photoinduced magnetization dynamics and their underlying mechanisms have primarily been performed on ferromagnetic metals. Ferromagnetic dynamics is based on the transfer of the conserved angular momentum connected with atomic magnetic moments out of the parallel-aligned magnetic system into other degrees of freedom. In this thesis, the so far rarely studied response of antiferromagnetic order in a metal to ultra-short optical laser pulses is investigated. The experiments were performed at the FemtoSpex slicing facility at the storage ring BESSY II, a unique source of ultra-short, elliptically polarized x-ray pulses. Laser-induced changes of the 4f magnetic order parameter in ferro- and antiferromagnetic dysprosium (Dy) were studied by x-ray methods, which yield directly comparable quantities. The discovered fundamental differences in the temporal and spatial behavior of ferro- and antiferromagnetic dynamics are assigned to an additional channel for angular momentum transfer, which reduces the antiferromagnetic order by redistributing angular momentum within the non-parallel-aligned magnetic system and hence conserves the zero net magnetization. It is shown that antiferromagnetic dynamics proceeds considerably faster and more energy-efficiently than demagnetization in ferromagnets. By probing antiferromagnetic order in time and space, it is found to be affected along the whole sample depth of an in situ grown, 73 nm thick Dy film. Interatomic transfer of angular momentum via fast diffusion of laser-excited 5d electrons is held responsible for this remarkably long-ranging effect. Ultrafast ferromagnetic dynamics can be expected to have the same origin, which, however, leads to demagnetization only in regions close to interfaces, caused by super-diffusive spin transport. Dynamics due to local scattering processes of excited but less mobile electrons occur in both magnetic alignments only in directly excited regions of the sample and on slower picosecond timescales. The thesis provides fundamental insights into photoinduced magnetic dynamics by directly comparing ferro- and antiferromagnetic dynamics in the same material and by consideration of the laser-induced magnetic depth profile.}, language = {en} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life. The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms.
The advancements in genomic, proteomic and metabolomic technologies allow for constructing complex models of biological systems. An increasing number of biological repositories is available on the web, incorporating thousands of biochemical reactions and genetic regulations. Systems Biology is a recent research trend in life science which fosters a systemic view of biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emergent properties of the whole system, such as robustness. However, both measurements and biological networks are prone to considerable incompleteness, heterogeneity and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem solving paradigm, in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language and provides highly efficient solving technology. We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes of inconsistency. We extend this approach to compute minimal repairs of model and data that restore consistency. This method allows for predicting unobserved data even in the case of inconsistency. Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and to generate hypotheses for extending the network. Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for easy integration of ASP solutions into the rich software environments that exist in Systems Biology.}, language = {en} } @phdthesis{Thiel2004, author = {Thiel, Marco}, title = {Recurrences : exploiting naturally occurring analogues}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001633}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {In the present work, recurrences in phase space are exploited. Three main results are discussed. 1. Recurrences allow the predictability of the system to be quantified. 2. Recurrences contain (under certain conditions) all relevant information about the dynamics in phase space. 3. Recurrences allow the generation of dynamical surrogate data.}, language = {en} } @phdthesis{Thiede2019, author = {Thiede, Tobias}, title = {A multiscale analysis of additively manufactured lattice structures}, doi = {10.25932/publishup-47041}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470418}, school = {Universit{\"a}t Potsdam}, pages = {xi, 97, LIII}, year = {2019}, abstract = {Additive Manufacturing (AM) in terms of laser powder-bed fusion (L-PBF) offers new prospects regarding the design of parts and therefore enables the production of lattice structures. These lattice structures are to be implemented in various industrial applications (e.g. gas turbines) for purposes such as material savings or cooling channels.
However, internal defects, residual stress, and structural deviations from the nominal geometry are unavoidable. In this work, the structural integrity of lattice structures manufactured by means of L-PBF was non-destructively investigated using a multiscale approach. A workflow for quantitative 3D powder analysis in terms of particle size, particle shape, particle porosity, inter-particle distance and packing density was established. Synchrotron computed tomography (CT) was used to correlate the packing density with the particle size and particle shape. It was also observed that at least about 50\% of the powder porosity was released during production of the struts. Struts are the components of lattice structures and were investigated by means of laboratory CT. The focus was on the influence of the build angle on part porosity and surface quality. The surface topography analysis was advanced by the quantitative characterisation of re-entrant surface features. This characterisation was compared with conventional surface parameters, showing their complementary information but also the need for AM-specific surface parameters. The mechanical behaviour of the lattice structure was investigated with in situ CT under compression and subsequent digital volume correlation (DVC). The deformation was found to be knot-dominated; the lattice therefore folds layer-wise, one unit-cell layer at a time. The residual stress was determined experimentally for the first time in such lattice structures. Neutron diffraction was used for the non-destructive 3D stress investigation. The principal stress directions and values were determined as a function of the number of measured directions. While a significant uni-axial stress state was found in the strut, a more hydrostatic stress state was found in the knot. In both cases, strut and knot, at least seven directions were needed to find reliable principal stress directions.}, language = {en} } @phdthesis{Thiede2005, author = {Thiede, Rasmus Christoph}, title = {Tectonic and climatic controls on orogenic processes : the Northwest Himalaya, India}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-2281}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {The role of feedback between erosional unloading and tectonics in controlling the development of the Himalaya is a matter of current debate. The distribution of precipitation is thought to control surface erosion, which in turn results in tectonic exhumation as an isostatic compensation process. Alternatively, subsurface structures can have a significant influence on the evolution of this actively growing orogen. Along the southern Himalayan front, new 40Ar/39Ar white mica and apatite fission track (AFT) thermochronologic data provide the opportunity to determine the history of rock-uplift and exhumation paths along an approximately 120-km-wide NE-SW transect spanning the greater Sutlej region of the northwest Himalaya, India. The 40Ar/39Ar data indicate, consistent with earlier studies, that first the High Himalayan Crystalline and subsequently the Lesser Himalayan Crystalline nappes were exhumed rapidly during Miocene time, while the deformation front propagated to the south. In contrast, new AFT data delineate synchronous exhumation of an elliptically shaped, NE-SW-oriented ~80 x 40 km region spanning both crystalline nappes during Pliocene-Quaternary time.
The AFT ages correlate with elevation but, within the resolution of the method, show no spatial relationship to preexisting major tectonic structures, such as the Main Central Thrust or the Southern Tibetan Fault System. Assuming constant exhumation rates and a constant geothermal gradient, the rocks of two age vs. elevation transects were exhumed at ~1.4 ±0.2 and ~1.1 ±0.4 mm/a with an average cooling rate of ~50-60 °C/Ma during Pliocene-Quaternary time. The locus of pronounced exhumation defined by the AFT data coincides with a region of enhanced precipitation, high discharge, and sediment flux rates under present conditions. We therefore hypothesize that the distribution of AFT cooling ages might reflect the efficiency of surface processes and fluvial erosion, and thus demonstrate the influence of erosion in localizing rock-uplift and exhumation along the southern Himalayan front, rather than encompassing the entire orogen. Despite a possible feedback between erosion and exhumation along the southern Himalayan front, we observe tectonically driven crustal exhumation within the arid region behind the orographic barrier of the High Himalaya, which might be related to and driven by internal plateau forces. Several metamorphic-igneous gneiss dome complexes have been exhumed between the High Himalaya to the south and the Indus-Tsangpo suture zone to the north since the onset of the Indian-Eurasian collision ~50 Ma ago. Although the overall tectonic setting is characterized by convergence, the exhumation of these domes is accommodated by extensional fault systems. Along the Indian-Tibetan border, the poorly described Leo Pargil metamorphic-igneous gneiss dome (31-34°N/77-78°E) is located within the Tethyan Himalaya. New field mapping, structural, and geochronologic data document that the western flank of the Leo Pargil dome was formed by extension along temporally linked normal fault systems. Motion on a major detachment system, referred to as the Leo Pargil detachment zone (LPDZ), has led to the juxtaposition of low-grade metamorphic, sedimentary rocks in the hanging wall and high-grade metamorphic gneisses in the footwall. However, the distribution of new 40Ar/39Ar white mica data indicates a regional cooling event during middle Miocene time. New apatite fission track (AFT) data demonstrate that subsequently more of the footwall was extruded along the LPDZ in a brittle stage between 10 and 2 Ma, with a minimum displacement of ~9 km. Additionally, AFT data indicate a regional episode of accelerated cooling and exhumation starting at ~4 Ma. Thus, tectonic processes can affect the entire orogenic system, while potential feedbacks between erosion and tectonics appear to be limited to the windward sides of orogenic systems.}, language = {en} } @phdthesis{Theves2013, author = {Theves, Matthias}, title = {Bacterial motility and growth in open and confined environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70313}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In the presence of a solid-liquid or liquid-air interface, bacteria can choose between a planktonic and a sessile lifestyle. Depending on environmental conditions, cells swimming in close proximity to the interface can irreversibly attach to the surface and grow into three-dimensional aggregates where the majority of cells is sessile and embedded in an extracellular polymer matrix (biofilm).
We used microfluidic tools and time-lapse microscopy to perform experiments with the polarly flagellated soil bacterium Pseudomonas putida (P. putida), a bacterial species that is able to form biofilms. We analyzed individual trajectories of swimming cells, both in the bulk fluid and in close proximity to a glass-liquid interface. Additionally, surface-related growth during the early phase of biofilm formation was investigated. In the bulk fluid, P. putida shows a typical bacterial swimming pattern of alternating periods of persistent displacement along a line (runs) and fast reorientation events (turns), and cells swim with an average speed of around 24 micrometers per second. We found that the distribution of turning angles is bimodal with a dominating peak around 180 degrees. In approximately six out of ten turning events, the cell reverses its swimming direction. In addition, our analysis revealed that upon a reversal, the cell systematically changes its swimming speed, by a factor of two on average. Based on the experimentally observed values of mean run time and rotational diffusion, we presented a model to describe the spreading of a population of cells by a run-reverse random walker with alternating speeds. We successfully recover the mean square displacement and, with an extended version of the model, also the negative dip in the directional autocorrelation function observed in the experiments. The analytical solution of the model demonstrates that alternating speeds enhance a cell's ability to explore its environment as compared to a bacterium moving at a constant intermediate speed. Compared to the bulk fluid, for cells swimming near a solid boundary we observed an increase in swimming speed at distances below d = 5 micrometers and an increase in average angular velocity at distances below d = 4 micrometers. While the average speed was maximal, with an increase of around 15\%, at a distance of d = 3 micrometers, the angular velocity was highest in closest proximity to the boundary at d = 1 micrometer, with an increase of around 90\% as compared to the bulk fluid. To investigate the swimming behavior in a confinement between two solid boundaries, we developed an experimental setup to acquire three-dimensional trajectories using a piezo-driven objective mount coupled to a high-speed camera. Results on speed and angular velocity were consistent with the motility statistics in the presence of a single boundary. Additionally, an analysis of the probability density revealed that a majority of cells accumulated near the upper and lower boundaries of the microchannel. The increase in angular velocity is consistent with previous studies, in which bacteria near a solid boundary were shown to swim on circular trajectories, an effect which can be attributed to a wall-induced torque. The increase in speed at a distance of several times the size of the cell body, however, cannot be explained by existing theories, which either consider the drag increase on cell body and flagellum near a boundary (resistive force theory) or model the swimming microorganism by a multipole expansion to account for the flow field interaction between cell and boundary. An accumulation of swimming bacteria near solid boundaries has been observed in similar experiments. Our results confirm that collisions with the surface play an important role and that hydrodynamic interactions alone cannot explain the steady-state accumulation of cells near the channel walls.
Furthermore, we monitored the growth in cell number in the microchannel under nutrient-rich conditions. We observed that, after a lag time, initially isolated cells at the surface started to grow by division into colonies of increasing size, while coexisting with a comparably smaller number of swimming cells. After 5 h 50 min, we observed a sudden jump in the number of swimming cells, which was accompanied by a breakup of bigger clusters on the surface. After approximately 30 minutes during which planktonic cells dominated in the microchannel, individual swimming cells reattached to the surface. We interpret this process as an emigration and recolonization event. A number of complementary experiments were performed to investigate the influence of collective effects or a depletion of the growth medium on this transition. Similar to earlier observations on another bacterium from the same family, we found that the release of cells into the swimming phase is most likely the result of an individual adaptation process, in which the synthesis of proteins for flagellar motility is upregulated after a number of division cycles at the surface.}, language = {en} } @phdthesis{Theuring2017, author = {Theuring, Philipp Christian}, title = {Suspended sediments in the Kharaa River, sources and impacts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410550}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2017}, abstract = {Anthropogenically amplified erosion leads to increased fine-grained sediment input into the fluvial system of the 15,000 km2 Kharaa River catchment in northern Mongolia and constitutes a major stressing factor for the aquatic ecosystem. This study uniquely combines intensive monitoring, source fingerprinting and catchment modelling techniques, allowing the credibility and accuracy of each single method to be compared. High-resolution discharge data were used in combination with daily suspended solids measurements to calculate the suspended sediment budget and compare it with estimates from the sediment budget model SedNet. The comparison of both techniques showed that the development of an overall sediment budget with SedNet was possible, yielding results of the same order of magnitude (20.3 kt/a and 16.2 kt/a). Radionuclide sediment tracing, using Be-7, Cs-137 and Pb-210, was applied to differentiate sediment sources for particles < 10 μm from hillslope and riverbank erosion and showed that riverbank erosion generates 74.5\% of the suspended sediment load, whereas surface erosion contributes 21.7\% and gully erosion only 3.8\%. The contribution of the individual subcatchments of the Kharaa to the suspended sediment load was assessed based on their variation in geochemical composition (e.g. in Ti, Sn, Mo, Mn, As, Sr, B, U, Ca and Sb). These variations were used for sediment source discrimination with geochemical composite fingerprints based on Genetic Algorithm-driven Discriminant Function Analysis, the Kruskal-Wallis H-test and Principal Component Analysis. The contributions of the individual sub-catchments varied from 6.4\% to 36.2\%, generally showing higher contributions from the sub-catchments in the middle, rather than the upstream, portions of the study area. The results indicate that riverbank erosion generated by existing livestock grazing practices is the main cause of the elevated fine sediment input.
Actions towards the protection of the headwaters and the stabilization of the river banks within the middle reaches were identified as the highest priority. Deforestation by logging and forest fires should be prevented to avoid increased hillslope erosion in the mountainous areas. Mining activities are of minor importance for the overall catchment sediment load but can constitute locally important point sources of particular heavy metals in the fluvial system.}, language = {en} }