@phdthesis{Ramos2018, author = {Ramos, Catalina}, title = {Structure and petrophysical properties of the Southern Chile subduction zone along 38.25°S from seismic data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409183}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 111}, year = {2018}, abstract = {Active and passive source data from two seismic experiments within the interdisciplinary project TIPTEQ (from The Incoming Plate to mega Thrust EarthQuake processes) were used to image and identify the structural and petrophysical properties (such as P- and S-velocities, Poisson's ratios, pore pressure, density and amount of fluids) within the Chilean seismogenic coupling zone at 38.25°S, where in 1960 the largest earthquake ever recorded (Mw 9.5) occurred. Two S-wave velocity models calculated using traveltime and noise tomography techniques were merged with an existing velocity model to obtain a 2D S-wave velocity model, which combines the advantages of each individual model. In a subsequent step, P- and S-reflectivity images of the subduction zone were obtained using different prestack and post-stack depth migration techniques. Among them, the recent prestack line-drawing depth migration scheme yielded revealing results. Next, synthetic seismograms modelled using the reflectivity method allowed, through their input 1D synthetic P- and S-velocities, the composition and rocks within the subduction zone to be inferred. Finally, an image of the subduction zone is given, jointly interpreting the results from this work with results from other studies. The Chilean seismogenic coupling zone at 38.25°S shows a continental crust with highly reflective horizontal, as well as (steep) dipping events. Among them, the Lanalhue Fault Zone (LFZ), which is interpreted to be east-dipping, is imaged to very shallow depths. Some steep reflectors are observed for the first time, for example one near the coast related to high seismicity, and another one near the LFZ. Steep shallow reflectivity towards the volcanic arc could be related to a steep west-dipping reflector interpreted as fluids and/or melts migrating upwards due to material recycling in the continental mantle wedge. The high resolution of the S-velocity model in the first kilometres allowed the identification of several sedimentary basins, characterized by very low P- and S-velocities, high Poisson's ratios and possible steep reflectivity. Such high Poisson's ratios are also observed within the oceanic crust, which reaches the seismogenic zone hydrated due to bending-related faulting. The oceanic crust is interpreted to release water up to the coast and beneath the continental mantle wedge. In terms of seismic velocities, the inferred composition and rocks in the continental crust are in agreement with field geology observations at the surface along the profile. Furthermore, there is no need to invoke the existence of measurable amounts of present-day fluids above the plate interface in the continental crust of the Coastal Cordillera and the Central Valley in this part of the Chilean convergent margin. A large-scale anisotropy in the continental crust and upper mantle, previously proposed from magnetotelluric studies, is likewise proposed here on the basis of seismic velocities. 
However, quantitative studies on this topic in the continental crust of the Chilean seismogenic zone at 38.25°S do not exist to date.}, language = {en} } @phdthesis{Jensen2018, author = {Jensen, Anders Christian Solberg}, title = {Structure and dynamics of amorphous carbonates related to biomineralization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421691}, school = {Universit{\"a}t Potsdam}, pages = {138}, year = {2018}, abstract = {Amorphous calcium carbonate (ACC) is a widespread biological material found in many organisms, such as sea urchins and mollusks, where it either serves as a precursor phase for the crystalline biominerals or is stabilized and used in the amorphous state. As ACC readily crystallizes, stabilizers such as anions, cations or macromolecules are often present to avoid or delay unwanted crystallization. Furthermore, additives often control the properties of the materials to suit the specific function needed for the organism, e.g., cystoliths in leaves that scatter light to optimize energy uptake from the sun, or calcite/aragonite crystals used in protective shells of mussels and gastropods. The lifetime of the amorphous phase is controlled by its kinetic stability against crystallization. This has often been linked to water, which plays a role in the mobility of ions and hence the probability of forming crystalline nuclei to initiate crystallization. However, it is unclear how the water molecules are incorporated within the amorphous phase, either as liquid confined in pores, as structural water binding to the ions, or as a mixture of both. It is also unclear how this is perturbed when additives are added, especially Mg2+, one of the most common additives found in biogenic samples. Mg2+ is expected to have a strong influence on the water incorporated into ACC, given the high energy barrier to dehydration of magnesium ions compared to calcium ions in solution. During the last 10-15 years, there has been a large effort to understand the local environment of the ions/molecules and how this affects the properties of the amorphous phase, but only a few aspects of the structure have so far been well described in the literature. This is partly due to the low stability of ACC when exposed to air, where it tends to crystallize within minutes, and partly due to the limited quantities of ACC produced in traditional synthesis routes. A further obstacle has been the difficulty in modeling the local structure based on experimental data. To solve the problem of stability and sample size, a few studies have used stabilizers such as Mg2+ or OH- and severely dehydrated samples so as to stabilize the amorphous state, allowing for combined neutron and x-ray analysis to be performed. However, so far, a clear description of the local environments of water present in the structure has not been reported. In this study, we show that ACC can be synthesized without any stabilizing additives in the quantities necessary for neutron measurements and that accurate models can be derived with the help of empirical-potential structural refinement. These analyses have shown that there is a wide range of local environments for all of the components in the system, suggesting that the amorphous phase is highly inhomogeneous, without any phase separation between ions and water. We also showed that the water in ACC is mainly structural and that there is no confined or liquid-like water present in the system. 
Analysis of amorphous magnesium carbonate also showed that there is a large difference in the local structure of the two cations and that Mg2+ surprisingly interacts with significantly fewer water molecules than Ca2+, despite its higher dehydration energy. All in all, this shows that water molecules act as a structural component of ACC whose strong binding to cations and anions probably retards or prevents the crystallization of the amorphous phase.}, language = {en} } @phdthesis{RodriguezLoureiro2018, author = {Rodriguez Loureiro, Ignacio}, title = {Structural characterization of single and interacting soft interfaces displaying brushes of synthetic or biomolecular polymers}, doi = {10.25932/publishup-42367}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423675}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2018}, abstract = {The interaction between surfaces displaying end-grafted hydrophilic polymer brushes plays important roles in biology and in many wet-technological applications. The outer surfaces of Gram-negative bacteria, for example, are composed of lipopolysaccharide (LPS) molecules exposing oligo- and polysaccharides to the aqueous environment. This unique, structurally complex biological interface is of great scientific interest, as it mediates the interaction of bacteria with neighboring bacteria in colonies and biofilms. The interaction between polymer-decorated surfaces is generally coupled to the distance-dependent conformation of the polymer chains. Therefore, structural insight into the interacting surfaces is a prerequisite to understand the interaction characteristics as well as the underlying physical mechanisms. This problem has been addressed by theory, but accurate experimental data on polymer conformations under confinement are rare, because obtaining perturbation-free structural insight into buried soft interfaces is inherently difficult. In this thesis, lipid membrane surfaces decorated with hydrophilic polymers of technological and biological relevance are investigated under controlled interaction conditions, i.e., at defined surface separations. For this purpose, dedicated sample architectures and experimental tools are developed. Via ellipsometry and neutron reflectometry, pressure-distance curves and distance-dependent polymer conformations, in terms of brush compression and mutual interpenetration, are determined. Additional element-specific structural insight into the end-point distribution of interacting brushes is obtained by standing-wave x-ray fluorescence (SWXF). The methodology is first established for poly(ethylene glycol) (PEG) brushes of defined length and grafting density. For this system, neutron reflectometry revealed pronounced brush interpenetration, which is not captured in common brush theories and therefore motivates rigorous simulation-based treatments. In the second step, the same approach is applied to realistic mimics of the outer surfaces of Gram-negative bacteria: monolayers of wild-type LPSs extracted from E. coli O55:B5 displaying strain-specific O-side chains. 
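As general background to the brush theories mentioned above (a textbook scaling result, not a finding of this thesis), the Alexander-de Gennes picture predicts an unconfined brush height of \[ h \approx N a \, (\sigma_g a^2)^{1/3} \] for chains of $N$ monomers of size $a$ at grafting density $\sigma_g$; brush compression and interpenetration at defined surface separations then appear as deviations from this unperturbed profile.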
The neutron reflectometry experiments yield unprecedented structural insight into bacterial interactions, which are of great relevance for the properties of biofilms.}, language = {en} } @phdthesis{Hill2018, author = {Hill, Lukasz}, title = {Soziale Integration und politische Partizipation in Demokratien}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-420077}, school = {Universit{\"a}t Potsdam}, pages = {215}, year = {2018}, abstract = {The question of the cohesion of a whole society is one of the central questions of the social sciences and sociology. Since the transition to modernity, the problem of the cohesion of differentiating societies has been the subject of scientific and public discourse. In the present study, social integration represents a form of successful socialization that is articulated in the reproduction of symbolic and non-symbolic resources. The result of this reproduction is pluralistic forms of socialization which, with regard to political preferences, give rise to conflicting interests. These preferences find expression in different forms, intensities and perceptions of political participation. Since modern political rule, owing to its legal and institutional resources, can exert a significant influence on social reproduction (e.g., through social policy), the direct influencing of political decisions, as the articulation of the differing preferences that emerge from the lines of conflict, represents the only legitimate means of redistributing resources at the political level. This makes the connection between integration and political participation visible. Members who are well integrated into society are able, owing to their broad participation in reproduction processes, to recognize their own interests and to express them through political activities. The empirical findings suggest that democratic conflict in modern society is no longer shaped directly by class membership and class interests, but rather by access to and the availability of symbolic and non-symbolic resources. Consequently, the research question of this work is whether integrated societies are politically more active. This question is examined using aggregate data from democratically constituted political systems that are considered established democracies and that differ in the breadth of their welfare-state measures. The hypotheses were tested empirically using bivariate and multivariate regression analyses. The tested hypotheses can be summarized in a single hypothesis: the stronger the social integration of a society, the greater its conventional and unconventional political participation. In general terms, it can be stated that the social integration of a society has positive effects on the frequency of political participation within that society. More strongly integrated societies are politically more active, regardless of the form of political participation (conventional or unconventional). The direct effect of society-wide integration is stronger on conventional forms of participation than on unconventional ones. 
This statement only holds if elements of the electoral system, such as proportional representation, and GDP are not taken into account. Based on the results with control variables, the data permit the macro-level conclusion that, in addition to a high level of social integration, an electoral system characterized by (co-)participation and a high degree of economic development are also conducive to a high level of political participation.}, language = {de} } @phdthesis{Valliappan2018, author = {Valliappan, Senthamizh Pavai}, title = {Solar Activity Reconstruction from Historical Observations of Sunspots}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-413600}, school = {Universit{\"a}t Potsdam}, pages = {115}, year = {2018}, abstract = {Solar activity and its consequences affect space weather and Earth's climate. Solar activity exhibits a cyclic behaviour with a period of about 11 years. The solar cycle properties are governed by the dynamo operating in the interior of the Sun, and the properties of each cycle are distinctive. Extending the knowledge about solar cycle properties into the past is essential for understanding the solar dynamo and forecasting space weather; such knowledge can be acquired through the analysis of historical sunspot drawings. Sunspots are dark areas on the solar surface that are associated with strong magnetic fields. Sunspots are the oldest and longest available observed features of solar activity. One of the longest available records of sunspot drawings is the collection by Samuel Heinrich Schwabe during 1825-1867. The sunspot sizes measured from the digitized Schwabe drawings are not to scale and need to be converted into physical sunspot areas. We employed a statistical approach, assuming that the area distribution of sunspots was the same in the 19th century as it was in the 20th century. Umbral areas for about 130 000 sunspots observed by Schwabe were obtained. The annually averaged sunspot areas correlate reasonably well with the sunspot number. Tilt angles and polarity separations of sunspot groups were calculated assuming the groups to be bipolar; there is, of course, no polarity information in the observations. We derived an average tilt angle by attempting to exclude unipolar groups, using a minimum separation of the two surmised polarities and an outlier-rejection method that follows the evolution of each group and detects the moment when it turns unipolar as it decays. As a result, the tilt angles, although displaying considerable natural scatter, are on average 5.85° ± 0.25°, with the leading polarity located closer to the equator, in good agreement with tilt angles obtained from 20th-century data sets. Sources of uncertainty in the tilt-angle determination are discussed and need to be addressed whenever different data sets are combined. Digital images of observations printed in the books Rosa Ursina and Prodromus pro sole mobili by Christoph Scheiner, as well as the drawings from Scheiner's letters to Marcus Welser, are analyzed to obtain information on the positions and sizes of sunspots that appeared before the Maunder minimum. In most cases, the given orientation of the ecliptic is used to set up the heliographic coordinate system for the drawings. Positions and sizes are measured manually by displaying the drawings on a computer screen. Very early drawings have no indication of the solar orientation. 
A rotational matching using common spots on adjacent days is used in some cases, while in other cases the assumption that images were aligned with a zenith-horizon coordinate system appeared most likely. In total, 8167 sunspots were measured. A distribution of sunspot latitudes versus time (butterfly diagram) is obtained for Scheiner's observations. The observations of 1611 are very inaccurate, but the drawings of 1612 have at least an indication of the solar orientation, while the remaining spot positions from 1618-1631 have good to very good accuracy. We also computed 697 tilt angles of apparent bipolar sunspot groups observed in the period 1618-1631. We find that their average tilt angle of nearly 4° does not significantly differ from the 20th-century values. The solar cycle properties seem to be related to the tilt angles of sunspot groups, which are an important parameter in surface flux transport models. The tilt angles of bipolar sunspot groups from various historical sets of solar drawings, including those by Schwabe and Scheiner, are analyzed. Data by Scheiner, Hevelius, Staudacher, Zucconi, Schwabe, and Spörer deliver a series of average tilt angles spanning a period of 270 years, in addition to previously found values for 20th-century data obtained by other authors. We find that the average tilt angles before the Maunder minimum were not significantly different from modern values. However, the average tilt angles of the period about 50 years after the Maunder minimum, namely for cycles 0 and 1, were much lower and near zero. The typical tilt angles before the Maunder minimum suggest that abnormally low tilt angles were not responsible for driving the solar cycle into a grand minimum. With the Schwabe (1826-1867) and Spörer (1866-1880) sunspot data, the butterfly diagram of sunspot groups extends back to 1826. A recently developed method is used to separate the wings of the butterfly diagram, based on the long gaps present in sunspot-group occurrences at different latitudinal bands. The cycle-to-cycle variation in the start (F), end (L), and highest (H) latitudes of the wings with respect to the strength of the wings is analyzed. On the whole, the wings of the stronger cycles tend to start at higher latitudes and have a greater extent. The time spans of the wings and the time difference between the wings in the northern hemisphere display a quasi-periodicity of 5-6 cycles. The average wing overlap is zero in the southern hemisphere, whereas it is 2-3 months in the north. A marginally significant oscillation of about 10 solar cycles is found in the asymmetry of the L latitudes. This latest, extended database of butterfly wings provides new observational constraints for solar dynamo models regarding the spatio-temporal distribution of sunspot occurrences over the solar cycle.}, language = {en} } @phdthesis{Tofelde2018, author = {Tofelde, Stefanie}, title = {Signals stored in sediment}, doi = {10.25932/publishup-42716}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427168}, school = {Universit{\"a}t Potsdam}, pages = {XVII, 172}, year = {2018}, abstract = {Tectonic and climatic boundary conditions determine the amount and the characteristics (size distribution and composition) of sediment that is generated and exported from mountain regions. 
On millennial timescales, rivers adjust their morphology such that the incoming sediment (Qs,in) can be transported downstream by the available water discharge (Qw). Changes in climatic and tectonic boundary conditions thus trigger an adjustment of the downstream river morphology. Understanding the sensitivity of river morphology to perturbations in boundary conditions is therefore of major importance, for example for flood assessments, infrastructure and habitats. Although we have a general understanding of how rivers evolve over longer timescales, the prediction of channel response to changes in boundary conditions on a more local scale and over shorter timescales remains a major challenge. To better predict morphological channel evolution, we need to test (i) how channels respond to perturbations in boundary conditions and (ii) how signals reflecting the persisting conditions are preserved in sediment characteristics. This information can then be applied to reconstruct how local river systems have evolved over time. In this thesis, I address those questions by combining targeted field data collection in the Quebrada del Toro (Southern Central Andes of NW Argentina) with cosmogenic nuclide analysis and remote sensing data. In particular, I (1) investigate how information on hillslope processes is preserved in the 10Be concentration (geochemical composition) of fluvial sediments and how those signals are altered during downstream transport. I complement the field-based approach with physical experiments in the laboratory, in which I (2) explore how changes in sediment supply (Qs,in) or water discharge (Qw) generate distinct signals in the amount of sediment discharge at the basin outlet (Qs,out). With the same set of experiments, I (3) study the adjustments of alluvial channel morphology to changes in Qw and Qs,in, with a particular focus on fill-terrace formation. I transfer the findings from the experiments to the field to (4) reconstruct the evolution of a several-hundred-meter-thick fluvial fill-terrace sequence in the Quebrada del Toro. I create a detailed terrace chronology and perform reconstructions of paleo-Qs and Qw from the terrace deposits. In the following paragraphs, I summarize my findings on each of these four topics. First, I sampled detrital sediment at the outlets of tributaries and along the main stem in the Quebrada del Toro, analyzed the samples' 10Be concentrations ([10Be]) and compared the data to a detailed hillslope-process inventory. The often observed non-linear increase in catchment-mean denudation rate (inferred from [10Be] in fluvial sediment) with catchment-median slope, which has commonly been explained by an adjustment in landslide frequency, coincided with a shift in the main type of hillslope processes. In addition, the [10Be] in fluvial sediments varied with grain size. I defined the normalized sand-gravel index (NSGI) as the 10Be-concentration difference between the sand and gravel fractions divided by their summed concentrations. The NSGI increased with median catchment slope and coincided with a shift in the prevailing hillslope processes active in the catchments, thus making the NSGI a potential proxy for the evolution of hillslope processes over time from sedimentary deposits. However, the NSGI recorded hillslope processes less well in regions of reduced hillslope-channel connectivity and, in addition, can be altered during downstream transport due to lateral sediment input, size-selective sediment transport and abrasion. 
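Written out as a formula, the NSGI definition above reads \[ \mathrm{NSGI} = \frac{[^{10}\mathrm{Be}]_{\mathrm{sand}} - [^{10}\mathrm{Be}]_{\mathrm{gravel}}}{[^{10}\mathrm{Be}]_{\mathrm{sand}} + [^{10}\mathrm{Be}]_{\mathrm{gravel}}}, \] so that the index is confined between -1 and 1 (the notation, not the definition, is added here for illustration).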
Second, my physical experiments revealed that sediment discharge at the basin outlet (Qs,out) varied in response to changes in Qs,in or Qw. While changes in Qw caused a distinct signal in Qs,out during the transient adjustment phase of the channel to the new boundary conditions, signals related to changes in Qs,in were buffered during the transient phase and likely only become apparent once the channel has adjusted to the new conditions. The temporal buffering is related to the negative feedback between Qs,in and channel-slope adjustments. In addition, I inferred from this result that signals extracted from the geochemical composition of sediments (e.g., [10Be]) are more likely to represent modern-day conditions during times of aggradation, whereas the signal will be temporally buffered due to mixing with older, remobilized sediment during times of channel incision. Third, the same set of experiments revealed that river incision, channel-width narrowing and terrace cutting were initiated by either an increase in Qw, a decrease in Qs,in or a drop in base level. The lag time between the external perturbation and the terrace cutting determined (1) how well terrace surfaces preserved the channel profile prior to perturbation and (2) the degree of reworking of terrace-surface material. Short lag times and well-preserved profiles occurred in cases with a rapid onset of incision. Also, lag times were synchronous along the entire channel after upstream perturbations (Qw, Qs,in), whereas base-level fall triggered an upstream-migrating knickzone, such that lag times increased with distance upstream. Terraces formed after upstream perturbations (Qw, Qs,in) were always steeper than the active channel under the new equilibrium conditions. In the base-level fall experiment, the slopes of the terrace surfaces and the modern channel were similar. Hence, slope comparisons between the terrace surface and the modern channel can give insights into the mechanism of terrace formation. Fourth, my detailed terrace-formation chronology indicated that cut-and-fill episodes in the Quebrada del Toro followed a ~100-kyr cyclicity, with the oldest terraces ~500 kyr old. The terraces were formed due to variability in upstream Qw and Qs. Reconstructions of paleo-Qs over the last 500 kyr, which were restricted to times of sediment deposition, indicated only minor (up to four-fold) variations in paleo-denudation rates. Reconstructions of paleo-Qw were limited to the times around the onset of river incision and revealed discharge enhanced by 10 to 85\% compared to today. Such increases in Qw are in agreement with other quantitative paleo-hydrological reconstructions from the Eastern Andes, but have the advantage of dating further back in time.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Stefanie}, title = {Sequence dependency of photon and electron induced DNA strand breaks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419669}, school = {Universit{\"a}t Potsdam}, pages = {xii, 117}, year = {2018}, abstract = {Deoxyribonucleic acid (DNA) is the carrier of human genetic information and is exposed every day to environmental influences such as the ultraviolet (UV) fraction of sunlight. The photostability of DNA against UV light is astonishing: even though the DNA bases have a strong absorption maximum at around 260 nm (4.77 eV), their quantum yield of photoproducts remains very low [1]. 
If the photon energies exceed the ionization energy (IE) of the nucleobases (~8-9 eV) [2], the DNA can be severely damaged. Photoexcitation and -ionization reactions occur, which can induce strand breaks in the DNA. The efficiency of the excitation- and ionization-induced strand breaks in the target DNA sequences is represented by cross sections. If Si is used as a substrate material in the VUV irradiation experiments, secondary electrons with an energy below 3.6 eV are generated from the substrate. These low-energy electrons (LEEs) are known to induce dissociative electron attachment (DEA) in DNA and, with it, DNA strand breakage very efficiently. LEEs play an important role in cancer radiation therapy, since they are generated secondarily along the track of ionizing radiation. In the framework of this thesis, different single-stranded DNA sequences were irradiated with 8.44 eV vacuum UV (VUV) light and cross sections for single strand breaks (SSBs) were determined. Several sequences were also exposed to secondary LEEs, which additionally contributed to the SSBs. First, the cross sections for SSBs depending on the type of nucleobases were determined. Both types of DNA sequences, mono-nucleobase and mixed sequences, showed very similar results upon VUV irradiation. The additional influence of secondarily generated LEEs resulted, in contrast, in a clear trend in the SSB cross sections. Here, the polythymine sequence had the highest cross section for SSBs, which can be explained by strong anionic resonances in this energy range. Furthermore, SSB cross sections were determined as a function of sequence length. This resulted in an increase in the strand breaks to the same extent as the increase in the geometrical cross section. The longest DNA sequence (20 nucleotides) investigated in this series, however, showed smaller cross-section values for SSBs, which can be explained by conformational changes in the DNA. Moreover, several DNA sequences that included the radiosensitizers 5-bromouracil (5BrU) and 8-bromoadenine (8BrA) were investigated and the corresponding SSB cross sections were determined. It was shown that 5BrU reacts very strongly to VUV radiation, leading to high strand-break yields, which in turn showed a strong sequence dependency. 8BrA, on the other hand, showed no sensitization to the applied VUV radiation, since almost no increase in strand-breakage yield was observed in comparison to non-modified DNA sequences. In order to identify the mechanisms of radiation damage by photons, the IEs of certain DNA sequences were further explored using photoionization tandem mass spectrometry. By varying the DNA sequence, the dependence of the IEs on both the type of nucleobase and the DNA strand length could be identified and correlated with the SSB cross sections. An influence of the IE on the photoinduced reaction in the brominated DNA sequences could be excluded.}, language = {en} } @phdthesis{Kruse2018, author = {Kruse, Sebastian}, title = {Scalable data profiling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412521}, school = {Universit{\"a}t Potsdam}, pages = {ii, 156}, year = {2018}, abstract = {Data profiling is the act of extracting structural metadata from datasets. Structural metadata, such as data dependencies and statistics, can support data management operations, such as data integration and data cleaning. Data management is often the most time-consuming activity in any data-related project. 
Its support is extremely valuable in our data-driven world, so that more time can be spent on the actual utilization of the data, e.g., building analytical models. In most scenarios, however, structural metadata is not given and must be extracted first. Therefore, efficient data profiling methods are highly desirable. Data profiling is a computationally expensive problem; in fact, most dependency discovery problems entail search spaces that grow exponentially in the number of attributes. To address this, this thesis introduces novel discovery algorithms for various types of data dependencies - namely inclusion dependencies, conditional inclusion dependencies, partial functional dependencies, and partial unique column combinations - that considerably improve over state-of-the-art algorithms in terms of efficiency and that scale to datasets that cannot be processed by existing algorithms. The key to those improvements lies not only in algorithmic innovations, such as novel pruning rules or traversal strategies, but also in algorithm designs tailored for distributed execution. While distributed data profiling has been mostly neglected by previous works, it is a logical consequence in the face of recent hardware trends and the computational hardness of dependency discovery. To demonstrate the utility of data profiling for data management, this thesis furthermore presents Metacrate, a database for structural metadata. Its salient features are its flexible data model, the capability to integrate various kinds of structural metadata, and its rich metadata analytics library. We show how to perform a data anamnesis of unknown, complex datasets based on this technology. In particular, we describe in detail how to reconstruct the schemata and assess their quality as part of the data anamnesis. The data profiling algorithms and Metacrate have been carefully implemented, integrated with the Metanome data profiling tool, and are available as free software. In that way, we intend to allow for easy repeatability of our research results and also provide them for actual usage in real-world data-related projects.}, language = {en} } @phdthesis{Rosenwinkel2018, author = {Rosenwinkel, Swenja}, title = {Rock glaciers and natural dams in Central Asia}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410386}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 181}, year = {2018}, abstract = {The formation and breaching of naturally dammed lakes have shaped landscapes, especially in seismically active high-mountain regions. Dammed lakes constitute both potential water resources and hazards in the case of dam breaching. Central Asia has mostly arid and semi-arid climates. Rock glaciers already store more water than ice glaciers in some semi-arid regions of the world, but their distribution and advance mechanisms are still under debate in recent research. Their impact on the water availability in Central Asia will likely increase as temperatures rise and glaciers diminish. This thesis provides insight into the relative age distribution of selected Kyrgyz and Kazakh rock glaciers and their single lobes, derived from lichenometric dating. The sizes of roughly 8000 individual lichen specimens were used to approximate the exposure age of the underlying debris surface. We showed that rock-glacier movement differs significantly on small scales. This has several implications for climatic inferences from rock glaciers. 
First, reactivation of their lobes does not necessarily point to climatic changes, or at least to out-of-equilibrium conditions. Second, the elevations of rock-glacier toes can no longer be considered general indicators of the limit of sporadic mountain permafrost, as they have traditionally been used. In the mountainous and seismically active region of Central Asia, natural dams, besides rock glaciers, also play a key role in controlling water and sediment influx into river valleys. Moreover, rock glaciers advancing into valleys seem to be capable of influencing the stream network, damming rivers, or impounding lakes. This influence has not previously been addressed. We quantitatively explored these controls using a new inventory of 1300 Central Asian rock glaciers. Elevation, potential incoming solar radiation, and the size of rock glaciers and their feeder basins played key roles in predicting dam appearance. Bayesian techniques were used to credibly distinguish between lichen sizes on rock glaciers and their lobes, and to find those parameters of a rock-glacier system that most credibly express the potential to build natural dams. To place these studies in the region's history of natural dams, a combination of dating of former lake levels and outburst-flood modelling addresses the history and possible outburst-flood hypotheses of the second largest mountain lake of the world, Issyk Kul in Kyrgyzstan. Megafloods from breached earthen or glacial dams were found to be a likely explanation for some of the lake's highly fluctuating water levels. However, our detailed analysis of candidate lake sediments and outburst-flood deposits also showed that more localised dam breaks to the west of Issyk Kul could have left similar geomorphic and sedimentary evidence in this Central Asian mountain landscape. We thus caution against readily invoking megafloods as the main cause of lake-level drops of Issyk Kul. In summary, this thesis opens some new pathways for studying rock glaciers and natural dams, with several practical implications for studies on mountain permafrost and natural hazards.}, language = {en} } @phdthesis{Sieg2018, author = {Sieg, Tobias}, title = {Reliability of flood damage estimations across spatial scales}, doi = {10.25932/publishup-42616}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426161}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 115}, year = {2018}, abstract = {Extreme natural events are an integral part of the Earth's nature. They only become hazards for society when society is exposed to them. Then, however, natural hazards can have devastating consequences for society. Hydro-meteorological hazards in particular, such as river floods, heavy rainfall events, winter storms, hurricanes or tornadoes, have a high damage potential and occur around the globe. Along with an ever-warming world, extreme weather events that can potentially trigger natural hazards are also becoming more likely. However, it is not only a changing environment that contributes to the increasing risk of natural hazards, but also a changing society. Appropriate risk management is therefore required to adapt society to these changes at every spatial scale. An essential component of this management is the estimation of the economic impacts of natural hazards. 
To date, however, reliable methods for estimating the impacts of hydro-meteorological hazards are lacking. A main component of this work is therefore the development and application of a new method that improves the reliability of damage estimation. As an example, the method was successfully applied to estimate the economic impacts of a river flood, from individual companies up to the entire economic system of Germany. Existing methods usually provide little information about the reliability of their estimates. Since this information facilitates decisions on adaptation to the risk, the new method explicitly represents the reliability of the damage estimates. Reliability here refers not only to the damage estimate itself, but also to the assumptions made about the affected buildings. Following the same principle, the reliability of assumptions about the future can also be represented, which is an essential aspect for forecasts. The representation of reliability and the successful application demonstrate the method's potential for analyses of present and future hydro-meteorological hazards.}, language = {en} } @phdthesis{Wendi2018, author = {Wendi, Dadiyorto}, title = {Recurrence Plots and Quantification Analysis of Flood Runoff Dynamics}, doi = {10.25932/publishup-43191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431915}, school = {Universit{\"a}t Potsdam}, pages = {114}, year = {2018}, abstract = {This work introduces a novel measure to assess similarity between event hydrographs. It is based on Cross Recurrence Plots and Recurrence Quantification Analysis, which have recently gained attention in a range of disciplines when dealing with complex systems. The method attempts to quantify the event runoff dynamics and is based on the time-delay embedded phase-space representation of discharge hydrographs. A phase-space trajectory is reconstructed from the event hydrograph, and pairs of hydrographs are compared to each other based on the distance of their phase-space trajectories. Time-delay embedding allows considering the multi-dimensional relationships between different points in time within the event. Hence, the temporal succession of discharge values is taken into account, such as the impact of the initial conditions on the runoff event. We provide an introduction to Cross Recurrence Plots and discuss their parameterization. An application example based on flood time series demonstrates how the method can be used to measure the similarity or dissimilarity of events, and how it can be used to detect events with rare runoff dynamics. It is argued that this method provides a more comprehensive approach to quantifying hydrograph similarity compared to conventional hydrological signatures.}, language = {en} } @phdthesis{Siegmund2018, author = {Siegmund, Jonatan Frederik}, title = {Quantifying impacts of climate extreme events on vegetation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407095}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2018}, abstract = {Together with the gradual change of mean values, ongoing climate change is projected to increase the frequency and amplitude of temperature and precipitation extremes in many regions of Europe. 
The impacts of such, in most cases short-term, extraordinary climate situations on terrestrial ecosystems are of central interest in recent climate change research, because it cannot be assumed per se that known dependencies between climate variables and ecosystems scale linearly. So far, however, there is a high demand for a method to quantify such impacts in terms of simultaneities of event time series. In the course of this manuscript, the new statistical approach of Event Coincidence Analysis (ECA) as well as its R implementation is introduced, a methodology that allows assessing whether or not two types of event time series exhibit similar sequences of occurrences. Applications of the method are presented, analyzing climate impacts on different temporal and spatial scales: the impact of extraordinary expressions of various climatic variables on tree stem variations (subdaily and local scale), the impact of extreme temperature and precipitation events on the flowering time of European shrub species (weekly and country scale), the impact of extreme temperature events on ecosystem health in terms of NDVI (weekly and continental scale) and the impact of El Ni{\~n}o and La Ni{\~n}a events on precipitation anomalies (seasonal and global scale). The applications presented in this thesis refine already known relationships based on classical methods and also deliver substantial new findings to the scientific community: the widely known positive correlation between flowering time and temperature, for example, is confirmed to be valid for the tails of the distributions, while the widely assumed positive dependency between stem-diameter variation and temperature is shown to be not valid for very warm and very cold days. The larger-scale investigations underline the sensitivity of anthropogenically shaped landscapes towards temperature extremes in Europe and provide a comprehensive global ENSO impact map for strong precipitation events. Finally, by publishing the R implementation of the method, this thesis shall enable other researchers to further investigate similar research questions using Event Coincidence Analysis.}, language = {en} } @phdthesis{Kotha2018, author = {Kotha, Sreeram Reddy}, title = {Quantification of uncertainties in seismic ground-motion prediction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415743}, school = {Universit{\"a}t Potsdam}, pages = {xii, 101}, year = {2018}, abstract = {The purpose of Probabilistic Seismic Hazard Assessment (PSHA) at a construction site is to provide the engineers with a probabilistic estimate of the ground-motion level that could be equaled or exceeded at least once in the structure's design lifetime. Certainty about the predicted ground motion allows the engineers to confidently optimize structural design and mitigate the risk of extensive damage or, in the worst case, a collapse. It is therefore in the interest of engineering, insurance, disaster mitigation, and the security of society at large to reduce uncertainties in the prediction of design ground-motion levels. In this study, I am concerned with quantifying and reducing the prediction uncertainty of regression-based Ground-Motion Prediction Equations (GMPEs). Essentially, GMPEs are regressed best-fit formulae relating event, path, and site parameters (predictor variables) to observed ground-motion values at the site (prediction variable). GMPEs are characterized by a parametric median (μ) and a non-parametric variance (σ) of prediction. 
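For illustration, a GMPE of this kind can be written in standard mixed-effects notation (a generic sketch; the exact functional form of μ differs between models) as \[ \ln Y_{es} = \mu(M_e, R_{es}, S_s) + \delta B_e + \delta W_{es}, \qquad \delta B_e \sim \mathcal{N}(0, \tau^2), \quad \delta W_{es} \sim \mathcal{N}(0, \phi^2), \] where $Y_{es}$ is the ground motion observed at site $s$ during event $e$, δB and δW are the between-event and within-event residuals, and the total variability is $\sigma = \sqrt{\tau^2 + \phi^2}$.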
μ captures the known ground-motion physics, i.e., scaling with earthquake rupture properties (event), attenuation with distance from the source (region/path), and amplification due to local soil conditions (site), while σ quantifies the natural variability of the data that eludes μ. In a broad sense, the GMPE prediction uncertainty is the cumulative effect of (1) the uncertainty on the estimated regression coefficients (the uncertainty on μ, denoted σ_μ) and (2) the inherent natural randomness of the data (σ). The extent of μ parametrization and the quantity and quality of ground-motion data used in a regression govern the size of its prediction uncertainty, σ_μ and σ. In the first step, I present the impact of μ parametrization on the size of σ_μ and σ. Over-parametrization appears to increase σ_μ, because of the large number of regression coefficients (in μ) to be estimated with insufficient data. Under-parametrization mitigates σ_μ, but the reduced explanatory strength of μ is reflected in an inflated σ. For an optimally parametrized GMPE, a ~10\% reduction in σ is attained by discarding the low-quality data from pan-European events with incorrect parametric values (of predictor variables). In the case of regions with scarce ground-motion recordings, without under-parametrization, the only way to mitigate σ_μ is to substitute long-term earthquake data at a location with short-term samples of data across several locations - the ergodic assumption. However, the price of the ergodic assumption is an increased σ, due to the region-to-region and site-to-site differences in ground-motion physics. The σ of an ergodic GMPE developed from a generic ergodic dataset is much larger than that of non-ergodic GMPEs developed from region- and site-specific non-ergodic subsets - which were otherwise too sparse to produce their own specific GMPEs. Fortunately, with the dramatic increase in recorded ground-motion data at several sites across Europe and the Middle East, I could quantify the region- and site-specific differences in ground-motion scaling and upgrade the GMPEs with (1) substantially more accurate region- and site-specific μ for sites in Italy and Turkey, and (2) a significantly smaller prediction variance σ. The benefit of such enhancements to GMPEs is quite evident in my comparison of PSHA estimates from ergodic versus region- and site-specific GMPEs, where the differences in predicted design ground-motion levels at several sites in Europe and the Middle East are as large as ~50\%. Resolving the ergodic assumption with mixed-effects regressions is feasible when the quantified region- and site-specific effects are physically meaningful and the non-ergodic subsets (regions and sites) are defined a priori through expert knowledge. In the absence of expert definitions, I demonstrate the potential of machine learning techniques in identifying efficient clusters of site-specific non-ergodic subsets, based on latent similarities in their ground-motion data. Clustered site-specific GMPEs bridge the gap between site-specific and fully ergodic GMPEs, with their partially non-ergodic μ and a σ ~15\% smaller than the ergodic variance. The methodological refinements to GMPE development produced in this study are applicable to new ground-motion datasets, to further enhance the certainty of ground-motion prediction and, thereby, seismic hazard assessment. 
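In the notation introduced above, resolving the ergodic assumption is often summarized (again as a generic illustration, not the thesis's exact decomposition) by splitting the within-event variability into a site-to-site term and a site-specific remainder, \[ \phi^2 = \phi_{S2S}^2 + \phi_{SS}^2, \] so that a partially non-ergodic, site-specific model retains only $\sigma_{ss} = \sqrt{\tau^2 + \phi_{SS}^2}$, which is smaller than the ergodic $\sigma = \sqrt{\tau^2 + \phi^2}$ because the repeatable site term is absorbed into μ.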
Advanced statistical tools show great potential in improving the predictive capabilities of GMPEs, but the fundamental requirement remains: a large quantity of high-quality ground-motion data from several sites over an extended time period.}, language = {en} } @phdthesis{Knospe2018, author = {Knospe, Gloria-Mona}, title = {Processing of pronouns and reflexives in Turkish-German bilinguals}, doi = {10.25932/publishup-43644}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436442}, school = {Universit{\"a}t Potsdam}, pages = {xxii, 410}, year = {2018}, abstract = {Previous studies on native language (L1) anaphor resolution have found that monolingual native speakers are sensitive to syntactic, pragmatic, and semantic constraints on pronoun and reflexive resolution. However, most studies have focused on English and other Germanic languages, and little is currently known about the online (i.e., real-time) processing of anaphors in languages with syntactically less restricted anaphors, such as Turkish. We also know relatively little about how 'non-standard' populations such as non-native (L2) speakers and heritage speakers (HSs) resolve anaphors. This thesis investigates the interpretation and real-time processing of anaphors in German and in a typologically different and as yet understudied language, Turkish. It compares hypotheses about differences between native speakers' (L1ers) and L2 speakers' (L2ers) sentence processing, looking into differences in processing mechanisms as well as the possibility of cross-linguistic influence. To help fill the current research gap regarding HS sentence comprehension, it compares findings for this group with those for L2ers. To investigate the representation and processing of anaphors in these three populations, I carried out a series of offline questionnaires and Visual-World eye-tracking experiments on the resolution of reflexives and pronouns in both German and Turkish. In the German experiments, native German speakers as well as L2ers of German were tested, while in the Turkish experiments, non-bilingual native Turkish speakers as well as HSs of Turkish with L2 German were tested. This allowed me to observe both cross-linguistic differences and population differences between monolinguals' and different types of bilinguals' resolution of anaphors. Regarding the comprehension of Turkish anaphors by L1ers, contrary to what has previously been assumed, I found that Turkish has no reflexive that follows Condition A of Binding Theory (Chomsky, 1981). Furthermore, I propose more general cross-linguistic differences between Turkish and German, in the form of an overall stronger reliance on pragmatic information in anaphor resolution in Turkish compared to German. As for the processing differences between L1ers and L2ers of a language, I found evidence in support of hypotheses which propose that L2ers of German rely more strongly on non-syntactic information than L1ers (Clahsen \& Felser, 2006, 2017; Cunnings, 2016, 2017), independent of a potential influence of their L1. HSs, on the other hand, showed a tendency to overemphasize interpretational contrasts between different Turkish anaphors compared to monolingual native speakers. However, lower-proficiency HSs were likely to merge different forms for simplified representation and processing. Overall, L2ers and HSs showed differences from monolingual native speakers both in their final interpretation of anaphors and during online processing. 
However, these differences were not parallel between the two types of bilinguals and thus do not support a unified model of L2 and HS processing (cf. Montrul, 2012). The findings of this thesis contribute to the field of anaphor resolution by providing data from a previously unexplored language, Turkish, as well as contributing to research on native and non-native processing differences. My results also illustrate the importance of considering individual differences in the acquisition process when studying bilingual language comprehension. Factors such as age of acquisition, language proficiency and the type of input a language learner receives may influence the processing mechanisms they develop and employ, both between and within different bilingual populations.}, language = {en} } @phdthesis{Li2018, author = {Li, Lina}, title = {Preparation of novel photoactive materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410952}, school = {Universit{\"a}t Potsdam}, pages = {105}, year = {2018}, abstract = {Photocatalysis is considered significant in this new energy era, because the inexhaustibly abundant, clean, and safe energy of the sun can be harnessed for the sustainable, nonhazardous, and economically viable development of our society. In photocatalysis research, the current focus lies on the design and modification of photocatalysts. As one of the most promising photocatalysts, g-C3N4 has gained considerable attention for its striking properties. It has been extensively explored in photocatalysis applications such as water splitting, organic pollutant degradation, and CO2 reduction. Even so, it also has its own drawbacks, which inhibit its further application. Motivated by this, this thesis mainly presents and discusses the preparation of several novel photocatalysts and their photocatalytic performance. These materials were all synthesized via alterations of the classic g-C3N4 preparation method, such as using different pre-compositions for the initial supramolecular complex and post-modification with functional groups. Taking the place of cyanuric acid, 2,5-dihydroxy-1,4-benzoquinone and chloranilic acid can form completely new supramolecular complexes with melamine. After heating, the resulting products of the two complexes showed 2D sheet-like and 1D fiber-like morphologies, respectively, which are maintained even up to a high temperature of 800 °C. With increasing synthesis temperature, these materials range from crystals to polymers to N-doped carbons. Based on their different pre-compositions, they show different dye-degradation performances. CLA-M-250 shows the highest photocatalytic activity and a strong oxidation capacity: it shows not only great photocatalytic performance in RhB degradation, but also oxygen production in water splitting. In the post-modification approach, a novel photocatalytic solution was proposed by modifying the carbon nitride scaffold with cyano groups, whose content can be well controlled by the input of sodium thiocyanate. The cyanation leads to a narrowed band gap as well as improved separation of photo-induced charges. 
Cyano-group-grafted carbon nitride thus shows dramatically enhanced performance in the photocatalytic coupling reaction between styrene and sodium benzenesulfinate under green-light irradiation, which is in stark contrast to the inactivity of pristine g-C3N4.}, language = {en} } @phdthesis{Roezer2018, author = {R{\"o}zer, Viktor}, title = {Pluvial flood loss to private households}, doi = {10.25932/publishup-42991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429910}, school = {Universit{\"a}t Potsdam}, pages = {XXII, 109}, year = {2018}, abstract = {Today, more than half of the world's population lives in urban areas. With a high density of population and assets, urban areas are not only the economic, cultural and social hubs of every society, they are also highly susceptible to natural disasters. As a consequence of rising sea levels and an expected increase in extreme weather events caused by a changing climate, in combination with growing cities, flooding is an increasing threat to many urban agglomerations around the globe. To mitigate the destructive consequences of flooding, appropriate risk management and adaptation strategies are required. So far, flood risk management in urban areas is almost exclusively focused on managing river and coastal flooding. Often overlooked is the risk from small-scale rainfall-triggered flooding, where the rainfall intensity of rainstorms exceeds the capacity of urban drainage systems, leading to immediate flooding. Referred to as pluvial flooding, this flood type, exclusive to urban areas, has caused severe losses in cities around the world. Without further intervention, losses from pluvial flooding are expected to increase in many urban areas due to an increase of impervious surfaces, compounded by an aging drainage infrastructure and a projected increase in heavy precipitation events. While this requires the integration of pluvial flood risk into risk management plans, so far little is known about the adverse consequences of pluvial flooding due to a lack of both detailed data sets and studies on pluvial flood impacts. Therefore, this thesis investigates how pluvial flood losses to private households can be reliably estimated, based on an improved understanding of the drivers of pluvial flood loss. For this purpose, detailed data from pluvial-flood-affected households were collected through structured telephone and web surveys following pluvial flood events in Germany and the Netherlands. Pluvial flood losses to households are the result of complex interactions between impact characteristics, such as the water depth, and a household's resistance, as determined by its risk awareness, preparedness, emergency response, building properties and other influencing factors. Both exploratory analysis and machine-learning approaches were used to analyze differences in resistance and impacts between households and their effects on the resulting losses. The comparison of case studies showed that awareness of pluvial flooding among private households is quite low. Low awareness not only challenges the effective dissemination of early warnings, but was also found to influence the implementation of private precautionary measures. The latter were predominantly implemented by households with previous experience of pluvial flooding. 
Even cases where previous flood events affected a different part of the same city did not lead to an increase in the preparedness of the surveyed households, highlighting the need to account for small-scale variability in both impact and resistance parameters when assessing pluvial flood risk. While it was concluded that the combination of low awareness, ineffective early warning and the fact that only a minority of buildings were adapted to pluvial flooding impaired the coping capacities of private households, the often low water levels still enabled households to mitigate or even prevent losses through a timely and effective emergency response. These findings were confirmed by the detection of loss-influencing variables, showing that cases in which households were able to prevent any loss to the building structure are predominantly explained by resistance variables such as the household's risk awareness, while the degree of loss is mainly explained by impact variables. Based on the important loss-influencing variables detected, different flood loss models were developed. As for flood loss models for river floods, the empirical data from the preceding data collection were used to train flood loss models describing the relationship between impact and resistance parameters and the resulting loss to building structures. Different approaches were adapted from river flood loss models, using both models with the water depth as the only predictor of building structure loss and models incorporating additional variables from the preceding variable detection routine. The high predictive errors of all compared models showed that point predictions are not suitable for estimating losses at the building level, as they severely impair the reliability of the estimates. For that reason, a new probabilistic framework based on Bayesian inference was introduced that is able to provide predictive distributions instead of single loss estimates. These distributions not only give a range of probable losses, but also provide information on how likely a specific loss value is, representing the uncertainty in the loss estimate. Using probabilistic loss models, it was found that the certainty and reliability of a loss estimate at the building level are determined not only by the use of additional predictors, as shown in previous studies, but also by the choice of response distribution defining the shape of the predictive distribution. Here, a mixture of a beta and a Bernoulli distribution, accounting for households that are able to prevent losses to their building's structure, was found to provide significantly more certain and reliable estimates than previous approaches using Gaussian or non-parametric response distributions. The successful model transfer and post-event application to estimate building structure losses in Houston, TX, caused by pluvial flooding during Hurricane Harvey confirmed previous findings and demonstrated the potential of the newly developed multi-variable beta model for future risk assessments. The highly detailed input data set constructed from openly available data sources, containing over 304,000 affected buildings in Harris County, further showed the potential of data-driven, building-level loss models for pluvial flood risk assessment. In conclusion, pluvial flood losses to private households are the result of complex interactions between impact and resistance variables, which should be represented in loss models.
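Schematically, such a beta-Bernoulli mixture response can be written (in a generic mean-precision parameterisation; the symbols here are illustrative rather than those used in the thesis) as \[ p(y) \;=\; \pi\,\delta_0(y) \;+\; (1-\pi)\,\mathrm{Beta}\big(y;\,\mu\phi,\,(1-\mu)\phi\big), \qquad y \in [0,1), \] where $y$ is the relative loss to the building structure, $\pi$ the probability that a household prevents any structural loss, $\delta_0$ a point mass at zero, and $\mu$ and $\phi$ the mean and precision of the beta component.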
The local occurrence of pluvial floods requires loss estimates at high spatial resolution, i.e. at the building level, where losses are variable and uncertainties are high. Therefore, probabilistic loss estimates describing the uncertainty of the estimate should be used instead of point predictions. While the performance of probabilistic models at the building level is mainly driven by the choice of response distribution, multi-variable models are recommended for two reasons: first, additional resistance variables improve the detection of cases in which households were able to prevent structural losses; second, the added variability of additional predictors provides a better representation of the uncertainties when loss estimates from multiple buildings are aggregated. This leads to the conclusion that data-driven probabilistic loss models at the building level allow for reliable loss estimation at an unprecedented level of detail, with a consistent quantification of uncertainties at all aggregation levels. This makes the presented approach suitable for a wide range of applications, from decision support in spatial planning to impact-based early warning systems.}, language = {en} } @phdthesis{Rafighi2018, author = {Rafighi, Iman}, title = {Plasma Instabilities from blazar-induced pair beams propagating through IGM}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-417428}, school = {Universit{\"a}t Potsdam}, pages = {ix, 114}, year = {2018}, abstract = {Modern gamma-ray telescopes provide the main stream of data for astrophysicists in the quest to detect the sources of gamma rays, such as active galactic nuclei (AGN). Many blazars have been detected with gamma-ray telescopes such as HESS, VERITAS, MAGIC and the Fermi satellite as sources of gamma rays with energies E ≥ 100 GeV. These very-high-energy photons interact with the extragalactic background light (EBL), producing ultra-relativistic electron-positron pairs. Observations with Fermi-LAT indicate that the GeV gamma-ray flux from some blazars is lower than that predicted from the full electromagnetic cascade. The pairs can induce electrostatic and electromagnetic instabilities, in which case wave-particle interactions can reduce the energy of the pairs. Therefore, collective plasma effects can also substantially suppress the GeV-band gamma-ray emission, affecting the IGMF constraints as well. Using particle-in-cell (PIC) simulations, we have revisited the issue of plasma instabilities induced by electron-positron beams in the fully ionized intergalactic medium. This problem is related to pair beams produced by the TeV radiation of blazars. The main objective of our study is to clarify the feedback of the beam-driven instabilities on the pairs. The present dissertation provides new results regarding the plasma instabilities from blazar-induced pair beams interacting with the intergalactic medium.
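For orientation, the textbook cold-beam scaling (quoted here as a generic point of reference, not as a result of the dissertation) gives a maximum growth rate for the electrostatic two-stream mode driven by a dilute relativistic pair beam of order \[ \Gamma_{\max} \;\sim\; \frac{\sqrt{3}}{2^{4/3}}\left(\frac{n_b}{n_e}\right)^{1/3}\frac{\omega_{pe}}{\gamma_b}, \] where $n_b/n_e$ is the beam-to-background density ratio, $\omega_{pe}$ the electron plasma frequency of the intergalactic medium and $\gamma_b$ the Lorentz factor of the beam.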
This clarifies the relevance of plasma instabilities and improves our understanding of blazars.}, language = {en} } @phdthesis{Naseri2018, author = {Naseri, Gita}, title = {Plant-derived transcription factors and their application for synthetic biology approaches in Saccharomyces cerevisiae}, doi = {10.25932/publishup-42151}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421514}, school = {Universit{\"a}t Potsdam}, pages = {187}, year = {2018}, abstract = {Humankind has been using the baker's yeast Saccharomyces cerevisiae to brew beer for 9000 years, but only for the past 150 years have we known that this tireless helper in the brewing process is a single-celled living organism. And baker's yeast can do far more. Within the field of synthetic biology, baker's yeast is being established as an innovative tool for the bio-based production of a wide variety of substances, including fine chemicals, biofuels and biopolymers, as well as plant compounds of pharmacological and medical interest. For these diverse substances to be produced in baker's yeast, large amounts of production information, for example from plants, must be transferred into the yeast cells. In addition, the newly introduced biosynthetic pathways must proceed in the cells in a regulated and controlled manner, and optimization processes to increase productivity are necessary. For all of these steps, ready-to-use technologies and comprehensive platforms are still lacking. Therefore, in this doctoral thesis, various technologies and platforms for information transfer, regulation and process optimization were designed and implemented. For the construction of biosynthetic pathways in baker's yeast, a platform of novel regulators and control elements based on plant control elements was first generated and characterized. In a second step, a technology for the combinatorial use of these regulators in the design and optimization of biosynthetic pathways (COMPASS) was developed. Finally, a technology for the process optimization of the engineered yeast cells was developed (CapRedit). The performance of the developed platforms and technologies was demonstrated by optimizing the production of carotenoids (beta-carotene and beta-ionone) and flavonoids (naringenin) in yeast cells. The novel platforms and innovative technologies established in this work are a valuable foundation for extending the usability of baker's yeast. They enable the use of yeast cells in cost-efficient production routes and alternative chemical value chains, allowing, for example, biofuels and plant compounds of pharmacological interest to be produced from renewable raw materials, residues and by-products.
Dar{\"u}ber hinaus ergeben sich Anwendungsm{\"o}glichkeiten zur Bodensanierung und Wasseraufbereitung.}, language = {en} } @phdthesis{Fuhrmann2018, author = {Fuhrmann, Saskia}, title = {Physiologically-based pharmacokinetic and mechanism-based pharmacodynamic modelling of monoclonal antibodies with a focus on tumour targeting}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418861}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 171}, year = {2018}, abstract = {Monoclonal antibodies (mAbs) are an innovative group of drugs with increasing clinical importance in oncology, combining high specificity with generally low toxicity. There are, however, numerous challenges associated with the development of mAbs as therapeutics. Mechanistic understanding of factors that govern the pharmacokinetics (PK) of mAbs is critical for drug development and the optimisation of effective therapies; in particular, adequate dosing strategies can improve patient quality life and lower drug cost. Physiologically-based PK (PBPK) models offer a physiological and mechanistic framework, which is of advantage in the context of animal to human extrapolation. Unlike for small molecule drugs, however, there is no consensus on how to model mAb disposition in a PBPK context. Current PBPK models for mAb PK hugely vary in their representation of physiology and parameterisation. Their complexity poses a challenge for their applications, e.g., translating knowledge from animal species to humans. In this thesis, we developed and validated a consensus PBPK model for mAb disposition taking into account recent insights into mAb distribution (antibody biodistribution coefficients and interstitial immunoglobulin G (IgG) pharmacokinetics) to predict tissue PK across several pre-clinical species and humans based on plasma data only. The model allows to a priori predict target-independent (unspecific) mAb disposition processes as well as mAb disposition in concentration ranges, for which the unspecific clearance (CL) dominates target-mediated CL processes. This is often the case for mAb therapies at steady state dosing. The consensus PBPK model was then used and refined to address two important problems: 1) Immunodeficient mice are crucial models to evaluate mAb efficacy in cancer therapy. Protection from elimination by binding to the neonatal Fc receptor is known to be a major pathway influencing the unspecific CL of both, endogenous and therapeutic IgG. The concentration of endogenous IgG, however, is reduced in immunodeficient mouse models, and this effect on unspecific mAb CL is unknown, yet of great importance for the extrapolation to human in the context of mAb cancer therapy. 2) The distribution of mAbs into solid tumours is of great interest. To comprehensively investigate mAb distribution within tumour tissue and its implications for therapeutic efficacy, we extended the consensus PBPK model by a detailed tumour distribution model incorporating a cell-level model for mAb-target interaction. We studied the impact of variations in tumour microenvironment on therapeutic efficacy and explored the plausibility of different mechanisms of action in mAb cancer therapy. 
The mathematical findings and observed phenomena shed new light on therapeutic utility and dosing regimens in mAb cancer treatment.}, language = {en} } @phdthesis{Riedel2018, author = {Riedel, Marc}, title = {Photonic wiring of enzymatic reactions to photoactive entities for the construction of biohybrid electrodes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-417280}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 168}, year = {2018}, abstract = {In this work, different strategies for the construction of biohybrid photoelectrodes are investigated and evaluated with respect to their intrinsic catalytic activity for the oxidation of the cofactor NADH or their connection with the enzymes PQQ glucose dehydrogenase (PQQ-GDH), FAD-dependent glucose dehydrogenase (FAD-GDH) and fructose dehydrogenase (FDH). The light-controlled oxidation of NADH was analyzed with InGaN/GaN nanowire-modified electrodes. Upon illumination with visible light, the InGaN/GaN nanowires generate an anodic photocurrent, which increases in a concentration-dependent manner in the presence of NADH, thus allowing determination of the cofactor. Furthermore, different approaches for connecting enzymes to quantum dot (QD)-modified electrodes via small redox molecules or redox polymers were analyzed and discussed. First, interaction studies with diffusible redox mediators such as hexacyanoferrate(II) and ferrocenecarboxylic acid were performed with CdSe/ZnS QD-modified gold electrodes to build up photoelectrochemical signal chains between the QDs and the enzymes FDH and PQQ-GDH. In the presence of substrate and under illumination of the electrode, electrons are transferred from the enzyme via the redox mediators to the QDs. The resulting photocurrent depends on the substrate concentration and allows a quantification of the fructose and glucose content in solution. A first attempt with an immobilized redox mediator, i.e. ferrocenecarboxylic acid chemically coupled to PQQ-GDH and attached to QD-modified gold electrodes, reveals the potential to build up photoelectrochemical signal chains even without diffusible redox mediators in solution. However, this approach results in a significantly deteriorated photocurrent response compared to the situation with diffusing mediators. In order to improve the photoelectrochemical performance of such redox-mediator-based, light-switchable signal chains, an osmium-complex-containing redox polymer was evaluated as an electron relay for the electronic linkage between QDs and enzymes. The redox polymer allows the stable immobilization of the enzyme and efficient wiring with the QD-modified electrode. In addition, a 3D inverse opal TiO2 (IO-TiO2) electrode was used for the integration of PbS QDs, redox polymer and FAD-GDH in order to increase the electrode surface. This results in a significantly improved photocurrent response, a rather low onset potential for the substrate oxidation and a broader glucose detection range compared to the approach with ferrocenecarboxylic acid and PQQ-GDH immobilized on CdSe/ZnS QD-modified gold electrodes. Furthermore, IO-TiO2 electrodes were used to integrate sulfonated polyanilines (PMSA1) and PQQ-GDH and to investigate the direct interaction between the polymer and the enzyme for the light-switchable detection of glucose. While PMSA1 provides visible-light excitation and ensures the efficient connection between the IO-TiO2 electrode and the biocatalytic entity, PQQ-GDH enables the oxidation of glucose.
Here, the IO-TiO2 electrodes, with pores of approximately 650 nm, provide a suitable interface and morphology, as required for a stable and functional assembly of the polymer and the enzyme. The successful integration of the polymer and the enzyme is confirmed by the formation of a glucose-dependent anodic photocurrent. In conclusion, this work provides insights into the design of photoelectrodes and presents different strategies for the efficient coupling of redox enzymes to photoactive entities, which allows for light-directed sensing and provides the basis for the generation of power from sunlight and energy-rich compounds.}, language = {en} }