@phdthesis{Šustr2020, author = {Šustr, David}, title = {Molecular diffusion in polyelectrolyte multilayers}, doi = {10.25932/publishup-48903}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489038}, school = {Universit{\"a}t Potsdam}, pages = {106}, year = {2020}, abstract = {Research on novel and advanced biomaterials is an indispensable step towards their application in desirable fields such as tissue engineering, regenerative medicine, cell culture, or biotechnology. The work presented here focuses on such a promising material: a polyelectrolyte multilayer (PEM) composed of hyaluronic acid (HA) and poly(L-lysine) (PLL). This gel-like polymer surface coating is able to accumulate (bio-)molecules such as proteins or drugs and release them in a controlled manner. It mimics the extracellular matrix (ECM) in composition and intrinsic properties. These qualities make the HA/PLL multilayers a promising candidate for multiple bio-applications such as those mentioned above. The work presented aims at the development of a straightforward approach for the assessment of multi-fractional diffusion in multilayers (first part) and at the control of local molecular transport into or from the multilayers by a laser-light trigger (second part). The mechanism of loading and release is governed by the interaction of bioactives with the multilayer constituents and, overall, by diffusion. The diffusion of a molecule in HA/PLL multilayers shows multiple fractions with different diffusion rates. Approaches that can assess the mobility of molecules in such a complex system are limited. This shortcoming motivated the design of the novel evaluation tool presented here. The tool employs a simulation-based approach to evaluate data acquired by the fluorescence recovery after photobleaching (FRAP) method. In this approach, possible fluorescence recovery scenarios are first simulated and afterwards compared with the acquired data, while the parameters of the model are optimized until a sufficient match is achieved. Fluorescent latex particles of different sizes and fluorescein in an aqueous medium are utilized as test samples to validate the analysis results. The diffusion of the protein cytochrome c in HA/PLL multilayers is evaluated as well. This tool significantly broadens the possibilities for analyzing spatiotemporal FRAP data originating from multi-fractional diffusion, while striving to be widely applicable. It has the potential to elucidate the mechanisms of molecular transport and to empower the rational engineering of drug release systems. The second part of the work focuses on the fabrication of such a spatiotemporally controlled drug release system employing the HA/PLL multilayer. This release system comprises different layers of various functionalities that together form a sandwich structure. The bottom layer, which serves as a reservoir, is formed by an HA/PLL PEM deposited on a planar glass substrate. On top of the PEM, a layer of so-called hybrids is deposited. The hybrids consist of thermoresponsive poly(N-isopropylacrylamide) (PNIPAM)-based hydrogel microparticles with surface-attached gold nanorods. The layer of hybrids is intended to serve as a gate that controls the local molecular transport through the PEM-solution interface. The possibility of stimulating the molecular transport by near-infrared (NIR) laser irradiation is explored. 
Of the several approaches tested for the deposition of hybrids onto the PEM surface, the drying-based approach was identified as optimal. Experiments that examine the functionality of the fabricated sandwich at elevated temperature document the reversible volume phase transition of the PEM-attached hybrids while sustaining the stability of the sandwich. Further, the gold nanorods were shown to effectively absorb light in the tissue- and cell-friendly NIR spectral region while transducing the light energy into heat. The rapid and reversible shrinkage of the PEM-attached hybrids was thereby achieved. Finally, dextran was employed as a model transport molecule. It loads into the PEM reservoir within a few seconds with a partition constant of 2.4, while it is released spontaneously in a slower, sustained manner. The local laser irradiation of the sandwich, which contains fluorescein isothiocyanate-tagged dextran, leads to a gradual reduction of fluorescence intensity in the irradiated region. The fabricated release system employs the renowned photoresponsivity of the hybrids in an innovative setting. The results of this research are a step towards a spatially controlled, on-demand drug release system and pave the way to spatiotemporally controlled drug release. The approaches developed in this work have the potential to elucidate the molecular dynamics in the ECM and to foster the engineering of multilayers with properties tuned to mimic the ECM. The work aims at spatiotemporal control over the diffusion of bioactives and their presentation to cells.}, language = {en} } @phdthesis{Zorn2020, author = {Zorn, Edgar Ulrich}, title = {Monitoring lava dome growth and deformation with photogrammetric methods and modelling}, doi = {10.25932/publishup-48360}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-483600}, school = {Universit{\"a}t Potsdam}, pages = {IX, 167}, year = {2020}, abstract = {Lava domes are severely hazardous, mound-shaped extrusions of highly viscous lava that commonly erupt at many active stratovolcanoes around the world. Due to gradual growth and flank oversteepening, such lava domes regularly experience partial or full collapses, resulting in destructive and far-reaching pyroclastic density currents. They are also associated with cyclic explosive activity, as the complex interplay of cooling, degassing, and solidification of dome lavas regularly causes gas pressurization in the dome or the underlying volcanic conduit. Lava dome extrusions can last from days to decades, further highlighting the need for accurate and reliable monitoring data. This thesis aims to improve our understanding of lava dome processes and to contribute to the monitoring and prediction of hazards posed by these domes. The recent rise and increasing sophistication of photogrammetric techniques allow for the extraction of observational data in unprecedented detail and provide ideal tools for accomplishing this purpose. Here, I study natural lava dome extrusions as well as laboratory-based analogue models of lava dome extrusions and employ photogrammetric monitoring by Structure-from-Motion (SfM) and Particle-Image-Velocimetry (PIV) techniques. I primarily use aerial photography data obtained by helicopter, airplanes, Unoccupied Aircraft Systems (UAS) or ground-based time-lapse cameras. Firstly, by combining a long time series of overflight data at Volc{\'a}n de Colima, M{\'e}xico, with seismic and satellite radar data, I construct a detailed timeline of lava dome and crater evolution. 
Using a numerical model, the impact of the extrusion on dome morphology and loading stress is further evaluated, and an influence on the growth direction is identified, bearing important implications for the location of collapse hazards. Secondly, sequential overflight surveys at the Santiaguito lava dome, Guatemala, reveal surface motion data in high detail. I quantify the growth of the lava dome and the movement of a lava flow, showing complex motions that occur on different timescales, and I provide insight into rock properties relevant for hazard assessment, inferred purely from photogrammetric processing of remote sensing data. Lastly, I recreate artificial lava dome and spine growth using analogue modelling under controlled conditions, providing new insights into lava extrusion processes and structures as well as the conditions in which they form. These findings demonstrate the capabilities of photogrammetric data analyses to successfully monitor lava dome growth and evolution, while highlighting the advantages of complementary modelling methods to explain the observed phenomena. The results presented herein further provide important new insights into, and implications for, the hazards posed by lava domes.}, language = {en} } @phdthesis{Zhelavskaya2020, author = {Zhelavskaya, Irina}, title = {Modeling of the Plasmasphere Dynamics}, doi = {10.25932/publishup-48243}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-482433}, school = {Universit{\"a}t Potsdam}, pages = {xlii, 256}, year = {2020}, abstract = {The plasmasphere is a dynamic region of cold, dense plasma surrounding the Earth. Its shape and size are highly susceptible to variations in solar and geomagnetic conditions. Having an accurate model of plasma density in the plasmasphere is important for GNSS navigation and for predicting hazardous effects of radiation in space on spacecraft. The distribution of cold plasma and its dynamic dependence on solar wind and geomagnetic conditions remain, however, poorly quantified. Existing empirical models of plasma density tend to be oversimplified, as they are based on statistical averages over static parameters. Understanding the global dynamics of the plasmasphere using observations from space remains a challenge, as existing density measurements are sparse and limited to locations where satellites can provide in-situ observations. In this dissertation, we demonstrate how such sparse electron density measurements can be used to reconstruct the global electron density distribution in the plasmasphere and capture its dynamic dependence on solar wind and geomagnetic conditions. First, we develop an automated algorithm to determine the electron density from in-situ measurements of the electric field on the Van Allen Probes spacecraft. In particular, we design a neural network to infer the upper hybrid resonance frequency from the dynamic spectrograms obtained with the Electric and Magnetic Field Instrument Suite and Integrated Science (EMFISIS) instrumentation, which is then used to calculate the electron number density. The developed Neural-network-based Upper hybrid Resonance Determination (NURD) algorithm is applied to more than four years of EMFISIS measurements to produce the publicly available electron density data set. We utilize the obtained electron density data set to develop a new global model of plasma density by employing a neural network-based modeling approach. 
The model takes the time history of geomagnetic indices and the location as inputs, and produces the electron density in the equatorial plane as an output. It is extensively validated using in-situ density measurements from the Van Allen Probes mission, and also by comparing the predicted global evolution of the plasmasphere with the global IMAGE EUV images of He+ distribution. The model successfully reproduces erosion of the plasmasphere on the night side as well as plume formation and evolution, and agrees well with the data. The performance of neural networks strongly depends on the availability of training data, which is limited during intervals of high geomagnetic activity. In order to provide reliable density predictions during such intervals, we can employ physics-based modeling. We develop a new approach for optimally combining the neural network- and physics-based models of the plasmasphere by means of data assimilation. The developed approach utilizes the advantages of both neural network- and physics-based modeling and produces reliable global plasma density reconstructions for quiet, disturbed, and extreme geomagnetic conditions. Finally, we extend the developed machine learning-based tools and apply them to another important problem in the field of space weather, the prediction of the geomagnetic index Kp. The Kp index is one of the most widely used indicators for space weather alerts and serves as input to various models, such as those for the thermosphere, the radiation belts and the plasmasphere. It is therefore crucial to predict the Kp index accurately. Previous work in this area has mostly employed artificial neural networks to nowcast and make short-term predictions of Kp, basing their inferences on the recent history of Kp and solar wind measurements at L1. We analyze how the performance of neural networks compares to that of other machine learning algorithms for nowcasting and forecasting Kp for up to 12 hours ahead. Additionally, we investigate several machine learning and information theory methods for selecting the optimal inputs to a predictive model of Kp. The developed tools for feature selection can also be applied to other problems in space physics in order to reduce the input dimensionality and identify the most important drivers. The research outlined in this dissertation clearly demonstrates that machine learning tools can be used to develop empirical models from sparse data and to understand the underlying physical processes. Combining machine learning, physics-based modeling and data assimilation allows us to develop novel methods benefiting from these different approaches.}, language = {en} } @phdthesis{Zhang2020, author = {Zhang, Jianrui}, title = {Completely water-based emulsions as compartmentalized systems via Pickering stabilization}, doi = {10.25932/publishup-47654}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476542}, school = {Universit{\"a}t Potsdam}, pages = {II, 119}, year = {2020}, abstract = {Completely water-based systems are of interest for the development of novel materials for various reasons: on the one hand, they provide a benign environment for biological systems, and on the other hand, they facilitate effective molecular transport in a membrane-free environment. In order to investigate the general potential of aqueous two-phase systems (ATPSs) for biomaterials and compartmentalized systems, various solid particles were applied to stabilize all-aqueous emulsion droplets. 
The target ATPS to be investigated should be prepared by mixing two aqueous solutions of water-soluble polymers, which turn biphasic when a critical polymer concentration is exceeded. Hydrophilic polymers with a wide range of molar masses, such as dextran and poly(ethylene glycol) (PEG), can therefore be applied. Solid particles adsorbed at the interfaces can be exceptionally efficient stabilizers, forming so-called Pickering emulsions, and nanoparticles can bridge the correlation length of polymer solutions and are thereby the best option for water-in-water emulsions. The first approach towards the investigation of ATPSs was conducted with all-aqueous dextran-PEG emulsions in the presence of poly(dopamine) particles (PDP) in Chapter 4. The water-in-water emulsions were formed with a PEG/dextran system by utilizing PDP as stabilizers. The formed emulsions were studied via confocal laser scanning microscopy (CLSM), optical microscopy (OM), cryo-scanning electron microscopy (cryo-SEM) and tensiometry. The emulsions, stable for at least 16 weeks, were easily demulsified via dilution or surfactant addition. Furthermore, the solid PDP at the water-water interface were crosslinked in order to inhibit demulsification of the Pickering emulsion. Transmission electron microscopy (TEM) and scanning electron microscopy (SEM) were used to visualize the morphology of the PDP before and after crosslinking. PDP-stabilized water-in-water emulsions were utilized in the following Chapter 5 to form supramolecular compartmentalized hydrogels. Here, hydrogels were prepared in pre-formed water-in-water emulsions and gelled via α-cyclodextrin-PEG (α-CD-PEG) inclusion complex formation. The formed complexes were studied via X-ray powder diffraction (XRD), and the mechanical properties of the hydrogels were measured with oscillatory shear rheology. In order to verify the compartmentalized state and its triggered decomposition, hydrogels and emulsions were assessed via OM, SEM and CLSM. The last chapter broadens the investigations of the previous two systems by utilizing various carbon nitrides (CN) as different stabilizers in ATPSs. CN introduces another way to trigger demulsification, namely irradiation with visible light. Therefore, emulsification and demulsification with various triggers were probed. The investigated all-aqueous multi-phase systems will act as models for the future fabrication of biocompatible materials, cell micropatterning, and the separation of compartmentalized systems.}, language = {en} } @phdthesis{Zeckra2020, author = {Zeckra, Martin}, title = {Seismological and seismotectonic analysis of the northwestern Argentine Central Andean foreland}, doi = {10.25932/publishup-47324}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473240}, school = {Universit{\"a}t Potsdam}, pages = {vii, 120}, year = {2020}, abstract = {After a severe MW 5.7 earthquake on October 17, 2015, in El Galp{\'o}n in the province of Salta, NW Argentina, I installed a local seismological network around the estimated epicenter. The network covered an area characterized by inherited Cretaceous normal faults and neotectonic faults with unknown recurrence intervals, some of which may have been reactivated normal faults. The 13 three-component seismic stations recorded data continuously for 15 months. The 2015 earthquake took place in the Santa B{\'a}rbara System of the Andean foreland, at about 17 km depth. This region is the easternmost morphostructural region of the central Andes. 
As a part of the broken foreland, it is bounded to the north by the Subandes fold-and-thrust belt and to the south by the Sierras Pampeanas; to the east lies the Chaco-Paran{\'a} basin. A multi-stage morphotectonic evolution with thick-skinned basement uplift and coeval thin-skinned deformation in the intermontane basins is suggested for the study area. The release of stresses associated with the foreland deformation can result in strong earthquakes, as the study area is known for recurrent, destructive historical earthquakes. The historical record reaches back to 1692, when the strongest documented event (magnitude 7 or intensity IX) destroyed the city of Esteco. Destructive earthquakes and surface deformation are thus a hallmark of this part of the Andean foreland. With state-of-the-art Python packages (e.g. pyrocko, ObsPy), a semi-automatic approach is followed to analyze the continuous data collected by the seismological network. The resulting 1435 hypocenter locations fall into three different groups: 1) local crustal earthquakes (nearly half of the events belong to this group), 2) interplate activity at regional distances, in the slab of the Nazca plate, and 3) very deep earthquakes at about 600 km depth. My major interest focused on the first event class. Those crustal events are partly aftershocks of the El Galp{\'o}n earthquake and of a second earthquake farther south on the same fault. Further events can be considered background seismicity of other faults within the study area. Strikingly, the seismogenic zone encompasses the whole crust, with brittle deformation propagating down close to the Moho. From the collected seismological data, a local seismic velocity model is estimated using VELEST. After various stability tests, the robust minimum 1D velocity model provides guiding values for the composition of the local subsurface structure of the crust. Afterwards, a hypocenter relocation enables the assignment of individual earthquakes to aftershock clusters or extended seismotectonic structures. This allows the mapping of previously unknown seismogenic faults. Finally, focal mechanisms are modeled for events with accurately located hypocenters, using the newly derived local velocity model. A compressive regime is attested by the majority of focal mechanisms, while the strike direction of the individual seismogenic structures is in agreement with the overall north-south orientation of the Central Andes, its mountain front, and individual mountain ranges in the southern Santa B{\'a}rbara System.}, language = {en} } @phdthesis{Youakim2020, author = {Youakim, Kris}, title = {Galactic archaeology with metal-poor stars from the Pristine survey}, doi = {10.25932/publishup-47431}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474314}, school = {Universit{\"a}t Potsdam}, pages = {iv, 151}, year = {2020}, abstract = {The Milky Way is a spiral galaxy consisting of a disc of gas, dust and stars embedded in a halo of dark matter. Within this dark matter halo there is also a diffuse population of stars called the stellar halo, which has been accreting stars for billions of years from smaller galaxies that get pulled in and disrupted by the large gravitational potential of the Milky Way. As they are disrupted, these galaxies leave behind long streams of stars that can take billions of years to mix with the rest of the stars in the halo. 
Furthermore, the amount of heavy elements (metallicity) of the stars in these galaxies reflects the rate of chemical enrichment that occurred in them, since the Universe has been slowly enriched in heavy elements (e.g. iron) through successive generations of stars, which produce them in their cores and in supernova explosions. Therefore, stars that contain small amounts of heavy elements (metal-poor stars) either formed at early times, before the Universe was significantly enriched, or in isolated environments. The aim of this thesis is to develop a better understanding of the substructure content and chemistry of the Galactic stellar halo, in order to gain further insight into the formation and evolution of the Milky Way. The Pristine survey uses a narrow-band filter which specifically targets the Ca II H \& K spectral absorption lines to provide photometric metallicities for a large number of stars down to the extremely metal-poor (EMP) regime, making it a very powerful data set for Galactic archaeology studies. In Chapter 2, we quantify the efficiency of the survey using a preliminary spectroscopic follow-up sample of ~ 200 stars. We also use this sample to establish a set of selection criteria to improve the success rate of selecting EMP candidates for follow-up spectroscopy. In Chapter 3, we extend this work and present the full catalogue of ~ 1000 stars from a three-year-long medium-resolution spectroscopic follow-up effort conducted as part of the Pristine survey. From this sample, we compute success rates of 56\% and 23\% for recovering stars with [Fe/H] < -2.5 and [Fe/H] < -3.0, respectively. This demonstrates a high efficiency for finding EMP stars compared to previous searches with success rates of 3-4\%. In Chapter 4, we assemble a sample of ~ 80000 halo stars, using colour and magnitude cuts to select a main sequence turnoff population in the distance range 6 < dʘ < 20 kpc. We then use the spectroscopic follow-up sample presented in Chapter 3 to statistically rescale the Pristine photometric metallicities of this sample, and present the resulting corrected metallicity distribution function (MDF) of the halo. The slope at the metal-poor end is significantly shallower than previous spectroscopic efforts have shown, suggesting that there may be more metal-poor stars with [Fe/H] < -2.5 in the halo than previously thought. This sample also shows evidence that the MDF of the halo may not be bimodal as was proposed by previous works, and that the lack of metal-poor globular clusters in the Milky Way may be the result of a physical truncation of the MDF rather than just statistical under-sampling. Chapter 5 showcases the unexpected capability of the Pristine filter for separating blue horizontal branch (BHB) stars from blue straggler (BS) stars. We demonstrate a purity of 93\% and a completeness of 91\% for identifying BHB stars, a substantial improvement over previous works. We then use this highly pure and complete sample of BHB stars to trace the halo density profile out to d > 100 kpc, and the Sagittarius stream substructure out to ~ 130 kpc. In Chapter 6 we use the photometric metallicities from the Pristine survey to perform a clustering analysis of the halo as a function of metallicity. Separating the Pristine sample into four metallicity bins of [Fe/H] < -2, -2 < [Fe/H] < -1.5, -1.5 < [Fe/H] < -1 and -0.9 < [Fe/H] < -0.8, we compute the two-point correlation function to measure the amount of clustering on scales of < 5 deg. 
For a smooth comparison sample, we create a mock Pristine data set generated using the Galaxia code, based on the Besan{\c{c}}on model of the Galaxy. We find enhanced clustering on small scales (< 0.5 deg) in some regions of the Galaxy for the most metal-poor bin ([Fe/H] < -2), while in others we see large-scale signals that correspond to known substructures in those directions. This confirms that the substructure content of the halo is highly anisotropic and diverse in different Galactic environments. We discuss the difficulties of removing systematic clustering signals from the data and the limitations of disentangling weak clustering signals of real substructures from residual systematic structure in the data. Taken together, the work presented in this thesis approaches the problem of better understanding the halo of our Galaxy from multiple angles. Firstly, it presents a sizeable sample of EMP stars and improves the selection efficiency of EMP stars for the Pristine survey, paving the way for the further discovery of metal-poor stars to be used as probes of early chemical evolution. Secondly, it improves the selection of BHB distance tracers to map out the halo to large distances. Finally, it uses the large samples of metal-poor stars to derive the MDF of the inner halo and to analyse the substructure content at different metallicities. The results of this thesis therefore expand our understanding of the physical and chemical properties of the Milky Way stellar halo, and provide insight into the processes involved in its formation and evolution.}, language = {en} } @phdthesis{Yang2020, author = {Yang, Xiaoqiang}, title = {Spatial and temporal analyses of catchment and in-stream nitrate dynamics}, doi = {10.25932/publishup-47702}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-477029}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 146}, year = {2020}, abstract = {Water quality in river systems is of growing concern due to rising anthropogenic pressures and climate change. Mitigation efforts have been guided by different governance conventions during recent decades (e.g., the Water Framework Directive in Europe). Despite significant improvement through relatively straightforward measures, the environmental status has likely reached a plateau. A higher spatiotemporal accuracy of catchment nitrate modeling is, therefore, needed to identify critical source areas of diffuse nutrient pollution (especially for nitrate) and to further guide the implementation of spatially differentiated, cost-effective mitigation measures. On the other hand, emerging high-frequency sensor monitoring raises the monitoring resolution to the time scales of biogeochemical processes and enables more flexible monitoring deployments under varying conditions. The newly available information offers new prospects for understanding nitrate spatiotemporal dynamics. Formulating such advanced process understanding into catchment models is critical for further model development and environmental status evaluation. This dissertation targets a comprehensive analysis of catchment and in-stream nitrate dynamics and aims to derive new insights into their spatial and temporal variability through the development of a new fully distributed model and the use of new high-frequency data. Firstly, a new fully distributed, process-based catchment nitrate model (the mHM-Nitrate model) is developed based on the mesoscale Hydrological Model (mHM) platform. 
Nitrate process descriptions are adopted from the Hydrological Predictions for the Environment (HYPE) model, with considerably improved implementations. With its multiscale grid-based discretization, mHM-Nitrate balances spatial representation and modeling complexity. The model has been thoroughly evaluated in the Selke catchment (456 km2), central Germany, which is characterized by heterogeneous physiographic conditions. Results show that the model captures the long-term discharge and nitrate dynamics at three nested gauging stations well. Using daily nitrate-N observations, the model is also validated in capturing short-term fluctuations due to changes in runoff partitioning and spatial contribution during flooding events. Comparing the model simulations with values reported in the literature shows that the model is capable of providing detailed and reliable spatial information on nitrate concentrations and fluxes. Therefore, the model can be taken as a promising tool for environmental scientists in advancing environmental modeling research, as well as for stakeholders in supporting their decision-making, especially for spatially differentiated mitigation measures. Secondly, a parsimonious approach for regionalizing in-stream autotrophic nitrate uptake is proposed using high-frequency data and further integrated into the new mHM-Nitrate model. The new regionalization approach considers the potential uptake rate (as a general parameter) and the effects of above-canopy light and riparian shading (represented by global radiation and leaf area index data, respectively). Multi-parameter sensors have been continuously deployed in a forested upstream reach and an agricultural downstream reach of the Selke River. Using the continuous high-frequency data from both streams, daily autotrophic uptake rates (2011-2015) are calculated and used to validate the regionalization approach. The performance and spatial transferability of the approach are confirmed by its ability to capture the distinct seasonal patterns and value ranges in both the forested and the agricultural stream. Integrating the approach into the mHM-Nitrate model allows the spatiotemporal variability of in-stream nitrate transport and uptake to be investigated throughout the river network. Thirdly, to further assess the spatial variability of catchment nitrate dynamics, the fully distributed parameterization is investigated through sensitivity analysis for the first time. Sensitivity results show that parameters of soil denitrification, in-stream denitrification and in-stream uptake processes are the most sensitive throughout the Selke catchment. All of them show high spatial variability, and hot-spots of parameter sensitivity can be explicitly identified. Spearman rank correlations between the sensitivity indices and multiple catchment factors are further analyzed. The correlations show that the controlling factors vary spatially, reflecting heterogeneous catchment responses in the Selke catchment. These insights are, therefore, valuable for informing future parameter regionalization schemes for catchment water quality models. In addition, the spatial distributions of parameter sensitivity are also influenced by the gauging information that is used for the sensitivity evaluation. 
Therefore, an appropriate monitoring scheme is highly recommended to truly reflect the catchment responses.}, language = {en} } @phdthesis{Wolff2020, author = {Wolff, Christian Michael}, title = {Identification and reduction of losses in perovskite solar cells}, doi = {10.25932/publishup-47930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-479301}, school = {Universit{\"a}t Potsdam}, pages = {x, 158}, year = {2020}, abstract = {Perovskite solar cells have become one of the most studied systems in the quest for new, cheap and efficient solar cell materials. Within a decade, device efficiencies have risen to >25\% for single-junction devices and >29\% for tandem devices on top of silicon. This rapid improvement was in many ways fortunate, as, e.g., the energy levels of commonly used halide perovskites are compatible with already existing materials from other photovoltaic technologies such as dye-sensitized or organic solar cells. Despite this rapid success, the fundamental working principles must be understood to allow concerted further improvements. This thesis focuses on a comprehensive understanding of recombination processes in functioning devices. First, the impact of the energy level alignment between the perovskite and the fullerene-based electron transport layer is investigated. This controversial topic is comprehensively addressed, and recombination is mitigated by reducing the energy difference between the perovskite conduction band minimum and the LUMO of the fullerene. Additionally, an insulating blocking layer is introduced, which is even more effective in reducing this recombination, without compromising carrier collection and thus efficiency. With the rapid efficiency development (certified efficiencies have broken through the 20\% ceiling) and thousands of researchers working on perovskite-based optoelectronic devices, reliable protocols on how to reach these efficiencies are lacking. Having established robust methods for >20\% devices, while keeping track of possible pitfalls, a detailed description of the fabrication of perovskite solar cells at the highest efficiency level (>20\%) is provided. The fabrication of low-temperature p-i-n structured devices is described, commenting on important factors such as practical experience, processing atmosphere \& temperature, material purity and solution age. Analogous to reliable fabrication methods, a method to identify recombination losses is needed to further improve efficiencies. Thus, absolute photoluminescence is identified as a direct way to quantify the quasi-Fermi level splitting of the perovskite absorber (1.21 eV) and the interfacial recombination losses imposed by the transport layers, which reduce it to ~1.1 eV. By implementing very thin interlayers at both the p- and n-interface (PFN-P2 and LiF, respectively), these losses are suppressed, enabling an open-circuit voltage (VOC) of up to 1.17 V. Optimizing the device dimensions and the bandgap, 20\% efficient devices with 1 cm2 active area are demonstrated. Another important consideration is the stability of the solar cells when subjected to field-relevant stressors during operation, in particular heat, light, bias, or a combination thereof. Perovskite layers - especially those incorporating organic cations - have been shown to degrade if subjected to these stressors. Keeping in mind that several interlayers have been successfully used to mitigate recombination losses, a family of perfluorinated self-assembled monolayers (X-PFCn, where X denotes I/Br and n = 7-12) is introduced as interlayers at the n-interface. 
Indeed, they reduce interfacial recombination losses, enabling device efficiencies of up to 21.3\%. Even more importantly, they improve the stability of the devices. The solar cells with IPFC10 are stable over 3000 h of storage in ambient air and withstand a harsh 250 h of maximum power point (MPP) operation at 85 °C without appreciable efficiency losses. To advance further and improve device efficiencies, a sound understanding of the photophysics of a device is imperative. Many experimental observations in recent years have, however, drawn an inconclusive picture, often suffering from technical or physical impediments, disguising, e.g., capacitive discharge as recombination dynamics. To circumvent these obstacles, fully operational, highly efficient perovskite solar cells are investigated by a combination of multiple optical and optoelectronic probes, allowing a conclusive picture of the recombination dynamics in operation to be drawn. Supported by drift-diffusion simulations, the device recombination dynamics can be fully described by a combination of first-, second- and third-order recombination, and JV curves as well as luminescence efficiencies over multiple illumination intensities are well described within the model. On this basis, steady-state carrier densities, effective recombination constants, densities of states and effective masses are calculated, putting the devices at the brink of the radiative regime. Moreover, a comprehensive review of recombination in state-of-the-art devices is given, highlighting the importance of interfaces in nonradiative recombination. Different strategies to assess these are discussed, before emphasizing successful strategies to reduce interfacial recombination and pointing towards the necessary steps to further improve device efficiency and stability. Overall, the main findings represent an advancement in understanding loss mechanisms in highly efficient solar cells. Different reliable optoelectronic techniques are used, and interfacial losses are found to be of critical importance for both efficiency and stability. Addressing the interfaces, several interlayers are introduced, which mitigate recombination losses and degradation.}, language = {en} } @phdthesis{Wolf2020, author = {Wolf, Kristine}, title = {Produktentwicklung eines luteinhaltigen, kolloidalen Nahrungserg{\"a}nzungsmittels: physikochemische und ern{\"a}hrungsphysiologische Aspekte}, doi = {10.25932/publishup-48774}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487743}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 243}, year = {2020}, abstract = {Over the past two decades, secondary plant compounds and their health-promoting properties have been studied extensively from a nutritional-physiological perspective, and specific positive effects in the human organism have in part been described in great detail. Among the carotenoids, the secondary plant compound lutein has moved into the focus of research, particularly regarding the prevention of ophthalmological diseases. This xanthophyll, synthesized exclusively by plants and some algae, is taken up into the human organism through plant-based foods, in particular green leafy vegetables. There it accumulates preferentially in the macular pigment of the retina of the human eye and plays an important role in maintaining the functionality of the photoreceptor cells. In the course of ageing, a decrease in the density of the macular pigment and the degradation of lutein can be observed. 
The resulting destabilization of the photoreceptor cells, in connection with an altered metabolic state in the ageing organism, can lead to the development of age-related macular degeneration (AMD). The pathological symptoms of this eye disease range from loss of visual acuity to irreversible blindness. Since therapeutic agents can only prevent progression, research efforts aim at finding preventive measures. The supplementation of lutein-containing preparations offers one starting point. Dietary supplements with lutein in various forms of application are already on the market. Limiting factors are the stability and bioavailability of lutein, which is in part costly to obtain and of unknown purity. For this reason, the use of lutein esters, the plant storage form of lutein, would be advantageous in a dietary supplement. In addition to their naturally higher stability, lutein esters can be used sustainably and cost-effectively. In this work, physicochemical and nutritionally relevant aspects of the product development process of a dietary supplement containing lutein esters in a colloidal formulation were investigated. The hitherto unique application of lutein esters in an oral spray was intended to facilitate and improve the uptake of the active ingredient, particularly for elderly people. Taking into account the results and the nutritional-physiological evaluation, recommendations were to be given, among other things, for the recipe composition of a miniemulsion (an emulsion with particle sizes <1.0 µm). An assessment of the bioavailability of the lutein esters from the developed colloidal formulations was made possible by in vitro studies of resorption and absorption availability. In physical investigations, basic constituents of the formulations were first specified. In initial active-ingredient-free model emulsions, selected oils as the carrier phase as well as emulsifiers and solubilizers (peptizers) were physically tested for their suitability to provide a miniemulsion. The best stability and optimal properties of a miniemulsion were obtained using MCT oil (medium-chain triglycerides) or rapeseed oil in the carrier phase, together with the emulsifier Tween® 80 (Tween 80) alone or in combination with the whey protein hydrolysate Biozate® 1 (Biozate 1). The physical investigations of the model emulsions yielded the pre-emulsions as prototypes. These contained the active ingredient lutein in different forms. Pre-emulsions were designed with lutein, with lutein esters, and with both lutein and lutein esters, containing the emulsifier Tween 80 alone or in combination with Biozate 1. In the preparation of the pre-emulsions, the emulsification techniques of ultrasound followed by high-pressure homogenization led to the desired miniemulsions. Both emulsifiers provided optimal stabilization effects. Subsequently, the active ingredients were characterized physicochemically. In particular, lutein esters from oleoresin proved to be stable under various storage conditions. 
Likewise, the stability of lutein and lutein esters was demonstrated upon short-term treatment of the active ingredients under specific mechanical, thermal, acidic and alkaline conditions. The addition of Biozate 1 provided additional protection only for lutein. Upon prolonged physicochemical treatment, the active ingredients incorporated into the miniemulsions were subject to moderate degradation. Their sensitivity towards the alkaline milieu was striking. Within the recipe development of the dietary supplement, the recommendation was therefore to design a miniemulsion with a slightly acidic pH milieu, protecting the active ingredient through the controlled addition of further ingredients. In the further development process of the dietary supplement, final formulations with lutein esters as the active ingredient were established. The sole use of the emulsifier Biozate 1 proved unsuitable. The remaining final formulations contained, in the oil phase, the active ingredient together with MCT oil or rapeseed oil as well as α-tocopherol for stabilization. The aqueous phase consisted of the emulsifier Tween 80 or a combination of Tween 80 and Biozate 1. Additives further included ascorbic acid and potassium sorbate for microbiological protection, as well as xylitol and orange flavour for sensory effects. The arrangement of the basic recipe and the applied emulsification procedure yielded stable miniemulsions. Furthermore, long-term storage experiments with the final formulations at 4 °C showed that the required amount of lutein esters in the product was maintained. Analogous investigations of a lutein-containing commercial preparation, by contrast, confirmed an instability of lutein that occurred even during short-term storage. Finally, the bioavailability of lutein esters was examined through in vitro resorption and absorption studies with the pre-emulsions and final formulations. After treatment in an established in vitro digestion model, a minor resorption availability of the lutein esters was determined. Micellarization of the active ingredient from the designed formulations was observed only to a limited extent. Enzymatic cleavage of the lutein esters to free lutein was detected only to a limited degree. The specificity and activity of the corresponding hydrolytic lipases towards lutein esters must be rated as extremely low. In subsequent cell culture experiments with the Caco-2 cell line, no cytotoxic effects of the relevant ingredients in the pre-emulsions were observed. By contrast, a sensitivity towards the final formulations was observed, which should be considered in connection with irritation of the mucous membranes of the gastrointestinal tract. A less complex recipe could possibly minimize the observed limitations. Final absorption studies showed that, in principle, a minor uptake of primarily lutein, but also of lutein monoesters, into the enterocytes from miniemulsions can take place. Neither Tween 80 nor Biozate 1 had a beneficial influence on the absorption rate of lutein or lutein esters. Metabolization of the active ingredients by prior in vitro digestion equally increased the cellular uptake of active ingredients from formulations with lutein and with lutein esters. 
The observed uptake of lutein and lutein monoesters into the enterocytes appears to occur via passive diffusion, although active transport cannot be ruled out. Lutein diesters, by contrast, cannot reach the enterocytes via micellarization and simple diffusion because of their molecular size. Their uptake into the epithelial cells of the small intestine requires prior hydrolytic cleavage by specific lipases. This step in turn limits the effective uptake of the lutein esters into the cells and constitutes a restriction of their bioavailability in comparison to free lutein. In summary, a low bioavailability from colloidal formulations was demonstrated for the physicochemically stable lutein esters. Nevertheless, their use as a source of the secondary plant compound lutein in a dietary supplement can be recommended. In combination with the intake of lutein-rich plant foods, a contribution to improving the lutein status can be achieved despite the expected low bioavailability of the lutein esters from the supplement. Relevant publications showed clear correlations between the intake of preparations containing lutein esters and an increase in the serum lutein concentration and in the macular pigment density in vivo. The slightly better bioavailability of free lutein is critically linked to its instability and high cost. As an outcome of this work, the commercial product Vita Culus® was conceived. In the future, human intervention studies with the supplement should allow a final assessment of the bioavailability of lutein esters from the preparation.}, language = {de} } @phdthesis{Wen2020, author = {Wen, Xi}, title = {Distribution patterns and environmental drivers of methane-cycling microorganisms in natural environments and restored wetlands}, doi = {10.25932/publishup-47177}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471770}, school = {Universit{\"a}t Potsdam}, pages = {VIII, iii, 152}, year = {2020}, abstract = {Methane is an important greenhouse gas contributing to global climate change. Natural environments and restored wetlands contribute a large proportion of the global methane budget. Methanogenic archaea (methanogens) and methane-oxidizing bacteria (methanotrophs), the biogenic producers and consumers of methane, play key roles in the methane cycle of these environments. A large number of studies have revealed the distribution, diversity and composition of these microorganisms in individual habitats. However, uncertainties remain in predicting the response and feedback of methane-cycling microorganisms to future climate changes and related environmental changes, due to the limited spatial scales considered so far and due to a poor recognition of the biogeography of these important microorganisms across global and local scales. With the aim of improving our understanding of whether and how methane-cycling microbial communities will be affected by a series of dynamic environmental factors in response to climate change, this PhD thesis investigates the biogeographic patterns of methane-cycling communities and the driving factors which define these patterns at different spatial scales. 
At the global scale, a meta-analysis was performed by integrating 94 globally distributed public datasets together with environmental data from various natural environments, including soils, lake sediments, estuaries, marine sediments, hydrothermal sediments and mud volcanoes. In combination with a global biogeographic map of methanogenic archaea from multiple natural environments, this thesis reveals that biogeographic patterns of methanogens exist. Terrestrial habitats showed higher alpha diversity than marine environments. Methanoculleus and Methanosaeta (Methanothrix) are the most frequently detected taxa in marine habitats, while Methanoregula prevails in terrestrial habitats. Estuarine ecosystems, the transition zones between marine and terrestrial/limnic ecosystems, have the highest methanogenic richness but comparatively low methane emission rates. At the local scale, this study compared two rewetted fens with known high methane emissions in northeastern Germany, a coastal brackish fen (H{\"u}telmoor) and a freshwater riparian fen (Polder Zarnekow). Consistent with their different geochemical conditions and land-use histories, the two rewetted fens exhibit dissimilar methanogenic and, especially, methanotrophic community compositions. The methanotrophic community was generally under-represented among the prokaryotic communities, and both fens show similarly low ratios of methanotrophic to methanogenic abundances. Since few studies have characterized methane-cycling microorganisms in rewetted fens, this study provides first evidence that the rapidly and well re-established methanogenic community, in combination with the low and incomplete re-establishment of the methanotrophic community after rewetting, contributes to elevated, sustained methane fluxes following rewetting. Finally, this thesis demonstrates that dispersal limitation only slightly regulates the biogeographic distribution patterns of methanogenic microorganisms in natural environments and restored wetlands. Instead, their existence, adaptation and establishment are more associated with the selective pressures under different environmental conditions. Salinity, pH and temperature are identified as the most important factors in shaping microbial community structure at different spatial scales (global versus terrestrial environments). Predicted changes in climate, such as increasing temperature, changes in precipitation patterns and increasing frequency of flooding events, are likely to induce a series of environmental alterations, which will either directly or indirectly affect the driving environmental forces of methanogenic communities, leading to changes in their community composition and thus potentially also in methane emission patterns in the future.}, language = {en} } @phdthesis{Weisshuhn2020, author = {Weißhuhn, Peter}, title = {Assessing biotope vulnerability to landscape changes}, doi = {10.25932/publishup-44277}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442777}, school = {Universit{\"a}t Potsdam}, pages = {v, 134}, year = {2020}, abstract = {Large-scale patterns of global land use change are very frequently accompanied by natural habitat loss. Assessing the consequences of habitat loss for the remaining natural and semi-natural biotopes requires the inclusion of cumulative effects at the landscape level. The interdisciplinary concept of vulnerability constitutes an appropriate assessment framework at the landscape level, though with few examples of its application to ecological assessments. 
A comprehensive biotope vulnerability analysis allows identification of the areas most affected by landscape change and, at the same time, those with the lowest chances of regeneration. To this end, a series of ecological indicators were reviewed and developed. They measured spatial attributes of individual biotopes as well as ecological and conservation characteristics of the respective resident species communities. The final vulnerability index combined seven largely independent indicators, which covered the exposure, sensitivity and adaptive capacity of biotopes to landscape changes. Results for biotope vulnerability were provided at the regional level. This appears to be an appropriate scale with relevance for spatial planning and for designing the distribution of nature reserves. Using the vulnerability scores calculated for the German federal state of Brandenburg, hot spots and clusters within and across the distinguished types of biotopes were analysed. Biotope types with a high dependence on water availability, as well as biotopes of the open landscape containing woody plants (e.g., orchard meadows), are particularly vulnerable to landscape changes. In contrast, the majority of forest biotopes appear to be less vulnerable. Despite the appeal of such generalised statements for some biotope types, the distribution of values suggests that conservation measures for the majority of biotopes should be designed specifically for individual sites. Taken together, the size, shape and spatial context of individual biotopes often had a dominant influence on the vulnerability score. The implementation of the biotope vulnerability analysis at the regional level indicated that large biotope datasets can be evaluated with a high level of detail using geoinformatics. Drawing on previous work in landscape spatial analysis, the reproducible approach relies on transparent calculations of quantitative and qualitative indicators. At the same time, it provides a synoptic overview and information on the individual biotopes. It is expected to be most useful for nature conservation in combination with an understanding of the population, species, and community attributes known for specific sites. The biotope vulnerability analysis facilitates a foresighted assessment of different land uses, aiding in identifying options to slow habitat loss to sustainable levels. It can also be incorporated into the planning of restoration measures, guiding efforts to remedy ecological damage. Restoration of any specific site could yield synergies with the conservation objectives of other sites, through enhancing the habitat network or buffering against future landscape change. The biotope vulnerability analysis could be developed in line with other important ecological concepts, such as resilience and adaptability, further extending the broad thematic scope of the vulnerability concept. Vulnerability can increasingly serve as a common framework for the interdisciplinary research necessary to solve major societal challenges.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Xia}, title = {Reef ecosystem recovery following the Middle Permian (Capitanian) mass extinction}, doi = {10.25932/publishup-48750}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487502}, school = {Universit{\"a}t Potsdam}, pages = {XI, 144}, year = {2020}, abstract = {To anticipate the future of present-day reef ecosystem turnover under environmental stresses such as global warming and ocean acidification, analogue studies from the geologic past are needed. 
As a critical time of reef ecosystem innovation, the Permian-Triassic transition witnessed the most severe demise of Phanerozoic reef builders, and the establishment of modern-style symbiotic relationships within the reef-building organisms. As the initial stage of this transition, the Middle Permian (Capitanian) mass extinction caused a reef eclipse in the early Late Permian, which led to a gap in our understanding of the post-extinction Wuchiapingian reef ecosystem, shortly before the radiation of Changhsingian reefs. This thesis presents detailed biostratigraphic, sedimentological, and palaeoecological studies of the Wuchiapingian reef recovery following the Middle Permian (Capitanian) mass extinction, carried out on the only recorded Wuchiapingian reef setting, outcropping in South China at the Tieqiao section. Conodont biostratigraphic zonations were revised from the Early Permian Artinskian to the Late Permian Wuchiapingian in the Tieqiao section. Twenty main and seven subordinate conodont zones are determined at the Tieqiao section, including two conodont zones below and above the Tieqiao reef complex. The age of the Tieqiao reef was constrained as early to middle Wuchiapingian. After constraining the reef age, detailed two-dimensional outcrop mapping, combined with lithofacies studies, was carried out on the Wuchiapingian Tieqiao section to investigate the stratigraphic reef growth pattern as well as the lateral changes of reef geometry at the outcrop scale. Semi-quantitative studies of the reef-building organisms were used to determine their evolutionary pattern during the reef recovery. Six reef growth cycles were identified within six transgressive-regressive cycles in the Tieqiao section. The reefs developed within the upper part of each regressive phase and were dominated by different biotas. The timing of the initial reef recovery after the Middle Permian (Capitanian) mass extinction was updated to the Clarkina leveni conodont zone, which is earlier than previously thought. Metazoans such as sponges were not the major components of the Wuchiapingian reefs until the 5th and 6th cycles. Thus, the recovery of the metazoan reef ecosystem after the Middle Permian (Capitanian) mass extinction was clearly delayed. In addition, although the importance of metazoan reef builders such as sponges did increase during the recovery, encrusting organisms such as Archaeolithoporella and Tubiphytes, combined with microbial carbonate precipitation, still played significant roles in the reef-building process and reef recovery after the mass extinction. Based on the results of the outcrop mapping and sedimentological studies, a quantitative composition analysis of the Tieqiao reef complex was applied to selected thin sections to further investigate the functioning of the reef-building components and the reef evolution after the Middle Permian (Capitanian) mass extinction. Data sets of skeletal grains and whole-rock components were analyzed. The results show eleven biocommunity clusters and eight rock composition clusters dominated by different skeletal grains and rock components, respectively. Sponges, Archaeolithoporella and Tubiphytes were the most ecologically important components of the Wuchiapingian Tieqiao reef, while clotted micrites and syndepositional cements were additional important rock components of the reef cores. The sponges were important throughout the reef recovery. Tubiphytes were broadly distributed in different environments and played a key role in the initial reef communities. 
Archaeolithoporella concentrated in the shallower parts of the reef cycles (i.e., the upper part of the reef core) and was functionally significant for the enlargement of reef volume. In general, the reef recovery after the Middle Permian (Capitanian) mass extinction has some similarities with the reef recovery following the end-Permian mass extinction. It shows a delayed recovery of metazoan reefs and a stepwise recovery pattern that was controlled by both ecological and environmental factors. The importance of encrusting organisms and microbial carbonates is also similar to that in most other post-extinction reef ecosystems. These findings help extend our understanding of reef ecosystem evolution under environmental perturbations or stresses.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Weishi}, title = {Influence of river reconstruction at a bank filtration site}, doi = {10.25932/publishup-49023}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490234}, school = {Universit{\"a}t Potsdam}, pages = {IIV, 120}, year = {2020}, abstract = {Bank filtration is an effective water treatment technique and is widely adopted in Europe along major rivers. It is the process in which surface water penetrates the riverbed, flows through the aquifer, and is then extracted by near-bank production wells. As the water flows through the subsurface passage, its quality is improved by a series of beneficial processes. Long-term riverbank filtration also produces colmation layers on the riverbed. The colmation layer may act as a bioactive zone that is governed by biochemical and physical processes owing to its enrichment in microbes and organic matter. Its low permeability may strongly limit surface water infiltration and further lead to a decreasing recoverable ratio of the production wells. The removal of the colmation layer is therefore a trade-off between treatment capacity and treatment efficiency. The goal of this Ph.D. thesis is to focus on the temporal and spatial changes of water quality and quantity along the flow path of a hydrogeologically heterogeneous riverbank filtration site adjacent to an artificially reconstructed (bottom excavation and bank reconstruction) canal in Potsdam, Germany. To quantify the changes in the infiltration rate, travel time distribution, and thermal field brought about by the canal reconstruction, a three-dimensional flow and heat transport model was created. This model has two scenarios, 1) 'with' canal reconstruction, and 2) 'without' canal reconstruction. Overall, the model calibration results of both hydraulic heads and temperatures matched those observed in the field study. In comparison to the model without reconstruction, the reconstruction model led to more water being infiltrated into the aquifer along that section, on average 521 m3/d, which corresponded to around 9\% of the total pumping rate. The subsurface travel-time distribution shifted substantially towards shorter travel times. Flow paths with travel times <200 days increased by ~10\% and those with <300 days by 15\%. Furthermore, the thermal distribution in the aquifer showed that the seasonal variation in the scenario with reconstruction reaches deeper and propagates further laterally. By scatter plotting δ18O versus δ2H, the infiltrated river water could be differentiated from water flowing in the deep aquifer, which may contain remnant landside groundwater from further north. In contrast, the increase of the river water contribution due to decolmation could be shown by a Piper plot.
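For reference, the groundwater flow simulated by such three-dimensional models is governed by Darcy's law and the flow continuity equation - standard hydrogeological theory rather than a result of this thesis:
\[
\mathbf{q} = -K\,\nabla h, \qquad S_s\,\frac{\partial h}{\partial t} = \nabla\cdot\left(K\,\nabla h\right) + W,
\]
where $\mathbf{q}$ is the specific discharge, $K$ the hydraulic conductivity, $h$ the hydraulic head, $S_s$ the specific storage, and $W$ a source/sink term such as pumping wells; a colmation layer enters such a model as a zone of strongly reduced $K$ at the riverbed.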
Geological heterogeneity caused a substantial spatial difference in redox zonation among different flow paths, both horizontally and vertically. A Wilcoxon rank test showed that the reconstruction changed the redox potential differently across the observation wells. However, given the small absolute concentration levels, the change is relatively minor. The treatment efficiency for both organic and inorganic matter remained consistent after the reconstruction, except for ammonium. The inconsistent results for ammonium could be explained by changes in the cation exchange capacity (CEC) of the newly paved riverbed. Because the bed was new, it was not yet capable of retaining the newly produced ammonium by sorption, which further led to the breakthrough of an ammonium plume. It is estimated that the peak of the ammonium plume will reach the most distant observation well before February 2024, while the peak concentration could be further dampened by sorption and diluted by the subsequent low-ammonium flow. The consistent DOC and SUVA levels suggest that there was no clear preference for organic matter removal along the flow path.}, language = {en} } @phdthesis{Wang2020, author = {Wang, Jingwen}, title = {Electret properties of polypropylene with surface chemical modification and crystalline reconstruction}, doi = {10.25932/publishup-47027}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470271}, school = {Universit{\"a}t Potsdam}, pages = {vi, 121}, year = {2020}, abstract = {As one of the most-produced commodity polymers, polypropylene draws considerable scientific and commercial interest as an electret material. The present thesis discusses the influence of surface chemical modification and crystalline reconstruction on the electret properties of polypropylene thin films. Chemical treatment with orthophosphoric acid can significantly improve the surface charge stability of polypropylene electrets by introducing phosphorus- and oxygen-containing structures onto the modified surface. Thermally stimulated discharge measurements and charge profiling by means of piezoelectrically generated pressure steps are used to investigate the electret behaviour. It is concluded that deep traps of limited number density are created during the treatment with inorganic chemicals. Hence, the improvement dramatically decreases when the surface-charge density is substantially higher than ±1.2×10^(-3) C·m^(-2). The newly formed traps also show a higher trapping energy for negative charges. The energetic distributions of the traps in the non-treated and chemically treated samples offer insight into the dominant roles of the surface and of foreign chemicals in charge storage and transport in polypropylene electrets. Additionally, different electret properties are observed in polypropylene films with spherulitic and transcrystalline structures. This indicates that charge storage and transport depend on the crystallite and molecular orientations in the crystalline phase. In general, more diverse crystalline growth in the spherulitic samples can result in a more complex energetic trap distribution, in comparison to that in transcrystalline polypropylene. A double-layer transcrystalline polypropylene film with a crystalline interface in the middle can be obtained by crystallising the film in contact with rough moulding surfaces on both sides.
A layer of heterocharges appears on each side of the interface in the double-layer transcrystalline polypropylene electrets after thermal poling. However, no charge is captured within the transcrystalline layers. This phenomenon reveals the importance of the crystalline interface for creating traps with higher activation energy in polypropylene. The present studies highlight the fact that even slight variations in the polypropylene film may lead to dramatic differences in its electret properties.}, language = {en} } @phdthesis{Verdiani2020, author = {Verdiani, Silvia}, title = {Silenzio, immagini e parole. La costruzione del significato nella multimodalit{\`a} digitale}, isbn = {9788875901516}, doi = {10.25932/publishup-52359}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-523590}, school = {Universit{\"a}t Potsdam}, pages = {409}, year = {2020}, abstract = {Internet language exhibits distinctive features, since alongside the verbal text other elements are also important: response time, the absence of a reply, and the extensive use of image and multimedia elements. Greg Myers (2010: 15) uses in this context the term - and the language game - aural materials: in Computer Mediated Communication (CMC), image, multimedia, and language material have a specific field of interpretation as their referent. From this perspective, verbal language functions as an activator of the possible inferential paths and contexts within our world. The cultural and life experiences of the participants are important (Basile 2012: 21), as they enable us to understand the message and take part in the communication. The indexical role of verbal language in relation to images and multimedia, as it is used in online communication, is of central importance. The inclusion of such reference material attracts the readers' attention: the multimedia material is an integral part of the interaction as well as its mode of expression, the focus of the conversation, and a shared visual language. The fields of application are manifold, since the process of reading images is present in reception, but also in artistic production, in advertising, in political propaganda and, beyond that, in multimedia communication. Conglomerates of language and image are able to synthesise complex concepts, and also to stick precisely in the memory of the recipients, owing to their mixed code and synaesthetic nature. This is functional for digital communication, which concentrates on the phatic and playful aspects of interaction.
The effects on language use are undoubtedly one of the most interesting and topical aspects of this kind of hybrid communication. The novelty lies not so much in the choice of a cross-media expressive dimension - amply attested in the past as well - but rather in its current spread, for it has in fact become the normal communicative dimension for an ever-growing number of users.}, language = {it} } @phdthesis{VenturaBort2020, author = {Ventura-Bort, Carlos}, title = {Temporo-spatial dynamics of the impact of emotional contexts on visual processing and memory}, doi = {10.25932/publishup-55023}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550236}, school = {Universit{\"a}t Potsdam}, pages = {208}, year = {2020}, abstract = {It has frequently been observed that single emotional events are not only more efficiently processed, but also better remembered, and form longer-lasting memory traces than neutral material. However, when emotional information is perceived as a part of a complex event, such as in the context of or in relation to other events and/or source details, the modulatory effects of emotion are less clear. The present work aims to investigate how emotional, contextual source information modulates the initial encoding and subsequent long-term retrieval of associated neutral material (item memory) and contextual source details (contextual source memory). To do so, a two-task experiment was used, consisting of an incidental encoding task in which neutral objects were displayed over different contextual background scenes which varied in emotional content (unpleasant, pleasant, and neutral), and a delayed retrieval task (1 week), in which previously encoded objects and new ones were presented. In a series of studies, behavioral indices (Studies 2, 3, and 5), event-related potentials (ERPs; Studies 1-4), and functional magnetic resonance imaging (Study 5) were used to investigate whether emotional contexts can rapidly tune the visual processing of associated neutral information (Study 1) and modulate long-term item memory (Study 2), how different recognition memory processes (familiarity vs. recollection) contribute to these emotion effects on item and contextual source memory (Study 3), whether the emotional effects on item memory can also be observed during spontaneous retrieval (Study 4), and which brain regions underpin the modulatory effects of emotional contexts on item and contextual source memory (Study 5). In Study 1, it was observed that emotional contexts, by means of emotional associative learning, can rapidly alter the processing of associated neutral information. Neutral items associated with emotional contexts (i.e., emotional associates) showed, compared to those associated with neutral contexts, enhanced perceptual and more elaborate processing after one single pairing, as indexed by larger amplitudes in the P100 and LPP components, respectively. Study 2 showed that emotional contexts produce longer-lasting memory effects, as evidenced by better item memory performance and larger ERP Old/New differences for emotional associates. In Study 3, a mnemonic differentiation was observed between item and contextual source memory, which was modulated by emotion. Item memory was driven by familiarity, independently of the emotional contexts during encoding, whereas contextual source memory was driven by recollection and was better for emotional material.
As in Study 2, enhancing effects of emotional contexts on item memory were observed in ERPs associated with recollection processes. Likewise, for contextual source memory, a pronounced recollection-related ERP enhancement was observed exclusively for emotional contexts. Study 4 showed that the long-term recollection enhancement of emotional contexts on item memory can be observed even when retrieval is not explicitly attempted, as measured with ERPs, suggesting that the memory-enhancing effects of emotion are not related to the task employed during recognition, but to the motivational relevance of the triggering event. In Study 5, it was observed that the enhancing effects of emotional contexts on item and contextual source memory involve stronger engagement of brain regions associated with memory recollection, including areas of the medial temporal lobe, posterior parietal cortex, and prefrontal cortex. Taken together, these findings suggest that emotional contexts rapidly modulate the initial processing of associated neutral information and the subsequent, long-term item and contextual source memories. The enhanced memory effects of emotional contexts are strongly supported by recollection rather than familiarity processes, and are triggered whether retrieval is attempted explicitly or occurs spontaneously. These results provide new insights into the modulatory role of emotional information on the visual processing and long-term recognition memory of complex events. The present findings are integrated into current theoretical models, and future research directions are discussed.}, language = {en} } @phdthesis{Tunn2020, author = {Tunn, Isabell}, title = {From single molecules to bulk materials: tuning the viscoelastic properties of coiled coil cross-linked hydrogels}, doi = {10.25932/publishup-47595}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-475955}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 140}, year = {2020}, abstract = {The development of bioinspired self-assembling materials, such as hydrogels, with promising applications in cell culture, tissue engineering and drug delivery is a current focus in materials science. Biogenic or bioinspired proteins and peptides are frequently used as versatile building blocks for extracellular matrix (ECM)-mimicking hydrogels. However, precisely controlling and reversibly tuning the properties of these building blocks and the resulting hydrogels remains challenging. Precise control over the viscoelastic properties and self-healing abilities of hydrogels is a key factor for developing intelligent materials to investigate cell-matrix interactions. Thus, there is a need to develop building blocks that are self-healing, tunable and self-reporting. This thesis aims at the development of α-helical peptide building blocks, called coiled coils (CCs), which integrate these desired properties. Self-healing is a direct result of the fast self-assembly of these building blocks when used as material cross-links. Tunability is realized by means of reversible histidine (His)-metal coordination bonds. Lastly, by implementing a fluorescent readout that indicates the CC assembly state, self-reporting hydrogels are obtained. Coiled coils are abundant protein folding motifs in Nature, which often have mechanical function, such as in myosin or fibrin. Coiled coils are superhelices made up of two or more α-helices wound around each other.
The assembly of CCs is based on their repetitive sequence of seven amino acids, so-called heptads (abcdefg). Hydrophobic amino acids in the a and d position of each heptad form the core of the CC, while charged amino acids in the e and g position form ionic interactions. The solvent-exposed positions b, c and f are excellent targets for modifications since they are more variable. His-metal coordination bonds are strong, yet reversible interactions formed between the amino acid histidine and transition metal ions (e.g. Ni2+, Cu2+ or Zn2+). His-metal coordination bonds essentially contribute to the mechanical stability of various high-performance proteinaceous materials, such as spider fangs, Nereis worm jaws and mussel byssal threads. Therefore, I bioengineered reversible His-metal coordination sites into a well-characterized heterodimeric CC that served as tunable material cross-link. Specifically, I took two distinct approaches facilitating either intramolecular (Chapter 4.2) and/or intermolecular (Chapter 4.3) His-metal coordination. Previous research suggested that force-induced CC unfolding in shear geometry starts from the points of force application. In order to tune the stability of a heterodimeric CC in shear geometry, I inserted His in the b and f position at the termini of force application (Chapter 4.2). The spacing of His is such that intra-CC His-metal coordination bonds can form to bridge one helical turn within the same helix, but also inter-CC coordination bonds are not generally excluded. Starting with Ni2+ ions, Raman spectroscopy showed that the CC maintained its helical structure and the His residues were able to coordinate Ni2+. Circular dichroism (CD) spectroscopy revealed that the melting temperature of the CC increased by 4 °C in the presence of Ni2+. Using atomic force microscope (AFM)-based single molecule force spectroscopy, the energy landscape parameters of the CC were characterized in the absence and the presence of Ni2+. His-Ni2+ coordination increased the rupture force by ~10 pN, accompanied by a decrease of the dissociation rate constant. To test if this stabilizing effect can be transferred from the single molecule level to the bulk viscoelastic material properties, the CC building block was used as a non-covalent cross-link for star-shaped poly(ethylene glycol) (star-PEG) hydrogels. Shear rheology revealed a 3-fold higher relaxation time in His-Ni2+ coordinating hydrogels compared to the hydrogel without metal ions. This stabilizing effect was fully reversible when using an excess of the metal chelator ethylenediaminetetraacetate (EDTA). The hydrogel properties were further investigated using different metal ions, i.e. Cu2+, Co2+ and Zn2+. Overall, these results suggest that Ni2+, Cu2+ and Co2+ primarily form intra-CC coordination bonds while Zn2+ also participates in inter-CC coordination bonds. This may be a direct result of its different coordination geometry. Intermolecular His-metal coordination bonds in the terminal regions of the protein building blocks of mussel byssal threads are primarily formed by Zn2+ and were found to be intimately linked to higher-order assembly and self-healing of the thread. In the above example, the contribution of intra-CC and inter-CC His-Zn2+ cannot be disentangled. In Chapter 4.3, I redesigned the CC to prohibit the formation of intra-CC His-Zn2+ coordination bonds, focusing only on inter-CC interactions. 
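As a concrete reminder of the heptad register introduced above, which determines which positions are solvent-exposed and hence available for His insertion, consider the following short sketch; it is purely illustrative, and the sequence used is hypothetical rather than one of the CCs studied in this thesis:

HEPTAD = "abcdefg"

def heptad_register(sequence, start="a"):
    # Assign each residue its heptad position (a-g), given the register of
    # the first residue. Hydrophobic core: a/d; ionic pairs: e/g;
    # solvent-exposed (and thus accessible for modification): b/c/f.
    offset = HEPTAD.index(start)
    return [(res, HEPTAD[(offset + i) % 7]) for i, res in enumerate(sequence)]

# Hypothetical heptad: Ile in a/d (core), Glu/Lys in e/g (ionic pair).
for res, pos in heptad_register("IAAIEAK" * 2):
    role = "core" if pos in "ad" else "ionic" if pos in "eg" else "exposed"
    print(res, pos, role)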
Specifically, I inserted His in the solvent-exposed f positions of the CC to focus on the effect of metal-induced higher-order assembly of CC cross-links. Raman and CD spectroscopy revealed that this CC building block forms α-helical Zn2+ cross-linked aggregates. Using this CC as a cross-link for star-PEG hydrogels, I showed that the material properties can be switched from viscoelastic in the absence of Zn2+ to elastic-like in the presence of Zn2+. Moreover, the relaxation time of the hydrogel was tunable over three orders of magnitude when using different Zn2+:His ratios. This tunability is attributed to a progressive transformation of single CC cross-links into His-Zn2+ cross-linked aggregates, with inter-CC His-Zn2+ coordination bonds serving as an additional cross-linking mode. Rheological characterization of the hydrogels with inter-CC His-Zn2+ coordination raised the question of whether only the His-Zn2+ coordination bonds between CCs rupture when shear strain is applied, or whether the CCs themselves rupture as well. In general, the number of CC cross-links initially formed in the hydrogel, as well as the number of CC cross-links breaking under force, remains to be elucidated. In order to probe these questions more deeply and monitor the state of the CC cross-links when force is applied, a fluorescent reporter system based on F{\"o}rster resonance energy transfer (FRET) was introduced into the CC (Chapter 4.4). For this purpose, the donor-acceptor pair carboxyfluorescein and tetramethylrhodamine was used. The resulting self-reporting CC showed a FRET efficiency of 77 \% in solution. Using this fluorescently labeled CC as a self-reporting, reversible cross-link in an otherwise covalently cross-linked star-PEG hydrogel enabled the detection of the FRET efficiency change under compression force. This proof-of-principle result sets the stage for implementing the fluorescently labeled CCs as molecular force sensors in non-covalently cross-linked hydrogels. In summary, this thesis highlights that rationally designed CCs are excellent reversibly tunable, self-healing and self-reporting hydrogel cross-links with high application potential in bioengineering and biomedicine. For the first time, I demonstrated that His-metal coordination-based stabilization can be transferred from the single-CC level to the bulk material with clear viscoelastic consequences. Insertion of His in specific sequence positions was used to implement a second non-covalent cross-linking mode via intermolecular His-metal coordination. This His-metal-binding-induced aggregation of the CCs enabled reversible tuning of the hydrogel properties from viscoelastic to elastic-like. As a proof of principle to establish self-reporting CCs as material cross-links, I labeled a CC with a FRET pair. The fluorescently labeled CC acts as a molecular force sensor, and first preliminary results suggest that the CC enables the detection of hydrogel cross-link failure under compression force.
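For orientation, the FRET efficiency quoted above depends on the donor-acceptor distance $r$ through the standard F{\"o}rster relation (textbook photophysics, not a result specific to this system):
\[
E = \frac{1}{1 + \left(r/R_0\right)^{6}},
\]
where $R_0$ is the F{\"o}rster radius of the carboxyfluorescein/tetramethylrhodamine pair; an efficiency of 77 \% thus corresponds to a donor-acceptor distance of roughly $0.8\,R_0$, and any force-induced separation of the labels shows up as a drop in $E$.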
In the future, fluorescently labeled CC force sensors will likely not only be used as intelligent cross-links to study the failure of hydrogels but also to investigate cell-matrix interactions in 3D down to the single molecule level.}, language = {en} } @phdthesis{Taeumel2020, author = {Taeumel, Marcel}, title = {Data-driven tool construction in exploratory programming environments}, doi = {10.25932/publishup-44428}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444289}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 299}, year = {2020}, abstract = {This work presents a new design for programming environments that promote the exploration of domain-specific software artifacts and the construction of graphical tools for such program comprehension tasks. In complex software projects, tool building is essential because domain- or task-specific tools can support decision making by representing concerns concisely with low cognitive effort. In contrast, generic tools can only support anticipated scenarios, which usually align with programming language concepts or well-known project domains. However, the creation and modification of interactive tools is expensive because the glue that connects data to graphics is hard to find, change, and test. Even if valuable data is available in a common format and even if promising visualizations could be populated, programmers have to invest many resources to make changes in the programming environment. Consequently, only ideas of predictably high value will be implemented. In the non-graphical, command-line world, the situation looks different and inspiring: programmers can easily build their own tools as shell scripts by configuring and combining filter programs to process data. We propose a new perspective on graphical tools and provide a concept to build and modify such tools with a focus on high quality, low effort, and continuous adaptability. That is, (1) we propose an object-oriented, data-driven, declarative scripting language that reduces the amount of and governs the effects of glue code for view-model specifications, and (2) we propose a scalable UI-design language that promotes short feedback loops in an interactive, graphical environment such as Morphic known from Self or Squeak/Smalltalk systems. We implemented our concept as a tool building environment, which we call VIVIDE, on top of Squeak/Smalltalk and Morphic. We replaced existing code browsing and debugging tools to iterate within our solution more quickly. In several case studies with undergraduate and graduate students, we observed that VIVIDE can be applied to many domains such as live language development, source-code versioning, modular code browsing, and multi-language debugging. Then, we designed a controlled experiment to measure the effect on the time to build tools. Several pilot runs showed that training is crucial and, presumably, takes days or weeks, which implies a need for further research. As a result, programmers as users can directly work with tangible representations of their software artifacts in the VIVIDE environment. Tool builders can write domain-specific scripts to populate views to approach comprehension tasks from different angles. 
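The shell analogy invoked above - building tools by configuring and combining small filter programs - can be sketched in a few lines; the following is a minimal illustration in Python, not VIVIDE's actual view-model scripting language:

from collections import Counter

def pipe(data, *stages):
    # Apply filter stages left to right, shell-pipeline style.
    for stage in stages:
        data = stage(data)
    return data

# Rough analogue of: grep error log.txt | sort | uniq -c
lines = ["error: disk", "ok", "error: net", "error: disk"]
print(pipe(
    lines,
    lambda xs: [x for x in xs if "error" in x],  # grep
    sorted,                                      # sort
    Counter,                                     # uniq -c
))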
Our novel perspective on graphical tools can inspire the creation of new trade-offs in modularity for both data providers and view designers.}, language = {en} } @phdthesis{Stone2020, author = {Stone, Kate}, title = {Predicting long-distance lexical content in German verb-particle constructions}, doi = {10.25932/publishup-47679}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476798}, school = {Universit{\"a}t Potsdam}, year = {2020}, abstract = {A large body of research now supports the presence of both syntactic and lexical predictions in sentence processing. Lexical predictions, in particular, are considered to indicate a deep level of predictive processing that extends past the structural features of a necessary word (e.g. noun), right down to the phonological features of the lexical identity of a specific word (e.g. /kite/; DeLong et al., 2005). However, evidence for lexical predictions typically focuses on predictions in very local environments, such as the adjacent word or words (DeLong et al., 2005; Van Berkum et al., 2005; Wicha et al., 2004). Predictions in such local environments may be indistinguishable from lexical priming, which is transient and uncontrolled, and as such may prime lexical items that are not compatible with the context (e.g. Kukona et al., 2014). Predictive processing has been argued to be a controlled process, with top-down information guiding preactivation of plausible upcoming lexical items (Kuperberg \& Jaeger, 2016). One way to distinguish lexical priming from prediction is to demonstrate that preactivated lexical content can be maintained over longer distances. In this dissertation, separable German particle verbs are used to demonstrate that preactivation of lexical items can be maintained over multi-word distances. A self-paced reading time and an eye tracking experiment provide some support for the idea that particle preactivation triggered by a verb and its context can be observed by holding the sentence context constant and manipulating the predictabilty of the particle. Although evidence of an effect of particle predictability was only seen in eye tracking, this is consistent with previous evidence suggesting that predictive processing facilitates only some eye tracking measures to which the self-paced reading modality may not be sensitive (Staub, 2015; Rayner1998). Interestingly, manipulating the distance between the verb and the particle did not affect reading times, suggesting that the surprisal-predicted faster reading times at long distance may only occur when the additional distance is created by information that adds information about the lexical identity of a distant element (Levy, 2008; Grodner \& Gibson, 2005). Furthermore, the results provide support for models proposing that temporal decay is not major influence on word processing (Lewandowsky et al., 2009; Vasishth et al., 2019). In the third and fourth experiments, event-related potentials were used as a method for detecting specific lexical predictions. In the initial ERP experiment, we found some support for the presence of lexical predictions when the sentence context constrained the number of plausible particles to a single particle. This was suggested by a frontal post-N400 positivity (PNP) that was elicited when a lexical prediction had been violated, but not to violations when more than one particle had been plausible. 
The results of this study were highly consistent with previous research suggesting that the PNP might be a much sought-after ERP marker of prediction failure (DeLong et al., 2011; DeLong et al., 2014; Van Petten \& Luka, 2012; Thornhill \& Van Petten, 2012; Kuperberg et al., 2019). However, a second experiment with a larger sample failed to replicate the effect, but did suggest that the relationship of the PNP to predictive processing may not yet be fully understood. Evidence for long-distance lexical predictions was inconclusive. The conclusion drawn from the four experiments is that preactivation of the lexical entries of plausible upcoming particles did occur and was maintained over long distances. The facilitatory effect of this preactivation at the particle site therefore did not appear to be the result of transient lexical priming. However, the question of whether this preactivation can also lead to lexical predictions of a specific particle remains unanswered. Of particular interest to future research on predictive processing is further characterisation of the PNP. Implications for models of sentence processing may be the inclusion of long-distance lexical predictions, or the possibility that preactivation of lexical material can facilitate reading times and ERP amplitude without commitment to a specific lexical item.}, language = {en} } @phdthesis{Stete2020, author = {Stete, Felix}, title = {Gold at the nanoscale}, doi = {10.25932/publishup-49605}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-496055}, school = {Universit{\"a}t Potsdam}, pages = {X, 186}, year = {2020}, abstract = {In this cumulative dissertation, I present my contributions to the field of plasmonic nanoparticle science. Plasmonic nanoparticles are characterised by resonances of the free electron gas in the spectral range of visible light. In recent years, they have evolved into promising components for light-based nanocircuits, light harvesting, nanosensors, cancer therapies, and more. This work comprises the articles I authored or co-authored during my time as a PhD student at the University of Potsdam. The main focus lies on the coupling between localised plasmons and excitons in organic dyes. Plasmon-exciton coupling brings light-matter coupling to the nanoscale. This size reduction is accompanied by strong enhancements of the light field which can, among other things, be utilised to enhance the spectroscopic footprint of molecules down to single-molecule detection, improve the efficiency of solar cells, or establish lasing on the nanoscale. When the coupling exceeds all decay channels, the system enters the strong coupling regime. In this case, hybrid light-matter modes emerge which can be utilised as optical switches, in quantum networks, or as thresholdless lasers. The present work investigates plasmon-exciton coupling in gold-dye core-shell geometries and contains both fundamental insights and technical novelties. It presents a technique which reveals the anticrossing in coupled systems without manipulating the particles themselves. The method is used to investigate the relation between coupling strength and particle size. Additionally, the work demonstrates that pure extinction measurements can be insufficient when trying to assess the coupling regime. Moreover, the fundamental quantum electrodynamic effect of vacuum-induced saturation is introduced. This effect causes the vacuum fluctuations to diminish the polarisability of molecules and has not yet been considered in the plasmonic context.
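As background to the anticrossing analysis summarised above: in the simplest coupled-oscillator picture (damping neglected; a generic model, not the specific treatment of this thesis), a plasmon of energy $E_{pl}$ coupled to an exciton of energy $E_{ex}$ with coupling strength $g$ yields the hybrid energies
\[
E_{\pm} = \frac{E_{pl} + E_{ex}}{2} \pm \sqrt{g^{2} + \left(\frac{E_{pl} - E_{ex}}{2}\right)^{2}},
\]
so that at zero detuning ($E_{pl} = E_{ex}$) the two branches are separated by the Rabi splitting $2g$; strong coupling requires this splitting to exceed the combined linewidths of the bare resonances.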
The work additionally discusses the reaction of gold nanoparticles to optical heating. Such knowledge is of great importance for all potential optical applications utilising plasmonic nanoparticles since optical excitation always generates heat. This heat can induce a change in the optical properties, but also mechanical changes up to melting can occur. Here, the change of spectra in coupled plasmon-exciton particles is discussed and explained with a precise model. Moreover, the work discusses the behaviour of gold nanotriangles exposed to optical heating. In a pump-probe measurement, X-ray probe pulses directly monitored the particles' breathing modes. In another experiment, the triangles were exposed to cw laser radiation with varying intensities and illumination areas. X-ray diffraction directly measured the particles' temperature. Particle melting was investigated with surface-enhanced Raman spectroscopy and SEM imaging, demonstrating that larger illumination areas can cause melting at lower intensities. An elaborate methodological and theoretical introduction precedes the articles. In this way, readers without specialist knowledge also get a concise and detailed overview of the theory and methods used in the articles. I introduce localised plasmons in metal nanoparticles of different shapes. For this work, the plasmons were mostly coupled to excitons in J-aggregates. Therefore, I discuss these aggregates of organic dyes with sharp and intense resonances and establish an understanding of the coupling between the two systems. For ab initio simulations of the coupled systems, models for the systems' permittivities are presented, too. Moreover, the route to the sample fabrication - the dye coating of gold nanoparticles, their subsequent deposition on substrates, and the covering with polyelectrolytes - is presented together with the measurement methods that were used for the articles.}, language = {en} } @phdthesis{Staubitz2020, author = {Staubitz, Thomas}, title = {Gradable team assignments in large scale learning environments}, doi = {10.25932/publishup-47183}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471830}, school = {Universit{\"a}t Potsdam}, pages = {122}, year = {2020}, abstract = {Lifelong learning plays an increasingly important role in many societies. Technology is changing faster than ever, and what is important to learn today may be obsolete tomorrow. The role of informal programs is becoming increasingly important. In particular, Massive Open Online Courses have become popular among learners and instructors. In 2008, a group of Canadian education enthusiasts started the first Massive Open Online Courses, or MOOCs, to prove their cognitive theory of Connectivism. Around 2012, a variety of American start-ups redefined the concept of MOOCs. Instead of following the connectivist doctrine, they returned to a more traditional approach. They focussed on video lecturing and combined this with a course forum that allowed the participants to discuss with each other and the teaching team. While this new version of the concept was enormously successful in terms of massiveness - hundreds of thousands of participants from all over the world joined the first of these courses - many educators criticized the relapse to the cognitivist model. In the early days, the evolving platforms often did not have more features than a video player, simple multiple-choice quizzes, and the course forum.
It soon became a major interest of research to scale more modern approaches to learning and teaching to the massiveness of these courses. Hands-on exercises, alternative forms of assessment, collaboration, and teamwork are some of the topics on the agenda. The insights provided by cognitive and pedagogical theories, however, do not necessarily always run in sync with the needs and the preferences of the majority of participants. While the former promote action learning, hands-on learning, competence-based learning, project-based learning, and team-based learning as the holy grail, many of the latter often rather prefer a more laid-back style of learning, sometimes referred to as edutainment. Obviously, given the large numbers of participants in these courses, there is not just one type of learner. Participants are not a homogeneous mass but a potpourri of individuals with a wildly heterogeneous mix of backgrounds, previous knowledge, familial and professional circumstances, countries of origin, gender, age, and so on. For the majority of participants, a full-time job and/or a family often just does not leave enough room for more time-intensive tasks, such as practical exercises or teamwork. Others, however, particularly enjoy these hands-on or collaborative aspects of MOOCs. Furthermore, many subjects particularly require these possibilities and simply cannot be taught or learned in courses that lack collaborative or hands-on features. In this context, the thesis discusses how team assignments have been implemented on the HPI MOOC platform. During recent years, several experiments have been conducted and a great amount of experience has been gained by employing team assignments in courses in areas such as Object-Oriented Programming, Design Thinking, and Business Innovation on various instances of this platform: openHPI, openSAP, and mooc.house.}, language = {en} } @phdthesis{Sposini2020, author = {Sposini, Vittoria}, title = {The random diffusivity approach for diffusion in heterogeneous systems}, doi = {10.25932/publishup-48780}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-487808}, school = {Universit{\"a}t Potsdam}, year = {2020}, abstract = {The two hallmark features of Brownian motion are the linear growth $\langle x^{2}(t)\rangle = 2dDt$ of the mean squared displacement (MSD) with diffusion coefficient $D$ in $d$ spatial dimensions, and the Gaussian distribution of displacements. With the increasing complexity of the studied systems, deviations from these two central properties have been unveiled over the years. Recently, a large variety of systems have been reported in which the MSD exhibits the linear growth in time of Brownian (Fickian) transport, while the distribution of displacements is pronouncedly non-Gaussian (Brownian yet non-Gaussian, BNG). A similar behaviour is also observed for viscoelastic-type motion, where an anomalous trend of the MSD, i.e., $\langle x^{2}(t)\rangle \sim t^{\alpha}$, is combined with a priori unexpected non-Gaussian distributions (anomalous yet non-Gaussian, ANG). This kind of behaviour observed in BNG and ANG diffusions has been related to the presence of heterogeneities in the systems, and a common approach has been established to address it, that is, the random diffusivity approach. This dissertation explores extensively the field of random diffusivity models.
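The simplest special case of this approach, the superstatistical one, illustrates how Fickian scaling and non-Gaussian displacements coexist (a standard textbook-level example, not the full generality of the models treated in the thesis): for a diffusivity $D$ that is random but fixed along each trajectory, the one-dimensional displacement distribution is the mixture
\[
P(x,t) = \int_{0}^{\infty} p(D)\,\frac{e^{-x^{2}/(4Dt)}}{\sqrt{4\pi D t}}\,\mathrm{d}D,
\]
so that an exponential diffusivity distribution $p(D) = e^{-D/D_0}/D_0$ yields the Laplace form $P(x,t) = e^{-|x|/\sqrt{D_0 t}}/(2\sqrt{D_0 t})$, while the MSD remains linear, $\langle x^{2}(t)\rangle = 2\langle D\rangle t$ - Brownian yet non-Gaussian.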
Starting from a chronological description of all the main approaches used in attempts to describe BNG and ANG diffusion, different mathematical methodologies are defined for the resolution and study of these models. The processes that are reported in this work can be classified into three subcategories: i) randomly-scaled Gaussian processes, ii) superstatistical models and iii) diffusing diffusivity models, all belonging to the more general class of random diffusivity models. Eventually, the study focuses more on BNG diffusion, which is by now well-established and relatively well-understood. Nevertheless, many examples are discussed for the description of ANG diffusion, in order to highlight the possible scenarios which are known so far for the study of this class of processes. The second part of the dissertation deals with the statistical analysis of random diffusivity processes. A general description based on the concept of the moment-generating function is initially provided to obtain standard statistical properties of the models. Then, the discussion moves to the study of power spectral analysis and first-passage statistics for some particular random diffusivity models. A comparison between the results from the random diffusivity approach and those for standard Brownian motion is discussed. In this way, a deeper physical understanding of the systems described by random diffusivity models is also outlined. To conclude, a discussion of the possible origins of the heterogeneity is sketched, with the main goal of inferring which kinds of systems can actually be described by the random diffusivity approach.}, language = {en} } @phdthesis{Siemiatkowska2020, author = {Siemiatkowska, Beata}, title = {Redox signalling in plants}, doi = {10.25932/publishup-48911}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489119}, school = {Universit{\"a}t Potsdam}, pages = {127}, year = {2020}, abstract = {Once proteins are synthesized, they can additionally be modified by post-translational modifications (PTMs). Proteins containing reactive cysteine thiols, stabilized in their deprotonated form as thiolates (RS-) due to their local environment, serve as redox sensors by undergoing a multitude of oxidative PTMs (Ox-PTMs). Ox-PTMs such as S-nitrosylation or the formation of inter- or intramolecular disulfide bridges induce functional changes in these proteins. Proteins containing cysteines whose thiol oxidation state regulates their functions belong to the so-called redoxome. Such Ox-PTMs are controlled by site-specific cellular events that play a crucial role in protein regulation, affecting enzyme catalytic sites, ligand binding affinity, protein-protein interactions or protein stability. Reversible protein thiol oxidation is an essential regulatory mechanism of photosynthesis, metabolism, and gene expression in all photosynthetic organisms. Therefore, studying PTMs will remain crucial for understanding plant adaptation to external stimuli like fluctuating light conditions. Optimizing methods suitable for studying plant Ox-PTMs is of high importance for elucidation of the redoxome in plants. This study focusses on thiol modifications occurring in plants and provides novel insight into the in vivo redoxome of Arabidopsis thaliana in response to light vs. dark. This was achieved by utilizing a resin-assisted thiol enrichment approach. Furthermore, confirmation of candidates on the single-protein level was carried out by a differential labelling approach.
The thiols and disulfides were differentially labelled, and the protein levels were detected using immunoblot analysis. Further analysis was focused on light-reduced proteins. Using the enrichment approach, many well-studied redox-regulated proteins were identified. Amongst those were fructose 1,6-bisphosphatase (FBPase) and sedoheptulose-1,7-bisphosphatase (SBPase), which have previously been described as targets of the thioredoxin system. The redox-regulated proteins identified in the current study were compared to several published, independent results showing redox-regulated proteins in Arabidopsis leaves, roots, mitochondria and specifically S-nitrosylated proteins. These proteins were excluded as potential new candidates but serve as a proof of concept that the enrichment experiments are effective. Additionally, the CSP41A and CSP41B proteins, which emerged from this study as potential targets of redox regulation, were analyzed by Ribo-Seq. The active translatome study of the csp41a mutant vs. wild type showed most of the significant changes at the end of the night, similar to csp41b. Yet, in both mutants only a few chloroplast-encoded genes were altered. Further studies of the CSP41A and CSP41B proteins are needed to reveal their functions and elucidate the role of redox regulation of these proteins.}, language = {en} } @phdthesis{Sianipar2020, author = {Sianipar, Johannes Harungguan}, title = {Towards scalable and secure virtual laboratory for cybersecurity e-learning}, doi = {10.25932/publishup-50279}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-502793}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 156}, year = {2020}, abstract = {A distance education or e-learning platform should be able to provide a virtual laboratory that lets the participants gain hands-on exercise experience and practice their skills remotely. This is especially true in cybersecurity e-learning, where the participants need to be able to attack or defend an IT system. For a hands-on exercise, the virtual laboratory environment must be similar to the real operational environment, where an attacker or a victim is represented by a node in the virtual laboratory environment. A node is usually represented by a virtual machine (VM). Scalability has become a primary issue in virtual laboratories for cybersecurity e-learning because a VM needs a significant and fixed allocation of resources. The available resources limit the number of simultaneous users. Scalability can be increased by increasing the efficiency of using available resources and by providing more resources. Increasing scalability means increasing the number of simultaneous users. In this thesis, we propose two approaches to increase the efficiency of using the available resources. The first approach is to replace virtual machines (VMs) with containers whenever possible. The second approach is to share the load with the user's on-premise machine, where the user-on-premise machine represents one of the nodes in a virtual laboratory scenario. We also propose two approaches to providing more resources. One way to provide more resources is by using public cloud services. Another way is to gather resources from the crowd, which is referred to as a Crowdresourcing Virtual Laboratory (CRVL). In CRVL, the crowd can contribute their unused resources in the form of a VM, a bare-metal system, an account in a public cloud, a private cloud or an isolated group of VMs, but in this thesis, we focus on a VM.
The contributor must give the credentials of the VM admin or root user to the CRVL system. We propose an architecture and methods to integrate VMs into or disintegrate them from the CRVL system automatically. A team placement algorithm must also be investigated to optimize the usage of resources while at the same time giving the best service to the user. Because the CRVL system does not manage the contributor host machine, the CRVL system must be able to make sure that the VM integration will not harm the contributor's system and that the training material will be stored securely on the contributor side, so that no one is able to take the training material away without permission. We investigate ways to handle this kind of threat. We propose three approaches to harden the VM against a malicious host admin. To verify the integrity of a VM before integration into the CRVL system, we propose a remote verification method that does not use any additional hardware such as the Trusted Platform Module chip. As the owners of the host machine, host admins could gain access to the VM's data via Random Access Memory (RAM) by performing live memory dumping or Spectre and Meltdown attacks. To make it harder for a malicious host admin to obtain sensitive data from RAM, we propose a method that continually moves sensitive data in RAM. We also propose a method to monitor the host machine by installing an agent on it. The agent monitors the hypervisor configurations and the host admin activities. To evaluate our approaches, we conduct extensive experiments with different settings. The use case in our approach is Tele-Lab, a virtual laboratory platform for cybersecurity e-learning. We use this platform as a basis for designing and developing our approaches. The results show that our approaches are practical and provide enhanced security.}, language = {en} } @phdthesis{Shaabani2020, author = {Shaabani, Nuhad}, title = {On discovering and incrementally updating inclusion dependencies}, doi = {10.25932/publishup-47186}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471862}, school = {Universit{\"a}t Potsdam}, pages = {119}, year = {2020}, abstract = {In today's world, many applications produce large amounts of data at an enormous rate. Analyzing such datasets for metadata is indispensable for effectively understanding, storing, querying, manipulating, and mining them. Metadata summarizes technical properties of a dataset, which range from basic statistics to complex structures describing data dependencies. One type of dependency is the inclusion dependency (IND), which expresses subset-relationships between attributes of datasets. Therefore, inclusion dependencies are important for many data management applications in terms of data integration, query optimization, schema redesign, or integrity checking. Thus, the discovery of inclusion dependencies in unknown or legacy datasets is at the core of any data profiling effort. For exhaustively detecting all INDs in large datasets, we developed S-indd++, a new algorithm that eliminates the shortcomings of existing IND-detection algorithms and significantly outperforms them. S-indd++ is based on a novel concept of attribute clustering for efficiently deriving INDs. Inferring INDs from our attribute clustering eliminates all the redundant operations caused by other algorithms. S-indd++ is also based on a novel partitioning strategy that enables discarding a large number of candidates in early phases of the discovery process.
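To make the notion of a unary IND concrete, the following deliberately naive sketch tests subset-relationships by pairwise set containment; it is illustrative only, since S-indd++ avoids precisely this kind of quadratic, redundant comparison through its attribute clustering:

from itertools import permutations

def unary_inds(tables):
    # tables: {table: {column: [values]}}. Returns all attribute pairs
    # (dependent, referenced) whose value sets satisfy dep subset-of ref.
    columns = {(t, c): set(vs)
               for t, cols in tables.items() for c, vs in cols.items()}
    return [(dep, ref) for dep, ref in permutations(columns, 2)
            if columns[dep] <= columns[ref]]

# Example: orders.customer_id is contained in customers.id, a valid IND.
tables = {"customers": {"id": [1, 2, 3]},
          "orders": {"customer_id": [1, 3]}}
print(unary_inds(tables))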
Moreover, S-indd++ does not require fitting a partition into main memory - a highly appreciable property in the face of ever-growing datasets. S-indd++ reduces the runtime of the state-of-the-art approach by up to 50\%. None of the approaches for discovering INDs is appropriate for application on dynamic datasets; they cannot update the INDs after an update of the dataset without reprocessing it entirely. To this end, we developed the first approach for incrementally updating INDs in frequently changing datasets. We achieved this by reducing the problem of incrementally updating INDs to incrementally updating the attribute clustering from which all INDs are efficiently derivable. We realized the update of the clusters by designing new operations to be applied to the clusters after every data update. The incremental update of INDs reduces the time of the complete rediscovery by up to 99.999\%. All existing algorithms for discovering n-ary INDs are based on the principle of candidate generation - they generate candidates and test their validity in the given data instance. The major disadvantage of this technique is the exponentially growing number of database accesses in terms of SQL queries required for validation. We devised Mind2, the first approach for discovering n-ary INDs without candidate generation. Mind2 is based on a new mathematical framework developed in this thesis for computing the maximum INDs from which all other n-ary INDs are derivable. The experiments showed that Mind2 is significantly more scalable and effective than hypergraph-based algorithms.}, language = {en} } @phdthesis{Senftleben2020, author = {Senftleben, Robin}, title = {Earth's magnetic field over the last 1000 years}, doi = {10.25932/publishup-47315}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473150}, school = {Universit{\"a}t Potsdam}, pages = {xii, 104}, year = {2020}, abstract = {To investigate the reliability and stability of spherical harmonic models based on archeo-/paleomagnetic data, 2000 geomagnetic models were calculated. All models are based on the same data set but with randomized uncertainties. Comparison of these models to the geomagnetic field model gufm1 showed that large-scale magnetic field structures up to spherical harmonic degree 4 are stable throughout all models. By ranking all models through comparison of their dipole coefficients to gufm1, more realistic uncertainty estimates were derived than those provided by the authors of the data. The derived uncertainty estimates were used in further modelling, which combines archeo-/paleomagnetic and historical data. The huge difference in data count, accuracy and coverage of these two very different data sources made it necessary to introduce a time-dependent spatial damping, which was constructed to constrain the spatial complexity of the model. Finally, 501 models were calculated by considering each data point as a Gaussian random variable, whose mean is the original value and whose standard deviation is its uncertainty. The final model arhimag1k is calculated by taking the mean of the 501 sets of Gauss coefficients. arhimag1k fits different dependent and independent data sets well. It shows an early reverse flux patch at the core-mantle boundary between 1000 AD and 1200 AD at the location of today's South Atlantic Anomaly. Another interesting feature is a high-latitude flux patch over Greenland between 1200 and 1400 AD. The dipole moment shows a constant behaviour between 1600 and 1840 AD.
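For reference, the Gauss coefficients $g_l^m$ and $h_l^m$ mentioned above parameterise the standard spherical harmonic expansion of the geomagnetic potential (textbook form, independent of the particular models built here):
\[
V(r,\theta,\phi) = a \sum_{l=1}^{L} \left(\frac{a}{r}\right)^{l+1} \sum_{m=0}^{l} \left(g_l^m \cos m\phi + h_l^m \sin m\phi\right) P_l^m(\cos\theta),
\]
with $a$ the Earth's reference radius and $P_l^m$ the Schmidt semi-normalised associated Legendre functions; the $l = 1$ terms are the dipole coefficients, and degrees up to $l = 4$ describe the large-scale field structures found to be stable across all 2000 models.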
In the second part of the thesis, four new paleointensities from four different flows on the island of Fogo (Cape Verde) are presented. The data are fitted well by arhimag1k, with the exception of the value of 28.3 microtesla at 1663, which is approximately 10 microtesla lower than the model suggests.}, language = {en} } @phdthesis{Seebeck2020, author = {Seebeck, Nicole}, title = {Regulation of the organokines FGF21 and chemerin by diet}, doi = {10.25932/publishup-47114}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471140}, school = {Universit{\"a}t Potsdam}, pages = {i, 132}, year = {2020}, abstract = {The hepatokine FGF21 and the adipokine chemerin have been implicated as metabolic regulators and mediators of inter-tissue crosstalk. While FGF21 is associated with beneficial metabolic effects and is currently being tested as an emerging therapeutic for obesity and diabetes, chemerin is linked to inflammation-mediated insulin resistance. However, the dietary regulation of both organokines and their role in tissue interaction need further investigation. The LEMBAS nutritional intervention study investigated the effects of two diets differing in their protein content in obese human subjects with non-alcoholic fatty liver disease (NAFLD). The study participants consumed hypocaloric diets containing either low (LP: 10 EN\%, n = 10) or high (HP: 30 EN\%, n = 9) dietary protein 3 weeks prior to bariatric surgery. Before and after the intervention the participants were anthropometrically assessed, blood samples were drawn, and hepatic fat content was determined by MRS. During bariatric surgery, paired subcutaneous and visceral adipose tissue biopsies as well as liver biopsies were collected. The aim of this thesis was to investigate circulating levels and tissue-specific regulation of (1) FGF21 and (2) chemerin in the LEMBAS cohort. The results were compared to data obtained in 92 metabolically healthy subjects with normal glucose tolerance and normal liver fat content. (1) Serum FGF21 concentrations were elevated in the obese subjects, and strongly associated with intrahepatic lipids (IHL). In accordance, FGF21 serum concentrations increased with the severity of NAFLD as determined histologically in the liver biopsies. Though both diets were successful in reducing IHL, the effect was more pronounced in the HP group. FGF21 serum concentrations and mRNA expression were bi-directionally regulated by dietary protein, independent of metabolic improvements. In accordance, in the healthy study subjects, serum FGF21 concentrations dropped by more than 60\% in response to the HP diet. A short-term HP intervention confirmed the acute downregulation of FGF21 within 24 hours. Lastly, experiments in HepG2 cell cultures and primary murine hepatocytes showed that nitrogen metabolites (NH4Cl and glutamine) dose-dependently suppress FGF21 expression. (2) Circulating chemerin concentrations were considerably elevated in the obese versus lean study participants and differently associated with markers of obesity and NAFLD in the two cohorts. The adipokine decreased in response to the hypocaloric interventions, while an unhealthy high-fat diet induced a rise in chemerin serum levels. In the lean subjects, mRNA expression of RARRES2, encoding chemerin, was strongly and positively correlated with the expression of several cytokines, including MCP1, TNFα, and IL6, as well as markers of macrophage infiltration in the subcutaneous fat depot.
However, RARRES2 was not associated with any of the cytokines assessed in the obese subjects, and the data indicated an involvement of chemerin not only in the onset but also in the resolution of inflammation. Analyses of the tissue biopsies and experiments in human primary adipocytes point towards a role of chemerin in adipogenesis, although discrepancies between the in vivo and in vitro data were detected. Taken together, the results of this thesis demonstrate that circulating FGF21 and chemerin levels are considerably elevated in obesity and responsive to dietary interventions. FGF21 was acutely and bi-directionally regulated by dietary protein in a hepatocyte-autonomous manner. Given that both a lack of essential amino acids and an excessive nitrogen intake exert metabolic stress, FGF21 may serve as an endocrine signal for dietary protein balance. Lastly, the data revealed that chemerin is dysregulated in obesity and associated with obesity-related inflammation. However, future studies on chemerin should consider functional and regulatory differences between secreted and tissue-specific isoforms.}, language = {en} } @phdthesis{Schuetze2020, author = {Sch{\"u}tze, Franziska}, title = {Finance for a sustainable economy}, doi = {10.25932/publishup-48441}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-484415}, school = {Universit{\"a}t Potsdam}, pages = {xi, 128}, year = {2020}, abstract = {With his September 2015 speech "Breaking the tragedy of the horizon", the Governor of the Bank of England, Mark Carney, put climate change on the agenda of financial market regulators. Until then, climate change had been framed mainly as a problem of negative externalities leading to long-term economic costs, which resulted in countries trying to keep the short-term costs of climate action to a minimum. Carney argued that climate change, as well as climate policy, can also lead to short-term financial risks, potentially causing strong adjustments in asset prices. Analysing the effect of a sustainability transition on the financial sector challenges traditional economic and financial analysis and requires a much deeper understanding of the interrelations between climate policy and financial markets. This dissertation thus investigates the implications of climate policy for financial markets as well as the role of financial markets in a transition to a sustainable economy. The approach combines insights from macroeconomic and financial risk analysis. Following an introduction and classification in Chapter 1, Chapter 2 presents a macroeconomic analysis that combines ambitious climate targets (negative externality) with technological innovation (positive externality), adaptive expectations, and an investment program, resulting in overall positive macroeconomic outcomes. The analysis also reveals the limitations of climate economic models in their representation of financial markets. Therefore, the subsequent part of this dissertation is concerned with the link between climate policies and financial markets. In Chapter 3, an empirical analysis of stock-market responses to the announcement of climate policy targets is performed to investigate the impacts of climate policy on financial markets. Results show that 1) international climate negotiations have an effect on asset prices and 2) investors increasingly recognize transition risks in carbon-intensive investments. 
In Chapter 4, an analysis of equity markets and the interbank market shows that transition risks can potentially affect a large part of the equity market and that financial interconnections can amplify negative shocks. In Chapter 5, an analysis of mortgage loans shows how information on climate policy and the energy performance of buildings can be integrated into risk management and reflected in interest rates. While the costs of climate action have been explored in great depth, this dissertation offers two main contributions. First, it highlights the importance of a green investment program to strengthen the macroeconomic benefits of climate action. Second, it presents different approaches for integrating transition risks and opportunities into financial market analysis. Anticipating potential losses and gains in the value of financial assets as early as possible can make the financial system more resilient to transition risks and can stimulate investments into the decarbonization of the economy.}, language = {en} } @phdthesis{Schuster2020, author = {Schuster, Maja}, title = {High resolution decoding of the tobacco chloroplast translatome and its dynamics during light-intensity acclimation}, doi = {10.25932/publishup-51268}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512680}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 155}, year = {2020}, abstract = {Chloroplasts are the photosynthetic organelles in plant and algal cells that enable photoautotrophic growth. Due to their prokaryotic origin, modern-day chloroplast genomes harbor 100 to 200 genes. These genes encode core components of the photosynthetic complexes and the chloroplast gene expression machinery, making most of them essential for the viability of the organism. The regulation of these genes is dominated by translational adjustments. The powerful technique of ribosome profiling was successfully used to generate highly resolved pictures of the translational landscape of the Arabidopsis thaliana cytosol, identifying the translation of upstream open reading frames and long non-coding transcripts. In addition, differences in plastidial translation and ribosomal pausing sites were addressed with this method. However, a highly resolved picture of the chloroplast translatome is missing. Here, with the use of chloroplast isolation and targeted ribosome affinity purification, I generated highly enriched ribosome profiling datasets of the chloroplast translatome of Nicotiana tabacum in the dark and in the light. Chloroplast isolation was found to be unsuitable for the unbiased analysis of translation in the chloroplast but adequate for identifying potential co-translational import. Affinity purification was performed for the small and the large ribosomal subunit independently. The enriched datasets mirrored the results obtained from whole-cell ribosome profiling. Enhanced translational activity was detected for psbA in the light. No alternative translation initiation mechanism was identified by selective enrichment of small ribosomal subunit footprints. In sum, this is the first study that used enrichment strategies to obtain high-depth ribosome profiling datasets of chloroplasts to study ribosome subunit distribution and chloroplast-associated translation. Ever-changing light intensities challenge the photosynthetic capacity of photosynthetic organisms. Increased light intensities may lead to over-excitation of the photosynthetic reaction centers, resulting in damage to the photosystem core subunits. 
In addition to an expensive repair mechanism for the photosystem II core protein D1, photosynthetic organisms have developed various features to reduce or prevent photodamage. In the long term, the contents of the photosynthetic complexes are adjusted for the efficient use of the experienced irradiation. However, the contribution of chloroplast gene expression to the acclimation process remained largely unknown. Here, comparative transcriptome and ribosome profiling was performed for the early time points of high-light acclimation in Nicotiana tabacum chloroplasts on a genome-wide scale. The time-course data revealed stable transcript levels and only minor changes in the translational activity of specific chloroplast genes during high-light acclimation. Yet, psbA translation was increased two-fold in the high light from shortly after the shift until the end of the experiment. A stress-inducing shift from low to high light increased the translation only of psbA. This study indicates that acclimation does not begin within the observed time frame and that only short-term responses reducing photoinhibition were observed.}, language = {en} } @phdthesis{SchulzBehrendt2020, author = {Schulz-Behrendt, Claudia}, title = {Entwicklung und Evaluation eines berufsorientierten Gruppenprogramms Sozialer Arbeit in der medizinischen Rehabilitation am Beispiel kardiovaskul{\"a}rer Erkrankungen}, doi = {10.25932/publishup-49139}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-491397}, school = {Universit{\"a}t Potsdam}, pages = {142}, year = {2020}, abstract = {The present study analysed the direct relationship between a work-oriented social group work programme and the outcome of occupational reintegration among rehabilitation patients facing particular occupational problems. It was funded by the Deutsche Rentenversicherung Bund as a research project from 01.01.2013 to 31.12.2015 and carried out at the Chair of Rehabilitation Sciences of the Universit{\"a}t Potsdam. The research question was: Can an intensive social-work group intervention within inpatient medical rehabilitation strengthen the social competencies and the social support of rehabilitation patients to such an extent that long-term improvements in occupational reintegration result in comparison with conventional treatment? The study comprised a qualitative and a quantitative survey with an intervention in between. It included 352 patients aged between 18 and 65 years with cardiovascular diagnoses, whose clinical conditions are frequently accompanied by complex problem situations combined with a poor socio-medical prognosis. The group intervention was evaluated in a cluster-randomised controlled study design in order to provide empirical evidence of the extent to which the intervention can achieve greater effects than regular social-work treatment. The intervention groups took part in the group programme, while the control groups received the regular social-work treatment. As a result, no evidence of improvements in occupational reintegration, health-related work ability, quality of life, or social support through participation in the social-work group programme could be established with this sample. 
The return-to-work rate was 43.7\%; one quarter of the study group was unemployed after one year. The group intervention carried out is to be regarded as equivalent to the conventional setting of social work. In conclusion, the study points to the need for social-work support of occupational reintegration over a longer period after a cardiovascular illness, in particular through services close to patients' homes at a later point in time, when their health is more stable. The surveys suggested possible benefits from closer cooperation between the fields of social work and psychology. There were also indications of the influential role of relatives, who could support the reintegration process if involved in social counselling. The fit of the social-work group interventions examined should be improved through targeted social diagnostics.}, language = {de} } @phdthesis{Schuck2020, author = {Schuck, Bernhard}, title = {Geomechanical and petrological characterisation of exposed slip zones, Alpine Fault, New Zealand}, doi = {10.25932/publishup-44612}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-446129}, school = {Universit{\"a}t Potsdam}, pages = {XVII, 143}, year = {2020}, abstract = {The Alpine Fault is a large, plate-bounding, strike-slip fault extending along the north-western edge of the Southern Alps, South Island, New Zealand. It regularly accommodates large (MW > 8) earthquakes and has a high statistical probability of failure in the near future, i.e., it is late in its seismic cycle. This pending earthquake and the associated co-seismic landslides are expected to cause severe infrastructural damage affecting thousands of people, so the fault presents a substantial geohazard. The interdisciplinary study presented here aims to characterise the fault zone's 4D (space and time) architecture, because this provides information about its rheological properties that will enable better assessment of the hazard the fault poses. The studies undertaken include field investigations of principal slip zone fault gouges exposed along strike of the fault, and subsequent laboratory analyses of these outcrop and additional borehole samples. These observations have provided new information on (I) characteristic microstructures down to the nanoscale, which indicate which deformation mechanisms operated within the rocks, (II) mineralogical composition, which constrains the fault's geomechanical behaviour, and (III) geochemical composition, which allows the influence of fluid-related alteration processes on material properties to be unravelled. Results show that along-strike variations of fault rock properties such as microstructures and mineralogical composition are minor and / or do not substantially influence the fault zone architecture. They furthermore provide evidence that the architecture of the fault zone, particularly its fault core, is more complex than previously considered, and also more complex than expected for this sort of mature fault cutting quartzofeldspathic rocks. In particular, our results strongly suggest that the fault has more than one principal slip zone, and that these form an anastomosing network extending into the basement below the cover of Quaternary sediments. 
The observations detailed in this thesis highlight that two processes, (I) cataclasis and (II) authigenic mineral formation, are the major controls on the rheology of the Alpine Fault. The velocity-weakening behaviour of its fault gouge is favoured by abundant nanoparticles promoting powder lubrication and grain rolling rather than frictional sliding. Wall-rock fragmentation is accompanied by co-seismic, fluid-assisted dilatancy that is recorded by calcite cementation. This mineralisation, along with the authigenic formation of phyllosilicates, quickly alters the petrophysical fault zone properties after each rupture, restoring fault competency. Dense networks of anastomosing and mutually cross-cutting calcite veins and an intensively reworked gouge matrix demonstrate that strain repeatedly localised within the narrow fault gouge. Abundant undeformed euhedral chlorite crystallites, and calcite veins cross-cutting both fault gouge and the gravels that overlie basement on the fault's footwall, provide evidence that authigenic phyllosilicate growth, fluid-assisted dilatancy and the associated fault healing are active particularly close to the Earth's surface in this fault zone. Exposed Alpine Fault rocks are subject to intense weathering as a direct consequence of the abundant orogenic rainfall associated with the fault's location at the base of the Southern Alps. Furthermore, fault rock rheology is substantially affected by shallow-depth conditions such as the juxtaposition of competent hanging wall fault rocks on poorly consolidated footwall sediments. This means that the microstructural, mineralogical and geochemical properties of the exposed fault rocks may differ substantially from those at deeper levels, and thus are not characteristic of the majority of the fault rocks' history. Examples are (I) frictionally weak smectites found within the fault gouges, which are artefacts formed under near-surface temperature conditions and impart petrophysical properties that are not typical of most of the Alpine Fault's rocks, (II) grain-scale dissolution resulting from subaerial weathering rather than deformation by pressure-solution processes, and (III) fault gouge geometries that are more complex than expected for their deeper counterparts. The methodological approaches deployed in the analyses of this and other fault zones, and the major results of this study, are finally discussed in order to contextualize slip zone investigations of fault zones and landslides. Like faults, landslides are major geohazards, which highlights the importance of characterising their geomechanical properties. Similarities between faults, especially those exposed to subaerial processes, and landslides include mineralogical composition and geomechanical behaviour. Together, this ensures that failure occurs predominantly by cataclastic processes, although aseismic creep promoted by weak phyllosilicates is not uncommon. 
Consequently, the multidisciplinary approach commonly used to investigate fault zones may contribute to a better understanding of landslide faulting processes and to the assessment of their hazard potential.}, language = {en} } @phdthesis{Schinkoeth2020, author = {Schink{\"o}th, Michaela}, title = {Automatic affective reactions to exercise-related stimuli}, doi = {10.25932/publishup-47111}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471115}, school = {Universit{\"a}t Potsdam}, pages = {II, 117}, year = {2020}, abstract = {Even though the majority of individuals know that exercising is healthy, a high percentage struggle to achieve the recommended amount of exercise. The (social-cognitive) theories that are commonly applied to explain exercise motivation rest on the assumption that people base their decisions mainly on rational reasoning. However, behavior is not bound to reflection alone. In recent years, the role of automaticity and affect in exercise motivation has been increasingly discussed. In this dissertation, central assumptions of the affective-reflective theory of physical inactivity and exercise (ART; Brand \& Ekkekakis, 2018), an exercise-specific dual-process theory that emphasizes the role of a momentary automatic affective reaction for exercise decisions, were examined. The central aim of this dissertation was to investigate exercisers' and non-exercisers' automatic affective reactions to exercise-related stimuli (i.e., the type-1 process). In particular, the two components of the ART's type-1 process, that is, automatic associations with exercise and the automatic affective valuation of exercise, were under study. In the first publication (Schinkoeth \& Antoniewicz, 2017), research on automatic (evaluative) associations with exercise was summarized and evaluated in a systematic review. The results indicated that automatic associations with exercise appeared to be relevant predictors of exercise behavior and other exercise-related variables, providing evidence for a central assumption of the ART's type-1 process. Furthermore, indirect methods seem to be suitable for assessing automatic associations. The aim of the second publication (Schinkoeth, Weymar, \& Brand, 2019) was to approach the somato-affective core of the automatic valuation of exercise using an analysis of reactivity in vagal heart rate variability (HRV) while viewing exercise-related pictures. Results revealed that differences in exercise volume could be regressed on HRV reactivity. In light of the ART, these findings were interpreted as evidence of an inter-individual affective reaction elicited at the thought of exercise and triggered by exercise stimuli. In the third publication (Schinkoeth \& Brand, 2019, subm.), the aim was to disentangle and relate to each other the ART's type-1 process components: automatic associations and the affective valuation of exercise. Automatic associations with exercise were assessed with a recoding-free variant of an implicit association test (IAT). An analysis of HRV reactivity was applied to approach a somatic component of the affective valuation, and facial reactions in a facial expression (FE) task served as indicators of the automatic affective reaction's valence. Exercise behavior was assessed via self-report. The measurement of the affective valuation's valence with the FE task did not work well in this study. HRV reactivity was predicted by the IAT score and also statistically predicted exercise behavior. 
These results thus confirm and expand upon the results of the second publication and provide empirical evidence for the type-1 process as defined in the ART. This dissertation advances the field of exercise psychology concerning the influence of automaticity and affect on exercise motivation. Moreover, both methodological implications and theoretical extensions of the ART can be derived from the results.}, language = {en} } @phdthesis{Schill2020, author = {Schill, Denisa}, title = {Employer Branding in der Gesundheitswirtschaft}, doi = {10.25932/publishup-47333}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473330}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 237}, year = {2020}, abstract = {The healthcare industry in Germany faces numerous changes in its environment. At the same time, the hospital sector lacks medical staff. In particular, there is a shortage of physician hours, while the demand for medical services is rising. Demographic change, emigration abroad (e.g., to Switzerland), and developments such as the feminisation of the medical profession and changing values, as studied in generational research, reinforce this trend. Moreover, hospitals are increasingly regarded as unattractive workplaces by future employee cohorts. Junior doctors increasingly decide against curative medicine and clinical work while still at university. An acute recruitment problem is emerging that needs to be solved. The research objective is to develop a holistic strategic approach with a market-oriented accent for hospitals as employers. The hospital is defined as a knowledge- and competence-intensive service organisation. Employer branding provides the frame of reference for market-oriented human resource management. A research-theoretical foundation is provided by integrating strategic management approaches: employer branding bridges the market-based view and the competence-based view and constitutes brand management in the context of strategic human resource management. This thesis presents a holistic frame of reference that depicts the causal relationships underlying employer branding. Its centrepiece is the employer value proposition, which is based on the brand identity of the organisation. One aim of the employer branding approach is, among other things, to ensure a preference-generating effect in the process of job choice. The objectives and research interests require a broadly designed research approach that combines qualitative and quantitative methods. The guideline-based in-depth interviews (exploratory study design) aim to identify existing competence strengths of hospital organisations as well as those still to be developed. The sample is a typical case sampling (N=12). The deficient state of findings on which values and attractiveness factors are pronounced among prospective physicians is confirmed. In the hospital sector, a fragmentary and reactive approach is identified, which hampers the successful recruitment of qualified hospital staff. Quantitative market research is then used to analyse the demands of the market - that is, of future medical hospital staff - on a factor-analytical basis. The sample (N=475) is isomorphic. 
The process of attitude formation is situated within the neo-behaviourist explanatory model of buyer behaviour. The compatibility of work, career, and family as well as workplace childcare are identified as key components. The most important work values with respect to an attractive employer are reliability, responsibility, and respect. These components carry communicative value and differentiating power. Finally, applicants evaluate a hospital more positively in the process of job choice the more the values of the potential employee match the value profile of the organisation (person-organisation fit). Hospital organisations that implement the employer branding approach and embrace it as an opportunity to define their strengths, advantages, and offerings as an employer equip themselves for the intensifying competition for young physicians. After all, employer branding as a market-oriented strategic approach releases forces, both internally and externally, that yield differentiation advantages over other employers. In doing so, employer branding has positive effects on the human resources domain along the value chain and mitigates the economy-wide problem.}, language = {de} } @phdthesis{Rothe2020, author = {Rothe, Viktoria}, title = {Das Yamabe-Problem auf global-hyperbolischen Lorentz-Mannigfaltigkeiten}, doi = {10.25932/publishup-48601}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-486012}, school = {Universit{\"a}t Potsdam}, pages = {ix, 65}, year = {2020}, abstract = {In 1960, Yamabe claimed to have proven the following statement: on every compact Riemannian manifold (M,g) of dimension n ≥ 3 there exists a metric, conformally equivalent to g, with constant scalar curvature. This statement is equivalent to the existence of a solution of a certain semilinear elliptic differential equation, the Yamabe equation. In 1968, Trudinger found an error in Yamabe's proof, and as a consequence many mathematicians worked on what became known as the Yamabe problem. In the 1980s, the works of Trudinger, Aubin and Schoen showed that the statement does indeed hold. This has many advantages; for example, when analysing conformally invariant partial differential equations on compact Riemannian manifolds, the scalar curvature may be assumed to be constant. The question now arises whether the corresponding statement also holds on Lorentzian manifolds. The Lorentzian Yamabe problem thus reads: given a spatially compact globally hyperbolic Lorentzian manifold (M,g), does there exist a metric, conformally equivalent to g, with constant scalar curvature? The aim of this thesis is to investigate this problem. The Yamabe equation arising from this question is a semilinear wave equation whose solution is a positive smooth function from which the conformal factor is obtained. In order to keep the foundations required for the treatment of the Yamabe problem as general as possible, the first part of this thesis develops the local existence theory for arbitrary semilinear wave equations for sections of vector bundles in the framework of a Cauchy problem. 
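(For orientation, a standard form of the equation referred to here, which the abstract does not spell out; sign conventions differ between authors. Writing the conformally equivalent metric as $\tilde{g} = u^{4/(n-2)} g$ with a positive smooth function $u$, the condition $\mathrm{scal}_{\tilde{g}} = c$ for a constant $c$ is equivalent to the Yamabe equation \[ -\frac{4(n-1)}{n-2}\, \Box_g u + \mathrm{scal}_g\, u = c\, u^{\frac{n+2}{n-2}}, \] where on a Lorentzian manifold $\Box_g$ denotes the wave operator in place of the Laplace operator of the Riemannian case, which makes the equation a semilinear wave equation.)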
For the existence theory, the inverse function theorem for Banach spaces is applied in order to derive existence statements for semilinear wave equations from already existing existence results for linear wave equations. It is proven that, if the nonlinearity satisfies certain conditions, an almost globally-in-time solution of the Cauchy problem exists for small initial data, as well as a locally-in-time solution for arbitrary initial data. The second part of the thesis deals with the Yamabe equation on globally hyperbolic Lorentzian manifolds. First, it is shown that the nonlinearity of the Yamabe equation satisfies the conditions required in the first part, so that, if the scalar curvature of the given metric is close to a constant, small initial data exist for which the Yamabe equation has an almost globally-in-time solution. Using energy estimates, it is then shown for 4-dimensional globally hyperbolic Lorentzian manifolds that, under the assumption that the constant scalar curvature of the conformally equivalent metric is non-positive, a globally-in-time solution of the Yamabe equation exists, which, however, is not necessarily positive. Furthermore, it is shown that if the H2-norm of the scalar curvature with respect to the given metric is bounded in a certain way on a compact time interval, then the solution is positive on this time interval; here it is again assumed that the constant scalar curvature of the conformally equivalent metric is non-positive. If, in addition, the scalar curvature with respect to the given metric is negative and the metric satisfies certain conditions, then the solution is positive for all times in a compact time interval on which the gradient of the scalar curvature is bounded in a certain way. In both cases, under the stated conditions, the existence of a globally-in-time positive solution follows if M = I x Σ for a bounded open interval I. Finally, for M = R x Σ, an example of the non-existence of a global positive solution is given.}, language = {de} } @phdthesis{Rolf2020, author = {Rolf, Werner}, title = {Peri-urban farmland included in green infrastructure strategies promotes transformation pathways towards sustainable urban development}, doi = {10.25932/publishup-47700}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-477002}, school = {Universit{\"a}t Potsdam}, pages = {IV, 116}, year = {2020}, abstract = {Urbanization and agricultural land use are two of the main drivers of global change, with effects on ecosystem functions and human wellbeing. Green infrastructure is a new approach in spatial planning that contributes to sustainable urban development and addresses urban challenges such as biodiversity conservation, climate change adaptation, green economy development, and social cohesion. Because research has focused mainly on open green space structures, such as parks, urban forests, green buildings, and street greenery, but has neglected the spatial and functional potentials of utilizable agricultural land, this thesis aims to fill this gap. This cumulative thesis addresses how agricultural land in urban and peri-urban landscapes can contribute to the development of urban green infrastructure as a strategy to promote sustainable urban development. 
To this end, a number of different research approaches were applied. First, a quantitative, GIS-based modeling approach looked at spatial potentials, addressing the heterogeneity of the peri-urban landscape that defines agricultural potentials and constraints. Second, a participatory approach was applied, involving stakeholder opinions to evaluate multiple urban functions and benefits. Finally, an evidence synthesis was conducted to assess the current state of evidence in support of future policy making at different levels. The results contribute to the conceptual understanding of urban green infrastructure as a strategic spatial planning approach that incorporates inner-urban utilizable agricultural land and the agriculturally dominated landscape at the outer urban fringe. They support the proposition that linking peri-urban farmland with the green infrastructure concept can contribute to a network of multifunctional green spaces that provides multiple benefits to the urban system and successfully addresses urban challenges. Four strategies are introduced through which peri-urban farmland can contribute to a strategically planned multifunctional network, namely the connecting, the productive, the integrated, and the adapted way. Finally, this thesis sheds light on the opportunities that arise from the integration of peri-urban farmland into the green infrastructure concept to support the transformation towards more sustainable urban development. In particular, the inherent core planning principle of multifunctionality endorses the idea of co-benefits, which are considered crucial for triggering transformative processes. This work concludes that linking peri-urban farmland with the green infrastructure concept is a promising field of action for the development of new pathways for urban transformation towards sustainable urban development. Along with these outcomes, attention is drawn to limitations that remain to be addressed by future research, especially the identification of further mechanisms required to support policy integration at all levels.}, language = {en} } @phdthesis{RodriguezZuluaga2020, author = {Rodriguez Zuluaga, Juan}, title = {Electric and magnetic characteristics of equatorial plasma depletions}, doi = {10.25932/publishup-44587}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445873}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 87}, year = {2020}, abstract = {Near-Earth space represents a significant scientific and technological challenge. Particularly at low magnetic latitudes, the horizontal magnetic field geometry at the dip equator and its closed field lines support the existence of a distinct electric current system, abrupt electric field variations, and the development of plasma irregularities. Of particular interest are small-scale irregularities associated with equatorial plasma depletions (EPDs). They are responsible for the disruption of trans-ionospheric radio waves used for navigation, communication, and Earth observation. The rapid increase in satellite missions makes it imperative to study near-Earth space, especially the phenomena known to harm space technology or disrupt its signals. EPDs correspond to the large-scale structure (i.e., tens to hundreds of kilometers) of topside F-region irregularities commonly known as Spread F. They are observed as channels of depleted plasma density aligned with the ambient magnetic field in the post-sunset low-latitude ionosphere. 
Although the climatological variability of their occurrence in terms of season, longitude, local time, and solar flux is well known, their day-to-day variability is not. The sparse observations from ground-based instruments like radars, and the few simultaneous measurements of ionospheric parameters by space-based instruments, have left gaps in the knowledge of EPDs that is essential for comprehending their variability. In this dissertation, I profited from the unique observations of ESA's Swarm constellation mission, launched in November 2013, to tackle three issues, which yielded novel and significant results on the current knowledge of EPDs. I used Swarm's measurements of the electron density and the magnetic and electric fields to answer the following questions: (1.) What is the direction of propagation of the electromagnetic energy associated with EPDs? (2.) What are the spatial and temporal characteristics of the electric currents (field-aligned and diamagnetic currents) related to EPDs, i.e., their seasonal/geographical and local time dependencies? (3.) Under what conditions does the balance between magnetic and plasma pressure across EPDs occur? The results indicate the following: (1.) The electromagnetic energy associated with EPDs presents a preference for interhemispheric flows; that is, the related Poynting flux is directed from one magnetic hemisphere to the other and varies with longitude and season. (2.) The field-aligned currents at the edges of EPDs are interhemispheric. They generally close in the hemisphere with the highest Pedersen conductance. This hemispheric preference presents a seasonal/longitudinal dependence. The diamagnetic currents increase or decrease the magnetic pressure inside EPDs. These two effects rely on variations of the plasma temperature inside the EPDs that depend on longitude and local time. (3.) EPDs present lower or higher plasma pressure than the ambient plasma. For low-pressure EPDs, the plasma pressure gradients are mostly dominated by variations of the plasma density, so that variations of the temperature are negligible. High-pressure EPDs suggest significant temperature variations, with magnitudes of approximately twice the ambient value. Since their occurrence is more frequent in the vicinity of the South Atlantic magnetic anomaly, such high temperatures are suggested to be due to particle precipitation. In a broader context, this dissertation shows how dedicated satellite missions with high-resolution capabilities improve the specification of low-latitude ionospheric electrodynamics and expand the knowledge of EPDs, which is valuable for current and future communication, navigation, and Earth-observing missions. The contributions of this investigation represent several 'firsts' in the study of EPDs: (1.) the first observational evidence of interhemispheric electromagnetic energy flux and field-aligned currents, (2.) the first spatial and temporal characterization of EPDs based on their associated field-aligned and diamagnetic currents, and (3.) the first evidence of high plasma pressure in regions of depleted plasma density in the ionosphere. 
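(A brief note on the pressure balance invoked in (3.), using standard plasma physics relations rather than anything specific to this thesis: across a structure in quasi-static equilibrium the total pressure \[ p + \frac{B^2}{2\mu_0} = n k_B (T_e + T_i) + \frac{B^2}{2\mu_0} \approx \mathrm{const}, \] so a plasma-pressure deficit inside an EPD must be compensated by an enhanced magnetic pressure, and the perpendicular diamagnetic current density $\mathbf{j}_\perp = (\mathbf{B} \times \nabla p)/B^2$ at the depletion walls carries the corresponding magnetic-field perturbation.)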
These findings provide new insights that promise to advance our current knowledge not only of EPDs but also of the low-latitude post-sunset ionospheric environment.}, language = {en} } @phdthesis{Risch2020, author = {Risch, Julian}, title = {Reader comment analysis on online news platforms}, doi = {10.25932/publishup-48922}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489222}, school = {Universit{\"a}t Potsdam}, pages = {xi, 135}, year = {2020}, abstract = {Comment sections of online news platforms are an essential space to express opinions and discuss political topics. However, their misuse by spammers, haters, and trolls raises doubts about whether the benefits justify the costs of the time-consuming content moderation. As a consequence, many platforms have limited or even shut down comment sections completely. In this thesis, we present deep learning approaches for comment classification, recommendation, and prediction to foster respectful and engaging online discussions. The main focus is on two kinds of comments: toxic comments, which make readers leave a discussion, and engaging comments, which make readers join a discussion. First, we discourage and remove toxic comments, e.g., insults or threats. To this end, we present a semi-automatic comment moderation process, which is based on fine-grained text classification models and supports moderators. Our experiments demonstrate that data augmentation, transfer learning, and ensemble learning allow training robust classifiers even on small datasets. To establish trust in the machine-learned models, we reveal which input features are decisive for their output with attribution-based explanation methods. Second, we encourage and highlight engaging comments, e.g., serious questions or factual statements. We automatically identify the most engaging comments, so that readers need not scroll through thousands of comments to find them. The model training process builds on upvotes and replies as a measure of reader engagement. We also identify comments that address the article authors or are otherwise relevant to them to support interactions between journalists and their readership. Taking into account the readers' interests, we further provide personalized recommendations of discussions that align with their favored topics or involve frequent co-commenters. Our models outperform multiple baselines and recent related work in experiments on comment datasets from different platforms.}, language = {en} } @phdthesis{Rietze2020, author = {Rietze, Clemens}, title = {Optimierung und Analyse von molekularen Schaltern in komplexen Umgebungen: thermische Stabilit{\"a}t, Auslesbarkeit und Schaltbarkeit}, doi = {10.25932/publishup-45959}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459594}, school = {Universit{\"a}t Potsdam}, pages = {131}, year = {2020}, abstract = {Molecular switches have been a growing field of research for decades. This dissertation focused on improving the thermal stability, readability, and switchability of such molecular switches in complex environments by means of computational chemistry. In the first project, the kinetics of the thermal E → Z isomerization and the associated thermal stability of an azobenzene derivative were investigated. For this purpose, density functional theory (DFT) was applied in combination with Eyring transition state theory (TST). 
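(As a pointer to the method just named, in its textbook form rather than any thesis-specific variant: Eyring transition state theory gives the thermal rate constant as \[ k(T) = \kappa\, \frac{k_B T}{h}\, e^{-\Delta G^{\ddagger}/(RT)}, \] with the free energy of activation $\Delta G^{\ddagger}$ here obtainable from DFT calculations and the transmission coefficient $\kappa$ commonly set to 1; for a first-order process the thermal half-life of the switch then follows as $t_{1/2} = \ln 2 / k$.)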
The azobenzene derivative served as a simplified model for switching in a complex environment (here, in metal-organic frameworks). Thermodynamic and kinetic quantities were calculated under various influences, and good agreement with experiment was found. The method used here proved to be a suitable approach for predicting these quantities with reasonable accuracy. In the second project, the readability of the switching states, in the form of the nonlinear optical (NLO) contrast, was investigated for the fulgimide class of molecules. The dynamic hyperpolarizabilities required for this, which account for electron correlation, were calculated by means of an established scaling method. Various fulgimides were analyzed, and many experimental findings could be confirmed. Moreover, the theoretical prediction for a further system suggested that extending the π-electron system in particular is a promising approach for improving NLO contrasts. Fulgimides thus possess useful properties and could in the future find application as building blocks in photonic and optoelectronic devices. In the third project, the E → Z isomerization was simulated by molecular dynamics for a quantum-mechanically (QM) treated dimer with a molecular-mechanics (MM) environment and for two fluoroazobenzene monomers. In this way, the switchability of azobenzene derivatives in a complex environment (here, self-assembled monolayers, SAMs) was analyzed. The QM/MM model accounted for both van der Waals interactions with the environment and electronic coupling (only between QM molecules). Systematic investigations of the packing density were carried out. It was found that the quantum yield (the percentage of successful switching events) of the monomer is already reached at an intermolecular distance of 4.5 {\AA}. The largest quantum yields were obtained for the two fluoroazobenzenes investigated. The effects of the intermolecular distance and the influence of fluorine substituents on the dynamics were examined in detail, paving the way for follow-up studies.}, language = {de} } @phdthesis{Renz2020, author = {Renz, Jan}, title = {Lebensbegleitendes Lernen in einer digitalen Welt}, doi = {10.25932/publishup-47257}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472573}, school = {Universit{\"a}t Potsdam}, pages = {vii, 184}, year = {2020}, abstract = {In our digitalized world, learning is moving into the cloud: from teaching at school and the blackboard to the tablet, towards lifelong learning in the world of work and even beyond. How successful and attractive this contemporary form of learning is depends to no small extent on the technological capabilities offered by digital learning platforms built around MOOCs and school clouds. In their further development, learners and their learning experiences, rather than economic measures and KPIs, should be the focus. To this end, an optimization framework was developed that, for the development of learning platforms, identifies and prioritizes improvements on the basis of various qualitative and quantitative methods and steers their evaluation and implementation. 
Datengest{\"u}tzte Entscheidungen sollten auf einer ausreichenden Datenbasis aufbauen. Moderne Web-Anwendungen bestehen aber oft aus mehreren Microservices mit jeweils eigener Datenhaltung. Viele Daten sind daher nicht mehr einfach zug{\"a}nglich. Daher wird in dieser Arbeit ein Learning Analytics Dienst eingef{\"u}hrt, der diese Daten sammelt und verarbeitet. Darauf aufbauend werden Metriken eingef{\"u}hrt, auf deren Grundlage die erfassten Daten nutzbar werden und die somit zu verschiedenen Zwecken verwendet werden k{\"o}nnen. Neben der Visualisierung der Daten in Dashboards werden die Daten f{\"u}r eine automatisierte Qualit{\"a}tskontrolle herangezogen. So kann festgestellt werden, wenn Tests zu schwierig oder die soziale Interaktion in einem MOOC zu gering ist. Die vorgestellte Infrastruktur l{\"a}sst sich aber auch verwenden, um verschiedene A/B/n-Tests durchzuf{\"u}hren. In solchen Tests gibt es mehrere Varianten, die an verschiedene Nutzergruppen in einem kontrollierten Experiment erprobt werden. Dank der vorgestellten Testinfrastruktur, die in der HPI MOOC Plattform eingebaut wurde, kann ermittelt werden, ob sich f{\"u}r diese Gruppen statistisch signifikante {\"A}nderungen in der Nutzung feststellen lassen. Dies wurde mit f{\"u}nf verschiedenen Verbesserungen der HPI MOOC Plattform evaluiert, auf der auch openHPI und openSAP basieren. Dabei konnte gezeigt werden, dass sich Lernende mit reaktivierenden Mails zur{\"u}ck in den Kurs holen lassen. Es ist prim{\"a}r die Kommunikation der unbearbeiteten Lerninhalte des Nutzers, die eine reaktivierende Wirkung hat. Auch {\"U}bersichtsmails, die die Forenaktivit{\"a}t zusammenfassen, haben einen positiven Effekt erzielt. Ein gezieltes On-Boarding kann dazu f{\"u}hren, dass die Nutzer die Plattform besser verstehen und hierdurch aktiver sind. Der vierte Test konnte zeigen, dass die Zuordnung von Forenfragen zu einem bestimmten Zeitpunkt im Video und die grafische Anzeige dieser Informationen zu einer erh{\"o}hten Forenaktivit{\"a}t f{\"u}hrt. Auch die experimentelle Erprobung von unterschiedlichen Lernmaterialien, wie sie im f{\"u}nften Test durchgef{\"u}hrt wurde, ist in MOOCs hilfreich, um eine Verbesserung der Kursmaterialien zu erreichen. Neben diesen funktionalen Verbesserungen wird untersucht wie MOOC Plattformen und Schul-Clouds einen Nutzen bieten k{\"o}nnen, wenn Nutzern nur eine schwache oder unzuverl{\"a}ssige Internetanbindung zur Verf{\"u}gung steht (wie dies in vielen deutschen Schulen der Fall ist). Hier wird gezeigt, dass durch ein geschicktes Vorausladen von Daten die Internetanbindungen entlastet werden k{\"o}nnen. Teile der Lernanwendungen funktionieren dank dieser Anpassungen, selbst wenn keine Verbindung zum Internet besteht. Als Letztes wird gezeigt, wie Endger{\"a}te sich in einem lokalen Peer-to-Peer CDN gegenseitig mit Daten versorgen k{\"o}nnen, ohne dass diese aus dem Internet heruntergeladen werden m{\"u}ssen.}, language = {de} } @phdthesis{Reinhardt2020, author = {Reinhardt, Maria}, title = {Hybrid filters and multi-scale models}, doi = {10.25932/publishup-47435}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474356}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 102}, year = {2020}, abstract = {This thesis is concerned with Data Assimilation, the process of combining model predictions with observations. So called filters are of special interest. 
One is interested in computing the probability distribution of the state of a physical process in the future, given (possibly) imperfect measurements. This is done using Bayes' rule. The first part focuses on hybrid filters, which bridge the two main groups of filters: ensemble Kalman filters (EnKFs) and particle filters. The former are a group of very stable and computationally cheap algorithms, but they rest on rather strong assumptions. Particle filters, on the other hand, are more generally applicable but computationally expensive and as such not always suitable for high-dimensional systems. There is therefore a need to combine both groups to benefit from the advantages of each. This can be achieved by splitting the likelihood function when assimilating a new observation and treating one part of it with an EnKF and the other part with a particle filter. The second part of this thesis deals with the application of Data Assimilation to multi-scale models and the problems that arise from that. One of the main areas of application for Data Assimilation techniques is predicting the development of oceans and the atmosphere. These processes involve several scales and often balance relations between the state variables. The use of Data Assimilation procedures most often violates relations of that kind, which eventually leads to unrealistic and non-physical predictions of the future development of the process. This work discusses the inclusion of a post-processing step after each assimilation step, in which a minimisation problem that penalises the imbalance is solved. This method is tested on four different models: two Hamiltonian systems and two spatially extended models, the latter adding even more difficulties.}, language = {en} } @phdthesis{RamezaniZiarani2020, author = {Ramezani Ziarani, Maryam}, title = {Characterization of atmospheric processes related to hydro-meteorological extreme events over the south-central Andes}, doi = {10.25932/publishup-47175}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471755}, school = {Universit{\"a}t Potsdam}, pages = {i, 88}, year = {2020}, abstract = {The significant environmental and socioeconomic consequences of hydro-meteorological extreme events, such as extreme rainfall, constitute a most important motivation for analyzing these events in the south-central Andes of NW Argentina. The steep topographic and climatic gradients and their interactions frequently lead to the formation of deep convective storms and consequently trigger extreme rainfall generation. In this dissertation, I focus on identifying the dominant climatic variables and atmospheric conditions, and their spatiotemporal variability, leading to deep convection and extreme rainfall in the south-central Andes. This dissertation first examines the contribution of temperature to atmospheric humidity (dew-point temperature, Td) and to convection (convective available potential energy, CAPE) for deep convective storms and, hence, extreme rainfall along the topographic and climatic gradients. It was found that both climatic variables play an important role in extreme rainfall generation. However, their contributions differ depending on the topographic and climatic sub-regions, as well as on the rainfall percentiles. Second, this dissertation explores whether (near real-time) measurements of integrated water vapor (IWV) conducted with the Global Navigation Satellite System (GNSS) provide reliable data for explaining atmospheric humidity. 
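(For reference, the textbook definition of the stability measure used here, rather than any dataset-specific convention adopted in the thesis: \[ \mathrm{CAPE} = g \int_{z_{\mathrm{LFC}}}^{z_{\mathrm{EL}}} \frac{T_{v,\mathrm{parcel}} - T_{v,\mathrm{env}}}{T_{v,\mathrm{env}}}\, \mathrm{d}z, \] the positively buoyant energy available to a lifted air parcel between its level of free convection (LFC) and its equilibrium level (EL), written in terms of virtual temperatures $T_v$.)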
I argue that GNSS-IWV, in conjunction with other atmospheric stability parameters such as CAPE, is able to help decipher extreme rainfall in the eastern central Andes. In my work, I rely on a multivariable regression analysis described by a theoretical relationship and on a fitting function analysis. Third, this dissertation identifies the local impact of convection on extreme rainfall in the eastern Andes. Relying on a principal component analysis (PCA), it was found that in the presence of moist and warm air, extreme rainfall is observed more often during local night hours. The analysis includes the mechanisms behind this observation. The identification of the atmospheric conditions and climatic variables leading to extreme rainfall is one of the main outcomes of this dissertation. These conditions and variables are a prerequisite for understanding the dynamics of extreme rainfall and for predicting these events in the eastern Andes.}, language = {en} } @phdthesis{Purinton2020, author = {Purinton, Benjamin}, title = {Remote sensing applications to earth surface processes in the Eastern Central Andes}, doi = {10.25932/publishup-44592}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445926}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 134}, year = {2020}, abstract = {Geomorphology seeks to characterize the forms, rates, and magnitudes of sediment and water transport that sculpt landscapes. This is generally referred to as earth surface processes, which incorporates the influence of biologic (e.g., vegetation), climatic (e.g., rainfall), and tectonic (e.g., mountain uplift) factors in dictating the transport of water and eroded material. In mountains, high relief and steep slopes combine with strong gradients in rainfall and vegetation to create dynamic expressions of earth surface processes. This same rugged topography presents challenges in data collection and process measurement, where traditional techniques involving detailed observations or physical sampling are difficult to apply at the scale of entire catchments. Herein lies the utility of remote sensing. Remote sensing is defined as any measurement that does not disturb the natural environment, typically via acquisition of images in the visible- to radio-wavelength range of the electromagnetic spectrum. Remote sensing is an especially attractive option for measuring earth surface processes, because large areal measurements can be acquired at much lower cost and effort than traditional methods. These measurements cover not only topographic form, but also climatic and environmental metrics, which are all intertwined in the study of earth surface processes. This dissertation uses remote sensing data ranging from handheld camera-based photo surveying to spaceborne satellite observations to measure the expressions, rates, and magnitudes of earth surface processes in high-mountain catchments of the Eastern Central Andes in Northwest Argentina. This work probes the limits and caveats of remote sensing data and techniques applied to geomorphic research questions, and presents important progress at this disciplinary intersection.}, language = {en} } @phdthesis{Pudell2020, author = {Pudell, Jan-Etienne}, title = {Lattice dynamics}, doi = {10.25932/publishup-48445}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-484453}, school = {Universit{\"a}t Potsdam}, pages = {XII, 259}, year = {2020}, abstract = {In this thesis I summarize my contribution to the research field of ultrafast structural dynamics in condensed matter. 
It consists of 17 publications that cover the complex interplay between electron, magnon, and phonon subsystems in solid materials and the resulting lattice dynamics after ultrafast photoexcitation. The investigation of such dynamics is necessary for the physical understanding of processes in materials that might become important in the future as functional materials for technological applications, for example in data storage, information processing, sensors, or energy harvesting. In this work I present ultrafast x-ray diffraction (UXRD) experiments based on the optical-pump x-ray-probe technique, revealing the time-resolved lattice strain. To study these dynamics, the samples (mainly thin-film heterostructures) are excited by femtosecond near-infrared or visible light pulses. The induced strain dynamics caused by stresses of the excited subsystems are measured in a pump-probe scheme with x-ray diffraction (XRD) as a probe. The UXRD setups used during my thesis are a laser-driven table-top x-ray source and large-scale synchrotron facilities with dedicated time-resolved diffraction setups. The UXRD experiments provide quantitative access to heat reservoirs in nanometric layers and monitor the transient responses of these layers with coupled electron, magnon, and phonon subsystems. In contrast to optical probes, UXRD gives access to material-specific information that is unavailable to optical light, which detects multiple indistinguishable layers within its penetration depth. In addition, UXRD provides a layer-specific probe for layers buried in opaque heterostructures to study the energy flow. I extended this UXRD technique to obtain the driving stress profile by measuring the strain dynamics in the unexcited buried layer after excitation of the adjacent absorbing layers with femtosecond laser pulses. This enables the study of negative thermal expansion (NTE) in magnetic materials, which occurs due to the loss of magnetic order. Part of this work is the investigation of stress profiles, which are the source of coherent acoustic phonon wave packets (hypersound waves). The spatiotemporal shape of these stress profiles depends on the energy distribution profile and on the ability of the involved subsystems to produce stress. The evaluation of the UXRD data of rare-earth metals yields a stress profile that closely matches the optical penetration profile: in the paramagnetic (PM) phase the photoexcitation results in a quasi-instantaneous expansive stress of the metallic layer, whereas in the antiferromagnetic (AFM) phase a quasi-instantaneous contractive stress and a second contractive stress contribution rising on a 10 ps time scale add to the PM contribution. These two time scales are characteristic of the magnetic contribution and are in agreement with related studies of the magnetization dynamics of rare-earth materials. Several publications in this thesis demonstrate the scientific progress in the field of active strain control to drive a second excitation or engineer an ultrafast switch. These applications of ultrafast dynamics are necessary to enable the control of functional material properties via strain on ultrafast time scales. For this thesis I implemented upgrades of the existing laser-driven table-top UXRD setup in order to enhance the x-ray flux and thus resolve single-digit nanometer thick layers. 
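(A short note on how UXRD yields strain, based on the standard Bragg relation rather than any setup-specific calibration: for a reflection satisfying $2 d \sin\theta = \lambda$, differentiation gives the average strain of the probed layer as \[ \varepsilon = \frac{\Delta d}{d} = -\cot\theta\, \Delta\theta, \] so the transient shift $\Delta\theta$ of a layer's Bragg peak directly encodes its time-resolved lattice strain.)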
Furthermore, I developed and built a new in-situ time-resolved magneto-optic Kerr effect (MOKE) and optical reflectivity setup at the laser-driven table-top UXRD setup to measure the dynamics of lattice, electrons and magnons under the same excitation conditions.}, language = {en} } @phdthesis{Pilz2020, author = {Pilz, Tobias}, title = {Pursuing the understanding of uncertainties in hydrological modelling}, doi = {10.25932/publishup-47664}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476643}, school = {Universit{\"a}t Potsdam}, pages = {136}, year = {2020}, abstract = {Hydrological models are important tools for the simulation and quantification of the water cycle. They therefore aid in the understanding of hydrological processes, prediction of river discharge, assessment of the impacts of land use and climate changes, or the management of water resources. However, uncertainties associated with hydrological modelling are still large. While significant research has been done on the quantification and reduction of uncertainties, there are still fields that have received little attention so far, such as model structural uncertainties related to the process implementations in the models. This holds especially true for complex process-based models in contrast to simpler conceptual models. Consequently, the aim of this thesis is to improve the understanding of structural uncertainties with a focus on process-based hydrological modelling, including methods for their quantification. To identify common deficits of frequently used hydrological models and to develop strategies for reducing them, a survey among modellers was conducted. It was found that there is a certain degree of subjectivity in the perception of modellers, for instance with respect to the distinction of hydrological models into conceptual groups. It was further found that there are ambiguities about how to apply a certain hydrological model, for instance how many parameters should be calibrated, together with a large diversity of opinion regarding the deficits of models. Nevertheless, evapotranspiration processes are often represented in a more physically based manner, while processes of groundwater and soil water movement are often simplified, which many survey participants saw as a drawback. Large flexibility, for instance with respect to alternative process implementations or a small number of parameters to be calibrated, was generally seen as a strength of a model. Flexible and efficient software, which is straightforward to apply, has been increasingly acknowledged by the hydrological community. This work elaborated on this topic in a twofold way. First, a software package for semi-automated landscape discretisation has been developed, which serves as a tool for model initialisation. This was complemented by a sensitivity analysis of important and commonly used discretisation parameters, of which the size of hydrological sub-catchments as well as the size and number of hydrologically uniform computational units appeared to be more influential than the information considered for the characterisation of hillslope profiles. Second, a process-based hydrological model has been implemented in a flexible simulation environment with several alternative process representations and a number of numerical solvers.
It turned out that, even though computation times are still long, today's enhanced computational capabilities in combination with innovative methods for statistical analysis allow for the exploration of structural uncertainties of even complex process-based models, which up to now has often been neglected by the modelling community. In a further study it could be shown that process-based models may even be employed as tools for seasonal operational forecasting. In contrast to statistical models, which are faster to initialise and to apply, process-based models produce more information in addition to the target variable, even at finer spatial and temporal scales, and provide more insights into process behaviour and catchment functioning. However, the process-based model was much more dependent on reliable rainfall forecasts. It seems unlikely that there exists a single best formulation for hydrological processes, even for a specific catchment. This supports the use of flexible model environments with alternative process representations instead of a single model structure. However, correlation and compensation effects between process formulations, their parametrisation, and other aspects such as the numerical solver and model resolution may lead to surprising results and potentially misleading conclusions. In future studies, such effects should be more explicitly addressed and quantified. Moreover, model functioning appeared to be highly dependent on the meteorological conditions, and rainfall input was generally the most important source of uncertainty. It is still unclear how this could be addressed, especially in the light of the aforementioned correlations. The use of innovative data products, e.g.\ remote sensing data in combination with station measurements, and of efficient processing methods for the improvement of rainfall input and the explicit consideration of associated uncertainties is advisable to provide more insights and to make hydrological simulations and predictions more reliable.}, language = {en} } @phdthesis{Pick2020, author = {Pick, Leonie Johanna Lisa}, title = {The centennial evolution of geomagnetic activity and its driving mechanisms}, doi = {10.25932/publishup-47275}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472754}, school = {Universit{\"a}t Potsdam}, pages = {ix, 135}, year = {2020}, abstract = {This cumulative thesis is concerned with the evolution of geomagnetic activity since the beginning of the 20th century, that is, the time-dependent response of the geomagnetic field to solar forcing. The focus lies on the description of the magnetospheric response field at ground level, which is particularly sensitive to the ring current system, and on an interpretation of its variability in terms of the solar wind driving. Thereby, this work contributes to a comprehensive understanding of long-term solar-terrestrial interactions. The common basis of the presented publications is a reanalysis of vector magnetic field measurements from geomagnetic observatories located at low and middle geomagnetic latitudes. In the first two studies, new geomagnetic activity indices targeting the ring current are derived, the Annual and Hourly Magnetospheric Currents indices (A/HMC). Compared to existing indices (e.g., the Dst index), they not only extend the covered period by at least three solar cycles but also constitute a qualitative improvement concerning the absolute index level and the ~11-year solar cycle variability.
The analysis of A/HMC shows that (a) the annual geomagnetic activity experiences an interval-dependent trend with an overall linear decline of ~5 \% during 1900-2010; (b) the average trend-free activity level amounts to ~28 nT; (c) the solar-cycle-related variability shows amplitudes of ~15-45 nT; and (d) the activity level for geomagnetically quiet conditions (Kp<2) lies slightly below 20 nT. The plausibility of the last three points is ensured by comparison to independent estimates based either on magnetic field measurements from LEO satellite missions (since the 1990s) or on the modeling of geomagnetic activity from solar wind input (since the 1960s). An independent validation of the long-term trend is problematic, mainly because the sensitivity of the locally measured geomagnetic activity depends on geomagnetic latitude. Consequently, A/HMC is neither directly comparable to global geomagnetic activity indices (e.g., the aa index) nor to the partly reconstructed open solar magnetic flux, which requires a homogeneous response of the ground-based measurements to the interplanetary magnetic field and the solar wind speed. The last study combines a consistent, HMC-based identification of geomagnetic storms from 1930-2015 with an analysis of the corresponding spatial (magnetic-local-time-dependent) disturbance patterns. Among other results, the disturbances at dawn and dusk, particularly their evolution during the storm recovery phases, are shown to be indicative of the solar wind driving structure (Interplanetary Coronal Mass Ejections vs. Stream or Co-rotating Interaction Regions), which enables a backward prediction of the storm driver classes. The results indicate that ICME-driven geomagnetic storms have decreased since 1930, which is consistent with the concurrent decrease of HMC. Of the compiled follow-up studies, the inclusion of measurements from high-latitude geomagnetic observatories into the third study's framework seems most promising at this point.}, language = {en} } @phdthesis{Phung2020, author = {Phung, Thi Thuy Nga}, title = {Defect chemistry in halide perovskites}, doi = {10.25932/publishup-47652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476529}, school = {Universit{\"a}t Potsdam}, pages = {vi, 231}, year = {2020}, abstract = {Metal halide perovskites have become an attractive class of materials for the photovoltaic industry owing to their excellent optoelectronic properties. However, long-term stability remains an obstacle to the industrial realization of this material class. There is growing evidence that intrinsic defects in the perovskite promote material degradation. Understanding defects in perovskites is important in order to further improve their stability and optoelectronic quality. This dissertation therefore focuses on defect chemistry in perovskites. The first part of the dissertation gives a brief overview of the defect properties of halide perovskites. The second part then shows that doping methylammonium lead iodide with a small amount of alkaline earth metals (Sr and Mg) produces a higher-quality, less defective material, leading to high open-circuit voltages in both the n-i-p and the p-i-n solar cell architectures.
The doping was observed to occur in two regimes: a low doping concentration leads to the incorporation of the respective elements into the crystal lattice, whereas a high doping concentration leads to phase segregation. In the low-doping regime the material can be more n-doped, whereas in the high-doping regime it is less n-doped. The threshold between these two regimes depends on the atomic size of the doping elements. The next part of the dissertation investigates the photoinduced degradation of methylammonium lead iodide. This degradation mechanism is closely linked to the formation and migration of defects. Once formed, the defects can migrate, depending on the defect density and its distribution. Accordingly, a high defect density, such as at the grain boundaries of a perovskite film, can inhibit the mobility of ionic point defects. This insight could be applied to future material design in the photovoltaic industry, since perovskite solar cells normally employ polycrystalline thin films with a high density of grain boundaries. The final study presented in this dissertation focuses on the stability of state-of-the-art "triple-cation" perovskite-based solar cells under a permanently applied electrical bias. Prolonged operation (more than three hours of permanent bias) promotes amorphization in the halide perovskite. It is suspected that an amorphous phase forms at the interfaces, in particular between the hole-selective layer and the perovskite. This amorphous phase inhibits charge transport and considerably impairs the performance of the perovskite solar cell. However, once the bias is removed, the perovskite layers can recover in the dark after only a short rest. The amorphization is attributed to the migration of ionic defects, most likely the migration of halides. This approach provides a new understanding of the degradation mechanism in perovskite solar cells under operating conditions.}, language = {en} } @phdthesis{Perovic2020, author = {Perovic, Milena}, title = {Functionalization of nanoporous carbon materials for chiral separation and heterogeneous oxidation catalysis}, doi = {10.25932/publishup-48659}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-486594}, school = {Universit{\"a}t Potsdam}, pages = {140}, year = {2020}, abstract = {The impact that catalysis has on the global economy and the environment is substantial, since 85\% of all chemical industrial processes are catalytic. Among those, 80\% of the processes are heterogeneously catalyzed, 17\% make use of homogeneous catalysts, and 3\% are biocatalytic. Especially in the pharmaceutical and agrochemical industries, a significant part of these processes involves chiral compounds. Obtaining enantiomerically pure compounds is necessary and is usually accomplished by asymmetric synthesis and catalysis, as well as by chiral separation. The efficiency of these processes may be vastly improved if the chiral selectors are positioned on a porous solid support, thereby increasing the surface area available for chiral recognition. Similarly, the majority of commercial catalysts are also supported, usually comprising metal nanoparticles (NPs) dispersed on a highly porous oxide or nanoporous carbon material.
Porous carbons are materials with exceptional thermal and chemical stability that are also electrically conductive. Their stability at extreme pH values and temperatures, the possibility of tailoring their pore architecture and chemical functionalization, and their electrical conductivity have already established these materials in the fields of separation and catalysis. However, their heterogeneous chemical structure with abundant defects makes it challenging to develop reliable models for the investigation of structure-performance relationships. Therefore, the fundamental understanding of these robust materials under experimental conditions needs to be expanded to allow their further optimization for particular applications. This thesis contributes to our knowledge of carbons from different aspects and in different applications. On the one hand, a rather exotic novel application was investigated through attempts to synthesize porous carbon materials with an enantioselective surface. Chapter 4.1 describes an approach for obtaining mesoporous carbons with an enantioselective surface by direct carbonization of a chiral precursor. Two enantiomers of chiral ionic liquids (CIL) based on the amino acid tyrosine were used as carbon precursors, and ordered mesoporous silica SBA-15 served as a hard template for obtaining porosity. The chiral recognition of the prepared carbons was tested in solution by isothermal titration calorimetry with enantiomers of phenylalanine as probes, as well as by chiral vapor adsorption with 2-butanol enantiomers. Measurements in both the solution and the gas phase revealed differences in the affinity of the carbons towards the two enantiomers. In Chapter 4.2, the atomic efficiency of the CIL precursors was increased, and the porosity was developed independently of the chiral functionalization through the formation of stable composites of pristine carbon and a CIL-derived coating. Subjected to the same set of chirality experiments, the composites showed even higher enantiomeric ratios than those of the previous chapter. On the other hand, the structure-activity relationship of carbons as supports for gold nanoparticles was studied in a rather traditional catalytic model reaction at the interface between gas, liquid, and solid. In Chapter 5.1, a series of catalysts with different porosities showed that the kinetics of the ᴅ-glucose oxidation reaction can be enhanced by increasing the local concentration of the reactants around the active phase of the catalyst. A large number of uniform narrow mesopores connected to the surface of the Au catalyst supported on ordered mesoporous carbon led to water confinement, which increased the solubility of oxygen in the proximity of the catalyst and thereby its apparent catalytic activity. Having increased the oxygen concentration in the internal area of the catalyst, Chapter 5.2 increases the oxygen concentration in the external environment of the catalyst by introducing less cohesive liquids that serve as efficient solvents for oxygen, namely perfluorinated compounds, near the active phase of the catalyst. This was achieved by forming catalyst-particle-stabilized emulsions of perfluorocarbon in an aqueous ᴅ-glucose solution, which further promoted the catalytic activity of the gold-on-carbon catalyst.
The findings reported within this thesis are an important step towards understanding the structure-related properties of carbon materials.}, language = {en} } @phdthesis{Nordmann2020, author = {Nordmann, Paul-Patrick}, title = {Fehlerkorrektur von Speicherfehlern mit Low-Density-Parity-Check-Codes}, doi = {10.25932/publishup-48048}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-480480}, school = {Universit{\"a}t Potsdam}, pages = {IV, 99, XII}, year = {2020}, abstract = {Error correction in coding theory is concerned with the detection and correction of errors in the transmission and storage of messages. To this end, the message is encoded into a codeword by adding redundant information. Such coding schemes have to meet different requirements, for example the maximum number of correctable errors and the speed of the correction. A common coding scheme is the BCH code, which is used industrially for codes correcting up to four errors. A drawback of these codes is the processing time required to compute the error positions, which grows with increasing code length. This dissertation introduces a new coding scheme in which a long code is generated by a special arrangement of shorter BCH codes. This arrangement is realized via a further special code, an LDPC code, which is designed for fast error detection. For this purpose, a new construction method is presented that yields a code of arbitrary length with an arbitrary, prescribable number of correctable errors. In addition to fast error detection, the presented construction method also provides an easy and fast derivation of a procedure for encoding the message into a codeword, which to date is unique in the literature on LDPC codes. Based on the construction of an LDPC code, a method is presented for combining it with a BCH code, producing an arrangement of the BCH code in blocks. Besides the general description of this code, a concrete code for 2-bit error correction is described. It consists of two parts, which are described and compared in several variants. For certain lengths of the BCH code, a correction problem is identified that follows an algebraic rule. The BCH code is described in very general terms, but under certain conditions there exists a BCH code in the narrow sense, which defines the standard. This BCH code in the narrow sense is modified in this dissertation so that the algebraic problem in 2-bit error correction no longer occurs in the combination with the LDPC code. It is shown that after the modification the new code is still a BCH code in the general sense, which can correct 2-bit errors and detect 3-bit errors. Regarding the technical implementation of the error correction, it is further shown that the modified code is faster to process than the BCH code and offers further potential for improvement.
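To make the parity-check mechanism concrete (a generic toy sketch in Python, not the LDPC/BCH construction developed in the thesis; the matrix H below is hand-picked for illustration): error detection with a binary linear code amounts to computing the syndrome s = H r over GF(2), where a non-zero syndrome flags a corrupted word.

    import numpy as np

    # Toy sparse parity-check matrix over GF(2) (not the thesis' construction).
    H = np.array([[1, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 1, 0],
                  [1, 0, 1, 0, 0, 1]], dtype=np.uint8)

    def syndrome(H, r):
        """Syndrome s = H r (mod 2); s == 0 means all parity checks pass."""
        return H.dot(r) % 2

    codeword = np.array([1, 0, 1, 1, 1, 0], dtype=np.uint8)  # satisfies H c = 0
    received = codeword.copy()
    received[2] ^= 1                      # flip one bit: a simulated memory error

    print(syndrome(H, codeword))          # -> [0 0 0], valid word
    print(syndrome(H, received))          # -> non-zero, error detected

In a block arrangement like the one described above, such a fast detection stage decides which short BCH block actually needs the more expensive error-position computation.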
The final chapter shows that this modified code of arbitrary length is suitable for combination with the LDPC code, so that the scheme is not only more widely usable in terms of code length but, owing to the faster decoding, also has further advantages over a BCH code in the narrow sense.}, language = {de} } @phdthesis{Nooshiri2020, author = {Nooshiri, Nima}, title = {Improvement of routine seismic source parameter estimation based on regional and teleseismic recordings}, doi = {10.25932/publishup-45946}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459462}, school = {Universit{\"a}t Potsdam}, pages = {ix, 118}, year = {2020}, abstract = {Seismological agencies play an important role in seismological research and seismic hazard mitigation by providing source parameters of seismic events (location, magnitude, mechanism) and keeping these data accessible in the long term. The availability of catalogues of seismic source parameters is an important requirement for the evaluation and mitigation of seismic hazards, and the catalogues are particularly valuable to the research community as they provide fundamental long-term data in the geophysical sciences. This work is motivated by the rising demand for more robust and efficient methods for routine source parameter estimation and, ultimately, for the generation of reliable catalogues of seismic source parameters. Specifically, the aim of this work is to develop methods to determine the hypocentre location and temporal evolution of seismic sources from regional and teleseismic observations more accurately, and to investigate the potential of these methods for integration into near real-time processing. To achieve this, a location method has been developed that considers several events simultaneously and improves the relative location accuracy among nearby events. This method reduces the biasing effects of lateral velocity heterogeneities (or, equivalently, compensates for limitations and inaccuracies of the assumed velocity model) by calculating a set of timing corrections for each seismic station. The systematic errors introduced into the locations by the inaccuracies of the assumed velocity structure can thus be corrected without explicitly solving for a velocity model. Applications to sets of multiple earthquakes in complex tectonic environments with strongly heterogeneous structure, such as subduction zones and plate boundary regions, demonstrate that this relocation process significantly improves the hypocentre locations compared to standard locations. To meet the computational demands of this location process, a new open-source software package has been developed that allows for the efficient relocation of large multiple-event data sets using arrival time data. On top of that, a flexible location framework is provided that can be tailored to various application cases on local, regional, and global scales. The latest version of the software distribution, including the source code, a user guide, an example data set, and a change history, is freely available to the community. The relocation algorithm was then slightly modified and its performance evaluated in simulated near real-time processing. It has been demonstrated that applying the proposed technique significantly reduces the bias in routine locations and enhances the ability to locate lower-magnitude events using only regional arrival data.
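The station-correction scheme described above can be sketched compactly (a simplified illustration under assumed interfaces; `relocate` and `predict` stand in for any standard single-event locator and travel-time predictor and are not the thesis software): the mean travel-time residual of each station over all events is treated as an empirical timing correction, removed from the picks, and all events are relocated; iterating lets the locations and station terms converge together.

    import numpy as np

    def station_terms(residuals):
        """Mean observed-minus-predicted travel-time residual per station,
        ignoring missing picks (NaN); used as an empirical timing correction."""
        return np.nanmean(residuals, axis=0)

    def relocate_with_station_terms(obs_times, predict, relocate, n_iter=5):
        """Jointly estimate event locations and per-station timing corrections.

        obs_times : (n_events, n_stations) observed arrival times (NaN = no pick)
        predict   : location -> (n_stations,) predicted arrival times
        relocate  : corrected arrival times -> single-event location
        """
        corr = np.zeros(obs_times.shape[1])    # one timing correction per station
        for _ in range(n_iter):
            locs = [relocate(t - corr) for t in obs_times]   # relocate all events
            pred = np.array([predict(loc) for loc in locs])
            corr += station_terms(obs_times - pred - corr)   # update station terms
        return locs, corr

Because the corrections are shared by all events, they absorb the common path effects of an inaccurate velocity model, which is what improves the relative locations of nearby events.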
Finally, returning to the emphasis on global seismic monitoring, an inversion framework has been developed to determine the seismic source time function through direct waveform fitting of teleseismic recordings. The inversion technique can be systematically applied to moderate-size seismic events and has the potential to be performed in near real-time applications. It is exemplified by application to an abnormal seismic event: the 2017 North Korean nuclear explosion. The presented work and application case studies in this dissertation represent the first step in an effort to establish a framework for the automatic, routine generation of reliable catalogues of seismic event locations and source time functions.}, language = {en} } @phdthesis{Neumann2020, author = {Neumann, Justus}, title = {Secular evolution in galaxies}, doi = {10.25932/publishup-48270}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-482701}, school = {Universit{\"a}t Potsdam}, pages = {viii, 97}, year = {2020}, abstract = {Galaxies are gravitationally bound systems of stars, gas, dust and - probably - dark matter. They are the building blocks of the Universe. The morphology of galaxies is diverse: some galaxies have structures such as spirals, bulges, bars, rings, lenses or inner disks, among others. The main processes that characterise galaxy evolution can be separated into fast violent events, which dominated evolution at earlier times, and slower processes, which became dominant at later times and constitute a phase called secular evolution. Internal processes of secular evolution include the gradual rearrangement of matter and angular momentum, the build-up and dissolution of substructures, and the feeding of supermassive black holes and their feedback. Galaxy bulges, the bright central components of disc galaxies, are on the one hand relics of galaxy formation and evolution. For instance, the presence of a classical bulge suggests a relatively violent history, whereas the presence of a disc-like bulge indicates the occurrence of secular evolution processes in the main disc. Galaxy bars, elongated central stellar structures, are on the other hand the engines of secular evolution. Studying the internal properties of both bars and bulges is key to comprehending some of the processes through which secular evolution takes place. The main objectives of this thesis are (1) to improve the classification of bulges by combining photometric and spectroscopic approaches for a large sample of galaxies, (2) to quantify star formation in bars and verify dependencies on galaxy properties, and (3) to analyse stellar populations in bars to aid in understanding the formation and evolution of bars. Integral field spectroscopy is fundamental to the work presented in this thesis, which consists of three different projects as part of three different galaxy surveys: the CALIFA survey, the CARS survey and the TIMER project. The first part of this thesis constitutes an investigation of the nature of bulges in disc galaxies. We analyse 45 galaxies from the integral-field spectroscopic survey CALIFA by performing 2D image decompositions, growth curve measurements and spectral template fitting to derive stellar kinematics from CALIFA data cubes. From the obtained results, we present a recipe to classify bulges that combines four different parameters from photometry and kinematics: the bulge S{\'e}rsic index n_b, the concentration index C_{20,50}, the Kormendy relation and the inner slope of the radial velocity dispersion profile ∇σ.
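Since the recipe above relies on growth-curve quantities, here is a minimal sketch of how a concentration index of the C_{20,50} kind can be measured (an illustration under the assumption that C_{x,y} compares the radii enclosing x and y percent of the total light; the thesis' exact definition may differ):

    import numpy as np

    def radius_at_fraction(radii, growth_curve, frac):
        """Radius enclosing `frac` of the total flux, interpolated from a
        monotonic growth curve (cumulative flux vs. radius)."""
        return np.interp(frac * growth_curve[-1], growth_curve, radii)

    def concentration_index(radii, growth_curve, f_in=0.2, f_out=0.5):
        """Assumed form: ratio of the 20-percent and 50-percent light radii."""
        return (radius_at_fraction(radii, growth_curve, f_in)
                / radius_at_fraction(radii, growth_curve, f_out))

    # Toy growth curve of an exponential disc, I(r) ~ exp(-r/h), with h = 1:
    r = np.linspace(0.01, 20.0, 500)
    gc = 1.0 - (1.0 + r) * np.exp(-r)    # enclosed flux of an exponential profile
    print(concentration_index(r, gc))    # dimensionless; more centrally
                                         # concentrated profiles give smaller values

More concentrated light profiles pull the inner radii inwards relative to the outer ones, which is what lets such an index help separate classical from disc-like bulges.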
The results of the different approaches are in good agreement and allow a safe classification for approximately 95\% of the galaxies. We also find that our new 'inner' concentration index performs considerably better than the traditionally used C_{50,90} and, in combination with the Kormendy relation, provides a very robust indication of the physical nature of the bulge. In the second part, we study star formation within bars using VLT/MUSE observations of 16 nearby (0.01 < z < 0.06) barred active-galactic-nuclei (AGN)-host galaxies from the CARS survey. We derive spatially resolved star formation rates (SFR) from Hα emission line fluxes and perform a detailed multi-component photometric decomposition on images derived from the data cubes. We find a clear separation into eight star-forming (SF) and eight non-SF bars, which we interpret as an indication of a fast quenching process. We further report a correlation between the SFR in the bar and the shape of the bar surface brightness profile: only the flattest bars (n_bar < 0.4) are SF. Both parameters are found to be uncorrelated with Hubble type. Additionally, owing to the high spatial resolution of the MUSE data cubes, we are able for the first time to dissect the SFR within the bar and analyse trends parallel and perpendicular to the bar major axis. Star formation is 1.75 times stronger on the leading edge of a rotating bar than on the trailing edge and decreases radially. Moreover, from testing an AGN feeding scenario, we report that the SFR of the bar is uncorrelated with AGN luminosity. Lastly, we present a detailed analysis of the star formation histories and chemical enrichment of stellar populations (SP) in galaxy bars. We use MUSE observations of nine very nearby barred galaxies from the TIMER project to derive spatially resolved maps of stellar ages and metallicities, [α/Fe] abundances and star formation histories, as well as Hα as a tracer of ongoing star formation. Using these maps, we explore in detail the variations of SP perpendicular to the bar major axes. We find observational evidence for a separation of SP, supposedly caused by an evolving bar. Specifically, intermediate-age stars (∼ 2-6 Gyr) get trapped on more elongated orbits forming a thinner bar, while old stars (> 8 Gyr) form a rounder and thicker bar. This evidence is further strengthened by very similar results obtained from barred galaxies in the cosmological zoom-in simulations of the Auriga project. In addition, we find imprints of typical star formation patterns of barred galaxies on the youngest populations (< 2 Gyr), which continuously become more dominant from the major axis towards the sides of the bar. The effect is slightly stronger on the leading side. Furthermore, we find that bars are on average more metal-rich and less α-enhanced than the inner parts of the discs that surround them. We interpret this result as an indication of a more prolonged or continuous formation of the stars that shape the bar, as compared to shorter formation episodes in the disc within the bar region.}, language = {en} }
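For reference, the S{\'e}rsic profile that underlies the bulge and bar indices quoted above (the standard textbook form, not a formula specific to this thesis) is

    I(r) = I_e \exp\left\{ -b_n \left[ \left( \frac{r}{r_e} \right)^{1/n} - 1 \right] \right\},

where r_e is the effective (half-light) radius, I_e the intensity at r_e, and b_n a constant fixed by requiring that half of the total light falls within r_e. Small indices, such as the flat bars with n_bar < 0.4, correspond to shallow inner profiles that fall off steeply in the outskirts, whereas large n produces centrally peaked profiles with extended wings.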