@phdthesis{Panzer2024, author = {Panzer, Marcel}, title = {Design of a hyper-heuristics based control framework for modular production systems}, doi = {10.25932/publishup-63300}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-633006}, school = {Universit{\"a}t Potsdam}, pages = {vi, 334}, year = {2024}, abstract = {Volatile supply and sales markets, coupled with increasing product individualization and complex production processes, present significant challenges for manufacturing companies. These must navigate and adapt to ever-shifting external and internal factors while ensuring robustness against process variabilities and unforeseen events. This has a pronounced impact on production control, which serves as the operational intersection between production planning and the shop-floor resources, and necessitates the capability to manage intricate process interdependencies effectively. Considering the increasing dynamics and product diversification, alongside the need to maintain constant production performance, the implementation of innovative control strategies becomes crucial. In recent years, the integration of Industry 4.0 technologies and machine learning methods has gained prominence in addressing emerging challenges in production applications. Within this context, this cumulative thesis analyzes deep learning based production systems based on five publications. Particular attention is paid to the applications of deep reinforcement learning, aiming to explore its potential in dynamic control contexts. The analyses reveal that deep reinforcement learning excels in various applications, especially in dynamic production control tasks. Its efficacy can be attributed to its interactive learning and real-time operational model. However, despite its evident utility, there are notable structural, organizational, and algorithmic gaps in the prevailing research. A predominant portion of deep reinforcement learning based approaches is limited to specific job shop scenarios and often overlooks the potential synergies in combined resources. Furthermore, the analyses highlight the rare implementation of multi-agent systems and semi-heterarchical systems in practical settings. A notable gap remains in the integration of deep reinforcement learning into a hyper-heuristic. To bridge these research gaps, this thesis introduces a deep reinforcement learning based hyper-heuristic for the control of modular production systems, developed in accordance with the design science research methodology. Implemented within a semi-heterarchical multi-agent framework, this approach achieves a threefold reduction in control and optimisation complexity while ensuring high scalability, adaptability, and robustness of the system. In comparative benchmarks, this control methodology outperforms rule-based heuristics, reducing throughput times and tardiness, and effectively incorporates customer and order-centric metrics. The control artifact facilitates rapid scenario generation, motivating further research efforts and bridging the gap to real-world applications.
The overarching goal is to foster a synergy between theoretical insights and practical solutions, thereby enriching scientific discourse and addressing current industrial challenges.}, language = {en} } @misc{BanerjeeLipowskySanter2020, author = {Banerjee, Pallavi and Lipowsky, Reinhard and Santer, Mark}, title = {Coarse-grained molecular model for the Glycosylphosphatidylinositol anchor with and without protein}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {6}, issn = {1866-8372}, doi = {10.25932/publishup-52374}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-523742}, pages = {17}, year = {2020}, abstract = {Glycosylphosphatidylinositol (GPI) anchors are a unique class of complex glycolipids that anchor a great variety of proteins to the extracellular leaflet of plasma membranes of eukaryotic cells. These anchors can exist either with or without an attached protein called GPI-anchored protein (GPI-AP) both in vitro and in vivo. Although GPIs are known to participate in a broad range of cellular functions, it is to a large extent unknown how these are related to GPI structure and composition. Their conformational flexibility and microheterogeneity make it difficult to study them experimentally. Simplified atomistic models are amenable to all-atom computer simulations in small lipid bilayer patches but not suitable for studying their partitioning and trafficking in complex and heterogeneous membranes. Here, we present a coarse-grained model of the GPI anchor constructed with a modified version of the MARTINI force field that is suited for modeling carbohydrates, proteins, and lipids in an aqueous environment using MARTINI's polarizable water. The nonbonded interactions for sugars were reparametrized by calculating their partitioning free energies between polar and apolar phases. In addition, sugar-sugar interactions were optimized by adjusting the second virial coefficients of osmotic pressures for solutions of glucose, sucrose, and trehalose to match with experimental data. With respect to the conformational dynamics of GPI-anchored green fluorescent protein, the accessible time scales are now at least an order of magnitude larger than for the all-atom system. This is particularly important for fine-tuning the mutual interactions of lipids, carbohydrates, and amino acids when comparing to experimental results. We discuss the prospective use of the coarse-grained GPI model for studying protein-sorting and trafficking in membrane models.}, language = {en} } @masterthesis{Engelhardt2021, type = {Bachelor Thesis}, author = {Engelhardt, Max Angel Ronan}, title = {Zwischen Simulation und Beweis - eine mathematische Analyse des Bienaym{\'e}-Galton-Watson-Prozesses und sein Einsatz innerhalb des Mathematikunterrichts}, doi = {10.25932/publishup-52447}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-524474}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2021}, abstract = {Die Bienaym{\'e}-Galton-Watson Prozesse k{\"o}nnen f{\"u}r die Untersuchung von speziellen und sich entwickelnden Populationen verwendet werden. Die Populationen umfassen Individuen, welche sich identisch, zuf{\"a}llig, selbstst{\"a}ndig und unabh{\"a}ngig voneinander fortpflanzen und die jeweils nur eine Generation existieren. Die n-te Generation ergibt sich als zuf{\"a}llige Summe der Individuen der (n-1)-ten Generation.
Die Relevanz dieser Prozesse begr{\"u}ndet sich innerhalb der Historie und der inner- und außermathematischen Bedeutung. Die Geschichte der Bienaym{\'e}-Galton-Watson-Prozesse wird anhand der Entwicklung des Konzeptes bis heute dargestellt. Dabei werden die Wissenschaftler:innen verschiedener Disziplinen angef{\"u}hrt, die Erkenntnisse zu dem Themengebiet beigetragen und das Konzept in ihren Fachbereichen angef{\"u}hrt haben. Somit ergibt sich die außermathematische Signifikanz. Des Weiteren erh{\"a}lt man die innermathematische Bedeutsamkeit mittels des Konzeptes der Verzweigungsprozesse, welches auf die Bienaym{\'e}-Galton-Watson Prozesse zur{\"u}ckzuf{\"u}hren ist. Die Verzweigungsprozesse stellen eines der aussagekr{\"a}ftigsten Modelle f{\"u}r die Beschreibung des Populationswachstums dar. Dar{\"u}ber hinaus besteht die derzeitige Wichtigkeit durch die Anwendungsm{\"o}glichkeit der Verzweigungsprozesse und der Bienaym{\'e}-Galton-Watson Prozesse innerhalb der Epidemiologie. Es werden die Ebola- und die Corona-Pandemie als Anwendungsfelder angef{\"u}hrt. Die Prozesse dienen als Entscheidungsst{\"u}tze f{\"u}r die Politik und erm{\"o}glichen Aussagen {\"u}ber die Auswirkungen von Maßnahmen bez{\"u}glich der Pandemien. Neben den Prozessen werden ebenfalls der bedingte Erwartungswert bez{\"u}glich diskreter Zufallsvariablen, die wahrscheinlichkeitserzeugende Funktion und die zuf{\"a}llige Summe eingef{\"u}hrt. Die Konzepte vereinfachen die Beschreibung der Prozesse und bilden somit die Grundlage der Betrachtungen. Außerdem werden die ben{\"o}tigten und weiterf{\"u}hrenden Eigenschaften der grundlegenden Themengebiete und der Prozesse aufgef{\"u}hrt und bewiesen. Das Kapitel erreicht seinen H{\"o}hepunkt bei dem Beweis des Kritikalit{\"a}tstheorems, wodurch eine Aussage {\"u}ber das Aussterben des Prozesses in verschiedenen F{\"a}llen und somit {\"u}ber die Aussterbewahrscheinlichkeit get{\"a}tigt werden kann. Die F{\"a}lle werden anhand der zu erwartenden Anzahl an Nachkommen eines Individuums unterschieden. Es zeigt sich, dass ein Prozess bei einer zu erwartenden Anzahl kleiner gleich Eins mit Sicherheit ausstirbt und bei einer Anzahl gr{\"o}ßer als Eins die Population nicht in jedem Fall aussterben muss. Danach werden einzelne Beispiele, wie der linear fractional case, die Population von Fibroblasten (Bindegewebszellen) von M{\"a}usen und die Entstehungsfragestellung der Prozesse, angef{\"u}hrt. Diese werden mithilfe der erlangten Ergebnisse untersucht und einige ausgew{\"a}hlte zuf{\"a}llige Dynamiken werden im nachfolgenden Kapitel simuliert. Die Simulationen erfolgen durch ein in Python erstelltes Programm und werden mithilfe der Inversionsmethode realisiert. Die Simulationen stellen beispielhaft die Entwicklungen in den verschiedenen Kritikalit{\"a}tsf{\"a}llen der Prozesse dar. Zudem werden die H{\"a}ufigkeiten der einzelnen Populationsgr{\"o}ßen in Form von Histogrammen angebracht. Dabei l{\"a}sst sich der Unterschied zwischen den einzelnen F{\"a}llen best{\"a}tigen und es wird die Anwendungsm{\"o}glichkeit der Bienaym{\'e}-Galton-Watson Prozesse bei komplexeren Problemen deutlich. Histogramme bekr{\"a}ftigen, dass die einzelnen Populationsgr{\"o}ßen nur endlich oft vorkommen. Diese Aussage wurde von Galton aufgeworfen und in der Extinktions-Explosions-Dichotomie verwendet. Die dargestellten Erkenntnisse {\"u}ber das Themengebiet und die Betrachtung des Konzeptes werden mit einer didaktischen Analyse abgeschlossen.
Die Untersuchung beinhaltet die Ber{\"u}cksichtigung der Fundamentalen Ideen, der Fundamentalen Ideen der Stochastik und der Leitidee „Daten und Zufall“. Dabei ergibt sich, dass in Abh{\"a}ngigkeit der gew{\"a}hlten Perspektive die Anwendung der Bienaym{\'e}-Galton-Watson Prozesse innerhalb der Schule plausibel ist und von Vorteil f{\"u}r die Sch{\"u}ler:innen sein kann. F{\"u}r die Behandlung wird exemplarisch der Rahmenlehrplan f{\"u}r Berlin und Brandenburg analysiert und mit dem Kernlehrplan Nordrhein-Westfalens verglichen. Die Konzeption des Lehrplans aus Berlin und Brandenburg l{\"a}sst nicht den Schluss zu, dass die Bienaym{\'e}-Galton-Watson Prozesse angewendet werden sollten. Es l{\"a}sst sich feststellen, dass die zugrunde liegende Leitidee nicht vollumf{\"a}nglich mit manchen Fundamentalen Ideen der Stochastik vereinbar ist. Somit w{\"u}rde eine Modifikation hinsichtlich einer st{\"a}rkeren Orientierung des Lehrplans an den Fundamentalen Ideen die Anwendung der Prozesse erm{\"o}glichen. Die Aussage wird durch die Betrachtung und {\"U}bertragung eines nordrhein-westf{\"a}lischen Unterrichtsentwurfes f{\"u}r stochastische Prozesse auf die Bienaym{\'e}-Galton-Watson Prozesse unterst{\"u}tzt. Dar{\"u}ber hinaus werden eine Concept Map und ein Vernetzungspentagraph nach von der Bank konzipiert, um diesen Aspekt hervorzuheben.}, language = {de} } @phdthesis{Šustr2020, author = {Šustr, David}, title = {Molecular diffusion in polyelectrolyte multilayers}, doi = {10.25932/publishup-48903}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489038}, school = {Universit{\"a}t Potsdam}, pages = {106}, year = {2020}, abstract = {Research on novel and advanced biomaterials is an indispensable step towards their applications in desirable fields such as tissue engineering, regenerative medicine, cell culture, or biotechnology. The work presented here focuses on such a promising material: polyelectrolyte multilayer (PEM) composed of hyaluronic acid (HA) and poly(L-lysine) (PLL). This gel-like polymer surface coating is able to accumulate (bio-)molecules such as proteins or drugs and release them in a controlled manner. It serves as a mimic of the extracellular matrix (ECM) in composition and intrinsic properties. These qualities make the HA/PLL multilayers a promising candidate for multiple bio-applications such as those mentioned above. The work presented aims at the development of a straightforward approach for assessment of multi-fractional diffusion in multilayers (first part) and at control of local molecular transport into or from the multilayers by laser light trigger (second part). The mechanism of the loading and release is governed by the interaction of bioactives with the multilayer constituents and by the diffusion phenomenon overall. The diffusion of a molecule in HA/PLL multilayers shows multiple fractions of different diffusion rates. Approaches that are able to assess the mobility of molecules in such a complex system are limited. This shortcoming motivated the design of a novel evaluation tool presented here. The tool employs a simulation-based approach for evaluation of the data acquired by the fluorescence recovery after photobleaching (FRAP) method. In this approach, possible fluorescence recovery scenarios are first simulated and afterwards compared with the acquired data while optimizing the parameters of a model until a sufficient match is achieved.
Fluorescent latex particles of different sizes and fluorescein in an aqueous medium are utilized as test samples validating the analysis results. The diffusion of protein cytochrome c in HA/PLL multilayers is evaluated as well. This tool significantly broadens the possibilities of analysis of spatiotemporal FRAP data, which originate from multi-fractional diffusion, while striving to be widely applicable. This tool has the potential to elucidate the mechanisms of molecular transport and empower rational engineering of the drug release systems. The second part of the work focuses on the fabrication of such a spatiotemporally controlled drug release system employing the HA/PLL multilayer. This release system comprises different layers of various functionalities that together form a sandwich structure. The bottom layer, which serves as a reservoir, is formed by HA/PLL PEM deposited on a planar glass substrate. On top of the PEM, a layer of so-called hybrids is deposited. The hybrids consist of thermoresponsive poly(N-isopropylacrylamide) (PNIPAM)-based hydrogel microparticles with surface-attached gold nanorods. The layer of hybrids is intended to serve as a gate that controls the local molecular transport through the PEM-solution interface. The possibility of stimulating the molecular transport by near-infrared (NIR) laser irradiation is being explored. From several tested approaches for the deposition of hybrids onto the PEM surface, the drying-based approach was identified as optimal. Experiments that examine the functionality of the fabricated sandwich at elevated temperature document the reversible volume phase transition of the PEM-attached hybrids while sustaining the sandwich stability. Further, the gold nanorods were shown to effectively absorb light radiation in the tissue- and cell-friendly NIR spectral region while transducing the energy of light into heat. The rapid and reversible shrinkage of the PEM-attached hybrids was thereby achieved. Finally, dextran was employed as a model transport molecule. It loads into the PEM reservoir in a few seconds with a partition constant of 2.4, while it spontaneously releases in a slower, sustained manner. The local laser irradiation of the sandwich, which contains the fluorescein isothiocyanate-tagged dextran, leads to a gradual reduction of fluorescence intensity in the irradiated region. The fabricated release system employs the renowned photoresponsivity of the hybrids in an innovative setting. The results of the research are a step towards a spatially-controlled on-demand drug release system that paves the way to spatiotemporally controlled drug release. The approaches developed in this work have the potential to elucidate the molecular dynamics in the ECM and to foster engineering of multilayers with properties tuned to mimic the ECM. The work aims at spatiotemporal control over the diffusion of bioactives and their presentation to the cells.}, language = {en} } @misc{AlHalbouniHolohanTaherietal.2018, author = {Al-Halbouni, Djamil and Holohan, Eoghan P. and Taheri, Abbas and Sch{\"o}pfer, Martin P. J.
and Emam, Sacha and Dahm, Torsten}, title = {Geomechanical modelling of sinkhole development using distinct elements}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1061}, issn = {1866-8372}, doi = {10.25932/publishup-46843}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-468435}, pages = {35}, year = {2018}, abstract = {Mechanical and/or chemical removal of material from the subsurface may generate large subsurface cavities, the destabilisation of which can lead to ground collapse and the formation of sinkholes. Numerical simulation of the interaction of cavity growth, host material deformation and overburden collapse is desirable to better understand the sinkhole hazard but is a challenging task due to the involved high strains and material discontinuities. Here, we present 2-D distinct element method numerical simulations of cavity growth and sinkhole development. Firstly, we simulate cavity formation by quasi-static, stepwise removal of material in a single growing zone of an arbitrary geometry and depth. We benchmark this approach against analytical and boundary element method models of a deep void space in a linear elastic material. Secondly, we explore the effects of properties of different uniform materials on cavity stability and sinkhole development. We perform simulated biaxial tests to calibrate macroscopic geotechnical parameters of three model materials representative of those in which sinkholes develop at the Dead Sea shoreline: mud, alluvium and salt. We show that weak materials do not support large cavities, leading to gradual sagging or suffusion-style subsidence. Strong materials support quasi-stable to stable cavities, the overburdens of which may fail suddenly in a caprock or bedrock collapse style. Thirdly, we examine the consequences of layered arrangements of weak and strong materials. We find that these are more susceptible to sinkhole collapse than uniform materials not only due to a lower integrated strength of the overburden but also due to an inhibition of stabilising stress arching. Finally, we compare our model sinkhole geometries to observations at the Ghor Al-Haditha sinkhole site in Jordan. Sinkhole depth/diameter ratios of 0.15 in mud, 0.37 in alluvium and 0.33 in salt are reproduced successfully in the calibrated model materials. The model results suggest that the observed distribution of sinkhole depth/diameter values in each material type may partly reflect sinkhole growth trends.}, language = {en} } @phdthesis{Michalczyk2019, author = {Michalczyk, Anna}, title = {Modelling of nitrogen cycles in intensive winter wheat-summer maize double cropping systems in the North China Plain}, doi = {10.25932/publishup-44421}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444213}, school = {Universit{\"a}t Potsdam}, pages = {X, 154}, year = {2019}, abstract = {The North China Plain (NCP) is one of the most productive and intensive agricultural regions in China. High doses of mineral nitrogen (N) fertiliser, often combined with flood irrigation, are applied, resulting in N surplus, groundwater depletion and environmental pollution. The objectives of this thesis were to use the HERMES model to simulate the N cycle in winter wheat (Triticum aestivum L.)-summer maize (Zea mays L.)
double crop rotations and show the performance of the HERMES model, of the new ammonia volatilisation sub-module and of the new nitrification inhibition tool in the NCP. Further objectives were to assess the model's potential to save N and water at plot and county scale, as well as in the short and the long term. Additionally, improved management strategies should be found with the help of a model-based nitrogen fertiliser recommendation (NFR) and adapted irrigation. Results showed that the HERMES model performed well under growing conditions of the NCP and was able to describe the relevant processes related to soil-plant interactions concerning N and water during a 2.5-year field experiment. No differences in grain yield between the real-time model-based NFR and the other treatments of the experiments on plot scale in Quzhou County could be found. Simulations with increasing amounts of irrigation resulted in significantly higher N leaching, higher N requirements of the NFR and reduced yields. Thus, conventional flood irrigation as currently practised by the farmers bears great uncertainties, and exact irrigation amounts should be known for future simulation studies. In the best-practice scenario simulation on plot scale, N input and N leaching, but also irrigation water, could be reduced strongly within 2 years. Thus, the model-based NFR in combination with adapted irrigation had the highest potential to reduce nitrate leaching, compared to farmers' practice and mineral N (Nmin)-reduced treatments. The calibrated and validated ammonia volatilisation sub-module of the HERMES model also worked well under the climatic and soil conditions of northern China. Simple ammonia volatilisation approaches also gave satisfying results compared to process-oriented approaches. During the simulation with ammonium sulphate nitrate with nitrification inhibitor (ASNDMPP), ammonia volatilisation was higher than in the simulation without nitrification inhibitor, while the result for nitrate leaching was the opposite. Although nitrification worked well in the model, nitrification-borne nitrous oxide emissions should be considered in future. Results of the simulated annual long-term (31 years) N losses in whole Quzhou County in Hebei Province were 296.8 kg N ha$^{-1}$ under the common farmers' practice treatment and 101.7 kg N ha$^{-1}$ under an optimised treatment including NFR and automated irrigation (OPTai). Spatial differences in simulated N losses throughout Quzhou County could only be found due to different N inputs. Simulations of an optimised treatment could save, on average, more than 260 kg N ha$^{-1}$ a$^{-1}$ from fertiliser input and 190 kg N ha$^{-1}$ a$^{-1}$ from N losses and around 115.7 mm a$^{-1}$ of water, compared to farmers' practice. These long-term simulation results showed lower N and water saving potential compared to short-term simulations and underlined the necessity of long-term simulations to overcome the effect of high initial N stocks in soil. Additionally, the OPTai worked best on clay loam soil except for a high simulated denitrification loss, while the simulations using farmers' practice irrigation could not match the actual water needs, resulting in yield decline, especially for winter wheat. Thus, a precise adaptation of management to actual weather conditions and plant growth needs is necessary for future simulations. However, the optimised treatments did not seem to be able to maintain the soil organic matter pools, even with full crop residue input.
Extra organic inputs seem to be required to maintain soil quality in the optimised treatments. HERMES is a relatively simple model, with regard to data input requirements, to simulate the N cycle. It can offer interpretation of management options at plot, county and regional scale for extension and research staff. Also in combination with other N and water saving methods, the model promises to be a useful tool.}, language = {en} } @phdthesis{Codutti2018, author = {Codutti, Agnese}, title = {Behavior of magnetic microswimmers}, doi = {10.25932/publishup-42297}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-422976}, school = {Universit{\"a}t Potsdam}, pages = {iv, 142}, year = {2018}, abstract = {Microswimmers, i.e. swimmers of micron size experiencing low Reynolds numbers, have received a great deal of attention in recent years, since many applications are envisioned in medicine and bioremediation. A promising field is that of magnetic swimmers, since magnetism is biocompatible and could be used to direct or actuate the swimmers. This thesis studies two examples of magnetic microswimmers from a physics point of view. The first system to be studied is that of magnetic cells, which can be magnetic biohybrids (a swimming cell coupled with a magnetic synthetic component) or magnetotactic bacteria (naturally occurring bacteria that produce an intracellular chain of magnetic crystals). A magnetic cell can passively interact with external magnetic fields, which can be used to direct it. The aim of the thesis is to understand how magnetic cells couple this magnetic interaction to their swimming strategies, mainly how they combine it with chemotaxis (the ability to sense external gradients of chemical species and to bias their walk on these gradients). In particular, one open question addresses the advantage given by these magnetic interactions for the magnetotactic bacteria in a natural environment, such as porous sediments. In the thesis, a modified Active Brownian Particle model is used to perform simulations and to reproduce experimental data for different systems such as bacteria swimming in the bulk, in a capillary or in confined geometries. I will show that magnetic fields speed up chemotaxis under special conditions, depending on parameters such as the swimming strategy (run-and-tumble or run-and-reverse), aerotactic strategy (axial or polar), and magnetic fields (intensities and orientations), but that they can also hinder bacterial chemotaxis, depending on the system. The second example of magnetic microswimmers is rigid magnetic propellers such as helices or random-shaped propellers. These propellers are actuated and directed by an external rotating magnetic field. One open question is how shape and magnetic properties influence the propeller behavior; the goal of this research field is to design the best propeller for a given situation. The aim of the thesis is to propose a simulation method to reproduce the behavior of experimentally-realized propellers and to determine their magnetic properties. The hydrodynamic simulations are based on the use of the mobility matrix. As the main result, I propose a method to match the experimental data, showing that not only the shape but also the magnetic properties influence the propellers' swimming characteristics.}, language = {en} } @misc{KuikLauerChurkinaetal.2016, author = {Kuik, Friderike and Lauer, Axel and Churkina, Galina and Denier Van der Gon, Hugo Anne Cornelis and Fenner, Daniel and Mar, Kathleen A.
and Butler, Tim M.}, title = {Air quality modelling in the Berlin-Brandenburg region using WRF-Chem v3.7.1}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {531}, issn = {1866-8372}, doi = {10.25932/publishup-41013}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410131}, pages = {25}, year = {2016}, abstract = {Air pollution is the number one environmental cause of premature deaths in Europe. Despite extensive regulations, air pollution remains a challenge, especially in urban areas. For studying summertime air quality in the Berlin-Brandenburg region of Germany, the Weather Research and Forecasting Model with Chemistry (WRF-Chem) is set up and evaluated against meteorological and air quality observations from monitoring stations as well as from a field campaign conducted in 2014. The objective is to assess which resolution and level of detail in the input data are needed for simulating urban background air pollutant concentrations and their spatial distribution in the Berlin-Brandenburg area. The model setup includes three nested domains with horizontal resolutions of 15, 3 and 1 km and anthropogenic emissions from the TNO-MACC III inventory. We use RADM2 chemistry and the MADE/SORGAM aerosol scheme. Three sensitivity simulations are conducted, updating input parameters to the single-layer urban canopy model based on structural data for Berlin, specifying land use classes on a sub-grid scale (mosaic option) and downscaling the original emissions to a resolution of ca. 1 km x 1 km for Berlin based on proxy data including traffic density and population density. The results show that the model simulates meteorology well, though urban 2 m temperature and urban wind speeds are biased high and nighttime mixing layer height is biased low in the base run with the settings described above. We show that the simulation of urban meteorology can be improved when specifying the input parameters to the urban model, and to a lesser extent when using the mosaic option. On average, ozone is simulated reasonably well, but maximum daily 8 h mean concentrations are underestimated, which is consistent with the results from previous modelling studies using the RADM2 chemical mechanism. Particulate matter is underestimated, which is partly due to an underestimation of secondary organic aerosols. NOx (NO + NO2) concentrations are simulated reasonably well on average, but nighttime concentrations are overestimated due to the model's underestimation of the mixing layer height, and urban daytime concentrations are underestimated. The daytime underestimation is improved when using downscaled, and thus locally higher, emissions, suggesting that part of this bias is due to deficiencies in the emission input data and their resolution. The results further demonstrate that a horizontal resolution of 3 km improves the results and spatial representativeness of the model compared to a horizontal resolution of 15 km. With the input data (land use classes, emissions) at the level of detail of the base run of this study, we find that a horizontal resolution of 1 km does not improve the results compared to a resolution of 3 km.
However, our results suggest that a 1 km horizontal model resolution could enable a detailed simulation of local pollution patterns in the Berlin-Brandenburg region if the urban land use classes, together with the respective input parameters to the urban canopy model, are specified with a higher level of detail and if urban emissions of higher spatial resolution are used.}, language = {en} } @phdthesis{Konon2018, author = {Konon, Alexander}, title = {Essays on career choice under risk and ambiguity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-416466}, school = {Universit{\"a}t Potsdam}, pages = {x, 250, xxxv}, year = {2018}, abstract = {This dissertation consists of five self-contained essays, addressing different aspects of career choices, especially the choice of entrepreneurship, under risk and ambiguity. In Chapter 2, the first essay develops an occupational choice model with boundedly rational agents, who lack information, receive noisy feedback, and are restricted in their decisions by their personality, to analyze and explain puzzling empirical evidence on entrepreneurial decision processes. In the second essay, in Chapter 3, I contribute to the literature on entrepreneurial choice by constructing a general career choice model on the basis of the assumption that outcomes are partially ambiguous. The third essay, in Chapter 4, theoretically and empirically analyzes the impact of media on career choices, where information on entrepreneurship provided by the media is treated as an informational shock affecting prior beliefs. The fourth essay, presented in Chapter 5, contains an empirical analysis of the effects of cyclical macro variables (GDP and unemployment) on innovative start-ups in Germany. In the fifth and last essay, in Chapter 6, we examine whether information on personality is useful for advice, using the example of career advice.}, language = {en} } @misc{GieseHenklerHirsch2017, author = {Giese, Holger and Henkler, Stefan and Hirsch, Martin}, title = {A multi-paradigm approach supporting the modular execution of reconfigurable hybrid systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402896}, pages = {34}, year = {2017}, abstract = {Advanced mechatronic systems have to integrate existing technologies from mechanical, electrical and software engineering. They must be able to adapt their structure and behavior at runtime by reconfiguration to react flexibly to changes in the environment. Therefore, a tight integration of structural and behavioral models of the different domains is required. This integration results in complex reconfigurable hybrid systems, the execution logic of which cannot be addressed directly with existing standard modeling, simulation, and code-generation techniques. We present in this paper how our component-based approach for reconfigurable mechatronic systems, MechatronicUML, efficiently handles the complex interplay of discrete behavior and continuous behavior in a modular manner.
In addition, its extension to even more flexible reconfiguration cases is presented.}, language = {en} } @book{DyckGieseLambers2017, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, number = {112}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-391-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100279}, pages = {viii, 112}, year = {2017}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high quality software. In particular, behavior preservation is the most important correctness property avoiding the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques either show that specific properties are preserved or, more generally and at greater complexity, they show some kind of behavioral equivalence or refinement between source and target model of the transformation. Both kinds of behavior preservation verification goals have been presented with automatic tool support for the instance level, i.e. for a given source and target model specified by the model transformation. However, up until now there is no automatic verification approach available at the transformation level, i.e. for all source and target models specified by the model transformation. In this report, we extend our results presented in [27] and outline a new sophisticated approach for the automatic verification of behavior preservation captured by bisimulation resp. simulation for model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we show that the behavior preservation problem can be reduced to invariant checking for graph transformation and that the resulting checking problem can be addressed by our own invariant checker even for a complex example where a sequence chart is transformed into communicating automata. We further discuss today's limitations of invariant checking for graph transformation and motivate further lines of future work in this direction.}, language = {en} } @phdthesis{Schroeder2015, author = {Schr{\"o}der, Sarah}, title = {Modelling surface evolution coupled with tectonics}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90385}, school = {Universit{\"a}t Potsdam}, pages = {viii, 129}, year = {2015}, abstract = {This study presents the development of 1D and 2D Surface Evolution Codes (SECs) and their coupling to any lithospheric-scale (thermo-)mechanical code with a quadrilateral structured surface mesh. Both SECs involve diffusion as an approach for hillslope processes and the stream power law to reflect riverbed incision. The 1D SEC settles sediment that was produced by fluvial incision in the appropriate minimum, while the supply-limited 2D SEC DANSER uses a fast filling algorithm to model sedimentation. It is based on a cellular automaton. A slope-dependent factor in the sediment flux extends the diffusion equation to nonlinear diffusion. The discharge accumulation is achieved with the D8-algorithm and an improved drainage accumulation routine. Lateral incision enhances the modelling of incision. Following empirical laws, it incises channels several cells wide.
The coupling method enables different temporal and spatial resolutions of the SEC and the thermo-mechanical code. It transfers vertical as well as horizontal displacements to the surface model. A weighted smoothing of the 3D surface displacements is implemented. The smoothed displacement vectors transmit the deformation by bilinear interpolation to the surface model. These interpolation methods ensure mass conservation in both directions and prevent the two surfaces from drifting apart. The presented applications refer to the evolution of the Pamir orogen. A calibration of DANSER's parameters with geomorphological data and a DEM as initial topography highlights the advantage of lateral incision. Preserving the channel width and reflecting incision peaks in narrow channels, this closes the huge gap between current orogen-scale incision models and observed topographies. River capturing models in a system of fault-bounded block rotations reaffirm the importance of the lateral incision routine for capturing events with channel initiation. The models show a low probability of river capturing events with large deflection angles. While the probability of river capturing depends directly on the uplift rate, the erodibility inside a dip-slip fault speeds up headward erosion along the fault: the model's capturing speed increases within a fault. Coupling DANSER with the thermo-mechanical code SLIM 3D emphasizes the versatility of the SEC. While DANSER has minor influence on the lithospheric evolution of an indenter model, the brittle surface deformation is strongly affected by its sedimentation, widening a basin between two forming orogens as well as the southern part of the southern orogen to the south, east and west.}, language = {en} } @phdthesis{Kieling2015, author = {Kieling, Katrin}, title = {Quantification of ground motions by broadband simulations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-85989}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 118}, year = {2015}, abstract = {In many procedures of seismic risk mitigation, ground motion simulations are needed to test systems or improve their effectiveness. For example, they may be used to estimate the level of ground shaking caused by future earthquakes. Good physical models for ground motion simulation are also thought to be important for hazard assessment, as they could close gaps in the existing datasets. Since the observed ground motion in nature shows a certain variability, part of which cannot be explained by macroscopic parameters such as magnitude or position of an earthquake, it would be desirable that a good physical model is not only able to produce one single seismogram, but also to reveal this natural variability. In this thesis, I develop a method to model realistic ground motions in a way that is computationally simple to handle, permitting multiple scenario simulations. I focus on two aspects of ground motion modelling. First, I use deterministic wave propagation for the whole frequency range -- from static deformation to approximately 10 Hz -- but account for source variability by implementing self-similar slip distributions and rough fault interfaces. Second, I scale the source spectrum so that the modelled waveforms represent the correct radiated seismic energy.
With this scaling, I verify whether the energy magnitude is suitable as an explanatory variable, which characterises the amount of energy radiated at high frequencies -- the advantage of the energy magnitude being that it can be deduced from observations, even in real time. Applications of the developed method for the 2008 Wenchuan (China) earthquake, the 2003 Tokachi-Oki (Japan) earthquake and the 1994 Northridge (California, USA) earthquake show that the fine source discretisations combined with the small-scale source variability ensure that high frequencies are satisfactorily introduced, justifying the deterministic wave propagation approach even at high frequencies. I demonstrate that the energy magnitude can be used to calibrate the high-frequency content in ground motion simulations. Because deterministic wave propagation is applied to the whole frequency range, the simulation method permits the quantification of the variability in ground motion due to parametric uncertainties in the source description. A large number of scenario simulations for an M=6 earthquake show that the roughness of the source as well as the distribution of fault dislocations have a minor effect on the simulated variability by diminishing directivity effects, while hypocenter location and rupture velocity more strongly influence the variability. The uncertainty in energy magnitude, however, leads to the largest differences in ground motion amplitude between different events, resulting in a variability which is larger than the observed one. For the presented approach, this dissertation shows (i) the verification of the computational correctness of the code, (ii) the ability to reproduce observed ground motions and (iii) the validation of the simulated ground motion variability. Those three steps are essential to evaluate the suitability of the method for seismic risk mitigation.}, language = {en} } @phdthesis{Hutter2014, author = {Hutter, Anne}, title = {Unveiling the epoch of reionization by simulations and high-redshift galaxies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-76998}, school = {Universit{\"a}t Potsdam}, pages = {vi, 155}, year = {2014}, abstract = {The Epoch of Reionization marks the second major change in the ionization state of the universe after recombination, going from a neutral to an ionized state. It starts with the appearance of the first stars and galaxies; a fraction of the high-energy photons emitted from galaxies permeates into the intergalactic medium (IGM) and gradually ionizes the hydrogen, until the IGM is completely ionized at z~6 (Fan et al., 2006). While the progress of reionization is driven by galaxy evolution, it changes the ionization and thermal state of the IGM substantially and affects subsequent structure and galaxy formation by various feedback mechanisms. Understanding this interaction between reionization and galaxy formation is further impeded by limited knowledge of high-redshift galactic properties such as the dust distribution and the escape fraction of ionizing photons. Lyman Alpha Emitters (LAEs) represent a sample of high-redshift galaxies that are sensitive to all these galactic properties and the effects of reionization.
In this thesis we aim to understand the progress of reionization by performing cosmological simulations, which allow us to investigate the limits of constraining reionization by high-redshift galaxies such as LAEs, and to examine how galactic properties and the ionization state of the IGM affect the visibility and observed quantities of LAEs and Lyman Break galaxies (LBGs). In the first part of this thesis we focus on performing radiative transfer calculations to simulate reionization. We have developed a mapping-sphere-scheme, which, starting from spherically averaged temperature and density fields, uses our 1D radiative transfer code and computes the effect of each source on the IGM temperature and ionization (HII, HeII, HeIII) profiles, which are subsequently mapped onto a grid. Furthermore, we have updated the 3D Monte Carlo radiative transfer code pCRASH, enabling detailed reionization simulations which take individual source characteristics into account. In the second part of this thesis we perform a reionization simulation by post-processing a smoothed-particle hydrodynamical (SPH) simulation (GADGET-2) with 3D radiative transfer (pCRASH), where the ionizing sources are modelled according to the characteristics of the stellar populations in the hydrodynamical simulation. Following the ionization fractions of hydrogen (HI) and helium (HeII, HeIII), and temperature in our simulation, we find that reionization starts at z~11 and ends at z~6, and high density regions near sources are ionized earlier than low density regions far from sources. In the third part of this thesis we couple the cosmological SPH simulation and the radiative transfer simulations with a physically motivated, self-consistent model for LAEs, in order to understand the importance of the ionization state of the IGM, the escape fraction of ionizing photons from galaxies and dust in the interstellar medium (ISM) for the visibility of LAEs. Comparison of our model's results with the LAE Lyman Alpha (Lya) and UV luminosity functions at z~6.6 reveals a three-dimensional degeneracy between the ionization state of the IGM, the ionizing photon escape fraction and the ISM dust distribution, which implies that LAEs act not only as tracers of reionization but also of the ionizing photon escape fraction and of the ISM dust distribution. This degeneracy does not even break down when we compare simulated with observed clustering of LAEs at z~6.6. However, our results show that reionization has the largest impact on the amplitude of the LAE angular correlation functions, and its imprints are clearly distinguishable from those of properties on galactic scales. These results show that reionization cannot be constrained tightly by exclusively using LAE observations. Further observational constraints, e.g. tomographies of the redshifted hydrogen 21 cm line, are required. In addition, we also use our LAE model to probe the question of when a galaxy is visible as a LAE or a LBG. Within our model, galaxies above a critical stellar mass can produce enough luminosity to be visible as a LBG and/or a LAE. By finding an increasing duty cycle of LBGs with Lya emission as the UV magnitude or stellar mass of the galaxy rises, our model reveals that the brightest (and most massive) LBGs most often show Lya emission. Predicting the Lya equivalent width (Lya EW) distribution and the fraction of LBGs showing Lya emission at z~6.6, we reproduce the observational trend of the Lya EWs with UV magnitude.
However, the Lya EWs of the UV-brightest LBGs exceed observations and can only be reconciled by accounting for an increased Lya attenuation of massive galaxies, which implies that the observed Lya-brightest LAEs do not necessarily coincide with the UV-brightest galaxies. We have analysed the dependencies of LAE observables on the properties of the galactic and intergalactic medium and the LAE-LBG connection, and this enhances our understanding of the nature of LAEs.}, language = {en} } @phdthesis{Fischer2014, author = {Fischer, Jost Leonhardt}, title = {Nichtlineare Kopplungsmechanismen akustischer Oszillatoren am Beispiel der Synchronisation von Orgelpfeifen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71975}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In dieser Arbeit werden nichtlineare Kopplungsmechanismen von akustischen Oszillatoren untersucht, die zu Synchronisation f{\"u}hren k{\"o}nnen. Aufbauend auf die Fragestellungen vorangegangener Arbeiten werden mit Hilfe theoretischer und experimenteller Studien sowie mit Hilfe numerischer Simulationen die Elemente der Tonentstehung in der Orgelpfeife und die Mechanismen der gegenseitigen Wechselwirkung von Orgelpfeifen identifiziert. Daraus wird erstmalig ein vollst{\"a}ndig auf den aeroakustischen und fluiddynamischen Grundprinzipien basierendes nichtlinear gekoppeltes Modell selbst-erregter Oszillatoren f{\"u}r die Beschreibung des Verhaltens zweier wechselwirkender Orgelpfeifen entwickelt. Die durchgef{\"u}hrten Modellrechnungen werden mit den experimentellen Befunden verglichen. Es zeigt sich, dass die Tonentstehung und die Kopplungsmechanismen von Orgelpfeifen durch das entwickelte Oszillatormodell in weiten Teilen richtig beschrieben werden. Insbesondere kann damit die Ursache f{\"u}r den nichtlinearen Zusammenhang von Kopplungsst{\"a}rke und Synchronisation des gekoppelten Zwei-Pfeifen-Systems, welcher sich in einem nichtlinearen Verlauf der Arnoldzunge darstellt, gekl{\"a}rt werden. Mit den gewonnenen Erkenntnissen wird der Einfluss des Raumes auf die Tonentstehung bei Orgelpfeifen betrachtet. Daf{\"u}r werden numerische Simulationen der Wechselwirkung einer Orgelpfeife mit verschiedenen Raumgeometrien, wie z. B. ebene, konvexe, konkave und gezahnte Geometrien, exemplarisch untersucht. Auch der Einfluss von Schwellk{\"a}sten auf die Tonentstehung und die Klangbildung der Orgelpfeife wird studiert. In weiteren, neuartigen Synchronisationsexperimenten mit identisch gestimmten Orgelpfeifen sowie mit Mixturen wird die Synchronisation f{\"u}r verschiedene horizontale und vertikale Pfeifenabst{\"a}nde in der Ebene der Schallabstrahlung untersucht. Die dabei erstmalig beobachteten r{\"a}umlich isotropen Unstetigkeiten im Schwingungsverhalten der gekoppelten Pfeifensysteme deuten auf abstandsabh{\"a}ngige Wechsel zwischen gegen- und gleichphasigen Synchronisationsregimen hin. Abschließend wird die M{\"o}glichkeit dokumentiert, das Ph{\"a}nomen der Synchronisation zweier Orgelpfeifen durch numerische Simulationen, also die Behandlung der kompressiblen Navier-Stokes-Gleichungen mit entsprechenden Rand- und Anfangsbedingungen, realit{\"a}tsnah abzubilden.
Auch dies stellt ein Novum dar.}, language = {de} } @phdthesis{Pilz2010, author = {Pilz, Marco}, title = {A comparison of proxies for seismic site conditions and amplification for the large urban area of Santiago de Chile}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52961}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Situated in an active tectonic region, Santiago de Chile, the country's capital with more than six million inhabitants, faces tremendous earthquake hazard. Macroseismic data for the 1985 Valparaiso and the 2010 Maule events show large variations in the distribution of damage to buildings within short distances, indicating a strong influence of local sediments and the shape of the sediment-bedrock interface on ground motion. Therefore, a temporary seismic network was installed in the urban area for recording earthquake activity, and a study was carried out aiming to estimate site amplification derived from earthquake data and ambient noise. The analysis of earthquake data shows significant dependence on the local geological structure with regard to amplitude and duration. Moreover, the analysis of noise spectral ratios shows that they can provide a lower bound in amplitude for site amplification and, since no variability in terms of time and amplitude is observed, that it is possible to map the fundamental resonance frequency of the soil for a 26 km x 12 km area in the northern part of the Santiago de Chile basin. By inverting the noise spectral ratios, local shear wave velocity profiles could be derived under the constraint of the thickness of the sedimentary cover, which had previously been determined by gravimetric measurements. The resulting 3D model was derived by interpolation between the single shear wave velocity profiles and shows locally good agreement with the few existing velocity profile data, but allows the entire area, as well as deeper parts of the basin, to be represented in greater detail. The wealth of available data further allowed checking whether any correlation between the shear wave velocity in the uppermost 30 m (vs30) and the slope of topography, a new technique recently proposed by Wald and Allen (2007), exists on a local scale. While one lithology might provide a greater scatter in the velocity values for the investigated area, almost no correlation between topographic gradient and calculated vs30 exists, whereas a better link is found between vs30 and the local geology. When comparing the vs30 distribution with the MSK intensities for the 1985 Valparaiso event, it becomes clear that high intensities are found where the expected vs30 values are low and over a thick sedimentary cover. Although this evidence cannot be generalized for all possible earthquakes, it indicates the influence of site effects modifying the ground motion when earthquakes occur well outside of the Santiago basin. Using the attained knowledge on the basin characteristics, simulations of strong ground motion within the Santiago Metropolitan area were carried out by means of the spectral element technique.
The simulation of a regional event, which has also been recorded by a dense network installed in the city of Santiago for recording aftershock activity following the 27 February 2010 Maule earthquake, shows that the model is capable of realistically calculating ground motion in terms of amplitude, duration, and frequency and, moreover, that the surface topography and the shape of the sediment-bedrock interface strongly modify ground motion in the Santiago basin. An examination of the dependency of ground motion on the hypocenter location for a hypothetical event occurring along the active San Ram{\'o}n fault, which crosses the eastern outskirts of the city, shows that the unfavorable interaction between fault rupture, radiation mechanism, and complex geological conditions in the near-field may give rise to large values of peak ground velocity and therefore considerably increase the level of seismic risk for Santiago de Chile.}, language = {en} } @phdthesis{Uyaver2004, author = {Uyaver, Sahin}, title = {Simulation of annealed polyelectrolytes in poor solvents}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001488}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Polymere sind lange kettenartige Molek{\"u}le. Sie bestehen aus vielen elementaren chemischen Einheiten, den Monomeren, die durch kovalente Bindungen aneinander gekettet sind. Polyelektrolyte sind Polymere, die ionisierbare Monomere enthalten. Aufgrund ihrer speziellen Eigenschaften sind Polyelektrolyte sowohl in der Molekular- und Zellbiologie von großer Bedeutung als auch in der Chemie großtechnisch relevant. Verglichen mit ungeladenen Polymeren sind Polyelektrolyte theoretisch noch wenig verstanden. Insbesondere gilt dies f{\"u}r Polyelektrolyte in sogenanntem schlechten L{\"o}sungsmittel. Ein schlechtes L{\"o}sungsmittel bewirkt eine effektive Anziehung zwischen den Monomeren. F{\"u}r Polyelektrolyte in schlechtem L{\"o}sungsmittel kommt es daher zu einer Konkurrenz zwischen dieser Anziehung und der elektrostatischen Abstoßung. Geladene Polymere werden im Rahmen der chemischen Klassifikation in starke und schwache Polyelektrolyte unterschieden. Erstere zeigen vollst{\"a}ndige Dissoziation unabh{\"a}ngig vom pH-Wert der L{\"o}sung. Die Position der Ladungen auf der Kette wird ausschließlich w{\"a}hrend der Polymersynthese festgelegt. In der Physik spricht man deshalb von Polyelektrolyten mit eingefrorener Ladungsverteilung (quenched polyelectrolytes). Im Falle von schwachen Polyelektrolyten ist die Ladungsdichte auf der Kette nicht konstant, sondern wird durch den pH-Wert der L{\"o}sung kontrolliert. Durch Rekombinations- und Dissoziationsprozesse sind die Ladungen auf der Kette beweglich. Im allgemeinen stellt sich eine inhomogene Gleichgewichtsverteilung ein, die mit der Struktur der Kette gekoppelt ist. Diese Polymere werden deshalb auch Polyelektrolyte mit Gleichgewichtsladungsverteilung (annealed polyelectrolytes) genannt. Wegen des zus{\"a}tzlichen Freiheitsgrades in der Ladungsverteilung wird eine Reihe ungew{\"o}hnlicher Eigenschaften theoretisch vorhergesagt. Mit Hilfe von Simulationen ist es zum ersten Mal gelungen, zu zeigen, daß 'annealed' Polyelektrolyte in relativ schlechtem L{\"o}sungsmittel einen diskontinuierlichen Phasen{\"u}bergang durchlaufen, wenn ein kritischer pH-Wert der L{\"o}sung {\"u}berschritten wird. Bei diesem Phasen{\"u}bergang gehen die Polyelektrolyte von einer schwach geladenen kompakten globul{\"a}ren Struktur zu einer stark geladenen gestreckten Konfiguration {\"u}ber.
Aufgrund theoretischer Vorhersagen wird erwartet, daß die globul{\"a}re Struktur in weniger schlechtem L{\"o}sungsmittel instabil wird und sich eine Perlenkettenkonfiguration ausbildet. Diese Vorhersage konnte f{\"u}r 'annealed' Polyelektrolyte mit den durchgef{\"u}hrten Simulationen zum ersten Mal best{\"a}tigt werden -- inzwischen auch durch erste experimentelle Ergebnisse. Schließlich zeigen die Simulationen auch, daß 'annealed' Polyelektrolyte bei einer kritischen Salzkonzentration in der L{\"o}sung einen scharfen {\"U}bergang zwischen einem stark geladenen gestreckten Zustand und einem schwach geladenen globul{\"a}ren Zustand aufweisen, wiederum in {\"U}bereinstimmung mit theoretischen Erwartungen.}, language = {en} } @phdthesis{Wagner2004, author = {Wagner, Anja}, title = {Konzeption und Aufbau eines Geoinformationssystems zur Modellierung und Simulation von Offenlandschaften}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001411}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Zwischen 1990 und 1994 wurden rund 1000 Liegenschaften, die in der ehemaligen DDR von der Sowjetarmee und der NVA f{\"u}r milit{\"a}rische {\"U}bungen genutzt wurden, an Bund und L{\"a}nder {\"u}bergeben. Die gr{\"o}ßten Truppen{\"u}bungspl{\"a}tze liegen in Brandenburg und sind heute teilweise in Großschutzgebiete integriert, andere Pl{\"a}tze werden von der Bundeswehr weiterhin aktiv genutzt. Aufgrund des milit{\"a}rischen Betriebs sind die B{\"o}den dieser Truppen{\"u}bungspl{\"a}tze oft durch Blindg{\"a}nger, Munitionsreste, Treibstoff- und Schmier{\"o}lreste bis hin zu chemischen Kampfstoffen belastet. Allerdings existieren auf fast allen Liegenschaften neben diesen durch Munition und milit{\"a}rische {\"U}bungen belasteten Bereichen auch naturschutzfachlich wertvolle Fl{\"a}chen; gerade in den Offenlandbereichen kann dies durchaus mit einer Belastung durch Kampfmittel einhergehen. Charakteristisch f{\"u}r diese offenen Fl{\"a}chen, zu denen u.a. Zwergstrauchheiden, Trockenrasen, w{\"u}sten{\"a}hnliche Sandfl{\"a}chen und andere n{\"a}hrstoffarme baumlose Lebensr{\"a}ume geh{\"o}ren, sind Großfl{\"a}chigkeit, Abgeschiedenheit sowie ihre besondere Nutzung und Bewirtschaftung, d.h. die Abwesenheit von land- und forstwirtschaftlichem Betrieb sowie von Siedlungsfl{\"a}chen. Diese Charakteristik war die Grundlage f{\"u}r die Entwicklung einer speziell angepassten Flora und Fauna. Nach Beendigung des Milit{\"a}rbetriebs setzte dann in weiten Teilen eine großfl{\"a}chige Sukzession -- die allm{\"a}hliche Ver{\"a}nderung der Zusammensetzung von Pflanzen- und Tiergesellschaften -- ein, die diese offenen Bereiche teilweise bereits in Wald verwandelte und somit verschwinden ließ. Dies wiederum f{\"u}hrte zum Verlust der an diese Offenlandfl{\"a}chen gebundenen Tier- und Pflanzenarten. Zur Erhaltung, Gestaltung und Entwicklung dieser offenen Fl{\"a}chen wurden daher von einer interdisziplin{\"a}ren Gruppe von Naturwissenschaftlern verschiedene Methoden und Konzepte auf ihre jeweilige Wirksamkeit untersucht. So konnten schließlich die f{\"u}r die jeweiligen Standortbedingungen geeigneten Maßnahmen eingeleitet werden. Voraussetzung f{\"u}r die Einleitung der Maßnahmen sind zum einen Kenntnisse zu diesen jeweiligen Standortbedingungen, d.h. zum Ist-Zustand, sowie zur Entwicklung der Fl{\"a}chen, d.h. zur Dynamik. So kann eine Absch{\"a}tzung {\"u}ber die zuk{\"u}nftige Fl{\"a}chenentwicklung getroffen werden, damit ein effizienter Maßnahmeneinsatz stattfinden kann.
Geoinformationssysteme (GIS) spielen dabei eine entscheidende Rolle bei der digitalen Dokumentation der Biotop- und Nutzungstypen, da sie die M{\"o}glichkeit bieten, raum- und zeitbezogene Geometrie- und Sachdaten in großen Mengen zu verarbeiten. Daher wurde ein fachspezifisches GIS f{\"u}r Truppen{\"u}bungspl{\"a}tze entwickelt und implementiert. Die Aufgaben umfassten die Konzeption der Datenbank und des Objektmodells sowie fachspezifischer Modellierungs-, Analyse- und Pr{\"a}sentationsfunktionen. F{\"u}r die Integration von Fachdaten in die GIS-Datenbank wurde zudem ein Metadatenkatalog entwickelt, der in Form eines zus{\"a}tzlichen GIS-Tools verf{\"u}gbar ist. Die Basisdaten f{\"u}r das GIS wurden aus Fernerkundungsdaten, topographischen Karten sowie Gel{\"a}ndekartierungen gewonnen. Als Instrument f{\"u}r die Absch{\"a}tzung der zuk{\"u}nftigen Entwicklung wurde das Simulationstool AST4D entwickelt, in dem sowohl die Nutzung der (Raster-)Daten des GIS als Ausgangsdaten f{\"u}r die Simulationen als auch die Nutzung der Simulationsergebnisse im GIS m{\"o}glich ist. Zudem k{\"o}nnen die Daten in AST4D raumbezogen visualisiert werden. Das mathematische Konstrukt f{\"u}r das Tool war ein so genannter zellul{\"a}rer Automat, mit dem die Fl{\"a}chenentwicklung unter verschiedenen Voraussetzungen simuliert werden kann. So war die Bildung verschiedener Szenarien m{\"o}glich, d.h. die Simulation der Fl{\"a}chenentwicklung mit verschiedenen (bekannten) Eingangsparametern und den daraus resultierenden unterschiedlichen (unbekannten) Endzust{\"a}nden. Vor der Durchf{\"u}hrung einer der drei in AST4D m{\"o}glichen Simulationsstufen k{\"o}nnen, angepasst an das jeweilige Untersuchungsgebiet, benutzerspezifische Festlegungen getroffen werden.}, language = {de} } @phdthesis{Moreira2001, author = {Moreira, Andr{\'e} Gu{\'e}rin}, title = {Charged systems in bulk and at interfaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000677}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {Eine der Faustregeln der Kolloid- und Oberfl{\"a}chenphysik ist, dass die meisten Oberfl{\"a}chen geladen sind, wenn sie mit einem L{\"o}sungsmittel, normalerweise Wasser, in Kontakt treten. Dies ist zum Beispiel bei ladungsstabilisierten kolloidalen Suspensionen der Fall, bei denen die Oberfl{\"a}che der Kolloidteilchen geladen ist (gew{\"o}hnlich mit einer Ladung von mehreren Hunderttausend Elementarladungen), oder bei Monoschichten ionischer Tenside, die auf einer Luft-Wasser-Grenzfl{\"a}che sitzen (wobei die wasserliebenden Kopfgruppen durch die Freisetzung von Gegenionen geladen werden), sowie bei Doppelschichten, die geladene Phospholipide enthalten (wie Zellmembranen). In dieser Arbeit betrachten wir einige Modellsysteme, die zwar eine vereinfachte Fassung der Realit{\"a}t darstellen, von denen wir aber dennoch erwarten k{\"o}nnen, dass wir mit ihrer Hilfe einige physikalische Eigenschaften realer geladener Systeme (Kolloide und Elektrolyte) einfangen k{\"o}nnen.}, language = {en} }