@phdthesis{Moreira2001, author = {Moreira, Andr{\'e} Gu{\'e}rin}, title = {Charged systems in bulk and at interfaces}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000677}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {Eine der Faustregeln der Kolloid- und Oberfl{\"a}chenphysik ist, dass die meisten Oberfl{\"a}chen geladen sind, wenn sie mit einem L{\"o}sungsmittel, normalerweise Wasser, in Kontakt treten. Dies ist zum Beispiel bei ladungsstabilisierten kolloidalen Suspensionen der Fall, bei denen die Oberfl{\"a}che der Kolloidteilchen geladen ist (gew{\"o}hnlich mit einer Ladung von mehreren Hunderttausend Elementarladungen), oder bei Monoschichten ionischer Tenside, die auf einer Luft-Wasser Grenzfl{\"a}che sitzen (wobei die wasserliebenden Kopfgruppen durch die Freisetzung von Gegenionen geladen werden), sowie bei Doppelschichten, die geladene Phospholipide enthalten (wie Zellmembranen). In dieser Arbeit betrachten wir einige Modellsysteme, die zwar eine vereinfachte Fassung der Realit{\"a}t darstellen, von denen wir aber dennoch erwarten k{\"o}nnen, dass wir mit ihrer Hilfe einige physikalische Eigenschaften realer geladener Systeme (Kolloide und Elektrolyte) einfangen k{\"o}nnen.}, language = {en} } @phdthesis{Wagner2004, author = {Wagner, Anja}, title = {Konzeption und Aufbau eines Geoinformationssystems zur Modellierung und Simulation von Offenlandschaften}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001411}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Zwischen 1990 und 1994 wurden rund 1000 Liegenschaften, die in der ehemaligen DDR von der Sowjetarmee und der NVA f{\"u}r milit{\"a}rische {\"U}bungen genutzt wurden, an Bund und L{\"a}nder {\"u}bergeben. Die gr{\"o}ßten Truppen{\"u}bungspl{\"a}tze liegen in Brandenburg und sind heute teilweise in Großschutzgebiete integriert, andere Pl{\"a}tze werden von der Bundeswehr weiterhin aktiv genutzt. Aufgrund des milit{\"a}rischen Betriebs sind die B{\"o}den dieser Truppen{\"u}bungspl{\"a}tze oft durch Blindg{\"a}nger, Munitionsreste, Treibstoff- und Schmier{\"o}lreste bis hin zu chemischen Kampfstoffen belastet. Allerdings existieren auf fast allen Liegenschaften neben diesen durch Munition und milit{\"a}rische {\"U}bungen belasteten Bereichen auch naturschutzfachlich wertvolle Fl{\"a}chen; gerade in den Offenlandbereichen kann dies durchaus mit einer Belastung durch Kampfmittel einhergehen. Charakteristisch f{\"u}r diese offenen Fl{\"a}chen, zu denen u.a. Zwergstrauchheiden, Trockenrasen, w{\"u}sten{\"a}hnliche Sandfl{\"a}chen und andere n{\"a}hrstoffarme baumlose Lebensr{\"a}ume geh{\"o}ren, sind Großfl{\"a}chigkeit, Abgeschiedenheit sowie ihre besondere Nutzung und Bewirtschaftung, d.h. die Abwesenheit von land- und forstwirtschaftlichem Betrieb sowie von Siedlungsfl{\"a}chen. Diese Charakteristik war die Grundlage f{\"u}r die Entwicklung einer speziell angepassten Flora und Fauna. Nach Beendigung des Milit{\"a}rbetriebs setzte dann in weiten Teilen eine großfl{\"a}chige Sukzession -- die allm{\"a}hliche Ver{\"a}nderung der Zusammensetzung von Pflanzen- und Tiergesellschaften -- ein, die diese offenen Bereiche teilweise bereits in Wald verwandelte und somit verschwinden ließ. 
Zur Erhaltung, Gestaltung und Entwicklung dieser offenen Fl{\"a}chen wurden daher von einer interdisziplin{\"a}ren Gruppe von Naturwissenschaftlern verschiedene Methoden und Konzepte auf ihre jeweilige Wirksamkeit untersucht. So konnten schließlich die f{\"u}r die jeweiligen Standortbedingungen geeigneten Maßnahmen eingeleitet werden. Voraussetzung f{\"u}r die Einleitung der Maßnahmen sind zum einen Kenntnisse zu diesen jeweiligen Standortbedingungen, d.h. zum Ist-Zustand, sowie zur Entwicklung der Fl{\"a}chen, d.h. zur Dynamik. So kann eine Absch{\"a}tzung {\"u}ber die zuk{\"u}nftige Fl{\"a}chenentwicklung getroffen werden, damit ein effizienter Maßnahmeneinsatz stattfinden kann. Geoinformationssysteme (GIS) spielen dabei eine entscheidende Rolle zur digitalen Dokumentation der Biotop- und Nutzungstypen, da sie die M{\"o}glichkeit bieten, raum- und zeitbezogene Geometrie- und Sachdaten in großen Mengen zu verarbeiten. Daher wurde ein fachspezifisches GIS f{\"u}r Truppen{\"u}bungspl{\"a}tze entwickelt und implementiert. Die Aufgaben umfassten die Konzeption der Datenbank und des Objektmodells sowie fachspezifischer Modellierungs-, Analyse- und Pr{\"a}sentationsfunktionen. F{\"u}r die Integration von Fachdaten in die GIS-Datenbank wurde zudem ein Metadatenkatalog entwickelt, der in Form eines zus{\"a}tzlichen GIS-Tools verf{\"u}gbar ist. Die Basisdaten f{\"u}r das GIS wurden aus Fernerkundungsdaten, topographischen Karten sowie Gel{\"a}ndekartierungen gewonnen. Als Instrument f{\"u}r die Absch{\"a}tzung der zuk{\"u}nftigen Entwicklung wurde das Simulationstool AST4D entwickelt, in dem sowohl die Nutzung der (Raster-)Daten des GIS als Ausgangsdaten f{\"u}r die Simulationen als auch die Nutzung der Simulationsergebnisse im GIS m{\"o}glich ist. Zudem k{\"o}nnen die Daten in AST4D raumbezogen visualisiert werden. Das mathematische Konstrukt f{\"u}r das Tool war ein so genannter Zellul{\"a}rer Automat, mit dem die Fl{\"a}chenentwicklung unter verschiedenen Voraussetzungen simuliert werden kann. So war die Bildung verschiedener Szenarien m{\"o}glich, d.h. die Simulation der Fl{\"a}chenentwicklung mit verschiedenen (bekannten) Eingangsparametern und den daraus resultierenden unterschiedlichen (unbekannten) Endzust{\"a}nden. Vor der Durchf{\"u}hrung einer der drei in AST4D m{\"o}glichen Simulationsstufen k{\"o}nnen angepasst an das jeweilige Untersuchungsgebiet benutzerspezifische Festlegungen getroffen werden.}, language = {de} } @phdthesis{Uyaver2004, author = {Uyaver, Sahin}, title = {Simulation of annealed polyelectrolytes in poor solvents}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001488}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Polymere sind lange kettenartige Molek{\"u}le. Sie bestehen aus vielen elementaren chemischen Einheiten, den Monomeren, die durch kovalente Bindungen aneinander gekettet sind. Polyelektrolyte sind Polymere, die ionisierbare Monomeren enthalten. Aufgrund ihrer speziellen Eigenschaften sind Polyelektrolyte sowohl in der Molekular- und Zellbiologie von großen Bedeutung als auch in der Chemie großtechnisch relevant. Verglichen mit ungeladenen Polymeren sind Polyelektrolyte theoretisch noch wenig verstanden. Insbesondere gilt dies f{\"u}r Polyelektrolyte in sogenanntem schlechten L{\"o}sungsmittel. Ein schlechtes L{\"o}sungsmittel bewirkt eine effektive Anziehung zwischen den Monomeren. F{\"u}r Polyelektrolyte in schlechtem L{\"o}sungsmittel kommt es daher zu einer Konkurrenz zwischen dieser Anziehung und der elektrostatischen Abstoßung. 
Geladene Polymere werden im Rahmen der chemischen Klassifikation in starke und schwache Polyelektrolyte unterschieden. Erstere zeigen vollst{\"a}ndige Dissoziation unabh{\"a}ngig vom pH-Wert der L{\"o}sung. Die Position der Ladungen auf der Kette wird ausschließlich w{\"a}hrend der Polymersynthese festgelegt. In der Physik spricht man deshalb von Polyelektrolyten mit eingefrorener Ladungsverteilung (quenched polyelectrolytes). Im Falle von schwachen Polyelektrolyten ist die Ladungsdichte auf der Kette nicht konstant, sondern wird durch den pH-Wert der L{\"o}sung kontrolliert. Durch Rekombinations- und Dissoziationsprozesse sind die Ladungen auf der Kette beweglich. Im allgemeinen stellt sich eine inhomogene Gleichgewichtsverteilung ein, die mit der Struktur der Kette gekoppelt ist. Diese Polymere werden deshalb auch Polyelektrolyte mit Gleichgewichtsladungsverteilung (annealed polyelectrolytes) genannt. Wegen des zus{\"a}tzlichen Freiheitsgrades in der Ladungsverteilung werden eine Reihe ungew{\"o}hnlicher Eigenschaften theoretisch vorhergesagt. Mit Hilfe von Simulationen ist es zum ersten Mal gelungen, zu zeigen, daß 'annealed' Polyelektrolyte in relativ schlechtem L{\"o}sungsmittel einen diskontinuierlichen Phasen{\"u}bergang durchlaufen, wenn ein kritischer pH-Wert der L{\"o}sung {\"u}berschritten wird. Bei diesem Phasen{\"u}bergang gehen die Polyelektrolyte von einer schwach geladenen kompakten globul{\"a}ren Struktur zu einer stark geladenen gestreckten Konfiguration {\"u}ber. Aufgrund theoretischer Vorhersagen wird erwartet, daß die globul{\"a}re Struktur in weniger schlechtem L{\"o}sungsmittel instabil wird und sich eine Perlenkettenkonfiguration ausbildet. Diese Vorhersage konnte f{\"u}r 'annealed' Polyelektrolyte mit den durchgef{\"u}hrten Simulationen zum ersten Mal best{\"a}tigt werden - inzwischen auch durch erste experimentelle Ergebnisse. Schließlich zeigen die Simulationen auch, daß annealed Polyelektrolyte bei einer kritischen Salzkonzentration in der L{\"o}sung einen scharfen {\"U}bergang zwischen einem stark geladenen gestreckten Zustand und einem schwach geladenen globul{\"a}ren Zustand aufweisen, wiederum in {\"U}bereinstimmung mit theoretischen Erwartungen.}, language = {en} } @phdthesis{Linke2005, author = {Linke, Gunnar Torsten}, title = {Eigenschaften fluider Vesikeln bei endlichen Temperaturen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5835}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {In der vorliegenden Arbeit werden die Eigenschaften geschlossener fluider Membranen, sogenannter Vesikeln, bei endlichen Temperaturen untersucht. Dies beinhaltet Betrachtungen zur Form freier Vesikeln, eine Untersuchung des Adh{\"a}sionsverhaltens von Vesikeln an planaren Substraten sowie eine Untersuchung der Eigenschaften fluider Vesikeln in eingeschr{\"a}nkten Geometrien. Diese Untersuchungen fanden mit Hilfe von Monte-Carlo-Simulationen einer triangulierten Vesikeloberfl{\"a}che statt. Die statistischen Eigenschaften der fluktuierenden fluiden Vesikeln wurden zum Teil mittels Freier-Energie-Profile analysiert. In diesem Zusammenhang wurde eine neuartige Histogrammethode entwickelt.
Die Form f{\"u}r eine freie fluide Vesikel mit frei ver{\"a}nderlichem Volumen, die das Konfigurationsenergie-Funktional minimiert, ist im Falle verschwindender Temperatur eine Kugel. Mit Hilfe von Monte-Carlo-Simulationen sowie einem analytisch behandelbaren Modellsystem konnte gezeigt werden, daß sich dieses Ergebnis nicht auf endliche Temperaturen verallgemeinern l{\"a}sst und statt dessen leicht prolate und oblate Vesikelformen gegen{\"u}ber der Kugelgestalt {\"u}berwiegen. Dabei ist die Wahrscheinlichkeit f{\"u}r eine prolate Form ein wenig gr{\"o}oßer als f{\"u}r eine oblate. Diese spontane Asph{\"a}rizit{\"a}t ist entropischen Ursprungs und tritt nicht bei zweidimensionalen Vesikeln auf. Durch osmotische Dr{\"u}cke in der Vesikel, die gr{\"o}ßer sind als in der umgebenden Fl{\"u}ssigkeit, l{\"a}sst sich die Asph{\"a}rizit{\"a}t reduzieren oder sogar kompensieren. Die {\"U}berg{\"a}nge zwischen den beobachteten prolaten und oblaten Formen erfolgen im Bereich von Millisekunden in Abwesenheit osmotisch aktiver Partikel. Bei Vorhandensein derartiger Partikel ergeben sich {\"U}bergangszeiten im Bereich von Sekunden. Im Rahmen der Untersuchung des Adh{\"a}sionsverhaltens fluider Vesikeln an planaren, homogenen Substraten konnte mit Hilfe von Monte-Carlo-Simulationen festgestellt werden, dass die Eigenschaften der Kontaktfl{\"a}che der Vesikeln stark davon abh{\"a}ngen, welche Kr{\"a}fte den Kontakt bewirken. F{\"u}r eine dominierende attraktive Wechselwirkung zwischen Substrat und Vesikelmembran sowie im Falle eines Massendichteunterschieds der Fl{\"u}ssigkeiten innerhalb und außerhalb der Vesikel, der die Vesikel auf das Substrat sinken l{\"a}sst, fndet man innerhalb der Kontakt � ache eine ortsunabh� angige Verteilung des Abstands zwischen Vesikelmembran und Substrat. Dr{\"u}ckt die Vesikel ohne Ber{\"u}cksichtigung osmotischer Effekte auf Grund einer Differenz der Massendichten der Membran und der umgebenden Fl{\"u}ssigkeit gegen das Substrat, so erh{\"a}lt man eine Abstandsverteilung zwischen Vesikelmembran und Substrat, die mit dem Abstand vom Rand der Kontaktfl{\"a}che variiert. Dieser Effekt ist zudem temperaturabh{\"a}ngig. Ferner wurde die Adh{\"a}sion fluider Vesikeln an chemisch strukturierten planaren Substraten untersucht. Durch das Wechselspiel von entropischen Effekten und Konfigurationsenergien entsteht eine komplexe Abh{\"a}ngigkeit der Vesikelform von Biegesteifigkeit, osmotischen Bedingungen und der Geometrie der attraktiven Dom{\"a}nen. F{\"u}r die Bestimmung der Biegesteifigkeit der Vesikelmembranen liefern die existierenden Verfahren stark voneinander abweichende Ergebnisse. In der vorliegenden Arbeit konnte mittels Monte-Carlo-Simulationen zur Bestimmung der Biegesteifigkeit anhand des Mikropipettenverfahrens von Evans gezeigt werden, dass dieses Verfahren die a priori f{\"u}r die Simulation vorgegebene Biegesteifigkeit im wesentlichen reproduzieren kann. Im Hinblick auf medizinisch-pharmazeutische Anwendungen ist der Durchgang fluider Vesikeln durch enge Poren relevant. In Monte-Carlo-Simulationen konnte gezeigt werden, dass ein spontaner Transport der Vesikel durch ein Konzentrationsgef{\"a}lle osmotisch aktiver Substanzen, das den physiologischen Bedingungen entspricht, induziert werden kann. Es konnten die hierf{\"u}r notwendigen osmotischen Bedingungen sowie die charakteristischen Zeitskalen abgesch{\"a}tzt werden. Im realen Experiment sind Eindringzeiten in eine enge Pore im Bereich weniger Minuten zu erwarten. 
Ferner konnte beobachtet werden, dass bei Vesikeln mit einer homogenen, positiven spontanen Kr{\"u}mmung Deformationen hin zu prolaten Formen leichter erfolgen als bei Vesikeln ohne spontane Kr{\"u}mmung. Mit diesem Effekt ist eine Verringerung der Energiebarriere f{\"u}r das Eindringen in eine Pore verbunden, deren Radius nur wenig kleiner als der Vesikelradius ist.}, subject = {Membran}, language = {de} } @phdthesis{HernandezGarcia2008, author = {Hernandez Garcia, Hugo Fernando}, title = {Multiscale simulation of heterophase polymerization : application to the synthesis of multicomponent colloidal polymer particles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-25036}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Heterophase polymerization is a technique widely used for the synthesis of high performance polymeric materials with applications including paints, inks, adhesives, synthetic rubber, biomedical applications and many others. Due to the heterogeneous nature of the process, many different relevant length and time scales can be identified. Each of these scales has a direct influence on the kinetics of polymerization and on the physicochemical and performance properties of the final product. Therefore, from the point of view of product and process design and optimization, the understanding of each of these relevant scales and their integration into one single model is a very promising route for reducing the time-to-market in the development of new products, for increasing the productivity and profitability of existing processes, and for designing products with improved performance or cost/performance ratio. The process considered is the synthesis of structured or composite polymer particles by multi-stage seeded emulsion polymerization. This type of process is used for the preparation of high performance materials where a synergistic behavior of two or more different types of polymers is obtained. Some examples include the synthesis of core-shell or multilayered particles for improved impact strength materials and for high resistance coatings and adhesives. The kinetics of the most relevant events taking place in an emulsion polymerization process has been investigated using suitable numerical simulation techniques at their corresponding time and length scales. These methods, which include Molecular Dynamics (MD) simulation, Brownian Dynamics (BD) simulation and kinetic Monte Carlo (kMC) simulation, have been found to be very powerful and highly useful for gaining a deeper insight and achieving a better understanding and a more accurate description of all phenomena involved in emulsion polymerization processes, and can be potentially extended to investigate any type of heterogeneous process. The novel approach of using these kinetic-based numerical simulation methods can be regarded as a complement to the traditional thermodynamic-based macroscopic description of emulsion polymerization. The particular events investigated include molecular diffusion, diffusion-controlled polymerization reactions, particle formation, absorption/desorption of radicals and monomer, and the colloidal aggregation of polymer particles. Using BD simulation it was possible to precisely determine the kinetics of absorption/desorption of molecular species by polymer particles, and to simulate the colloidal aggregation of polymer particles. For diluted systems, a very good agreement between BD simulation and the classical theory developed by Smoluchowski was obtained. 
However, for concentrated systems, significant deviations from the ideal behavior predicted by Smoluchowski were evidenced. BD simulation was found to be a very valuable tool for the investigation of emulsion polymerization processes, especially when the spatial and geometrical complexity of the system cannot be neglected, as is the case for concentrated dispersions, non-spherical particles, structured polymer particles, particles with non-uniform monomer concentration, and so on. In addition, BD simulation was used to describe non-equilibrium monomer swelling kinetics, which is not possible using the traditional thermodynamic approach because it is only valid for systems at equilibrium. The description of diffusion-controlled polymerization reactions was successfully achieved using a new stochastic algorithm for the kMC simulation of imperfectly mixed systems (SSA-IM). In contrast to the traditional stochastic simulation algorithm (SSA) and the deterministic rate of reaction equations, instead of assuming perfect mixing in the whole reactor, the new SSA-IM determines the volume perfectly mixed between two consecutive reactions as a function of the diffusion coefficient of the reacting species. Using this approach it was possible to describe, using a single set of kinetic parameters, typical mass transfer limitation effects during a free radical batch polymerization such as the cage effect, the gel effect and the glass effect. Using multiscale integration it was possible to investigate the formation of secondary particles during the seeded emulsion polymerization of vinyl acetate over a polystyrene seed. Three different cases of radical generation were considered: generation of radicals by thermal decomposition of water-soluble initiating compounds, generation of radicals by a redox reaction at the surface of the particles, and generation of radicals by thermal decomposition of surface-active initiators "inisurfs" attached to the surface of the particles. The simulation results demonstrated the satisfactory reduction in secondary particle formation achieved when the locus of radical generation is controlled close to the particle surface.}, language = {en} } @phdthesis{Pilz2010, author = {Pilz, Marco}, title = {A comparison of proxies for seismic site conditions and amplification for the large urban area of Santiago de Chile}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52961}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Situated in an active tectonic region, Santiago de Chile, the country's capital with more than six million inhabitants, faces tremendous earthquake hazard. Macroseismic data for the 1985 Valparaiso and the 2010 Maule events show large variations in the distribution of damage to buildings within short distances, indicating strong influence of local sediments and the shape of the sediment-bedrock interface on ground motion. Therefore, a temporary seismic network was installed in the urban area for recording earthquake activity, and a study was carried out aiming to estimate site amplification derived from earthquake data and ambient noise. The analysis of earthquake data shows significant dependence on the local geological structure with regards to amplitude and duration. 
Moreover, the analysis of noise spectral ratios shows that they can provide a lower bound in amplitude for site amplification and, since no variability in terms of time and amplitude is observed, that it is possible to map the fundamental resonance frequency of the soil for a 26 km x 12 km area in the northern part of the Santiago de Chile basin. By inverting the noise spectral ratios, local shear wave velocity profiles could be derived under the constraint of the thickness of the sedimentary cover, which had previously been determined by gravimetric measurements. The resulting 3D model was derived by interpolation between the individual shear wave velocity profiles and shows locally good agreement with the few existing velocity profile data, but allows the entire area, as well as deeper parts of the basin, to be represented in greater detail. The wealth of available data further allowed checking whether any correlation between the shear wave velocity in the uppermost 30 m (vs30) and the slope of topography, a technique recently proposed by Wald and Allen (2007), exists on a local scale. While one lithology might provide a greater scatter in the velocity values for the investigated area, almost no correlation between topographic gradient and calculated vs30 exists, whereas a better link is found between vs30 and the local geology. When comparing the vs30 distribution with the MSK intensities for the 1985 Valparaiso event it becomes clear that high intensities are found where the expected vs30 values are low and over a thick sedimentary cover. Although this evidence cannot be generalized for all possible earthquakes, it indicates the influence of site effects modifying the ground motion when earthquakes occur well outside of the Santiago basin. Using the attained knowledge on the basin characteristics, simulations of strong ground motion within the Santiago Metropolitan area were carried out by means of the spectral element technique. The simulation of a regional event, which has also been recorded by a dense network installed in the city of Santiago for recording aftershock activity following the 27 February 2010 Maule earthquake, shows that the model is capable of realistically calculating ground motion in terms of amplitude, duration, and frequency and, moreover, that the surface topography and the shape of the sediment bedrock interface strongly modify ground motion in the Santiago basin. An examination of the dependency of ground motion on the hypocenter location for a hypothetical event occurring along the active San Ram{\'o}n fault, which crosses the eastern outskirts of the city, shows that the unfavorable interaction between fault rupture, radiation mechanism, and complex geological conditions in the near-field may give rise to large values of peak ground velocity and therefore considerably increase the level of seismic risk for Santiago de Chile.}, language = {en} } @phdthesis{Fischer2014, author = {Fischer, Jost Leonhardt}, title = {Nichtlineare Kopplungsmechanismen akustischer Oszillatoren am Beispiel der Synchronisation von Orgelpfeifen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71975}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In dieser Arbeit werden nichtlineare Kopplungsmechanismen von akustischen Oszillatoren untersucht, die zu Synchronisation f{\"u}hren k{\"o}nnen. 
Aufbauend auf den Fragestellungen vorangegangener Arbeiten werden mit Hilfe theoretischer und experimenteller Studien sowie mit Hilfe numerischer Simulationen die Elemente der Tonentstehung in der Orgelpfeife und die Mechanismen der gegenseitigen Wechselwirkung von Orgelpfeifen identifiziert. Daraus wird erstmalig ein vollst{\"a}ndig auf den aeroakustischen und fluiddynamischen Grundprinzipien basierendes nichtlinear gekoppeltes Modell selbst-erregter Oszillatoren f{\"u}r die Beschreibung des Verhaltens zweier wechselwirkender Orgelpfeifen entwickelt. Die durchgef{\"u}hrten Modellrechnungen werden mit den experimentellen Befunden verglichen. Es zeigt sich, dass die Tonentstehung und die Kopplungsmechanismen von Orgelpfeifen durch das entwickelte Oszillatormodell in weiten Teilen richtig beschrieben werden. Insbesondere kann damit die Ursache f{\"u}r den nichtlinearen Zusammenhang von Kopplungsst{\"a}rke und Synchronisation des gekoppelten Zwei-Pfeifen Systems, welcher sich in einem nichtlinearen Verlauf der Arnoldzunge darstellt, gekl{\"a}rt werden. Mit den gewonnenen Erkenntnissen wird der Einfluss des Raumes auf die Tonentstehung bei Orgelpfeifen betrachtet. Daf{\"u}r werden numerische Simulationen der Wechselwirkung einer Orgelpfeife mit verschiedenen Raumgeometrien, wie z. B. ebene, konvexe, konkave und gezahnte Geometrien, exemplarisch untersucht. Auch der Einfluss von Schwellk{\"a}sten auf die Tonentstehung und die Klangbildung der Orgelpfeife wird studiert. In weiteren, neuartigen Synchronisationsexperimenten mit identisch gestimmten Orgelpfeifen sowie mit Mixturen wird die Synchronisation f{\"u}r verschiedene horizontale und vertikale Pfeifenabst{\"a}nde in der Ebene der Schallabstrahlung untersucht. Die dabei erstmalig beobachteten r{\"a}umlich isotropen Unstetigkeiten im Schwingungsverhalten der gekoppelten Pfeifensysteme deuten auf abstandsabh{\"a}ngige Wechsel zwischen gegen- und gleichphasigen Synchronisationsregimen hin. Abschließend wird die M{\"o}glichkeit dokumentiert, das Ph{\"a}nomen der Synchronisation zweier Orgelpfeifen durch numerische Simulationen, also der Behandlung der kompressiblen Navier-Stokes Gleichungen mit entsprechenden Rand- und Anfangsbedingungen, realit{\"a}tsnah abzubilden. Auch dies stellt ein Novum dar.}, language = {de} } @phdthesis{Schroeder2015, author = {Schr{\"o}der, Sarah}, title = {Modelling surface evolution coupled with tectonics}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90385}, school = {Universit{\"a}t Potsdam}, pages = {viii, 129}, year = {2015}, abstract = {This study presents the development of 1D and 2D Surface Evolution Codes (SECs) and their coupling to any lithospheric-scale (thermo-)mechanical code with a quadrilateral structured surface mesh. Both SECs involve diffusion as an approach for hillslope processes and the stream power law to reflect riverbed incision. The 1D SEC settles sediment that was produced by fluvial incision in the appropriate minimum, while the supply-limited 2D SEC DANSER uses a fast filling algorithm to model sedimentation. It is based on a cellular automaton. A slope-dependent factor in the sediment flux extends the diffusion equation to nonlinear diffusion. The discharge accumulation is achieved with the D8-algorithm and an improved drainage accumulation routine. Lateral incision enhances the modelling of incision. Following empirical laws, it incises channels several cells wide. 
The coupling method enables different temporal and spatial resolutions of the SEC and the thermo-mechanical code. It transfers vertical as well as horizontal displacements to the surface model. A weighted smoothing of the 3D surface displacements is implemented. The smoothed displacement vectors transmit the deformation by bilinear interpolation to the surface model. These interpolation methods ensure mass conservation in both directions and prevent the two surfaces from drifting apart. The presented applications refer to the evolution of the Pamir orogen. A calibration of DANSER's parameters with geomorphological data and a DEM as initial topography highlights the advantage of lateral incision. Preserving the channel width and reflecting incision peaks in narrow channels, this closes the huge gap between current orogen-scale incision models and observed topographies. River capturing models in a system of fault-bounded block rotations reaffirm the importance of the lateral incision routine for capturing events with channel initiation. The models show a low probability of river capturings with large deflection angles. While the probability of river capturing is directly depending on the uplift rate, the erodibility inside of a dip-slip fault speeds up headward erosion along the fault: The model's capturing speed increases within a fault. Coupling DANSER with the thermo-mechanical code SLIM 3D emphasizes the versatility of the SEC. While DANSER has minor influence on the lithospheric evolution of an indenter model, the brittle surface deformation is strongly affected by its sedimentation, widening a basin in between two forming orogens and also the southern part of the southern orogen to south, east and west.}, language = {en} } @phdthesis{Kieling2015, author = {Kieling, Katrin}, title = {Quantification of ground motions by broadband simulations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-85989}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 118}, year = {2015}, abstract = {In many procedures of seismic risk mitigation, ground motion simulations are needed to test systems or improve their effectiveness. For example they may be used to estimate the level of ground shaking caused by future earthquakes. Good physical models for ground motion simulation are also thought to be important for hazard assessment, as they could close gaps in the existing datasets. Since the observed ground motion in nature shows a certain variability, part of which cannot be explained by macroscopic parameters such as magnitude or position of an earthquake, it would be desirable that a good physical model is not only able to produce one single seismogram, but also to reveal this natural variability. In this thesis, I develop a method to model realistic ground motions in a way that is computationally simple to handle, permitting multiple scenario simulations. I focus on two aspects of ground motion modelling. First, I use deterministic wave propagation for the whole frequency range - from static deformation to approximately 10 Hz - but account for source variability by implementing self-similar slip distributions and rough fault interfaces. Second, I scale the source spectrum so that the modelled waveforms represent the correct radiated seismic energy. 
With this scaling I verify whether the energy magnitude is suitable as an explanatory variable, which characterises the amount of energy radiated at high frequencies - the advantage of the energy magnitude being that it can be deduced from observations, even in real-time. Applications of the developed method for the 2008 Wenchuan (China) earthquake, the 2003 Tokachi-Oki (Japan) earthquake and the 1994 Northridge (California, USA) earthquake show that the fine source discretisations combined with the small scale source variability ensure that high frequencies are satisfactorily introduced, justifying the deterministic wave propagation approach even at high frequencies. I demonstrate that the energy magnitude can be used to calibrate the high-frequency content in ground motion simulations. Because deterministic wave propagation is applied to the whole frequency range, the simulation method permits the quantification of the variability in ground motion due to parametric uncertainties in the source description. A large number of scenario simulations for an M=6 earthquake show that the roughness of the source as well as the distribution of fault dislocations have a minor effect on the simulated variability by diminishing directivity effects, while hypocenter location and rupture velocity more strongly influence the variability. The uncertainty in energy magnitude, however, leads to the largest differences of ground motion amplitude between different events, resulting in a variability which is larger than the one observed. For the presented approach, this dissertation shows (i) the verification of the computational correctness of the code, (ii) the ability to reproduce observed ground motions and (iii) the validation of the simulated ground motion variability. Those three steps are essential to evaluate the suitability of the method for means of seismic risk mitigation.}, language = {en} } @article{KuehnAltmannsbergerHens2016, author = {K{\"u}hn, Michael and Altmannsberger, Charlotte and Hens, Carmen}, title = {Waiweras Warmwasserreservoir -- Welche Aussagekraft haben Modelle?}, series = {Grundwasser : Zeitschrift der Fachsektion Hydrogeologie in der Deutschen Gesellschaft f{\"u}r Geowissenschaften (FH-DGG)}, volume = {21}, journal = {Grundwasser : Zeitschrift der Fachsektion Hydrogeologie in der Deutschen Gesellschaft f{\"u}r Geowissenschaften (FH-DGG)}, publisher = {Springer}, address = {Heidelberg}, issn = {1430-483X}, doi = {10.1007/s00767-016-0323-2}, pages = {107 -- 117}, year = {2016}, abstract = {The warm water geothermal reservoir below the village of Waiwera in New Zealand has been known by the native Maori for centuries. Development by the European immigrants began in 1863. Until the year 1969, the warm water flowing from all drilled wells was artesian. Due to overproduction, water up to 50 °C now needs to be pumped to the surface. Further, between 1975 and 1976, all warm water seeps on the beach of Waiwera ran dry. Within the context of sustainable water management, hydrogeological models must be developed as part of a management plan. Approaches of varying complexity have been set up and applied since the 1980s. However, none of the models directly provide all results required for optimal water management. 
Answers are given only to parts of the questions, nonetheless improving resource management of the geothermal reservoir.}, language = {de} } @book{DyckGieseLambers2017, author = {Dyck, Johannes and Giese, Holger and Lambers, Leen}, title = {Automatic verification of behavior preservation at the transformation level for relational model transformation}, number = {112}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-391-6}, issn = {1613-5652}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100279}, pages = {viii, 112}, year = {2017}, abstract = {The correctness of model transformations is a crucial element for model-driven engineering of high-quality software. In particular, behavior preservation is the most important correctness property avoiding the introduction of semantic errors during the model-driven engineering process. Behavior preservation verification techniques either show that specific properties are preserved, or, in a more general and complex way, they show some kind of behavioral equivalence or refinement between source and target model of the transformation. Both kinds of behavior preservation verification goals have been presented with automatic tool support for the instance level, i.e. for a given source and target model specified by the model transformation. However, up until now there is no automatic verification approach available at the transformation level, i.e. for all source and target models specified by the model transformation. In this report, we extend our results presented in [27] and outline a new sophisticated approach for the automatic verification of behavior preservation captured by bisimulation resp. simulation for model transformations specified by triple graph grammars and semantic definitions given by graph transformation rules. In particular, we show that the behavior preservation problem can be reduced to invariant checking for graph transformation and that the resulting checking problem can be addressed by our own invariant checker even for a complex example where a sequence chart is transformed into communicating automata. We further discuss today's limitations of invariant checking for graph transformation and motivate further lines of future work in this direction.}, language = {en} } @article{GrumBenderAlfaetal.2018, author = {Grum, Marcus and Bender, Benedict and Alfa, A. S. and Gronau, Norbert}, title = {A decision maxim for efficient task realization within analytical network infrastructures}, series = {Decision support systems : DSS ; the international journal}, volume = {112}, journal = {Decision support systems : DSS ; the international journal}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0167-9236}, doi = {10.1016/j.dss.2018.06.005}, pages = {48 -- 59}, year = {2018}, abstract = {Faced with the increasing needs of companies, optimal dimensioning of IT hardware is becoming challenging for decision makers. In terms of analytical infrastructures, a highly evolutionary environment causes volatile, time-dependent workloads in its components, and intelligent, flexible task distribution between local systems and cloud services is attractive. With the aim of developing a flexible and efficient design for analytical infrastructures, this paper proposes a flexible architecture model, which allocates tasks following a machine-specific decision heuristic. 
A simulation benchmarks this system with existing strategies and identifies the new decision maxim as superior in a first scenario-based simulation.}, language = {en} } @article{LohmannGuoTietjen2018, author = {Lohmann, Dirk and Guo, Tong and Tietjen, Britta}, title = {Zooming in on coarse plant functional types-simulated response of savanna vegetation composition in response to aridity and grazing}, series = {Theoretical ecology}, volume = {11}, journal = {Theoretical ecology}, number = {2}, publisher = {Springer}, address = {Heidelberg}, issn = {1874-1738}, doi = {10.1007/s12080-017-0356-x}, pages = {161 -- 173}, year = {2018}, abstract = {Precipitation and land use in terms of livestock grazing have been identified as two of the most important drivers structuring the vegetation composition of semi-arid and arid savannas. Savanna research on the impact of these drivers has widely applied the so-called plant functional type (PFT) approach, grouping the vegetation into two or three broad types (here called meta-PFTs): woody plants and grasses, which are sometimes divided into perennial and annual grasses. However, little is known about the response of functional traits within these coarse types towards water availability or livestock grazing. In this study, we extended an existing eco-hydrological savanna vegetation model to capture trait diversity within the three broad meta-PFTs to assess the effects of both grazing and mean annual precipitation (MAP) on trait composition along a gradient of both drivers. Our results show a complex pattern of trait responses to grazing and aridity. The response differs for the three meta-PFTs. From our findings, we derive that trait responses to grazing and aridity for perennial grasses are similar, as suggested by the convergence model for grazing and aridity. However, we also see that this only holds for simulations below a MAP of 500 mm. This combined with the finding that trait response differs between the three meta-PFTs leads to the conclusion that there is no single, universal trait or set of traits determining the response to grazing and aridity. We finally discuss how simulation models including trait variability within meta-PFTs are necessary to understand ecosystem responses to environmental drivers, both locally and globally and how this perspective will help to extend conceptual frameworks of other ecosystems to savanna research.}, language = {en} } @phdthesis{Michalczyk2019, author = {Michalczyk, Anna}, title = {Modelling of nitrogen cycles in intensive winter wheat-summer maize double cropping systems in the North China Plain}, doi = {10.25932/publishup-44421}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-444213}, school = {Universit{\"a}t Potsdam}, pages = {X, 154}, year = {2019}, abstract = {The North China Plain (NCP) is one of the most productive and intensive agricultural regions in China. High doses of mineral nitrogen (N) fertiliser, often combined with flood irrigation, are applied, resulting in N surplus, groundwater depletion and environmental pollution. The objectives of this thesis were to use the HERMES model to simulate the N cycle in winter wheat (Triticum aestivum L.)-summer maize (Zea mays L.) double crop rotations and show the performance of the HERMES model, of the new ammonia volatilisation sub-module and of the new nitrification inhibition tool in the NCP. Further objectives were to assess the models potential to save N and water on plot and county scale, as well as on short and long-term. 
Additionally, improved management strategies with the help of a model-based nitrogen fertiliser recommendation (NFR) and adapted irrigation, should be found. Results showed that the HERMES model performed well under growing conditions of the NCP and was able to describe the relevant processes related to soil-plant interactions concerning N and water during a 2.5 year field experiment. No differences in grain yield between the real-time model-based NFR and the other treatments of the experiments on plot scale in Quzhou County could be found. Simulations with increasing amounts of irrigation resulted in significantly higher N leaching, higher N requirements of the NFR and reduced yields. Thus, conventional flood irrigation as currently practised by the farmers bears great uncertainties and exact irrigation amounts should be known for future simulation studies. In the best-practice scenario simulation on plot-scale, N input and N leaching, but also irrigation water could be reduced strongly within 2 years. Thus, the model-based NFR in combination with adapted irrigation had the highest potential to reduce nitrate leaching, compared to farmers practice and mineral N (Nmin)-reduced treatments. Also the calibrated and validated ammonia volatilisation sub-module of the HERMES model worked well under the climatic and soil conditions of northern China. Simple ammonia volatilisation approaches gave also satisfying results compared to process-oriented approaches. During the simulation with Ammonium sulphate Nitrate with nitrification inhibitor (ASNDMPP) ammonia volatilisation was higher than in the simulation without nitrification inhibitor, while the result for nitrate leaching was the opposite. Although nitrification worked well in the model, nitrification-born nitrous oxide emissions should be considered in future. Results of the simulated annual long-term (31 years) N losses in whole Quzhou County in Hebei Province were 296.8 kg N ha-1 under common farmers practice treatment and 101.7 kg N ha-1 under optimised treatment including NFR and automated irrigation (OPTai). Spatial differences in simulated N losses throughout Quzhou County, could only be found due to different N inputs. Simulations of an optimised treatment, could save on average more than 260 kg N ha-1a-1 from fertiliser input and 190 kg N ha-1a-1 from N losses and around 115.7 mm a-1 of water, compared to farmers practice. These long-term simulation results showed lower N and water saving potential, compared to short-term simulations and underline the necessity of long-term simulations to overcome the effect of high initial N stocks in soil. Additionally, the OPTai worked best on clay loam soil except for a high simulated denitrification loss, while the simulations using farmers practice irrigation could not match the actual water needs resulting in yield decline, especially for winter wheat. Thus, a precise adaption of management to actual weather conditions and plant growth needs is necessary for future simulations. However, the optimised treatments did not seem to be able to maintain the soil organic matter pools, even with full crop residue input. Extra organic inputs seem to be required to maintain soil quality in the optimised treatments. HERMES is a relatively simple model, with regard to data input requirements, to simulate the N cycle. It can offer interpretation of management options on plot, on county and regional scale for extension and research staff. 
Also in combination with other N and water saving methods, the model promises to be a useful tool.}, language = {en} } @article{Gronau2019, author = {Gronau, Norbert}, title = {Determining the appropriate degree of autonomy in cyber-physical production systems}, series = {CIRP Journal of Manufacturing Science and Technology}, volume = {26}, journal = {CIRP Journal of Manufacturing Science and Technology}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1755-5817}, doi = {10.1016/j.cirpj.2019.05.001}, pages = {70 -- 80}, year = {2019}, abstract = {Existing factories face multiple problems due to their hierarchical structure of decision making and control. Cyber-physical systems principally allow increasing the degree of autonomy to new heights. But which degree of autonomy is really useful and beneficial? This paper differentiates diverse definitions of autonomy and approaches to determine them. Some experimental findings in a lab environment help to answer the question raised in this paper.}, language = {en} } @phdthesis{Šustr2020, author = {Šustr, David}, title = {Molecular diffusion in polyelectrolyte multilayers}, doi = {10.25932/publishup-48903}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-489038}, school = {Universit{\"a}t Potsdam}, pages = {106}, year = {2020}, abstract = {Research on novel and advanced biomaterials is an indispensable step towards their applications in desirable fields such as tissue engineering, regenerative medicine, cell culture, or biotechnology. The work presented here focuses on such a promising material: polyelectrolyte multilayer (PEM) composed of hyaluronic acid (HA) and poly(L-lysine) (PLL). This gel-like polymer surface coating is able to accumulate (bio-)molecules such as proteins or drugs and release them in a controlled manner. It serves as a mimic of the extracellular matrix (ECM) in composition and intrinsic properties. These qualities make the HA/PLL multilayers a promising candidate for multiple bio-applications such as those mentioned above. The work presented aims at the development of a straightforward approach for assessment of multi-fractional diffusion in multilayers (first part) and at control of local molecular transport into or from the multilayers by laser light trigger (second part). The mechanism of the loading and release is governed by the interaction of bioactives with the multilayer constituents and by the diffusion phenomenon overall. The diffusion of a molecule in HA/PLL multilayers shows multiple fractions of different diffusion rates. Approaches that are able to assess the mobility of molecules in such a complex system are limited. This shortcoming motivated the design of a novel evaluation tool presented here. The tool employs a simulation-based approach for evaluation of the data acquired by the fluorescence recovery after photobleaching (FRAP) method. In this approach, possible fluorescence recovery scenarios are first simulated and afterwards compared with the acquired data while optimizing parameters of a model until a sufficient match is achieved. Fluorescent latex particles of different sizes and fluorescein in an aqueous medium are utilized as test samples validating the analysis results. The diffusion of the protein cytochrome c in HA/PLL multilayers is evaluated as well. This tool significantly broadens the possibilities of analysis of spatiotemporal FRAP data, which originate from multi-fractional diffusion, while striving to be widely applicable. 
This tool has the potential to elucidate the mechanisms of molecular transport and empower rational engineering of drug release systems. The second part of the work focuses on the fabrication of such a spatiotemporally controlled drug release system employing the HA/PLL multilayer. This release system comprises different layers of various functionalities that together form a sandwich structure. The bottom layer, which serves as a reservoir, is formed by HA/PLL PEM deposited on a planar glass substrate. On top of the PEM, a layer of so-called hybrids is deposited. The hybrids consist of thermoresponsive poly(N-isopropylacrylamide) (PNIPAM)-based hydrogel microparticles with surface-attached gold nanorods. The layer of hybrids is intended to serve as a gate that controls the local molecular transport through the PEM-solution interface. The possibility of stimulating the molecular transport by near-infrared (NIR) laser irradiation is being explored. From several tested approaches for the deposition of hybrids onto the PEM surface, the drying-based approach was identified as optimal. Experiments that examine the functionality of the fabricated sandwich at elevated temperature document the reversible volume phase transition of the PEM-attached hybrids while sustaining the sandwich stability. Further, the gold nanorods were shown to effectively absorb light radiation in the tissue- and cell-friendly NIR spectral region while transducing the energy of light into heat. The rapid and reversible shrinkage of the PEM-attached hybrids was thereby achieved. Finally, dextran was employed as a model transport molecule. It loads into the PEM reservoir in a few seconds with a partition constant of 2.4, while it spontaneously releases in a slower, sustained manner. The local laser irradiation of the sandwich, which contains the fluorescein isothiocyanate-tagged dextran, leads to a gradual reduction of fluorescence intensity in the irradiated region. The fabricated release system employs the renowned photoresponsivity of the hybrids in an innovative setting. The results of the research are a step towards a spatially controlled on-demand drug release system that paves the way to spatiotemporally controlled drug release. The approaches developed in this work have the potential to elucidate the molecular dynamics in ECM and to foster engineering of multilayers with properties tuned to mimic the ECM. The work aims at spatiotemporal control over the diffusion of bioactives and their presentation to the cells.}, language = {en} } @masterthesis{Engelhardt2021, type = {Bachelor Thesis}, author = {Engelhardt, Max Angel Ronan}, title = {Zwischen Simulation und Beweis - eine mathematische Analyse des Bienaym{\'e}-Galton-Watson-Prozesses und sein Einsatz innerhalb des Mathematikunterrichts}, doi = {10.25932/publishup-52447}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-524474}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2021}, abstract = {Die Bienaym{\'e}-Galton-Watson Prozesse k{\"o}nnen f{\"u}r die Untersuchung von speziellen und sich entwickelnden Populationen verwendet werden. Die Populationen umfassen Individuen, welche sich identisch, zuf{\"a}llig, selbstst{\"a}ndig und unabh{\"a}ngig voneinander fortpflanzen und die jeweils nur eine Generation existieren. Die n-te Generation ergibt sich als zuf{\"a}llige Summe der Individuen der (n-1)-ten Generation. Die Relevanz dieser Prozesse begr{\"u}ndet sich innerhalb der Historie und der inner- und außermathematischen Bedeutung. 
Die Geschichte der Bienaym{\'e}-Galton-Watson-Prozesse wird anhand der Entwicklung des Konzeptes bis heute dargestellt. Dabei werden die Wissenschaftler:innen verschiedener Disziplinen angef{\"u}hrt, die Erkenntnisse zu dem Themengebiet beigetragen und das Konzept in ihren Fachbereichen angef{\"u}hrt haben. Somit ergibt sich die außermathematische Signifikanz. Des Weiteren erh{\"a}lt man die innermathematische Bedeutsamkeit mittels des Konzeptes der Verzweigungsprozesse, welches auf die Bienaym{\'e}-Galton-Watson Prozesse zur{\"u}ckzuf{\"u}hren ist. Die Verzweigungsprozesse stellen eines der aussagekr{\"a}ftigsten Modelle f{\"u}r die Beschreibung des Populationswachstums dar. Dar{\"u}ber hinaus besteht die derzeitige Wichtigkeit durch die Anwendungsm{\"o}glichkeit der Verzweigungsprozesse und der Bienaym{\'e}-Galton-Watson Prozesse innerhalb der Epidemiologie. Es werden die Ebola- und die Corona-Pandemie als Anwendungsfelder angef{\"u}hrt. Die Prozesse dienen als Entscheidungsst{\"u}tze f{\"u}r die Politik und erm{\"o}glichen Aussagen {\"u}ber die Auswirkungen von Maßnahmen bez{\"u}glich der Pandemien. Neben den Prozessen werden ebenfalls der bedingte Erwartungswert bez{\"u}glich diskreter Zufallsvariablen, die wahrscheinlichkeitserzeugende Funktion und die zuf{\"a}llige Summe eingef{\"u}hrt. Die Konzepte vereinfachen die Beschreibung der Prozesse und bilden somit die Grundlage der Betrachtungen. Außerdem werden die ben{\"o}tigten und weiterf{\"u}hrenden Eigenschaften der grundlegenden Themengebiete und der Prozesse aufgef{\"u}hrt und bewiesen. Das Kapitel erreicht seinen H{\"o}hepunkt bei dem Beweis des Kritikalit{\"a}tstheorems, wodurch eine Aussage {\"u}ber das Aussterben des Prozesses in verschiedenen F{\"a}llen und somit {\"u}ber die Aussterbewahrscheinlichkeit get{\"a}tigt werden kann. Die F{\"a}lle werden anhand der zu erwartenden Anzahl an Nachkommen eines Individuums unterschieden. Es zeigt sich, dass ein Prozess bei einer zu erwartenden Anzahl kleiner gleich Eins mit Sicherheit ausstirbt und bei einer Anzahl gr{\"o}ßer als Eins die Population nicht in jedem Fall aussterben muss. Danach werden einzelne Beispiele, wie der linear fractional case, die Population von Fibroblasten (Bindegewebszellen) von M{\"a}usen und die Entstehungsfragestellung der Prozesse, angef{\"u}hrt. Diese werden mithilfe der erlangten Ergebnisse untersucht und einige ausgew{\"a}hlte zuf{\"a}llige Dynamiken werden im nachfolgenden Kapitel simuliert. Die Simulationen erfolgen durch ein in Python erstelltes Programm und werden mithilfe der Inversionsmethode realisiert. Die Simulationen stellen beispielhaft die Entwicklungen in den verschiedenen Kritikalit{\"a}tsf{\"a}llen der Prozesse dar. Zudem werden die H{\"a}ufigkeiten der einzelnen Populationsgr{\"o}ßen in Form von Histogrammen angebracht. Dabei l{\"a}sst sich der Unterschied zwischen den einzelnen F{\"a}llen best{\"a}tigen und es wird die Anwendungsm{\"o}glichkeit der Bienaym{\'e}-Galton-Watson Prozesse bei komplexeren Problemen deutlich. Histogramme bekr{\"a}ftigen, dass die einzelnen Populationsgr{\"o}ßen nur endlich oft vorkommen. Diese Aussage wurde von Galton aufgeworfen und in der Extinktions-Explosions-Dichotomie verwendet. Die dargestellten Erkenntnisse {\"u}ber das Themengebiet und die Betrachtung des Konzeptes werden mit einer didaktischen Analyse abgeschlossen. Die Untersuchung beinhaltet die Ber{\"u}cksichtigung der Fundamentalen Ideen, der Fundamentalen Ideen der Stochastik und der Leitidee „Daten und Zufall". 
Dabei ergibt sich, dass in Abh{\"a}ngigkeit der gew{\"a}hlten Perspektive die Anwendung der Bienaym{\'e}-Galton-Watson Prozesse innerhalb der Schule plausibel ist und von Vorteil f{\"u}r die Sch{\"u}ler:innen sein kann. F{\"u}r die Behandlung wird exemplarisch der Rahmenlehrplan f{\"u}r Berlin und Brandenburg analysiert und mit dem Kernlehrplan Nordrhein-Westfalens verglichen. Die Konzeption des Lehrplans aus Berlin und Brandenburg l{\"a}sst nicht den Schluss zu, dass die Bienaym{\'e}-Galton-Watson Prozesse angewendet werden sollten. Es l{\"a}sst sich feststellen, dass die zugrunde liegende Leitidee nicht vollumf{\"a}nglich mit manchen Fundamentalen Ideen der Stochastik vereinbar ist. Somit w{\"u}rde eine Modifikation hinsichtlich einer st{\"a}rkeren Orientierung des Lehrplans an den Fundamentalen Ideen die Anwendung der Prozesse erm{\"o}glichen. Die Aussage wird durch die Betrachtung und {\"U}bertragung eines nordrhein-westf{\"a}lischen Unterrichtsentwurfes f{\"u}r stochastische Prozesse auf die Bienaym{\'e}-Galton-Watson Prozesse unterst{\"u}tzt. Dar{\"u}ber hinaus werden eine Concept Map und ein Vernetzungspentagraph nach von der Bank konzipiert, um diesen Aspekt hervorzuheben.}, language = {de} } @phdthesis{Seidel2021, author = {Seidel, Karen}, title = {Modelling binary classification with computability theory}, doi = {10.25932/publishup-52998}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-529988}, school = {Universit{\"a}t Potsdam}, pages = {viii, 120}, year = {2021}, abstract = {We investigate models for incremental binary classification, an example of supervised online learning. Our starting point is a model for human and machine learning suggested by E. M. Gold. In the first part, we consider incremental learning algorithms that use all of the available binary labeled training data in order to compute the current hypothesis. For this model, we observe that the algorithm can be assumed to always terminate and that the distribution of the training data does not influence learnability. This is still true if we pose additional delayable requirements that remain valid despite a hypothesis output delayed in time. Additionally, we consider the non-delayable requirement of consistent learning. Our corresponding results underpin the claim for delayability being a suitable structural property to describe and collectively investigate a major part of learning success criteria. Our first theorem states the pairwise implications or incomparabilities between an established collection of delayable learning success criteria, the so-called complete map. In particular, the learning algorithm can be assumed to change its last hypothesis only if it is inconsistent with the current training data. Such a learning behaviour is called conservative. By referring to learning functions, we obtain a hierarchy of approximative learning success criteria. Hereby we allow an increasing finite number of errors of the hypothesized concept by the learning algorithm compared with the concept to be learned. Moreover, we observe a duality depending on whether vacillations between infinitely many different correct hypotheses are still considered a successful learning behaviour. This contrasts with the vacillatory hierarchy for learning from solely positive information. We also consider a hypothesis space located between the two most common hypothesis space types in the relevant literature and provide the complete map. In the second part, we model more efficient learning algorithms. 
Past research analyzed, for example, the above-mentioned pairwise relations between delayable learning success criteria when learning from purely positive training data. We compare delayable learning success criteria with respect to iterative learning algorithms, as well as learning from either exclusively positive or binary labeled data. The existence of concept classes that can be learned by an iterative learning algorithm but not in a conservative way had already been observed, showing that conservativeness is restrictive. An additional requirement arising from cognitive science research, and also observed when training neural networks, is non-U-shapedness, stating that the learning algorithm does not diverge from a correct hypothesis. We show that forbidding U-shapes also restricts iterative learners learning from binary labeled data. In order to compute the next hypothesis, BMS learning algorithms refer to the currently observed datum and the current state of the learning algorithm. For learning algorithms equipped with an infinite number of states, we provide the complete map. A learning success criterion is semantic if it still holds when the learning algorithm outputs other parameters standing for the same classifier. Syntactic (non-semantic) learning success criteria, for example conservativeness and syntactic non-U-shapedness, restrict BMS learning algorithms. For proving the equivalence of the syntactic requirements, we refer to witness-based learning processes, in which every change of the hypothesis is justified by a witness from the training data that is classified correctly later on. Moreover, for every semantic delayable learning requirement, iterative and BMS learning algorithms are equivalent. In case the considered learning success criterion incorporates syntactic non-U-shapedness, BMS learning algorithms can learn more concept classes than iterative learning algorithms. The proofs are combinatorial, inspired by investigations of formal languages, or employ results from computability theory, such as infinite recursion theorems (fixed point theorems).}, language = {en} } @phdthesis{Grum2021, author = {Grum, Marcus}, title = {Construction of a concept of neuronal modeling}, year = {2021}, abstract = {The business problem of having inefficient processes, imprecise process analyses and simulations, as well as non-transparent artificial neuronal network models can be overcome by an easy-to-use modeling concept. With the aim of developing a flexible and efficient approach to modeling, simulating, and optimizing processes, this paper proposes a flexible Concept of Neuronal Modeling (CoNM). The modeling concept, which is described by the designed modeling language and its mathematical formulation and is connected to a technical substantiation, is based on a collection of novel sub-artifacts. As these have been implemented as a computational model, the set of CoNM tools carries out novel kinds of Neuronal Process Modeling (NPM), Neuronal Process Simulations (NPS), and Neuronal Process Optimizations (NPO).
The efficacy of the designed artifacts was demonstrated rigorously by means of six experiments and a simulator of real industrial production processes.}, language = {en} } @article{HoffmannMachatschekLendlein2022, author = {Hoffmann, Falk and Machatschek, Rainhard Gabriel and Lendlein, Andreas}, title = {Analytical model and Monte Carlo simulations of polymer degradation with improved chain cut statistics}, series = {Journal of materials research : JMR}, volume = {37}, journal = {Journal of materials research : JMR}, number = {5}, publisher = {Springer}, address = {Heidelberg}, issn = {0884-2914}, doi = {10.1557/s43578-022-00495-4}, pages = {1093 -- 1101}, year = {2022}, abstract = {The degradation of polymers is described by mathematical models based on bond cleavage statistics, including the decreasing probability of chain cuts with decreasing average chain length. We derive equations for the degradation of chains under a random chain cut and a chain end cut mechanism, which are compared to existing models. The results are used to predict the influence of internal molecular parameters. It is shown that both chain cut mechanisms lead to a similar shape of the mass or molecular mass loss curve. A characteristic time is derived, which can be used to extract the maximum length l of soluble fragments of the polymer. We show that the complete description is needed to extract the degradation rate constant k from the molecular mass loss curve and that l can be used to design polymers that lose less mechanical stability before entering the mass loss phase.}, language = {en} } @phdthesis{Panzer2024, author = {Panzer, Marcel}, title = {Design of a hyper-heuristics based control framework for modular production systems}, doi = {10.25932/publishup-63300}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-633006}, school = {Universit{\"a}t Potsdam}, pages = {vi, 334}, year = {2024}, abstract = {Volatile supply and sales markets, coupled with increasing product individualization and complex production processes, present significant challenges for manufacturing companies. These must navigate and adapt to ever-shifting external and internal factors while ensuring robustness against process variabilities and unforeseen events. This has a pronounced impact on production control, which serves as the operational intersection between production planning and the shop-floor resources and necessitates the capability to manage intricate process interdependencies effectively. Considering the increasing dynamics and product diversification, alongside the need to maintain constant production performance, the implementation of innovative control strategies becomes crucial. In recent years, the integration of Industry 4.0 technologies and machine learning methods has gained prominence in addressing emerging challenges in production applications. Within this context, this cumulative thesis analyzes deep learning based production systems on the basis of five publications. Particular attention is paid to applications of deep reinforcement learning, aiming to explore its potential in dynamic control contexts. The analyses reveal that deep reinforcement learning excels in various applications, especially in dynamic production control tasks. Its efficacy can be attributed to its interactive learning and real-time operational model. However, despite its evident utility, there are notable structural, organizational, and algorithmic gaps in the prevailing research.
A predominant portion of deep reinforcement learning based approaches is limited to specific job shop scenarios and often overlooks the potential synergies of combined resources. Furthermore, the analysis highlights the rare implementation of multi-agent systems and semi-heterarchical systems in practical settings. A notable gap remains in the integration of deep reinforcement learning into a hyper-heuristic. To bridge these research gaps, this thesis introduces a deep reinforcement learning based hyper-heuristic for the control of modular production systems, developed in accordance with the design science research methodology. Implemented within a semi-heterarchical multi-agent framework, this approach achieves a threefold reduction in control and optimization complexity while ensuring high scalability, adaptability, and robustness of the system. In comparative benchmarks, this control methodology outperforms rule-based heuristics, reducing throughput times and tardiness, and effectively incorporates customer- and order-centric metrics. The control artifact facilitates rapid scenario generation, motivating further research efforts and bridging the gap to real-world applications. The overarching goal is to foster a synergy between theoretical insights and practical solutions, thereby enriching scientific discourse and addressing current industrial challenges.}, language = {en} }
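As a loose illustration of the hyper-heuristic idea in the Panzer (2024) abstract above, in which a high-level learner selects among low-level dispatching rules rather than scheduling jobs directly, the following toy Python sketch uses tabular Q-learning (standing in for the deep reinforcement learning agent of the thesis) to choose between two dispatching heuristics on a single machine. All rules, states, rewards and parameters are invented for the illustration and are not taken from the thesis.

import random

# Low-level dispatching heuristics the hyper-heuristic chooses between.
def spt(queue):            # shortest processing time first
    return min(queue, key=lambda job: job[0])

def edd(queue):            # earliest due date first
    return min(queue, key=lambda job: job[1])

RULES = [spt, edd]

def make_jobs(rng, n=8):
    # A job is a pair (processing_time, due_date).
    return [(rng.randint(1, 9), rng.randint(5, 40)) for _ in range(n)]

def state_of(queue, now):
    # Coarse state signal: is any queued job already past its due date?
    return int(any(now > due for _, due in queue))

def run_episode(q_table, rng, epsilon=0.1, alpha=0.1, gamma=0.9):
    queue, now = make_jobs(rng), 0
    while queue:
        s = state_of(queue, now)
        if rng.random() < epsilon:                       # explore
            a = rng.randrange(len(RULES))
        else:                                            # exploit the learned values
            a = max(range(len(RULES)), key=lambda i: q_table[(s, i)])
        job = RULES[a](queue)
        queue.remove(job)
        now += job[0]
        reward = -max(0, now - job[1])                   # negative tardiness of the finished job
        if queue:
            best_next = max(q_table[(state_of(queue, now), i)] for i in range(len(RULES)))
        else:
            best_next = 0.0
        q_table[(s, a)] += alpha * (reward + gamma * best_next - q_table[(s, a)])

rng = random.Random(0)
q_table = {(s, a): 0.0 for s in (0, 1) for a in range(len(RULES))}
for _ in range(500):
    run_episode(q_table, rng)
print(q_table)   # learned preference for a dispatching rule in each coarse state

The design choice illustrated here is the defining feature of a hyper-heuristic: the learner selects which low-level heuristic to apply in a given situation, which keeps the action space small compared to direct job-level control.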