@phdthesis{Ellenbeck2023, author = {Ellenbeck, Saskia}, title = {Zwischen Modellierung und Stakeholderbeteiligung - Wissensproduktion in der Energiewendeforschung}, school = {Universit{\"a}t Potsdam}, pages = {130}, year = {2023}, abstract = {The decarbonisation of the energy system is part of the CO2 reduction strategy for combating climate change agreed internationally under the Paris Climate Agreement. Following the negotiation and adoption of the climate targets, political decision-makers worldwide now face the question of how to achieve them. This generates a strong political demand for knowledge about the direct and indirect effects of different policy instruments and the potential development pathways of an energy transition. This societal need for scientific answers on possible solutions has been taken up within climate impact research, more precisely climate policy impact research. The relatively new branch of energy transition research has developed worldwide, but faces a twofold challenge: First, the object of research does not exist in a vacuum but is situated within economic, social and political contexts, here termed societal embeddedness. The question of how the energy transition can be achieved is also debated outside of science and thus constitutes an arena of negotiation between different interests and narratives. Second, the object under investigation lies in the future, here summarised under the term structural non-knowledge. These two conditions mean that conventional methods of empirical social research do not apply and that an opening and transformation of science towards new methods is required (Nowotny 2001, Ravetz 2006, Schneidewind 2013). In this thesis I examine two ways in which energy transition research deals with the challenge of producing knowledge under the conditions of structural non-knowledge and societal embeddedness. On the one hand, this is done by involving stakeholders, i.e. non-scientific actors, in the research process. On the other hand, the use of complex econometric models to compute implications and energy-economic development pathways has become a central means of knowledge generation in energy transition research. The structural condition of non-knowledge, understood as a problem, is thereby addressed insofar as the results of stakeholder involvement and of modelling work undoubtedly provide new knowledge.
There is disagreement, however, about what this knowledge actually refers to: do stakeholders bring interests or legitimate perspectives into the research process, and are models simplified representations of the world or expressions of the modeller's conceptions?}, language = {de} } @phdthesis{Kooten2023, author = {Kooten, Willemijn Sarah Maria Theresia van}, title = {Structural inheritance of the Salta Rift basin and its control on exhumation patterns of the Eastern Cordillera between 23 and 24°S}, doi = {10.25932/publishup-61798}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617983}, school = {Universit{\"a}t Potsdam}, pages = {XIX, 188}, year = {2023}, abstract = {The deformation style of mountain belts is greatly influenced by the upper plate architecture created during preceding deformation phases. The Mesozoic Salta Rift extensional phase has created a dominant structural and lithological framework that controls Cenozoic deformation and exhumation patterns in the Central Andes. Studying the nature of these pre-existing anisotropies is a key to understanding the spatiotemporal distribution of exhumation and its controlling factors. The Eastern Cordillera, in particular, has a structural grain that is in part controlled by Salta Rift structures and their orientation relative to Andean shortening. As a result, there are areas in which Andean deformation prevails and areas where the influence of the Salta Rift is the main control on deformation patterns. Between 23 and 24°S, lithological and structural heterogeneities imposed by the Lomas de Olmedo sub-basin (Salta Rift basin) affect the development of the Eastern Cordillera fold-and-thrust belt. The inverted northern margin of the sub-basin now forms the southern boundary of the intermontane Cianzo basin. The former western margin of the sub-basin is located at the confluence of the Subandean Zone, the Santa Barbara System and the Eastern Cordillera. Here, the Salta Rift basin architecture is responsible for the distribution of these morphotectonic provinces. In this study we use a multi-method approach consisting of low-temperature (U-Th-Sm)/He and apatite fission track thermochronology, detrital geochronology, and structural and sedimentological analyses to investigate the Mesozoic structural inheritance of the Lomas de Olmedo sub-basin and Cenozoic exhumation patterns. Characterization of the extension-related Tacur{\'u} Group as an intermediate succession between Paleozoic basement and the syn-rift infill of the Lomas de Olmedo sub-basin reveals a Jurassic maximum depositional age. Zircon (U-Th-Sm)/He cooling ages record a pre-Cretaceous onset of exhumation for the rift shoulders in the northern part of the sub-basin, whereas the western shoulder shows a more recent onset (140-115 Ma). Variations in the sedimentary thickness of syn- and post-rift strata document the evolution of accommodation space in the sub-basin. While the thickness of syn-rift strata increases rapidly toward the northern basin margin, the post-rift strata thickness decreases toward the margin and forms a condensed section on the rift shoulder. Inversion of Salta Rift structures commenced between the late Oligocene and Miocene (24-15 Ma) in the ranges surrounding the Cianzo basin. The eastern and western limbs of the Cianzo syncline, located in the hanging wall of the basin-bounding Hornocal fault, show diachronous exhumation.
At the same time, western fault blocks of Tilcara Range, south of the Cianzo basin, began exhuming in the late Oligocene to early Miocene (26-16 Ma). Eastward propagation to the frontal thrust and to the Paleozoic strata east of the Tilcara Range occurred in the middle Miocene (22-10 Ma) and the late Miocene-early Pliocene (10-4 Ma), respectively.}, language = {en} } @phdthesis{MalemShinitski2023, author = {Malem-Shinitski, Noa}, title = {Bayesian inference and modeling for point processes with applications from neuronal activity to scene viewing}, doi = {10.25932/publishup-61495}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614952}, school = {Universit{\"a}t Potsdam}, pages = {vii, 129}, year = {2023}, abstract = {Point processes are a common methodology to model sets of events. From earthquakes to social media posts, from the arrival times of neuronal spikes to the timing of crimes, from stock prices to disease spreading -- these phenomena can be reduced to the occurrences of events concentrated in points. Often, these events happen one after the other defining a time--series. Models of point processes can be used to deepen our understanding of such events and for classification and prediction. Such models include an underlying random process that generates the events. This work uses Bayesian methodology to infer the underlying generative process from observed data. Our contribution is twofold -- we develop new models and new inference methods for these processes. We propose a model that extends the family of point processes where the occurrence of an event depends on the previous events. This family is known as Hawkes processes. Whereas in most existing models of such processes, past events are assumed to have only an excitatory effect on future events, we focus on the newly developed nonlinear Hawkes process, where past events could have excitatory and inhibitory effects. After defining the model, we present its inference method and apply it to data from different fields, among others, to neuronal activity. The second model described in the thesis concerns a specific instance of point processes --- the decision process underlying human gaze control. This process results in a series of fixated locations in an image. We developed a new model to describe this process, motivated by the known Exploration--Exploitation dilemma. Alongside the model, we present a Bayesian inference algorithm to infer the model parameters. Remaining in the realm of human scene viewing, we identify the lack of best practices for Bayesian inference in this field. We survey four popular algorithms and compare their performances for parameter inference in two scan path models. The novel models and inference algorithms presented in this dissertation enrich the understanding of point process data and allow us to uncover meaningful insights.}, language = {en} } @phdthesis{Stoltnow2023, author = {Stoltnow, Malte}, title = {Magmatic-hydrothermal processes along the porphyry to epithermal transition}, doi = {10.25932/publishup-61140}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-611402}, school = {Universit{\"a}t Potsdam}, pages = {xxviii, 132}, year = {2023}, abstract = {Magmatic-hydrothermal systems form a variety of ore deposits at different proximities to upper-crustal hydrous magma chambers, ranging from greisenization in the roof zone of the intrusion, porphyry mineralization at intermediate depths to epithermal vein deposits near the surface. 
The physical transport processes and chemical precipitation mechanisms vary between deposit types and are often still debated. The majority of magmatic-hydrothermal ore deposits are located along the Pacific Ring of Fire, whose eastern part is characterized by the Mesozoic to Cenozoic orogenic belts of the western North and South Americas, namely the American Cordillera. Major magmatic-hydrothermal ore deposits along the American Cordillera include (i) porphyry Cu(-Mo-Au) deposits (along the western cordilleras of Mexico, the western U.S., Canada, Chile, Peru, and Argentina); (ii) Climax- (and sub-) type Mo deposits (Colorado Mineral Belt and northern New Mexico); and (iii) porphyry and IS-type epithermal Sn(-W-Ag) deposits of the Central Andean Tin Belt (Bolivia, Peru and northern Argentina). The individual studies presented in this thesis primarily focus on the formation of different styles of mineralization located at different proximities to the intrusion in magmatic-hydrothermal systems along the American Cordillera. This includes (i) two individual geochemical studies on the Sweet Home Mine in the Colorado Mineral Belt (potential endmember of peripheral Climax-type mineralization); (ii) one numerical modeling study setup in a generic porphyry Cu-environment; and (iii) a numerical modeling study on the Central Andean Tin Belt-type Pirquitas Mine in NW Argentina. Microthermometric data of fluid inclusions trapped in greisen quartz and fluorite from the Sweet Home Mine (Detroit City Portal) suggest that the early-stage mineralization precipitated from low- to medium-salinity (1.5-11.5 wt.\% equiv. NaCl), CO2-bearing fluids at temperatures between 360 and 415°C and at depths of at least 3.5 km. Stable isotope and noble gas isotope data indicate that greisen formation and base metal mineralization at the Sweet Home Mine was related to fluids of different origins. Early magmatic fluids were the principal source for mantle-derived volatiles (CO2, H2S/SO2, noble gases), which subsequently mixed with significant amounts of heated meteoric water. Mixing of magmatic fluids with meteoric water is constrained by δ2Hw-δ18Ow relationships of fluid inclusions. The deep hydrothermal mineralization at the Sweet Home Mine shows features similar to deep hydrothermal vein mineralization at Climax-type Mo deposits or on their periphery. This suggests that fluid migration and the deposition of ore and gangue minerals in the Sweet Home Mine was triggered by a deep-seated magmatic intrusion. The second study on the Sweet Home Mine presents Re-Os molybdenite ages of 65.86±0.30 Ma from a Mo-mineralized major normal fault, namely the Contact Structure, and multimineral Rb-Sr isochron ages of 26.26±0.38 Ma and 25.3±3.0 Ma from gangue minerals in greisen assemblages. The age data imply that mineralization at the Sweet Home Mine formed in two separate events: Late Cretaceous (Laramide-related) and Oligocene (Rio Grande Rift-related). Thus, the age of Mo mineralization at the Sweet Home Mine clearly predates that of the Oligocene Climax-type deposits elsewhere in the Colorado Mineral Belt. The Re-Os and Rb-Sr ages also constrain the age of the latest deformation along the Contact Structure to between 62.77±0.50 Ma and 26.26±0.38 Ma, which was employed and/or crosscut by Late Cretaceous and Oligocene fluids. 
Along the Contact Structure, Late Cretaceous molybdenite is spatially associated with Oligocene minerals in the same vein system, a feature that precludes molybdenite recrystallization or reprecipitation by Oligocene ore fluids. Ore precipitation in porphyry copper systems is generally characterized by metal zoning (Cu-Mo to Zn-Pb-Ag), which is suggested to be variably related to solubility decreases during fluid cooling, fluid-rock interactions, partitioning during fluid phase separation and mixing with external fluids. The numerical modeling study set up in a generic porphyry Cu environment presents new advances to a numerical process model by considering published constraints on the temperature- and salinity-dependent solubility of Cu, Pb and Zn in the ore fluid. This study investigates the roles of vapor-brine separation, halite saturation, initial metal contents, fluid mixing, and remobilization as first-order controls of the physical hydrology on ore formation. The results show that the magmatic vapor and brine phases ascend with different residence times but as miscible fluid mixtures, with salinity increases generating metal-undersaturated bulk fluids. The release rates of magmatic fluids affect the location of the thermohaline fronts, leading to contrasting mechanisms for ore precipitation: higher rates result in halite saturation without significant metal zoning, whereas lower rates produce zoned ore shells due to mixing with meteoric water. Varying metal contents can affect the order of the final metal precipitation sequence. Redissolution of precipitated metals results in zoned ore shell patterns in more peripheral locations and also decouples halite saturation from ore precipitation. The epithermal Pirquitas Sn-Ag-Pb-Zn mine in NW Argentina is hosted in a domain of metamorphosed sediments without geological evidence for volcanic activity within a distance of about 10 km from the deposit. However, recent geochemical studies of ore-stage fluid inclusions indicate a significant contribution of magmatic volatiles. This study tested different formation models by applying an existing numerical process model for porphyry-epithermal systems with a magmatic intrusion located either at a distance of about 10 km underneath the nearest active volcano or hidden underneath the deposit. The results show that the migration of the ore fluid over a 10-km distance results in metal precipitation by cooling before the deposit site is reached. In contrast, simulations with a hidden magmatic intrusion beneath the Pirquitas deposit are in line with field observations, which include mineralized hydrothermal breccias in the deposit area.}, language = {en} } @phdthesis{Baeckemo2022, author = {B{\"a}ckemo, Johan Dag Valentin}, title = {Digital tools and bioinspiration for the implementation in science and medicine}, doi = {10.25932/publishup-57145}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-571458}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 108}, year = {2022}, abstract = {Using three examples, this doctoral thesis investigates how digital tools such as programming, modelling, 3D design tools and additive manufacturing, combined with a biomimetics-based design strategy, can lead to new analysis methods and products that find application in science and medicine. Electrical discharge machining (EDM) is frequently used to deform or shape hard metals that are difficult to machine with conventional machine tools.
In this work, a novel curvature analysis method is presented as an alternative to roughness analysis. To better understand how the surface changes over the machining time of the EDM process, a digital impact model was also created that generated craters on elevations on an initially flat substrate. It was found that a substrate reaches an equilibrium after about 10,000 impacts. The proposed curvature analysis method has the potential to be used in the development of new cell culture substrates for stem cell research. Two species analysed in this work because of their interesting mechanisms are the Venus flytrap and the tapeworm. The Venus flytrap can close its trap at an astonishing speed. The closing mechanism is of scientific interest and is an example of a so-called mechanically bistable system: there are two stable states. In mammals, the tapeworm is mostly found in the lower intestine and attaches itself to the intestinal walls with its suckers. Once the tapeworm has found a suitable site, it ejects its hooks and attaches itself permanently to the wall. This function could be exploited in minimally invasive medicine to allow better control of implants during the implantation process. For both projects, a mathematical model, the so-called Chained Beam Constraint Model (CBCM), was used to model the nonlinear bending behaviour and thus to predict which structures could exhibit mechanically bistable behaviour. Two prototypes were then printed with a 3D printer, and experiments demonstrated that both exhibit bistable behaviour. This work illustrates the high application potential for new analysis methods in science and for new medical devices in minimally invasive medicine.}, language = {en} } @phdthesis{Kath2022, author = {Kath, Nadja Jeanette}, title = {Functional traits determine biomass dynamics, coexistence and energetics in plankton food webs}, doi = {10.25932/publishup-55123}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-551239}, school = {Universit{\"a}t Potsdam}, pages = {197}, year = {2022}, abstract = {Plankton food webs are the basis of marine and limnetic ecosystems. Aquatic ecosystems of high biodiversity in particular provide important ecosystem services for humankind as providers of food, coastal protection, climate regulation, and tourism. Understanding the dynamics of biomass and coexistence in these food webs is a first step to understanding the ecosystems. It also lays the foundation for the development of management strategies for the maintenance of marine and freshwater biodiversity despite anthropogenic influences. Natural food webs are highly complex, and thus often equally complex methods are needed to analyse and understand them well. Models can help to do so, as they depict simplified parts of reality. In the attempt to gain a broader understanding of these complex food webs, diverse methods are used to investigate different questions. In my first project, we compared the energetics of a food chain in two versions of an allometric trophic network model.
In particular, we solved the problem of unrealistically high trophic transfer efficiencies (up to 70\%) by accounting for both basal respiration and activity respiration, which decreased the trophic transfer efficiency to realistic values of ≤30\%. Next, in my second project, I turned to plankton food webs and especially phytoplankton traits. Investigating a long-term data set from Lake Constance, we found evidence for a trade-off between defence and growth rate in this natural phytoplankton community. I continued working with this data set in my third project, focusing on ciliates, the main grazers of phytoplankton in spring. Boosted regression trees revealed that temperature and predators have the highest influence on net growth rates of ciliates. Finally, in my fourth project, we investigated a food web model inspired by ciliates to explore the coexistence of plastic competitors and to study the new concept of maladaptive switching, which revealed some drawbacks of plasticity: faster adaptation led to more maladaptive switching towards undefended phenotypes, which reduced autotroph biomass and coexistence and increased consumer biomass. It became obvious that even well-established models should be critically questioned, as it is important not to lose sight of reality on the way to a simplified model. The results furthermore showed that long-term data sets are necessary, as they can help to disentangle complex natural processes. Last, one should keep in mind that the interplay between models and experimental or field data can deliver fruitful insights into our complex world.}, language = {en} } @phdthesis{RodriguezPiceda2022, author = {Rodriguez Piceda, Constanza}, title = {Thermomechanical state of the southern Central Andes}, doi = {10.25932/publishup-54927}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-549275}, school = {Universit{\"a}t Potsdam}, pages = {xx, 228}, year = {2022}, abstract = {The Andes are a ~7000 km long N-S trending mountain range developed along the western continental margin of South America. Driven by the subduction of the oceanic Nazca plate beneath the continental South American plate, the formation of the northern and central parts of the orogen is a type case for a non-collisional orogeny. In the southern Central Andes (SCA, 29°S-39°S), the oceanic plate changes the subduction angle between 33°S and 35°S from almost horizontal (< 5° dip) in the north to a steeper angle (~30° dip) in the south. This sector of the Andes also displays remarkable along- and across-strike variations of the tectonic deformation patterns. These include a systematic decrease of topographic elevation, crustal shortening, and foreland and orogenic width, as well as an alternation of the foreland deformation style between thick-skinned and thin-skinned recorded along and across the strike of the subduction zone. Moreover, the SCA are a very seismically active region. The continental plate is characterized by relatively shallow seismicity (< 30 km depth) which is mainly focussed at the transition from the orogen to the lowland areas of the foreland and the forearc; in contrast, deeper seismicity occurs below the interior of the northern foreland. Additionally, frequent seismicity is also recorded in the shallow parts of the oceanic plate and in a sector of the flat slab segment between 31°S and 33°S.
The observed spatial heterogeneity in tectonic and seismic deformation in the SCA has been attributed to multiple causes, including variations in sediment thickness, the presence of inherited structures and changes in the subduction angle of the oceanic slab. However, no study has yet examined the relationship between the long-term rheological configuration of the SCA and the spatial deformation patterns. Moreover, the effects of the density and thickness configuration of the continental plate and of variations in the slab dip angle on the rheological state of the lithosphere have not yet been thoroughly investigated. Since rheology depends on composition, pressure and temperature, a detailed characterization of the compositional, structural and thermal fields of the lithosphere is needed. Therefore, by using multiple geophysical approaches and data sources, I constructed the following 3D models of the SCA lithosphere: (i) a seismically-constrained structural and density model that was tested against the gravity field; (ii) a thermal model integrating the conversion of mantle shear-wave velocities to temperature with steady-state conductive calculations in the uppermost lithosphere (< 50 km depth), validated by temperature and heat-flow measurements; and (iii) a rheological model of the long-term lithospheric strength using the previously generated models as input. The results of this dissertation indicate that the present-day thermal and rheological fields of the SCA are controlled by different mechanisms at different depths. At shallow depths (< 50 km), the thermomechanical field is modulated by the heterogeneous composition of the continental lithosphere. The overprint of the oceanic slab is detectable where the oceanic plate is shallow (< 85 km depth) and the radiogenic crust is thin, resulting in overall lower temperatures and higher strength compared to regions where the slab is steep and the radiogenic crust is thick. At depths > 50 km, the largest temperature variations occur where the descending slab is detected, which implies that the deep thermal field is mainly affected by the slab dip geometry. The outcomes of this thesis suggest that the long-term thermomechanical state of the lithosphere influences the spatial distribution of seismic deformation. Most of the seismicity within the continental plate occurs above the modelled transition from brittle to ductile conditions. Additionally, there is a spatial correlation between the location of these events and the transition from the mechanically strong domains of the forearc and foreland to the weak domain of the orogen. In contrast, seismicity within the oceanic plate is also detected where long-term ductile conditions are expected. I therefore analysed the possible influence of additional mechanisms triggering these earthquakes, including the compaction of sediments at the subduction interface and dehydration reactions in the slab. To this end, I carried out a qualitative analysis of the state of hydration in the mantle using the ratio between compressional- and shear-wave velocity (vp/vs ratio) from a previous seismic tomography. The results from this analysis indicate that the majority of the seismicity spatially correlates with hydrated areas of the slab and overlying continental mantle, with the exception of the cluster within the flat slab segment. In this region, earthquakes are likely triggered by flexural processes where the slab changes from a flat to a steep subduction angle.
First-order variations in the observed tectonic patterns also seem to be influenced by the thermomechanical configuration of the lithosphere. The mechanically strong domains of the forearc and foreland, due to their resistance to deformation, display smaller amounts of shortening than the relatively weak orogenic domain. In addition, the structural and thermomechanical characteristics modelled in this dissertation confirm previous analyses from geodynamic models pointing to the factors that control the observed heterogeneities in the orogen and foreland deformation style. These characteristics include the lithospheric and crustal thickness, the presence of weak sediments and the variations in gravitational potential energy. Specific conditions occur in the cold and strong northern foreland, which is characterized by active seismicity and thick-skinned structures, although the modelled crustal strength exceeds the typical values of externally-applied tectonic stresses. The additional mechanisms that could explain the strain localization in a region that should resist deformation are: (i) increased tectonic forces coming from the steepening of the slab and (ii) enhanced weakening along inherited structures from pre-Andean deformation events. Finally, the thermomechanical conditions of this sector of the foreland could be a key factor influencing the preservation of the flat subduction angle at these latitudes of the SCA.}, language = {en} } @phdthesis{Grum2021, author = {Grum, Marcus}, title = {Construction of a concept of neuronal modeling}, year = {2021}, abstract = {The business problem of having inefficient processes, imprecise process analyses and simulations, as well as non-transparent artificial neuronal network models can be overcome by an easy-to-use modeling concept. With the aim of developing a flexible and efficient approach to modeling, simulating, and optimizing processes, this work proposes a flexible Concept of Neuronal Modeling (CoNM). The modeling concept, which is described by the designed modeling language and its mathematical formulation and is connected to a technical substantiation, is based on a collection of novel sub-artifacts. As these have been implemented as a computational model, the set of CoNM tools carries out novel kinds of Neuronal Process Modeling (NPM), Neuronal Process Simulations (NPS), and Neuronal Process Optimizations (NPO). The efficacy of the designed artifacts was demonstrated rigorously by means of six experiments and a simulator of real industrial production processes.}, language = {en} } @phdthesis{Dehne2021, author = {Dehne, Julian}, title = {M{\"o}glichkeiten und Limitationen der medialen Unterst{\"u}tzung forschenden Lernens}, doi = {10.25932/publishup-49789}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-497894}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 404}, year = {2021}, abstract = {Research-based learning and the digital transformation are two of the most important influences on the development of higher education didactics in the German-speaking world. While research-based learning, as a normative theory, describes what ought to be done, digital tools, old and new, determine what can be done in many areas. In the present work, a process model is developed that attempts to systematise research-based learning with respect to interactive, group-based processes. Based on the developed model, a software prototype was implemented that can accompany the entire research process.
In doing so, group formation, feedback and reflection processes, and peer assessment are supported with educational technologies. The developments were deployed in a qualitative experiment in order to gain system knowledge about the possibilities and limits of the digital support of research-based learning.}, language = {de} } @phdthesis{Senftleben2020, author = {Senftleben, Robin}, title = {Earth's magnetic field over the last 1000 years}, doi = {10.25932/publishup-47315}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473150}, school = {Universit{\"a}t Potsdam}, pages = {xii, 104}, year = {2020}, abstract = {To investigate the reliability and stability of spherical harmonic models based on archeo-/paleomagnetic data, 2000 geomagnetic models were calculated. All models are based on the same data set but with randomized uncertainties. Comparison of these models to the geomagnetic field model gufm1 showed that large-scale magnetic field structures up to spherical harmonic degree 4 are stable throughout all models. By ranking all models according to how closely their dipole coefficients match gufm1, more realistic uncertainty estimates were derived than those provided by the authors of the data. The derived uncertainty estimates were used in further modelling, which combines archeo-/paleomagnetic and historical data. The huge difference in data count, accuracy and coverage of these two very different data sources made it necessary to introduce a time-dependent spatial damping, which was constructed to constrain the spatial complexity of the model. Finally, 501 models were calculated by treating each data point as a Gaussian random variable whose mean is the original value and whose standard deviation is its uncertainty. The final model arhimag1k is calculated by taking the mean of the 501 sets of Gauss coefficients. arhimag1k fits different dependent and independent data sets well. It shows an early reverse flux patch at the core-mantle boundary between 1000 AD and 1200 AD at the location of today's South Atlantic Anomaly. Another interesting feature is a high-latitude flux patch over Greenland between 1200 and 1400 AD. The dipole moment shows a constant behaviour between 1600 and 1840 AD. In the second part of the thesis, four new paleointensities from four different flows on the island of Fogo, which is part of Cape Verde, are presented. The data are fitted well by arhimag1k, with the exception of the value of 28.3 microtesla at 1663, which is approximately 10 microtesla lower than the model suggests.}, language = {en} } @phdthesis{Raatz2019, author = {Raatz, Michael}, title = {Strategies within predator-prey interactions - from individuals to ecosystems}, doi = {10.25932/publishup-42658}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426587}, school = {Universit{\"a}t Potsdam}, pages = {175}, year = {2019}, abstract = {Predator-prey interactions provide central links in food webs. These interactions are directly or indirectly impacted by a number of factors. These factors range from physiological characteristics of individual organisms, over specifics of their interaction, to impacts of the environment. They may generate the potential for the application of different strategies by predators and prey. Within this thesis, I modelled predator-prey interactions and investigated a broad range of different factors driving the application of certain strategies that affect the individuals or their populations.
In doing so, I focused on phytoplankton-zooplankton systems as established model systems of predator-prey interactions. At the level of predator physiology I proposed, and partly confirmed, adaptations to fluctuating availability of co-limiting nutrients as beneficial strategies. These may allow to store ingested nutrients or to regulate the effort put into nutrient assimilation. We found that these two strategies are beneficial at different fluctuation frequencies of the nutrients, but may positively interact at intermediate frequencies. The corresponding experiments supported our model results. We found that the temporal structure of nutrient fluctuations indeed has strong effects on the juvenile somatic growth rate of {\itshape Daphnia}. Predator colimitation by energy and essential biochemical nutrients gave rise to another physiological strategy. High-quality prey species may render themselves indispensable in a scenario of predator-mediated coexistence by being the only source of essential biochemical nutrients, such as cholesterol. Thereby, the high-quality prey may even compensate for a lacking defense and ensure its persistence in competition with other more defended prey species. We found a similar effect in a model where algae and bacteria compete for nutrients. Now, being the only source of a compound that is required by the competitor (bacteria) prevented the competitive exclusion of the algae. In this case, the essential compounds were the organic carbon provided by the algae. Here again, being indispensable served as a prey strategy that ensured its coexistence. The latter scenario also gave rise to the application of the two metabolic strategies of autotrophy and heterotrophy by algae and bacteria, respectively. We found that their coexistence allowed the recycling of resources in a microbial loop that would otherwise be lost. Instead, these resources were made available to higher trophic levels, increasing the trophic transfer efficiency in food webs. The predation process comprises the next higher level of factors shaping the predator-prey interaction, besides these factors that originated from the functioning or composition of individuals. Here, I focused on defensive mechanisms and investigated multiple scenarios of static or adaptive combinations of prey defense and predator offense. I confirmed and extended earlier reports on the coexistence-promoting effects of partially lower palatability of the prey community. When bacteria and algae are coexisting, a higher palatability of bacteria may increase the average predator biomass, with the side effect of making the population dynamics more regular. This may facilitate experimental investigations and interpretations. If defense and offense are adaptive, this allows organisms to maximize their growth rate. Besides this fitness-enhancing effect, I found that co-adaptation may provide the predator-prey system with the flexibility to buffer external perturbations. On top of these rather internal factors, environmental drivers also affect predator-prey interactions. I showed that environmental nutrient fluctuations may create a spatio-temporal resource heterogeneity that selects for different predator strategies. I hypothesized that this might favour either storage or acclimation specialists, depending on the frequency of the environmental fluctuations. We found that many of these factors promote the coexistence of different strategies and may therefore support and sustain biodiversity. 
Thus, they might be relevant for the maintenance of crucial ecosystem functions that also affect us humans. Besides this, the richness of factors that impact predator-prey interactions might explain why so many species, especially in the planktonic regime, are able to coexist.}, language = {en} } @phdthesis{Hoellerer2016, author = {H{\"o}llerer, Reinhard}, title = {Modellierung und Optimierung von B{\"u}rgerdiensten am Beispiel der Stadt Landshut}, doi = {10.25932/publishup-42598}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-425986}, school = {Universit{\"a}t Potsdam}, pages = {xii, 244}, year = {2016}, abstract = {The planning and execution as well as the static and dynamic analysis of administrative and governmental business processes at the municipal, state and federal level with the help of information and communication technologies have long occupied politicians and information technology strategists as well as the public. The term e-government that emerged from this has subsequently been examined from the most diverse technical, political and semantic perspectives. The present work concentrates on two focal topics: • The first focal topic deals with the design of a hierarchical architecture model, for which seven hierarchical layers can be identified. These appear necessary, but also sufficient, to describe the general case. The background for this is provided by many years of process and administrative experience as head of the IT department of the city administration of Landshut, an independent city with around 69,000 inhabitants to the northeast of Munich. It is representative of many administrative procedures in the Federal Republic of Germany and yet, as an object of analysis, remains manageable in its overall complexity and number of processes. Thus, static and dynamic structures can be extracted from the analysis of all core procedures and modelled abstractly. The focus lies on the representation of the existing citizen-service workflows in a municipality. The transformation of service requests in a hierarchical system, the representation of the control and operation states in all layers, as well as the strategy for error detection and error recovery create a transparent basis for comprehensive restructuring and optimisation. FMC-eCS was used for the modelling, a methodology developed at the Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik GmbH (HPI) in the Communication Systems group for modelling discrete-state systems while taking possible inconsistencies into account (supervisor: Prof. Dr.-Ing. Werner Zorn [ZW07a, ZW07b]). • The second focal topic is devoted to the quantitative modelling and optimisation of e-government service systems, which were carried out using the example of the citizens' office of the city of Landshut in the period 2008 to 2015. This is done on the basis of continuous operational data collection with elaborate preprocessing to extract mathematically describable probability distributions. The duty roster developed from this was verified in permanent real-world operation with regard to the achievable optimisations.
[ZW07a] Zorn, Werner: «FMC-QE A New Approach in Quantitative Modeling», talk given at MSV'07 - The 2007 International Conference on Modeling, Simulation and Visualization Methods, WorldComp2007, Las Vegas, 28.6.2007. [ZW07b] Zorn, Werner: «FMC-QE, A New Approach in Quantitative Modeling», publication, Hasso-Plattner-Institut f{\"u}r Softwaresystemtechnik an der Universit{\"a}t Potsdam, 28.6.2007.}, language = {de} } @phdthesis{Fuhrmann2018, author = {Fuhrmann, Saskia}, title = {Physiologically-based pharmacokinetic and mechanism-based pharmacodynamic modelling of monoclonal antibodies with a focus on tumour targeting}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418861}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 171}, year = {2018}, abstract = {Monoclonal antibodies (mAbs) are an innovative group of drugs with increasing clinical importance in oncology, combining high specificity with generally low toxicity. There are, however, numerous challenges associated with the development of mAbs as therapeutics. Mechanistic understanding of factors that govern the pharmacokinetics (PK) of mAbs is critical for drug development and the optimisation of effective therapies; in particular, adequate dosing strategies can improve patients' quality of life and lower drug costs. Physiologically-based PK (PBPK) models offer a physiological and mechanistic framework, which is of advantage in the context of animal-to-human extrapolation. Unlike for small molecule drugs, however, there is no consensus on how to model mAb disposition in a PBPK context. Current PBPK models for mAb PK hugely vary in their representation of physiology and parameterisation. Their complexity poses a challenge for their applications, e.g., translating knowledge from animal species to humans. In this thesis, we developed and validated a consensus PBPK model for mAb disposition taking into account recent insights into mAb distribution (antibody biodistribution coefficients and interstitial immunoglobulin G (IgG) pharmacokinetics) to predict tissue PK across several pre-clinical species and humans based on plasma data only. The model allows a priori prediction of target-independent (unspecific) mAb disposition processes as well as mAb disposition in concentration ranges for which the unspecific clearance (CL) dominates target-mediated CL processes. This is often the case for mAb therapies at steady-state dosing. The consensus PBPK model was then used and refined to address two important problems: 1) Immunodeficient mice are crucial models to evaluate mAb efficacy in cancer therapy. Protection from elimination by binding to the neonatal Fc receptor is known to be a major pathway influencing the unspecific CL of both endogenous and therapeutic IgG. The concentration of endogenous IgG, however, is reduced in immunodeficient mouse models, and this effect on unspecific mAb CL is unknown, yet of great importance for the extrapolation to humans in the context of mAb cancer therapy. 2) The distribution of mAbs into solid tumours is of great interest. To comprehensively investigate mAb distribution within tumour tissue and its implications for therapeutic efficacy, we extended the consensus PBPK model by a detailed tumour distribution model incorporating a cell-level model for mAb-target interaction. We studied the impact of variations in the tumour microenvironment on therapeutic efficacy and explored the plausibility of different mechanisms of action in mAb cancer therapy.
The mathematical findings and observed phenomena shed new light on therapeutic utility and dosing regimens in mAb cancer treatment.}, language = {en} } @phdthesis{Schroeder2015, author = {Schr{\"o}der, Sarah}, title = {Modelling surface evolution coupled with tectonics}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90385}, school = {Universit{\"a}t Potsdam}, pages = {viii, 129}, year = {2015}, abstract = {This study presents the development of 1D and 2D Surface Evolution Codes (SECs) and their coupling to any lithospheric-scale (thermo-)mechanical code with a quadrilateral structured surface mesh. Both SECs use diffusion as the approach for hillslope processes and the stream power law to reflect riverbed incision. The 1D SEC settles sediment that was produced by fluvial incision in the appropriate minimum, while the supply-limited 2D SEC DANSER uses a fast filling algorithm to model sedimentation. It is based on a cellular automaton. A slope-dependent factor in the sediment flux extends the diffusion equation to nonlinear diffusion. The discharge accumulation is achieved with the D8-algorithm and an improved drainage accumulation routine. Lateral incision enhances the modelling of incision. Following empirical laws, it incises channels several cells wide. The coupling method enables different temporal and spatial resolutions of the SEC and the thermo-mechanical code. It transfers vertical as well as horizontal displacements to the surface model. A weighted smoothing of the 3D surface displacements is implemented. The smoothed displacement vectors transmit the deformation by bilinear interpolation to the surface model. These interpolation methods ensure mass conservation in both directions and prevent the two surfaces from drifting apart. The presented applications refer to the evolution of the Pamir orogen. A calibration of DANSER's parameters with geomorphological data and a DEM as initial topography highlights the advantage of lateral incision. Preserving the channel width and reflecting incision peaks in narrow channels, this closes the huge gap between current orogen-scale incision models and observed topographies. River capturing models in a system of fault-bounded block rotations reaffirm the importance of the lateral incision routine for capturing events with channel initiation. The models show a low probability of river capturing events with large deflection angles. While the probability of river capturing directly depends on the uplift rate, the erodibility inside a dip-slip fault speeds up headward erosion along the fault: the model's capturing speed increases within a fault. Coupling DANSER with the thermo-mechanical code SLIM 3D emphasizes the versatility of the SEC. While DANSER has a minor influence on the lithospheric evolution of an indenter model, the brittle surface deformation is strongly affected by its sedimentation, widening a basin between two forming orogens as well as the southern part of the southern orogen to the south, east and west.}, language = {en} } @phdthesis{Lischke2015, author = {Lischke, Betty}, title = {Food web regulation under different forcing regimes in shallow lakes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89149}, school = {Universit{\"a}t Potsdam}, pages = {131}, year = {2015}, abstract = {The standing stock and production of organismal biomass depend strongly on the organisms' biotic environment, which arises from trophic and non-trophic interactions among them.
The trophic interactions between the different groups of organisms form the food web of an ecosystem, with the autotrophic and bacterial production at the basis and potentially several levels of consumers on top of the producers. Feeding interactions can regulate communities either by severe grazing pressure or by shortage of resources or prey production, termed top-down and bottom-up control, respectively. The limitations of all communities converge in the regulation of the food web, which is subject to abiotic and biotic forcing regimes arising from external and internal constraints. This dissertation presents the effects of alterations in two abiotic, external forcing regimes: terrestrial matter input and long-lasting low temperatures in winter. Diverse methodological approaches, namely a complex ecosystem model study and the analysis of two whole-lake measurements, were performed to investigate the effects on food web regulation and the resulting consequences at the species, community and ecosystem scale. Thus, all types of organisms, autotrophs and heterotrophs, at all trophic levels were investigated to gain a comprehensive overview of the effects of the two altered forcing regimes. In addition, an extensive evaluation of the trophic interactions and resulting carbon fluxes along the pelagic and benthic food web was performed to display the efficiencies of the trophic energy transfer within the food webs. All studies were conducted in shallow lakes, which are the most abundant lake type worldwide. The specific morphology of shallow lakes allows the benthic production to contribute substantially to the whole-lake production. Further, as shallow lakes are often small, they are especially sensitive to both changes in the input of terrestrial organic matter and changes in atmospheric temperature. Another characteristic of shallow lakes is their occurrence in alternative stable states. They are either in a clear-water or turbid state, where macrophytes and phytoplankton dominate, respectively. Both states can stabilize themselves through various mechanisms. These two alternative states and stabilizing mechanisms are integrated in the complex ecosystem model PCLake, which was used to investigate the effects of enhanced terrestrial particulate organic matter (t-POM) input to lakes. The food web regulation was altered via three distinct pathways: (1) Zoobenthos received more food and increased in biomass, which favored benthivorous fish, which in turn reduced the available light due to bioturbation. (2) Zooplankton replaced autochthonous organic matter in their diet with suspended t-POM; thus, the autochthonous organic matter remaining in the water reduced its transparency. (3) T-POM became suspended in the water and directly reduced the available light. As macrophytes are more light-sensitive than phytoplankton, they suffered the most from the lower transparency. Consequently, the resilience of the clear-water state was reduced by enhanced t-POM inputs, which makes the turbid state more likely at a given nutrient concentration. In two subsequent winters, long-lasting low temperatures and a concurrently long duration of ice coverage were observed, which resulted in low overall adult fish biomasses in the two study lakes, Schulzensee and Gollinsee, characterized by having and not having submerged macrophytes, respectively. Before the partial winterkill of fish, Schulzensee supported a higher proportion of piscivorous fish than Gollinsee.
However, the partial winterkill of fish aligned both communities, as piscivorous fish are more sensitive to low oxygen concentrations. Young-of-the-year fish benefitted greatly from the absence of adult fish due to the lower predation pressure. Therefore, they could exert a strong top-down control on crustaceans, which restructured the entire zooplankton community, leading to low crustacean biomasses and a community composition characterized by copepodites and nauplii. As a result, ciliates were released from top-down control, increased to high biomasses compared to lakes of various trophic states and depths, and dominated the zooplankton community. Being very abundant in the study lakes and having the highest weight-specific grazing rates among the zooplankton, ciliates potentially exerted a strong top-down control on small phytoplankton and particle-attached bacteria. This resulted in a higher proportion of large phytoplankton compared to other lakes. Additionally, the phytoplankton community was evenly distributed, presumably due to the numerous fast-growing and highly specific ciliate grazers. Although the pelagic food web was completely restructured after the subsequent partial winterkills of fish, both lakes were resistant to the effects of this forcing regime at the ecosystem scale. The consistently high predation pressure on phytoplankton prevented Schulzensee from switching from the clear-water to the turbid state. Further mechanisms that potentially stabilized the clear-water state were allelopathic effects of macrophytes and nutrient limitation in summer. The pelagic autotrophic and bacterial production was transferred to animal consumers an order of magnitude more efficiently than the respective benthic production, despite the alterations of the food web structure after the partial winterkill of fish. Thus, the compiled mass-balanced whole-lake food webs suggested that the benthic bacterial and autotrophic production, which exceeded that of the pelagic habitat, was not used by animal consumers. This holds true even if the food quality, additional consumers such as ciliates, benthic protozoa and meiobenthos, the pelagic-benthic link, and the potential oxygen limitation of macrobenthos are considered. Therefore, low benthic efficiencies suggest that lakes are primarily pelagic systems, at least at the animal consumer level. Overall, this dissertation gives insights into the regulation of organism groups in the pelagic and benthic habitat at each trophic level under two different forcing regimes and displays the efficiency of the carbon transfer in both habitats. The results underline that alterations of external forcing regimes affect all hierarchical levels, including the ecosystem.}, language = {en} } @phdthesis{Becker2013, author = {Becker, Basil}, title = {Architectural modelling and verification of open service-oriented systems of systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70158}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Systems of Systems (SoS) have received a lot of attention recently. In this thesis we will focus on SoS that are built atop the techniques of Service-Oriented Architectures and thus combine the benefits and challenges of both paradigms. For this thesis we will understand SoS as ensembles of single autonomous systems that are integrated into a larger system, the SoS. The interesting fact about these systems is that the previously isolated systems are still maintained, improved and developed on their own.
Structural dynamics is an issue in SoS, as at every point in time systems can join and leave the ensemble. This and the fact that the cooperation among the constituent systems is not necessarily observable mean that we will consider these systems as open systems. Of course, the system has a clear boundary at each point in time, but this can only be identified by halting the complete SoS. However, halting a system of that size is practically impossible. Often SoS are combinations of software systems and physical systems. Hence, a failure in the software system can have a serious physical impact, which easily makes an SoS of this kind a safety-critical system. The contribution of this thesis is a modelling approach that extends OMG's SoaML and basically relies on collaborations and roles as an abstraction layer above the components. This will allow us to describe SoS at an architectural level. We will also give a formal semantics for our modelling approach, which employs hybrid graph-transformation systems. The modelling approach is accompanied by a modular verification scheme that will be able to cope with the complexity constraints implied by the SoS' structural dynamics and size. Building such autonomous systems as SoS without evolution at the architectural level, i.e. the adding and removing of components and services, is inadequate. Therefore, our approach directly supports the modelling and verification of evolution.}, language = {en} } @phdthesis{Polyvyanyy2012, author = {Polyvyanyy, Artem}, title = {Structuring process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59024}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {One can fairly adopt the ideas of Donald E. Knuth to conclude that process modeling is both a science and an art. Process modeling does have an aesthetic sense. Similar to composing an opera or writing a novel, process modeling is carried out by humans who undergo creative practices when engineering a process model. Therefore, the very same process can be modeled in a myriad of ways. Once modeled, processes can be analyzed by employing scientific methods. Usually, process models are formalized as directed graphs, with nodes representing tasks and decisions, and directed arcs describing temporal constraints between the nodes. Common process definition languages, such as Business Process Model and Notation (BPMN) and Event-driven Process Chain (EPC), allow process analysts to define models with arbitrarily complex topologies. The absence of structural constraints supports creativity and productivity, as there is no need to force ideas into a limited set of available structural patterns. Nevertheless, it is often preferable that models follow certain structural rules. A well-known structural property of process models is (well-)structuredness. A process model is (well-)structured if and only if every node with multiple outgoing arcs (a split) has a corresponding node with multiple incoming arcs (a join), and vice versa, such that the set of nodes between the split and the join induces a single-entry-single-exit (SESE) region; otherwise the process model is unstructured. The motivations for well-structured process models are manifold: (i) Well-structured process models are easier to lay out for visual representation, as their formalizations are planar graphs. (ii) Well-structured process models are easier for humans to comprehend.
(iii) Well-structured process models tend to have fewer errors than unstructured ones, and it is less probable to introduce new errors when modifying a well-structured process model. (iv) Well-structured process models are better suited for analysis, as many existing formal techniques are applicable only to well-structured process models. (v) Well-structured process models are better suited for efficient execution and optimization, e.g., when discovering independent regions of a process model that can be executed concurrently. Consequently, there are process modeling languages that encourage well-structured modeling, e.g., Business Process Execution Language (BPEL) and ADEPT. However, well-structured process modeling implies some limitations: (i) There exist processes that cannot be formalized as well-structured process models. (ii) There exist processes that, when formalized as well-structured process models, require a considerable duplication of modeling constructs. Rather than expecting well-structured modeling from the start, we advocate for the absence of structural constraints when modeling. Afterwards, automated methods can suggest, upon request and whenever possible, alternative formalizations that are "better" structured, preferably well-structured. In this thesis, we study the problem of automatically transforming process models into equivalent well-structured models. The developed transformations are performed under a strong notion of behavioral equivalence that preserves concurrency. The findings are implemented in a tool, which is publicly available.}, language = {en} } @phdthesis{Schuette2011, author = {Sch{\"u}tte, Moritz}, title = {Evolutionary fingerprints in genome-scale networks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57483}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Mathematical modeling of biological phenomena has experienced increasing interest since new high-throughput technologies give access to growing amounts of molecular data. These modeling approaches are especially suited to test hypotheses that are not yet experimentally accessible or to guide an experimental setup. One particular attempt investigates the evolutionary dynamics responsible for today's composition of organisms. Computer simulations either propose an evolutionary mechanism and thus reproduce a recent finding, or rebuild an evolutionary process in order to learn about its mechanism. The quest for evolutionary fingerprints in metabolic and gene-coexpression networks is the central topic of this cumulative thesis based on four published articles. An understanding of the actual origin of life will probably remain an insoluble problem. However, one can argue that after a first simple metabolism had evolved, the further evolution of metabolism occurred in parallel with the evolution of the sequences of the catalyzing enzymes. Indications of such a coevolution can be found when correlating the change in sequence between two enzymes with their distance on the metabolic network, which is obtained from the KEGG database. We observe that there exists a small but significant correlation, primarily between nearest neighbors. This indicates that enzymes catalyzing subsequent reactions tend to be descended from the same precursor. Since this correlation is relatively small, one can at least assume that, even if new enzymes are not "genetic children" of the preceding enzymes, they are certainly descended from one of the already existing ones. 
Following this hypothesis, we introduce a model of enzyme-pathway coevolution. By iteratively adding enzymes, this model explores the metabolic network in a manner similar to diffusion. With the implementation of a Gillespie-like algorithm, we are able to introduce a tunable parameter that controls the weight of sequence similarity when choosing a new enzyme. Furthermore, this method also defines a time difference between successive evolutionary innovations in terms of a new enzyme. Overall, these simulations generate putative time-courses of the evolutionary walk on the metabolic network. By a time-series analysis, we find that the acquisition of new enzymes appears in bursts, which are more pronounced when the influence of the sequence similarity is higher. This behavior strongly resembles punctuated equilibrium, which denotes the observation that new species also tend to appear in bursts rather than in a gradual manner. Thus, our model helps to establish a better understanding of punctuated equilibrium by giving a potential description at the molecular level. From the time-courses we also extract a tentative order of new enzymes, metabolites, and even organisms. The consistency of this order with previous findings provides evidence for the validity of our approach. While the sequence of a gene is directly subject to mutations, its expression profile might also change indirectly through evolutionary events in the cellular interplay. Gene coexpression data is readily accessible through microarray experiments and is commonly illustrated using coexpression networks, in which genes are nodes and are linked once they show a significant coexpression. Since the large number of genes makes an illustration of the entire coexpression network difficult, clustering helps to show the network on a metalevel. Various clustering techniques already exist. However, we introduce a novel one which maintains control over the cluster sizes and thus ensures proper visual inspection. An application of the method to Arabidopsis thaliana reveals that genes causing a severe phenotype often show a functional uniqueness in their network vicinity. This leads to 20 genes of so far unknown phenotype that are nevertheless suggested to be essential for plant growth. Of these, six indeed provoke such a severe phenotype, as shown by mutant analysis. By inspecting the degree distribution of the A.thaliana coexpression network, we identified two characteristics. The distribution deviates from the frequently observed power law by a sharp truncation that follows an over-representation of highly connected nodes. For a better understanding, we developed an evolutionary model which mimics the growth of a coexpression network by gene duplication, subject to a strong selection criterion, and by slight mutational changes in the expression profile. Despite the simplicity of our assumptions, we can reproduce the observed properties in A.thaliana as well as in E.coli and S.cerevisiae. The over-representation of high-degree nodes could be identified with mutually well-connected genes of similar functional families: zinc fingers (PF00096), flagella, and ribosomes, respectively. In conclusion, these four manuscripts demonstrate the usefulness of mathematical models and statistical tools as a source of new biological insight. 
While the clustering approach for gene coexpression data leads to the phenotypic characterization of so far unknown genes and thus supports genome annotation, our model approaches offer explanations for observed properties of the coexpression network and furthermore substantiate punctuated equilibrium as an evolutionary process through a deeper understanding of an underlying molecular mechanism.}, language = {en} } @phdthesis{Reusser2011, author = {Reusser, Dominik Edwin}, title = {Combining smart model diagnostics and effective data collection for snow catchments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52574}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Complete protection against flood risks by structural measures is impossible. Therefore, flood prediction is important for flood risk management. Good explanatory power of flood models requires a meaningful representation of bio-physical processes. Therefore, there is great interest in improving the process representation. Progress in hydrological process understanding is achieved through a learning cycle that includes, as a first step, a critical assessment of an existing model for a given catchment. The assessment will highlight deficiencies of the model, from which useful additional data requirements are derived, giving a guideline for new measurements. These new measurements may in turn lead to improved process concepts. The improved process concepts are finally summarized in an updated hydrological model. In this thesis I demonstrate such a learning cycle, focusing on the advancement of model evaluation methods and more cost-effective measurements. For a successful model evaluation, I propose that three questions should be answered: 1) When is a model reproducing observations in a satisfactory way? 2) If model results deviate, of what nature is the difference? 3) Which model components are most likely responsible for these differences? To answer the first two questions, I developed a new method to assess the temporal dynamics of model performance (TIGER - TIme series of Grouped Errors). This method is powerful in highlighting recurrent patterns of insufficient model behaviour over long simulation periods. I answered the third question with an analysis of the temporal dynamics of parameter sensitivity (TEDPAS). Calculating TEDPAS requires an efficient method for sensitivity analysis; I used the Fourier Amplitude Sensitivity Test, which has a smart sampling scheme. Combining the two methods TIGER and TEDPAS provided a powerful tool for model assessment. With WaSiM-ETH applied to the Weisseritz catchment as a case study, I found insufficient process descriptions for the snow dynamics and for the recession during dry periods in late summer and fall. Focusing on snow dynamics, the reasons for poor model performance can be a poor representation of snow processes in the model, poor data on snow cover, or both. To obtain an improved data set on snow cover, time series of snow height and temperatures were collected with a cost-efficient method based on temperature measurements at multiple levels at each location. An algorithm was developed to simultaneously estimate snow height and cold content from these measurements. Both snow height and cold content are relevant quantities for spring flood forecasting. Spatial variability was observed at the local and the catchment scale with an adjusted sampling design. 
At the local scale, samples were collected on two perpendicular transects of 60 m length and analysed with geostatistical methods. The range determined from fitted theoretical variograms was within the range of the sampling design for 80\% of the plots. No patterns were found that would explain the random variability and spatial correlation at the local scale. At the watershed scale, locations for the extensive field campaign were selected according to a stratified sampling design to capture the combined effects of elevation, aspect and land use. The snow height is mainly affected by the plot elevation. The expected influence of aspect and land use was not observed. To better understand the deficiencies of the snow module in WaSiM-ETH, a simple degree-day model following the same approach was checked for its capability to reproduce the data. The degree-day model was capable of explaining the temporal variability for plots with a continuous snow pack over the entire snow season, provided that parameters were estimated for individual plots. However, the processes described in the simple model are not sufficient to represent multiple accumulation-melt cycles, as observed for the lower catchment. Thus, the combined spatio-temporal variability at the watershed scale is not captured by the model. Further tests on improved concepts for the representation of snow dynamics at the Weißeritz are required. From the data, I suggest including at least rain-on-snow and redistribution by wind as additional processes to better describe the spatio-temporal variability. Alternatively, an energy-balance snow model could be tested. Overall, the proposed learning cycle is a useful framework for targeted model improvement. The advanced model diagnostics are valuable for identifying model deficiencies and guiding field measurements. The additional data collected throughout this work help to deepen the understanding of the processes in the Weisseritz catchment.}, language = {en} } @phdthesis{Huber2010, author = {Huber, Veronika Emilie Charlotte}, title = {Climate impact on phytoplankton blooms in shallow lakes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42346}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Lake ecosystems across the globe have responded to climate warming of recent decades. However, correctly attributing observed changes to altered climatic conditions is complicated by multiple anthropogenic influences on lakes. This thesis contributes to a better understanding of climate impacts on freshwater phytoplankton, which forms the basis of the food chain and decisively influences water quality. The analyses were, for the most part, based on a long-term data set of physical, chemical and biological variables of a shallow, polymictic lake in north-eastern Germany (M{\"u}ggelsee), which was subject to a simultaneous change in climate and trophic state during the past three decades. Data analysis included constructing a dynamic simulation model, implementing a genetic algorithm to parameterize models, and applying statistical techniques of classification tree and time-series analysis. Model results indicated that climatic factors and trophic state interactively determine the timing of the phytoplankton spring bloom (phenology) in shallow lakes. Under equally mild spring conditions, the phytoplankton spring bloom collapsed earlier under high than under low nutrient availability, due to a switch from a bottom-up driven to a top-down driven collapse. 
A novel approach to modeling phenology proved useful for assessing the timing of population peaks in an artificially forced zooplankton-phytoplankton system. Mimicking climate warming by lengthening the growing period advanced algal blooms and consequently also peaks in zooplankton abundance. Investigating the reasons for the contrasting development of cyanobacteria during two recent summer heat wave events revealed that anomalously hot weather did not always, as often hypothesized, promote cyanobacteria in the nutrient-rich lake studied. The seasonal timing and duration of heat waves determined whether critical thresholds of thermal stratification, decisive for cyanobacterial bloom formation, were crossed. In addition, the temporal patterns of heat wave events influenced the summer abundance of some zooplankton species, which, as predators, may serve as a buffer by suppressing phytoplankton bloom formation. This thesis adds to the growing body of evidence that lake ecosystems have strongly responded to climatic changes of recent decades. It reaches beyond many previous studies of climate impacts on lakes by focusing on underlying mechanisms and explicitly considering multiple environmental changes. Key findings show that climate impacts are more severe in nutrient-rich than in nutrient-poor lakes. Hence, to develop lake management plans for the future, limnologists need to seek a comprehensive, mechanistic understanding of the overlapping effects of the multi-faceted human footprint on aquatic ecosystems.}, language = {en} }