@phdthesis{Vogel2018, author = {Vogel, Thomas}, title = {Model-driven engineering of self-adaptive software}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409755}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 357}, year = {2018}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls the underlying adaptable software by a feedback loop. State-of-the-art approaches prescribe the feedback loop in terms of the number of feedback loops, how the activities (e.g., monitor, analyze, plan, and execute (MAPE)) and the knowledge are structured into a feedback loop, and the type of knowledge. Moreover, the feedback loop is usually hidden in the implementation or framework and therefore not visible in the architectural design. Additionally, an adaptation engine often employs runtime models that either represent the adaptable software or capture strategic knowledge such as reconfiguration strategies. State-of-the-art approaches do not systematically address the interplay of such runtime models, which would otherwise allow developers to freely design the entire feedback loop. This thesis presents ExecUtable RuntimE MegAmodels (EUREMA), an integrated model-driven engineering (MDE) solution that rigorously uses models for engineering feedback loops. EUREMA provides a domain-specific modeling language to specify and an interpreter to execute feedback loops. The language allows developers to freely design a feedback loop concerning the activities and runtime models (knowledge) as well as the number of feedback loops. It further supports structuring the feedback loops in an adaptation engine that follows a layered architectural style. Thus, EUREMA makes the feedback loops explicit in the design and enables developers to reason about design decisions. To address the interplay of runtime models, we propose the concept of a runtime megamodel, which is a runtime model that contains other runtime models as well as activities (e.g., MAPE) working on the contained models. This concept is the underlying principle of EUREMA. The resulting EUREMA (mega)models are kept alive at runtime and are directly executed by the EUREMA interpreter to run the feedback loops. Interpretation provides the flexibility to dynamically adapt a feedback loop. In this context, EUREMA supports engineering self-adaptive software in which feedback loops run independently or in a coordinated fashion within the same layer as well as on top of each other in different layers of the adaptation engine. Moreover, we consider preliminary means to evolve self-adaptive software by providing a maintenance interface to the adaptation engine. This thesis discusses EUREMA in detail by applying it to different scenarios such as single, multiple, and stacked feedback loops for self-repairing and self-optimizing the mRUBiS application. Moreover, it investigates the design and expressiveness of EUREMA, reports on experiments with a running system (mRUBiS) and with alternative solutions, and assesses EUREMA with respect to quality attributes such as performance and scalability.
The evaluation provides evidence that EUREMA, as an integrated and open MDE approach for engineering self-adaptive software, seamlessly integrates the development and runtime environments using the same formalism to specify and execute feedback loops, supports the dynamic adaptation of feedback loops in layered architectures, and achieves an efficient execution of feedback loops by leveraging incrementality.}, language = {en} } @phdthesis{Ramos2018, author = {Ramos, Catalina}, title = {Structure and petrophysical properties of the Southern Chile subduction zone along 38.25°S from seismic data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409183}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 111}, year = {2018}, abstract = {Active and passive source data from two seismic experiments within the interdisciplinary project TIPTEQ (from The Incoming Plate to mega Thrust EarthQuake processes) were used to image and identify the structural and petrophysical properties (such as P- and S-velocities, Poisson's ratios, pore pressure, density and amount of fluids) within the Chilean seismogenic coupling zone at 38.25°S, where in 1960 the largest earthquake ever recorded (Mw 9.5) occurred. Two S-wave velocity models calculated using traveltime and noise tomography techniques were merged with an existing velocity model to obtain a 2D S-wave velocity model, which gathered the advantages of each individual model. In a following step, P- and S-reflectivity images of the subduction zone were obtained using different pre-stack and post-stack depth migration techniques. Among them, the recent pre-stack line-drawing depth migration scheme yielded revealing results. Next, synthetic seismograms modelled using the reflectivity method made it possible, through their input 1D synthetic P- and S-velocities, to infer the composition and rocks within the subduction zone. Finally, an image of the subduction zone is given, jointly interpreting the results from this work with results from other studies. The Chilean seismogenic coupling zone at 38.25°S shows a continental crust with highly reflective horizontal as well as (steep) dipping events. Among them, the Lanalhue Fault Zone (LFZ), which is interpreted to be east-dipping, is imaged to very shallow depths. Some steep reflectors are observed for the first time, for example one near the coast related to high seismicity, and another one near the LFZ. Steep shallow reflectivity towards the volcanic arc could be related to a steep west-dipping reflector interpreted as fluids and/or melts, migrating upwards due to material recycling in the continental mantle wedge. The high resolution of the S-velocity model in the first kilometres allowed the identification of several sedimentary basins, characterized by very low P- and S-velocities, high Poisson's ratios and possible steep reflectivity. Such high Poisson's ratios are also observed within the oceanic crust, which reaches the seismogenic zone hydrated due to bending-related faulting. It is interpreted to release water until reaching the coast and under the continental mantle wedge. In terms of seismic velocities, the inferred composition and rocks in the continental crust are in agreement with field geology observations at the surface along the profile. Furthermore, there is no requirement to call on the existence of measurable amounts of present-day fluids above the plate interface in the continental crust of the Coastal Cordillera and the Central Valley in this part of the Chilean convergent margin.
A large-scale anisotropy in the continental crust and upper mantle, previously proposed from magnetotelluric studies, is also proposed here on the basis of seismic velocities. However, quantitative studies on this topic in the continental crust of the Chilean seismogenic zone at 38.25°S do not exist to date.}, language = {en} } @phdthesis{Behrens2018, author = {Behrens, Ricarda}, title = {Causes for slow weathering and erosion in the steep, warm, monsoon-subjected Highlands of Sri Lanka}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408503}, school = {Universit{\"a}t Potsdam}, pages = {ix, 107, XXIV}, year = {2018}, abstract = {In the Highlands of Sri Lanka, erosion and chemical weathering rates are among the lowest for global mountain denudation. In this tropical humid setting, highly weathered deep saprolite profiles have developed from high-grade metamorphic charnockite during spheroidal weathering of the bedrock. The spheroidal weathering produces rounded corestones and spalled rindlets at the rock-saprolite interface. I used detailed textural, mineralogical, chemical, and electron-microscopic (SEM, FIB, TEM) analyses to identify the factors limiting the rate of weathering front advance in the profile, the sequence of weathering reactions, and the underlying mechanisms. The first mineral attacked by weathering was found to be pyroxene, initiated by in situ Fe oxidation, followed by in situ biotite oxidation. Bulk dissolution of the primary minerals is best described by a dissolution-reprecipitation process, as, at the nm scale, no chemical gradients towards the mineral surface are observed but sharp structural boundaries are present. Only the local oxidation in pyroxene and biotite is better described by an ion-by-ion process. The first secondary phases are oxides and amorphous precipitates from which secondary minerals (mainly smectite and kaolinite) form. Only for biotite is a direct solid-state transformation to kaolinite likely. The initial oxidation of pyroxene and biotite takes place in locally restricted areas and is relatively fast: log J = -11 mol_min/(m² s). However, calculated corestone-scale mineral oxidation rates are comparable to corestone-scale mineral dissolution rates: log R = -13 mol_px/(m² s) and log R = -15 mol_bt/(m² s). The oxidation reaction results in a volume increase. Volumetric calculations suggest that this observed oxidation leads to the generation of porosity due to the formation of micro-fractures in the minerals and the bedrock, allowing for fluid transport and subsequent dissolution of plagioclase. At the scale of the corestone, this fracture reaction is responsible for the larger fractures that lead to spheroidal weathering and to the formation of rindlets. Since these fractures originate from the initial oxidation-induced volume increase, oxidation is the rate-limiting parameter for weathering to take place. The ensuing plagioclase weathering leads to the formation of high secondary porosity in the corestone over a distance of only a few cm and eventually to the final disaggregation of bedrock to saprolite. As oxidation is the first weathering reaction, the supply of O2 is a rate-limiting factor for chemical weathering. Hence, the supply of O2 and its consumption at depth connect processes at the weathering front with erosion at the surface in a feedback mechanism. The strength of the feedback depends on the relative weight of advective versus diffusive transport of O2 through the weathering profile. The feedback is stronger when diffusive transport dominates.
The low weathering rate ultimately depends on the transport of O2 through the whole regolith, and on lithological factors such as low bedrock porosity and the amount of Fe-bearing primary minerals. In this regard, the low-porosity charnockite with its low content of Fe(II)-bearing minerals impedes fast weathering reactions. Fresh weatherable surfaces are a prerequisite for chemical weathering. However, in the case of the charnockite found in the Sri Lankan Highlands, the only process that generates these surfaces is the fracturing induced by oxidation. Tectonic quiescence in this region and the low pre-anthropogenic erosion rate (attributed to a dense vegetation cover) minimize the rejuvenation of the thick and cohesive regolith column and lower weathering through the feedback with erosion.}, language = {en} } @phdthesis{Pufahl2018, author = {Pufahl, Luise}, title = {Modeling and executing batch activities in business processes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408013}, school = {Universit{\"a}t Potsdam}, pages = {xix, 163}, year = {2018}, abstract = {Business process automation improves organizations' efficiency in performing work. Therefore, a business process is first documented as a process model, which then serves as a blueprint for a number of process instances representing the execution of specific business cases. In existing business process management systems, process instances run independently from each other. However, in practice, instances are also collected in groups at certain process activities for a combined execution to improve the process performance. Currently, this so-called batch processing is executed manually or supported by external software. Only a few research proposals exist to explicitly represent and execute batch processing needs in business process models. These works also lack a comprehensive understanding of requirements. This thesis addresses the described issues by providing a basic concept, called batch activity. It allows an explicit representation of batch processing configurations in process models and provides a corresponding execution semantics, thereby easing automation. The batch activity groups different process instances based on their data context and can synchronize their execution over one or even multiple process activities. The concept is conceived based on a requirements analysis considering existing literature on batch processing from different domains and industry examples. Further, this thesis provides two extensions: First, a flexible batch configuration concept, based on event processing techniques, is introduced to allow runtime adaptations of batch configurations. Second, a concept for collecting and batching activity instances of multiple different process models is given. Thereby, the batch configuration is centrally defined, independently of the process models, which is especially beneficial for organizations with large process model collections. This thesis provides a technical evaluation as well as a validation of the presented concepts. A prototypical implementation in an existing open-source BPMS shows that with a few extensions, batch processing is enabled. Further, it demonstrates that the consolidated view of several work items in one user form can improve work efficiency. The validation, in which the batch activity concept is applied to different use cases in a simulated environment, implies cost savings for business processes when a suitable batch configuration is used.
For the validation, an extensible business process simulator was developed. It enables process designers to study the influence of a batch activity in a process with regard to its performance.}, language = {en} } @phdthesis{Ostrowski2018, author = {Ostrowski, Max}, title = {Modern constraint answer set solving}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407799}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2018}, abstract = {Answer Set Programming (ASP) is a declarative problem-solving approach, combining a rich yet simple modeling language with high-performance solving capabilities. Although this has already resulted in various applications, certain aspects of such applications are more naturally modeled using variables over finite domains, for example to account for resources, fine timings, coordinates, or functions. Our goal is thus to extend ASP with constraints over integers while preserving its declarative nature. This allows for fast prototyping and elaboration-tolerant problem descriptions of resource-related applications. The resulting paradigm is called Constraint Answer Set Programming (CASP). We present three different approaches for solving CASP problems. The first one, a lazy, modular approach, combines an ASP solver with an external system for handling constraints. This approach has the advantage that two state-of-the-art technologies work hand in hand to solve the problem, each concentrating on its part of the problem. The drawback is that inter-constraint dependencies cannot be communicated back to the ASP solver, impeding its learning algorithm. The second approach translates all constraints to ASP. Using the appropriate encoding techniques, this results in a very fast, monolithic system. Unfortunately, due to the large, explicit representation of constraints and variables, translation techniques are restricted to small and mid-sized domains. The third approach merges the lazy and the translational approach, combining the strengths of both while removing their weaknesses. To this end, we enhance the dedicated learning techniques of an ASP solver with the inferences of the translating approach in a lazy way. That is, the important knowledge is only made explicit when needed. By using state-of-the-art techniques from neighboring fields, we provide ways to tackle real-world, industrial-size problems. By extending CASP to reactive solving, we open up new application areas such as online planning with continuous domains and durations.}, language = {en} } @phdthesis{Smith2018, author = {Smith, Taylor}, title = {Decadal changes in the snow regime of High Mountain Asia, 1987-2016}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407120}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 142}, year = {2018}, abstract = {More than a billion people rely on water from rivers sourced in High Mountain Asia (HMA), a significant portion of which is derived from snow and glacier melt. Rural communities are heavily dependent on the consistency of runoff, and are highly vulnerable to shifts in their local environment brought on by climate change. Despite this dependence, the impacts of climate change in HMA remain poorly constrained due to poor process understanding, complex terrain, and insufficiently dense in-situ measurements. HMA's glaciers contain more frozen water than any region outside of the poles. Their extensive retreat is a highly visible and much-studied marker of regional and global climate change.
However, in many catchments, snow and snowmelt represent a much larger fraction of the yearly water budget than glacial meltwaters. Despite their importance, climate-related changes in HMA's snow resources have not been well studied. Changes in the volume and distribution of snowpack have complex and extensive impacts on both local and global climates. Eurasian snow cover has been shown to impact the strength and direction of the Indian Summer Monsoon -- which is responsible for much of the precipitation over the Indian Subcontinent -- by modulating earth-surface heating. Shifts in the timing of snowmelt have been shown to limit the productivity of major rangelands, reduce streamflow, modify sediment transport, and impact the spread of vector-borne diseases. However, a large-scale regional study of climate impacts on snow resources had yet to be undertaken. Passive Microwave (PM) remote sensing is a well-established empirical method of studying snow resources over large areas. Since 1987, there have been consistent daily global PM measurements, which can be used to derive an estimate of snow depth, and hence snow-water equivalent (SWE) -- the amount of water stored in snowpack. The SWE estimation algorithms were originally developed for flat and even terrain -- such as the Russian and Canadian Arctic -- and have rarely been used in complex terrain such as HMA. This dissertation first examines factors present in HMA that could impact the reliability of SWE estimates. Forest cover, absolute snow depth, long-term average wind speeds, and hillslope angle were found to be the strongest controls on SWE measurement reliability. While forest density and snow depth are factors accounted for in modern SWE retrieval algorithms, wind speed and hillslope angle are not. Despite uncertainty in absolute SWE measurements and differences in the magnitude of SWE retrievals between sensors, single-instrument SWE time series were found to be internally consistent and suitable for trend analysis. Building on this finding, this dissertation tracks changes in SWE across HMA using a statistical decomposition technique. An aggregate decrease in SWE was found (10.6 mm/yr), despite large spatial and seasonal heterogeneities. Winter SWE increased in almost half of HMA, despite general negative trends throughout the rest of the year. The elevation distribution of these negative trends indicates that while changes in SWE have likely impacted glaciers in the region, climate change impacts on these two pieces of the cryosphere are somewhat distinct. Following the discussion of relative changes in SWE, this dissertation explores changes in the timing of the snowmelt season in HMA using a newly developed algorithm. The algorithm is shown to accurately track the onset and end of the snowmelt season (70\% within 5 days of a control dataset, 89\% within 10). Using a 29-year time series, changes in the onset, end, and duration of snowmelt are examined. While nearly the entirety of HMA has experienced an earlier end to the snowmelt season, large regions of HMA have seen a later start to the snowmelt season. Snowmelt periods have also shortened in almost all of HMA, indicating that the snowmelt season is generally shortening and ending earlier across HMA. By examining shifts in both the spatio-temporal distribution of SWE and the timing of the snowmelt season across HMA, we provide a detailed accounting of changes in HMA's snow resources. The overall trend in HMA is towards less SWE storage and a shorter snowmelt season.
However, long-term and regional trends conceal distinct seasonal, temporal, and spatial heterogeneity, indicating that changes in snow resources are strongly controlled by local climate and topography, and that inter-annual variability plays a significant role in HMA's snow regime.}, language = {en} } @phdthesis{Siegmund2018, author = {Siegmund, Jonatan Frederik}, title = {Quantifying impacts of climate extreme events on vegetation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407095}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2018}, abstract = {Together with the gradual change of mean values, ongoing climate change is projected to increase the frequency and amplitude of temperature and precipitation extremes in many regions of Europe. The impacts of such, in most cases short-term, extraordinary climate situations on terrestrial ecosystems are a matter of central interest in recent climate change research, because it cannot be assumed per se that known dependencies between climate variables and ecosystems are linearly scalable. So far, however, there is a high demand for a method to quantify such impacts in terms of simultaneities of event time series. In the course of this manuscript, the new statistical approach of Event Coincidence Analysis (ECA) as well as its R implementation is introduced, a methodology that allows assessing whether or not two types of event time series exhibit similar sequences of occurrences. Applications of the method are presented, analyzing climate impacts on different temporal and spatial scales: the impact of extraordinary expressions of various climatic variables on tree stem variations (subdaily and local scale), the impact of extreme temperature and precipitation events on the flowering time of European shrub species (weekly and country scale), the impact of extreme temperature events on ecosystem health in terms of NDVI (weekly and continental scale), and the impact of El Ni{\~n}o and La Ni{\~n}a events on precipitation anomalies (seasonal and global scale). The applications presented in this thesis refine already known relationships based on classical methods and also deliver substantial new findings to the scientific community: the widely known positive correlation between flowering time and temperature, for example, is confirmed to be valid for the tails of the distributions, while the widely assumed positive dependency between stem diameter variation and temperature is shown not to be valid for very warm and very cold days. The larger-scale investigations underline the sensitivity of anthropogenically shaped landscapes towards temperature extremes in Europe and provide a comprehensive global ENSO impact map for strong precipitation events. Finally, by publishing the R implementation of the method, this thesis shall enable other researchers to further investigate similar research questions using Event Coincidence Analysis.}, language = {en} }