@phdthesis{Santuber2023, author = {Santuber, Joaquin}, title = {Designing for digital justice}, doi = {10.25932/publishup-60417}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604178}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 183}, year = {2023}, abstract = {At the beginning of 2020, with COVID-19, courts of justice worldwide had to move online to continue providing judicial services. Digital technologies materialized the court practices in ways unthinkable shortly before the pandemic, creating resonances with judicial and legal regulation, as well as frictions. A better understanding of the dynamics at play in the digitalization of courts is paramount for designing justice systems that serve their users better, ensure fair and timely dispute resolutions, and foster access to justice. Building on three major bodies of literature —e-justice, digitalization and organization studies, and design research— Designing for Digital Justice takes a nuanced approach to account for human and more-than-human agencies. Using a qualitative approach, I have studied in depth the digitalization of Chilean courts during the pandemic, specifically between April 2020 and September 2022. Leveraging a comprehensive source of primary and secondary data, I traced back the genealogy of the novel materializations of courts' practices structured by the possibilities offered by digital technologies. In five (5) case studies, I show in detail how the courts got to 1) work remotely, 2) host hearings via videoconference, 3) engage with users via social media (i.e., Facebook and Chat Messenger), 4) broadcast a show with judges answering questions from users via Facebook Live, and 5) record, stream, and upload judicial hearings to YouTube to fulfil the publicity requirement of criminal hearings. The digitalization of courts during the pandemic is characterized by a suspended normativity, which makes innovation possible yet presents risks. While digital technologies enabled the judiciary to provide services continuously, they also created the risk of displacing traditional judicial and legal regulation. Contributing to liminal innovation and digitalization research, Designing for Digital Justice theorizes four phases: 1) the pre-digitalization phase resulting in the development of regulation, 2) the hotspot of digitalization resulting in the extension of regulation, 3) the digital innovation redeveloping regulation (moving to a new, preliminary phase), and 4) the permanence of temporal practices displacing regulation. Contributing to design research, Designing for Digital Justice provides new possibilities for innovation in the courts, focusing on different levels to better address tensions generated by digitalization. Fellow researchers will find in these pages a sound theoretical advancement at the intersection of digitalization and justice with novel methodological references. Practitioners will benefit from the actionable governance framework, the Designing for Digital Justice Model, which provides three fields of possibilities for action to design better justice systems.
Only by taking into account digital, legal, and social factors can we design better systems that promote access to justice, the rule of law, and, ultimately, social peace.}, language = {en} } @phdthesis{Illien2023, author = {Illien, Luc}, title = {Time-dependent properties of the shallow subsurface}, doi = {10.25932/publishup-59936}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-599367}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 133}, year = {2023}, abstract = {The shallow Earth's layers are at the interplay of many physical processes: some are driven by atmospheric forcing (precipitation, temperature...), whereas others take their origins at depth, for instance, ground shaking due to seismic activity. These forcings cause the subsurface to continuously change its mechanical properties, thereby modulating the strength of the surface geomaterials and hydrological fluxes. Because our societies settle on and rely on the layers hosting these time-dependent properties, constraining the hydro-mechanical dynamics of the shallow subsurface is crucial for our future geographical development. One way to investigate the physical changes continuously occurring under our feet is through the inference of seismic velocity changes from ambient noise, a technique called seismic interferometry. In this dissertation, I use this method to monitor the evolution of groundwater storage and damage induced by earthquakes. Two research lines are investigated: the key controls of groundwater recharge in steep landscapes, and the predictability and duration of the transient physical properties caused by earthquake ground shaking. These two types of dynamics modulate each other and influence the velocity changes in ways that are challenging to disentangle. A part of my doctoral research also addresses this interaction. Seismic data from a range of field settings spanning several climatic conditions (wet to arid climates) in various earthquake-prone areas are considered. I constrain the obtained seismic velocity time series using simple physical models, independent datasets, geophysical tools and nonlinear analysis. Additionally, a methodological development is proposed to improve the time resolution of passive seismic monitoring.}, language = {en} } @phdthesis{Calitri2023, author = {Calitri, Francesca}, title = {Co-evolution of erosion rates, weathering and profile development in soil landscapes of hummocky ground moraines}, doi = {10.25932/publishup-60138}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601387}, school = {Universit{\"a}t Potsdam}, pages = {XXVII, 163, V}, year = {2023}, abstract = {Soil is today considered a non-renewable resource on a societal time scale, as the rate of soil loss is higher than that of soil formation. Soil formation is complex, can take several thousands of years and is influenced by a variety of factors, one of which is time. Oftentimes, there is the assumption of constant and progressive conditions for soil and/or profile development (i.e., steady state). In reality, for most soils, their (co-)evolution leads to a complex and irregular soil development in time and space characterised by "progressive" and "regressive" phases. Lateral transport of soil material (i.e., soil erosion) is one of the principal processes shaping the land surface and soil profile during "regressive" phases and one of the major environmental problems the world faces. Anthropogenic activities like agriculture can exacerbate soil erosion.
Thus, it is of vital importance to distinguish short-term soil redistribution rates (i.e., within decades) influenced by human activities from long-term natural rates. To do so, soil erosion (and denudation) rates can be determined by using a set of isotope methods that cover different time scales at the landscape level. With the aim of unravelling the co-evolution of weathering, soil profile development and lateral redistribution on a landscape level, we used plutonium-239+240 ($^{239+240}$Pu), beryllium-10 ($^{10}$Be, in situ and meteoric) and radiocarbon ($^{14}$C) to calculate short- and long-term erosion rates in two settings, i.e., a natural and an anthropogenic environment in the hummocky ground moraine landscape of the Uckermark, North-eastern Germany. The main research questions were: 1. How do long-term and short-term rates of soil redistributing processes differ? 2. Are rates calculated from in situ $^{10}$Be comparable to those calculated using meteoric $^{10}$Be? 3. How do soil redistribution rates (short- and long-term) in an agricultural and in a natural landscape compare to each other? 4. Are the soil patterns observed in northern Germany purely a result of past events (natural and/or anthropogenic) or are they embedded in ongoing processes? Erosion and deposition are reflected in a catena of soil profiles with no or almost no erosion on flat positions (hilltop), strong erosion on the mid-slope and accumulation of soil material at the toeslope position. These three characteristic process domains were chosen within the CarboZALF-D experimental site, characterised by intense anthropogenic activities. Likewise, a hydrosequence in an ancient forest was chosen for this study, being regarded as a catena strongly influenced by natural soil transport. The following main results were obtained using the above-mentioned range of isotope methods available to measure soil redistribution rates depending on the time scale needed (e.g., $^{239+240}$Pu, $^{10}$Be, $^{14}$C): 1. Short-term erosion rates are one order of magnitude higher than long-term rates in agricultural settings. 2. Both meteoric and in situ $^{10}$Be are suitable soil tracers to measure long-term soil redistribution rates, giving similar results in an anthropogenic environment for different landscape positions (e.g., hilltop, mid-slope, toeslope). 3. Short-term rates were extremely low/negligible in a natural landscape and very high in an agricultural landscape: -0.01 t ha$^{-1}$ yr$^{-1}$ (average value) and -25 t ha$^{-1}$ yr$^{-1}$, respectively. In contrast, long-term rates in the forested landscape are comparable to those calculated in the investigated agricultural area, with average values of -1.00 t ha$^{-1}$ yr$^{-1}$ and -0.79 t ha$^{-1}$ yr$^{-1}$. 4. Soil patterns observed in the forest might be due to human impact and activities that started after the first settlements in the region, earlier than previously postulated (between 4.5 and 6.8 kyr BP), and not a result of recent soil erosion. 5. Furthermore, long-term soil redistribution rates are similar regardless of the setting, meaning that past natural soil mass redistribution processes still overshadow present anthropogenic erosion processes. Overall, this study makes important contributions to deciphering the co-evolution of weathering, soil profile development and lateral redistribution in North-eastern Germany.
The multi-methodological approach used here can be further tested by applying it in a wider range of landscapes and geographic regions.}, language = {en} } @phdthesis{Kager2023, author = {Kager, Klara}, title = {Critical Research Needs in Lesson Study: Then, Now, and Looking Forward}, doi = {10.25932/publishup-60271}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-602711}, school = {Universit{\"a}t Potsdam}, pages = {iii, 252}, year = {2023}, abstract = {The collaboration-based professional development approach Lesson Study (LS), which has its roots in the Japanese education system, has gained international recognition over the past three decades and spread quickly throughout the world. LS is a collaborative approach to professional development (PD) that incorporates multiple characteristics that have been identified in the research literature as key to effective PD. Specifically, LS is a long-term process that consists of successive inquiry cycles; it is site-based and integrated into teachers' practice, encourages collaboration and reflection, places a strong emphasis on student learning, and typically involves external experts who support the process or offer additional insights. As LS integrates all these characteristics, it has rapidly gained international popularity since the turn of the 21st century and is currently being practiced in over 40 countries around the world. This international borrowing of the idea of LS into new national contexts has given rise to a research field that aims to investigate the effectiveness of LS for teacher learning as well as the circumstances and mechanisms that make LS effective in various settings around the world. Such research is important, as borrowing educational innovations and adapting them to new contexts can be a challenging process. Educational innovations that fail to deliver the expected outcomes tend to be abandoned prematurely, before they have been completely understood or a substantial research base has been established. In order to prevent LS from early abandonment, Lewis and colleagues outlined three critical research needs in 2006, not long after LS was initially introduced to the United States. These research needs included (1) developing a descriptive knowledge base on LS, (2) examining the mechanisms by which teachers learn through LS, and (3) using design-based research cycles to analyze and improve LS. This dissertation set out to take stock of the progress that has been made on these research needs over the past 20 years. The scoping review conducted for the framework of this dissertation indicates that, while a large and international knowledge base has been developed, the field has not yet produced reliable evidence of the effectiveness of LS. Based on the scoping review, this dissertation makes the case that Lewis et al.'s (2006) critical research needs should be updated. In order to do so, a number of limitations to the current knowledge base on LS need to be addressed. These limitations include (1) the frequent lack of comparable and replicable descriptions of the LS intervention in publications, (2) the incoherent use or lack of use of theoretical frameworks to explain teacher learning through LS, (3) the inconsistent use of terminology and concepts, and (4) the lack of scientific rigor in research studies and of established ways or tools to measure the effectiveness of LS.
This dissertation aims to advance the critical research needs in the field by examining the extent and nature of these limitations in three research studies. The focus of these studies lies on the LS stages of observation and reflection, as these stages have a high potential to facilitate teacher learning. The first study uses a mixed-methods design to examine how teachers at German primary schools reflect critically together. The study derives a theory-based definition of critical and collaborative reflection in order to re-frame the reflection element in LS. The second study, a systematic review of 129 articles on LS, assesses how transparent research articles are in reporting how teachers observed and reflected together. In addition, it investigates whether these articles provide any kind of theorization for the stages of observation and reflection. The third study proposes a conceptual model for the field of LS that is based on existing models of continuous professional development and research findings on team effectiveness and collaboration. The model describes the dimensions of input, mediating mechanisms, and outcomes in order to provide a conceptual grid for teachers' continuous professional development through LS.}, language = {en} } @phdthesis{Libon2023, author = {Libon, L{\´e}lia}, title = {Stability of magnesite in the Earth lower mantle: insight from high-pressure and high-temperature experiments}, doi = {10.25932/publishup-60461}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604616}, school = {Universit{\"a}t Potsdam}, pages = {ix, 114, xvi}, year = {2023}, abstract = {Carbonates carried in subducting slabs may play a major role in sourcing and storing carbon in the deep Earth's interior. Current estimates indicate that between 40 and 66 million tons of carbon per year enter subduction zones, but it is uncertain how much of it reaches the lower mantle. It appears that most of this carbon might be extracted from subducting slabs at the mantle wedge and only a limited amount continues deeper and eventually reaches the deep mantle. However, estimates of deeply subducted carbon range broadly from 0.0001 to 52 million tons of carbon per year. This disparity is primarily due to the limited understanding of the survival of carbonate minerals during their transport to deep mantle conditions. Indeed, carbon has very low solubility in mantle silicates and is therefore expected to be stored primarily in accessory phases such as carbonates. Among those carbonates, magnesite (MgCO$_3$), as a single phase, is the most stable under all mantle conditions. However, experimental investigation of the stability of magnesite in contact with SiO$_2$ at lower mantle conditions suggests that magnesite is stable only along a cold subducted slab geotherm. Furthermore, our understanding of magnesite's stability when interacting with more complex mantle silicate phases remains incomplete. In the first part of this dissertation, laser-heated diamond anvil cell and multi-anvil apparatus experiments were performed to investigate the stability of magnesite in contact with iron-bearing mantle silicates. Sub-solidus reactions, melting, decarbonation and diamond formation were examined from shallow to mid-lower mantle conditions (25 to 68 GPa; 1300 to 2000 K). Multi-anvil experiments at 25 GPa show the formation of carbonate-rich melt, bridgmanite, and stishovite, with melting occurring at a temperature corresponding to all geotherms except the coldest one.
In situ X-ray diffraction in laser-heated diamond anvil cell experiments shows crystallization of bridgmanite and stishovite, but no melt phase was detected in situ at high temperatures. To detect decarbonation phases such as diamond, Raman spectroscopy was used. Crystallization of diamonds is observed as a sub-solidus process even at temperatures relevant to, and lower than, the coldest slab geotherm (1350 K at 33 GPa). Data obtained from this work suggest that magnesite is unstable in contact with the surrounding peridotite mantle in the uppermost lower mantle. The presence of magnesite instead induces melting under oxidized conditions and/or fosters diamond formation under more reduced conditions, at depths of ∼700 km. Consequently, carbonates will be removed from carbonate-rich slabs at shallow lower mantle conditions, where subducted slabs can stagnate. Therefore, the transport of carbonate to greater depths will be restricted, supporting the presence of a barrier to carbon subduction at the top of the lower mantle. Moreover, the reduction of magnesite to form diamonds provides additional evidence that super-deep diamond crystallization is related to the reduction of carbonates or carbonate-rich melts. The second part of this dissertation presents the development of a portable laser-heating system optimized for X-ray emission spectroscopy (XES) or nuclear inelastic scattering (NIS) spectroscopy with signal collection at near 90°. The laser-heated diamond anvil cell is the only static pressure device that can replicate the pressures and temperatures of the Earth's lower mantle and core. The high temperatures are reached by using high-powered lasers focused on the sample contained between the diamond anvils. Moreover, diamonds' transparency to X-rays enables in situ X-ray spectroscopy measurements that can probe the sample under high-temperature and high-pressure conditions. Therefore, the development of portable laser-heating systems has brought high-pressure and high-temperature research with high-resolution X-ray spectroscopy techniques to synchrotron beamlines that do not have a dedicated, permanent laser-heating system. A general description of the system is provided, as well as details on the use of a parabolic mirror as a reflective imaging objective for on-axis laser heating and radiospectrometric temperature measurements with zero attenuation of incoming X-rays. The parabolic mirror improves the accuracy of temperature measurements, free from chromatic aberrations in a wide spectral range, and its perforation permits in situ X-ray measurements at synchrotron facilities. The parabolic mirror is a well-suited alternative to refractive objectives in laser-heating systems, which will facilitate future applications using CO$_2$ lasers.}, language = {en} } @phdthesis{Sakizloglou2023, author = {Sakizloglou, Lucas}, title = {Evaluating temporal queries over history-aware architectural runtime models}, doi = {10.25932/publishup-60439}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604396}, school = {Universit{\"a}t Potsdam}, pages = {v, 168}, year = {2023}, abstract = {In model-driven engineering, the adaptation of large software systems with dynamic structure is enabled by architectural runtime models. Such a model represents an abstract state of the system as a graph of interacting components. Every relevant change in the system is mirrored in the model and triggers an evaluation of model queries, which search the model for structural patterns that should be adapted.
This thesis focuses on a type of runtime model in which the expressiveness of the model and model queries is extended to capture past changes and their timing. These history-aware models and temporal queries enable more informed decision-making during adaptation, as they support the formulation of requirements on the evolution of the pattern that should be adapted. However, evaluating temporal queries during adaptation poses significant challenges. First, it implies the capability to specify and evaluate requirements on the structure, as well as the ordering and timing in which structural changes occur. Then, query answers have to reflect that the history-aware model represents the architecture of a system whose execution may be ongoing, and thus answers may depend on future changes. Finally, query evaluation needs to be adequately fast and memory-efficient despite the increasing size of the history---especially for models that are altered by numerous, rapid changes. The thesis presents a query language and a querying approach for the specification and evaluation of temporal queries. These contributions aim to cope with the challenges of evaluating temporal queries at runtime, a prerequisite for history-aware architectural monitoring and adaptation which has not been systematically treated by prior model-based solutions. The distinguishing features of our contributions are: the specification of queries based on a temporal logic which encodes structural patterns as graphs; the provision of formally precise query answers which account for timing constraints and ongoing executions; the incremental evaluation which avoids the re-computation of query answers after each change; and the option to discard history that is no longer relevant to queries. The query evaluation searches the model for occurrences of a pattern whose evolution satisfies a temporal logic formula. Therefore, besides model-driven engineering, another related research community is runtime verification. The approach differs from prior logic-based runtime verification solutions by supporting the representation and querying of structure via graphs and graph queries, respectively, which is more efficient for queries with complex patterns. We present a prototypical implementation of the approach and measure its speed and memory consumption in monitoring and adaptation scenarios from two application domains, with executions of increasing size. We assess scalability by a comparison to the state of the art from both related research communities. The implementation yields promising results, which pave the way for sophisticated history-aware self-adaptation solutions and indicate that the approach constitutes a highly effective technique for runtime monitoring on an architectural level.}, language = {en} } @phdthesis{Saatchi2023, author = {Saatchi, Mersa}, title = {Study on manufacturing of multifunctional bilayer systems}, doi = {10.25932/publishup-60196}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601968}, school = {Universit{\"a}t Potsdam}, pages = {116}, year = {2023}, abstract = {Layered structures are ubiquitous in nature and industrial products, in which individual layers can have different mechanical/thermal properties and functions, each independently contributing to the performance of the whole layered structure in its application. Tuning each layer affects the performance of the whole layered system. Pores are utilized in various disciplines where low density but large surface areas are demanded.
In addition, open and interconnected pores can act as transport channels for guest chemical molecules. The shape of the pores influences the compression behavior of the material. Moreover, introducing pores decreases the density and consequently the mechanical strength. To maintain a defined mechanical strength under various stresses, a porous structure can be reinforced by adding a reinforcing agent such as a fiber, filler or layered structure to bear the mechanical stress in the intended application. In this context, this thesis aimed to generate new functions in bilayer systems by combining layers having different moduli and/or porosity, and to develop suitable processing techniques to access these structures. Manufacturing processes for layered structures often employ organic solvents, mostly causing environmental pollution. In this regard, the bilayer structures studied here were manufactured by processes free of organic solvents. In this thesis, three bilayer systems were studied to answer the individual questions. First, while various methods of introducing pores in the melt phase are reported for one-layer constructs with simple geometry, can such methods be applied to a bilayer structure, giving two porous layers? This was addressed with Bilayer System 1. Two porous layers were obtained from melt-blending of two different polyurethanes (PU) and polyvinyl alcohol (PVA) in a co-continuous phase, followed by sequential injection molding and leaching of the PVA phase in deionized water. A porosity of 50 ± 5\% with a high interconnectivity was obtained, in which the pore sizes in both layers ranged from 1 µm to 100 µm with an average of 22 µm. The obtained pores were tailored by applying an annealing treatment at elevated temperatures of 110 °C and 130 °C, which allowed the porosity to be kept constant. The disadvantage of this system is that a maximum of 50\% porosity could be reached, and removal of the leaching material in the weld-line section of both layers is not guaranteed. Such a construct serves as a model for a bilayer porous structure for determining structure-property relationships with respect to the pore size, porosity and mechanical properties of each layer. This fabrication method is also applicable to complex geometries by designing a relevant mold for injection molding. Secondly, utilizing the scCO$_2$ foaming process at elevated temperature and pressure is considered a green manufacturing process. Employing this method as a post-treatment can alter the orientation history of the polymer chains created by previous fabrication methods. Can a bilayer structure be fabricated by a combination of sequential injection molding and the scCO$_2$ foaming process, in which a porous layer is supported by a compact layer? Such a construct (Bilayer System 2) was generated by sequential injection molding of a PCL (T$_m$ ≈ 58 °C) layer and a PLLA (T$_g$ ≈ 58 °C) layer. Soaking this structure in the autoclave with scCO$_2$ at T = 45 °C and P = 100 bar led to the selective foaming of the PCL with a porosity of 80\%, while the PLLA layer was kept compact. The scCO$_2$ autoclave led to the formation of a porous core and a skin layer in the PCL; however, the degree of crystallinity of the PLLA layer increased from 0 to 50\% at the defined temperature and pressure. The microcellular structure of the PCL as well as the degree of crystallinity of the PLLA were controlled by increasing the soaking time. Thirdly, micro/nanoscale wrinkles on surfaces alter surface-related properties.
Wrinkles form on the surface of a bilayer structure having a compliant substrate and a stiff thin film. However, the wrinkles reported so far were not reversible. Moreover, there are numerous examples of dynamic nano- and microscale wrinkles in nature, such as gecko foot hairs offering reversible adhesion and the self-cleaning ability of lotus leaves, which alters the hydrophobicity of the surface. It was envisioned to imitate this biomimetic function in the bilayer structure, where self-assembly on/off patterns would be realized on the surface of this construct. In summary, developing layered constructs having different properties/functions in the individual layers or exhibiting a new function as a consequence of the layered structure can give novel insights for designing layered constructs in various disciplines such as the packaging and transport industry, the aerospace industry and health technology.}, language = {en} } @phdthesis{Lindinger2023, author = {Lindinger, Jakob}, title = {Variational inference for composite Gaussian process models}, doi = {10.25932/publishup-60444}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604441}, school = {Universit{\"a}t Potsdam}, pages = {xi, 122}, year = {2023}, abstract = {Most machine learning methods provide only point estimates when queried to predict on new data. This is problematic when the data is corrupted by noise, e.g. from imperfect measurements, or when the queried data point is very different from the data that the machine learning model has been trained with. Probabilistic modelling in machine learning naturally equips predictions with corresponding uncertainty estimates, which allows a practitioner to incorporate information about measurement noise into the modelling process and to know when not to trust the predictions. A well-understood, flexible probabilistic framework is provided by Gaussian processes, which are ideal as building blocks of probabilistic models. They lend themselves naturally to the problem of regression, i.e., being given a set of inputs and corresponding observations and then predicting likely observations for new unseen inputs, and can also be adapted to many more machine learning tasks. However, exactly inferring the optimal parameters of such a Gaussian process model (in a computationally tractable manner) is only possible for regression tasks in small data regimes. Otherwise, approximate inference methods are needed, the most prominent of which is variational inference. In this dissertation we study models that are composed of Gaussian processes embedded in other models in order to make those more flexible and/or probabilistic. The first example are deep Gaussian processes, which can be thought of as a small network of Gaussian processes and which can be employed for flexible regression. The second model class that we study are Gaussian process state-space models. These can be used for time-series modelling, i.e., the task of being given a stream of data ordered by time and then predicting future observations. For both model classes the state-of-the-art approaches offer a trade-off between expressive models and computational properties (e.g. speed or convergence properties) and mostly employ variational inference. Our goal is to improve inference in both models by first getting a deep understanding of the existing methods and then, based on this, designing better inference methods. We achieve this by either exploring the existing trade-offs or by providing general improvements applicable to multiple methods.
We first provide an extensive background, introducing Gaussian processes and their sparse (approximate and efficient) variants. We continue with a description of the models under consideration in this thesis, deep Gaussian processes and Gaussian process state-space models, including detailed derivations and a theoretical comparison of existing methods. Then we start analysing deep Gaussian processes more closely: Trading off the properties (good optimisation versus expressivity) of state-of-the-art methods in this field, we propose a new variational inference based approach. We then demonstrate experimentally that our new algorithm leads to better calibrated uncertainty estimates than existing methods. Next, we turn our attention to Gaussian process state-space models, where we closely analyse the theoretical properties of existing methods. The understanding gained in this process leads us to propose a new inference scheme for general Gaussian process state-space models that incorporates effects on multiple time scales. This method is more efficient than previous approaches for long time series and outperforms its comparison partners on data sets in which effects on multiple time scales (fast and slowly varying dynamics) are present. Finally, we propose a new inference approach for Gaussian process state-space models that trades off the properties of state-of-the-art methods in this field. By combining variational inference with another approximate inference method, the Laplace approximation, we design an efficient algorithm that outperforms its comparison partners since it achieves better calibrated uncertainties.}, language = {en} } @phdthesis{Afifi2023, author = {Afifi, Haitham}, title = {Wireless In-Network Processing for Multimedia Applications}, doi = {10.25932/publishup-60437}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604371}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 233}, year = {2023}, abstract = {With the recent growth of sensors, cloud computing handles the data processing of many applications. Processing some of this data on the cloud raises, however, many concerns regarding, e.g., privacy, latency, or single points of failure. Alternatively, thanks to the development of embedded systems, smart wireless devices can share their computation capacity, creating a local wireless cloud for in-network processing. In this context, the processing of an application is divided into smaller jobs so that a device can run one or more jobs. The contribution of this thesis to this scenario is divided into three parts. In part one, I focus on wireless aspects, such as power control and interference management, for deciding which jobs to run on which node and how to route data between nodes. Hence, I formulate optimization problems and develop heuristic and meta-heuristic algorithms to allocate wireless and computation resources. Additionally, to deal with multiple applications competing for these resources, I develop a reinforcement learning (RL) admission controller to decide which application should be admitted. Next, I look into acoustic applications to improve wireless throughput by using microphone clock synchronization to synchronize wireless transmissions. In the second part, I work jointly with colleagues from the acoustic processing field to optimize both network and application (i.e., acoustic) qualities.
My contribution focuses on the network part, where I study the relation between acoustic and network qualities when selecting a subset of microphones for collecting audio data or selecting a subset of optional jobs for processing these data; too many microphones or too many jobs can degrade quality through unnecessary delays. Hence, I develop RL solutions to select the subset of microphones under network constraints when the speaker is moving, while still providing good acoustic quality. Furthermore, I show that autonomous vehicles carrying microphones improve the acoustic qualities of different applications. Accordingly, I develop RL solutions (single- and multi-agent ones) for controlling these vehicles. In the third part, I close the gap between theory and practice. I describe the features of my open-source framework used as a proof of concept for wireless in-network processing. Next, I demonstrate how to run some algorithms developed by colleagues from acoustic processing using my framework. I also use the framework for studying in-network delays (wireless and processing) using different distributions of jobs and network topologies.}, language = {en} } @phdthesis{Malchow2023, author = {Malchow, Anne-Kathleen}, title = {Developing an integrated platform for predicting niche and range dynamics}, doi = {10.25932/publishup-60273}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-602737}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 169}, year = {2023}, abstract = {Species are adapted to the environment they live in. Today, most environments are subjected to rapid global changes induced by human activity, most prominently land cover and climate changes. Such transformations can cause adjustments or disruptions in various eco-evolutionary processes. The repercussions of this can appear at the population level as shifted ranges and altered abundance patterns. This is where global change effects on species are usually detected first. To understand how eco-evolutionary processes act and interact to generate patterns of range and abundance and how these processes themselves are influenced by environmental conditions, spatially explicit models provide effective tools. They estimate a species' niche as the set of environmental conditions in which it can persist. However, the currently most commonly used models rely on static correlative associations that are established between a set of spatial predictors and observed species distributions. For this, they assume stationary conditions and are therefore unsuitable in contexts of global change. Better equipped are process-based models that explicitly implement algorithmic representations of eco-evolutionary mechanisms and evaluate their joint dynamics. These models have long been regarded as difficult to parameterise, but increased data availability and improved methods for data integration lessen this challenge. Hence, the goal of this thesis is to further develop process-based models, integrate them into a complete modelling workflow, and provide the tools and guidance for their successful application. With my thesis, I presented an integrated platform for spatially explicit eco-evolutionary modelling and provided a workflow for the inverse calibration of such models to observational data. In the first chapter, I introduced RangeShiftR, a software tool that implements an individual-based modelling platform for the statistical programming language R.
Its open-source licensing, extensive help pages and available tutorials make it accessible to a wide audience. In the second chapter, I demonstrated a comprehensive workflow for the specification, calibration and validation of RangeShiftR using the example of the red kite in Switzerland. The integration of heterogeneous data sources, such as literature and monitoring data, allowed the model to be successfully calibrated. It was then used to make validated, spatio-temporal predictions of future red kite abundance. The presented workflow can be adapted to any study species if data is available. In the third chapter, I extended RangeShiftR to directly link demographic processes to climatic predictors. This allowed me to explore the climate-change responses of eight Swiss breeding birds in more detail. Specifically, the model could identify the most influential climatic predictors, delineate areas of projected demographic suitability, and attribute current population trends to contemporary climate change. My work shows that the application of complex, process-based models in conservation-relevant contexts is feasible, utilising available tools and data. Such models can be successfully calibrated and outperform other currently used modelling approaches in terms of predictive accuracy. Their projections can be used to predict future abundances or to assess alternative conservation scenarios. They further improve our mechanistic understanding of niche and range dynamics under climate change. However, only fully mechanistic models that include all relevant processes allow the effects of single processes on observed abundances to be precisely disentangled. In this respect, the RangeShiftR model still has potential for further extensions that implement missing influential processes, such as species interactions. Dynamic, process-based models are needed to adequately model a dynamic reality. My work contributes towards the advancement, integration and dissemination of such models. This will facilitate numeric, model-based approaches for species assessments, generate ecological insights and strengthen the reliability of predictions on large spatial scales under changing conditions.}, language = {en} }