@phdthesis{Trautmann2022, author = {Trautmann, Tina}, title = {Understanding global water storage variations using model-data integration}, doi = {10.25932/publishup-56595}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-565954}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 141}, year = {2022}, abstract = {Climate change is one of the greatest challenges to humanity in this century, and its most noticeable consequences are expected to be impacts on the water cycle - in particular the distribution and availability of water, which is fundamental for all life on Earth. In this context, it is essential to better understand where and when water is available and what processes influence variations in water storages. While estimates of the overall terrestrial water storage (TWS) variations are available from the GRACE satellites, these represent the vertically integrated signal over all water stored in ice, snow, soil moisture, groundwater and surface water bodies. Therefore, complementary observational data and hydrological models are still required to determine the partitioning of the measured signal among different water storages and to understand the underlying processes. However, the application of large-scale observational data is limited by their specific uncertainties and their inability to measure certain water fluxes and storages. Hydrological models, on the other hand, vary widely in their structure and process representation, and rarely incorporate additional observational data to minimize uncertainties that arise from their simplified representation of the complex hydrologic cycle. In this context, this thesis aims to contribute to improving the understanding of global water storage variability by combining simple hydrological models with a variety of complementary Earth observation-based data. To this end, a model-data integration approach is developed, in which the parameters of a parsimonious hydrological model are calibrated against several observational constraints, including GRACE TWS, simultaneously, while taking into account each dataset's specific strengths and uncertainties. This approach is used to investigate three specific aspects that are relevant for modelling and understanding the composition of large-scale TWS variations. The first study focuses on Northern latitudes, where snow and cold-region processes define the hydrological cycle. While the study confirms previous findings that seasonal dynamics of TWS are dominated by the cyclic accumulation and melt of snow, it reveals that inter-annual TWS variations, in contrast, are determined by variations in liquid water storages. Additionally, it is found to be important to consider the impact of compensatory effects of spatially heterogeneous hydrological variables when aggregating the contribution of different storage components over large areas. Hence, the determinants of TWS variations are scale-dependent, and the underlying driving mechanisms cannot simply be transferred between spatial and temporal scales. These findings are also supported by the second study for global land areas beyond the Northern latitudes. This second study further identifies the considerable impact of how vegetation is represented in hydrological models on the partitioning of TWS variations. Using spatio-temporally varying fields of Earth observation-based data to parameterize vegetation activity not only significantly improves model performance, but also reduces parameter equifinality and process uncertainties.
Moreover, the representation of vegetation drastically changes the contribution of different water storages to overall TWS variability, emphasizing the key role of vegetation for water allocation, especially between sub-surface and delayed water storages. However, the study also identifies parameter equifinality regarding the decay of sub-surface and delayed water storages by either evapotranspiration or runoff, and thus emphasizes the need for further constraints on these processes. The third study focuses on the role of river water storage, in particular whether it is necessary to include computationally expensive river routing for model calibration and validation against the integrated GRACE TWS. The results suggest that river routing is not required for model calibration in such a global model-data integration approach, due to the larger influence of other observational constraints; instead, the determinability of certain model parameters and associated processes is identified as an issue of greater relevance. In contrast to model calibration, accounting for river water storage derived from routing schemes can already significantly improve modelled TWS compared to GRACE observations, and it should therefore be considered when evaluating models against GRACE data. Beyond these specific findings that contribute to improved understanding and modelling of large-scale TWS variations, this thesis demonstrates the potential of combining simple modelling approaches with diverse Earth observational data to improve model simulations, overcome inconsistencies of different observational data sets, and identify areas that require further research. These findings encourage future efforts to take advantage of the increasing number of diverse global observational data sets.}, language = {en} } @phdthesis{Muench2021, author = {M{\"u}nch, Steffen}, title = {The relevance of the aeolian transport path for the spread of antibiotic-resistant bacteria on arable fields}, doi = {10.25932/publishup-53608}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-536089}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 140}, year = {2021}, abstract = {The spread of antibiotic-resistant bacteria poses a globally increasing threat to public health care. The excessive use of antibiotics in animal husbandry can lead to the development of antibiotic resistances in the stables. Transmission through direct contact with animals and contamination of food has already been proven. The animals' excrement, combined with a binding material, enables a further potential path of spread into the environment if it is used as organic manure in agricultural landscapes. As most airborne bacteria are attached to particulate matter, the focus of this work is atmospheric dispersal via the dust fraction. Field measurements on arable lands in Brandenburg, Germany, and wind erosion studies in a wind tunnel were conducted to investigate the risk of a potential atmospheric dust-associated spread of antibiotic-resistant bacteria from agricultural soils fertilized with poultry manure. The focus was to (i) characterize the conditions for aerosolization and (ii) qualify and quantify dust emissions during agricultural operations and wind erosion. PM10 (particulate matter with an aerodynamic diameter smaller than 10 µm) emission factors and bacterial fluxes for poultry manure application and incorporation have not been reported before.
The manure's contribution to dust emissions depends on its water content, which is affected by the manure pretreatment (fresh, composted, stored, dried), as well as by the intensity of manure spreading from the manure spreader. During poultry manure application, PM10 emissions ranged between 0.05 kg ha-1 and 8.37 kg ha-1. For comparison, the subsequent land preparation contributed 0.35 - 1.15 kg ha-1 of PM10 emissions. Manure particles were still part of dust emissions, but they accounted for less than 1\% of total PM10 emissions due to the dilution of poultry manure in the soil after manure incorporation. Bacterial emissions of fecal origin were more relevant during manure application than during the subsequent manure incorporation, although PM10 emissions of manure incorporation were larger than PM10 emissions of manure application for the non-dried manure variants. Wind erosion leads to preferential detachment of manure particles from sandy soils when poultry manure has recently been incorporated. Sorting effects between the low-density organic particles of manure origin and the mineral soil particles were observed just above the threshold wind speed of 7 m s-1. Depending on the wind speed, potential erosion rates between 101 and 854 kg ha-1 were identified when 6 t ha-1 of poultry manure were applied. Microbial investigation showed that manure bacteria were detached more easily from the soil surface during wind erosion due to their attachment to manure particles. Although antibiotic-resistant bacteria (ESBL-producing E. coli) were still found in the poultry barns, no further contamination with them could be detected in the manure, the fertilized soils, or the dust generated by manure application, land preparation or wind erosion. Parallel studies of this project showed that storage of poultry manure for a few days (36 - 72 h) is sufficient to inactivate ESBL-producing E. coli. Other antibiotic-resistant bacteria, namely MRSA and VRE, were only found sporadically in the stables and not at all in the dust. Therefore, based on the results of this work, the risk of a potential infection by dust-associated antibiotic-resistant bacteria can be considered low.}, language = {en} } @phdthesis{Schmitz2023, author = {Schmitz, Se{\'a}n}, title = {Using low-cost sensors to gather high resolution measurements of air quality in urban environments and inform mobility policy}, doi = {10.25932/publishup-60105}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601053}, school = {Universit{\"a}t Potsdam}, pages = {180}, year = {2023}, abstract = {Air pollution has been a persistent global problem over the past several hundred years. While some industrialized nations have shown improvements in their air quality through stricter regulation, others have experienced declines as they rapidly industrialize. The WHO's 2021 update of its recommended air pollution limit values reflects the substantial impacts of pollutants such as NO2 and O3 on human health, as recent epidemiological evidence suggests considerable long-term health impacts of air pollution even at low concentrations. Alongside developments in our understanding of air pollution's health impacts, the new technology of low-cost sensors (LCS) has been taken up by both academia and industry as a new method for measuring air pollution.
Due primarily to their lower cost and smaller size, they can be used in a variety of applications, including in the development of higher resolution measurement networks, in source identification, and in measurements of air pollution exposure. While significant efforts have been made to accurately calibrate LCS with reference instrumentation and various statistical models, accuracy and precision remain limited by variable sensor sensitivity. Furthermore, standard procedures for calibration still do not exist, and most proprietary calibration algorithms are black-box, inaccessible to the public. This work seeks to expand the knowledge base on LCS in several different ways: 1) by developing an open-source calibration methodology; 2) by deploying LCS at high spatial resolution in urban environments to test their capability in measuring microscale changes in urban air pollution; 3) by connecting LCS deployments with the implementation of local mobility policies to provide policy advice on resultant changes in air quality. In a first step, it was found that LCS can be consistently calibrated with good performance against reference instrumentation using seven general steps: 1) assessing the raw data distribution, 2) cleaning data, 3) flagging data, 4) model selection and tuning, 5) model validation, 6) exporting final predictions, and 7) calculating associated uncertainty. By emphasizing the need for consistent reporting of details at each step, most crucially on model selection, validation, and performance, this work advanced the effort towards standardizing calibration methodologies. In addition, with the open-source publication of code and data for the seven-step methodology, advances were made towards reforming the largely black-box nature of LCS calibrations. With a transparent and reliable calibration methodology established, LCS were then deployed in various street canyons between 2017 and 2020. The performance of two types of LCS, metal oxide (MOS) and electrochemical (EC), in capturing expected patterns of urban NO2 and O3 pollution was evaluated. Results showed that calibrated concentrations from MOS and EC sensors matched general diurnal patterns in NO2 and O3 pollution measured using reference instruments. While MOS sensors proved unreliable for discerning differences among measured locations within the urban environment, the concentrations measured with calibrated EC sensors matched expectations from modelling studies on NO2 and O3 pollution distribution in street canyons. As such, it was concluded that LCS are appropriate for measuring urban air quality, including for assisting urban-scale air pollution model development, and can reveal new insights into air pollution in urban environments. To achieve the last goal of this work, two measurement campaigns were conducted in connection with the implementation of three mobility policies in Berlin. The first involved the construction of a pop-up bike lane on Kottbusser Damm in response to the COVID-19 pandemic, the second concerned the temporary implementation of a community space on B{\"o}ckhstrasse, and the last focused on the closure of a portion of Friedrichstrasse to all motorized traffic. In all cases, measurements of NO2 were collected before and after the measure was implemented to assess changes in air quality resulting from these policies. Results from the Kottbusser Damm experiment showed that the bike lane reduced the NO2 concentrations that cyclists were exposed to by 22 ± 19\%.
On Friedrichstrasse, the street closure reduced NO2 concentrations to the level of the urban background without worsening the air quality on side streets. These valuable results were communicated swiftly to partners in the city administration responsible for evaluating the policies' success and future, highlighting the ability of LCS to provide policy-relevant results. As LCS are a new technology, much is still to be learned about them and their value to academic research in the atmospheric sciences. Nevertheless, this work has advanced the state of the art in several ways. First, it contributed a novel open-source calibration methodology that can be used by LCS end-users for various air pollutants. Second, it strengthened the evidence base on the reliability of LCS for measuring urban air quality, finding through novel deployments in street canyons that LCS can be used at high spatial resolution to understand microscale air pollution dynamics. Last, it is the first of its kind to connect LCS measurements directly with mobility policies to understand their influences on local air quality, resulting in policy-relevant findings valuable for decision-makers. It serves as an example of the potential for LCS to expand our understanding of air pollution at various scales, as well as their ability to serve as valuable tools in transdisciplinary research.}, language = {en} } @phdthesis{Wendi2018, author = {Wendi, Dadiyorto}, title = {Recurrence Plots and Quantification Analysis of Flood Runoff Dynamics}, doi = {10.25932/publishup-43191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431915}, school = {Universit{\"a}t Potsdam}, pages = {114}, year = {2018}, abstract = {This paper introduces a novel measure to assess similarity between event hydrographs. It is based on Cross Recurrence Plots and Recurrence Quantification Analysis, which have recently gained attention in a range of disciplines when dealing with complex systems. The method attempts to quantify the event runoff dynamics and is based on the time-delay embedded phase space representation of discharge hydrographs. A phase space trajectory is reconstructed from the event hydrograph, and pairs of hydrographs are compared to each other based on the distance of their phase space trajectories. Time delay embedding allows the consideration of the multi-dimensional relationships between different points in time within the event. Hence, the temporal succession of discharge values is taken into account, such as the impact of the initial conditions on the runoff event. We provide an introduction to Cross Recurrence Plots and discuss their parameterization. An application example based on flood time series demonstrates how the method can be used to measure the similarity or dissimilarity of events, and how it can be used to detect events with rare runoff dynamics. It is argued that this method provides a more comprehensive approach to quantifying hydrograph similarity compared to conventional hydrological signatures.}, language = {en} } @phdthesis{Roezer2018, author = {R{\"o}zer, Viktor}, title = {Pluvial flood loss to private households}, doi = {10.25932/publishup-42991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429910}, school = {Universit{\"a}t Potsdam}, pages = {XXII, 109}, year = {2018}, abstract = {Today, more than half of the world's population lives in urban areas.
With a high density of population and assets, urban areas are not only the economic, cultural and social hubs of every society, but also highly susceptible to natural disasters. As a consequence of rising sea levels and an expected increase in extreme weather events caused by a changing climate, in combination with growing cities, flooding is an increasing threat to many urban agglomerations around the globe. To mitigate the destructive consequences of flooding, appropriate risk management and adaptation strategies are required. So far, flood risk management in urban areas has focused almost exclusively on river and coastal flooding. Often overlooked is the risk from small-scale rainfall-triggered flooding, in which the intensity of rainstorms exceeds the capacity of urban drainage systems, leading to immediate flooding. Referred to as pluvial flooding, this flood type, exclusive to urban areas, has caused severe losses in cities around the world. Without further intervention, losses from pluvial flooding are expected to increase in many urban areas due to an increase in impervious surfaces, compounded by aging drainage infrastructure and a projected increase in heavy precipitation events. While this requires the integration of pluvial flood risk into risk management plans, so far little is known about the adverse consequences of pluvial flooding due to a lack of both detailed data sets and studies on pluvial flood impacts. As a consequence, methods for reliably estimating pluvial flood losses, needed for pluvial flood risk assessment, are still missing. Therefore, this thesis investigates how pluvial flood losses to private households can be reliably estimated, based on an improved understanding of the drivers of pluvial flood loss. For this purpose, detailed data from pluvial flood-affected households were collected through structured telephone and web surveys following pluvial flood events in Germany and the Netherlands. Pluvial flood losses to households are the result of complex interactions between impact characteristics, such as the water depth, and a household's resistance as determined by its risk awareness, preparedness, emergency response, building properties and other influencing factors. Both exploratory analysis and machine-learning approaches were used to analyze differences in resistance and impacts between households and their effects on the resulting losses. The comparison of case studies showed that awareness of pluvial flooding among private households is quite low. Low awareness not only challenges the effective dissemination of early warnings, but was also found to influence the implementation of private precautionary measures. The latter were predominantly implemented by households with previous experience of pluvial flooding. Even cases where previous flood events affected a different part of the same city did not lead to an increase in the preparedness of the surveyed households, highlighting the need to account for small-scale variability in both impact and resistance parameters when assessing pluvial flood risk. While it was concluded that the combination of low awareness, ineffective early warning, and the fact that only a minority of buildings were adapted to pluvial flooding impaired the coping capacities of private households, the often low water levels still enabled households to mitigate or even prevent losses through a timely and effective emergency response.
These findings were confirmed by the detection of loss-influencing variables, showing that cases in which households were able to prevent any loss to the building structure are predominantly explained by resistance variables such as the household's risk awareness, while the degree of loss is mainly explained by impact variables. Based on the important loss-influencing variables detected, different flood loss models were developed. Similar to flood loss models for river floods, the empirical data from the preceding data collection were used to train flood loss models describing the relationship between impact and resistance parameters and the resulting loss to building structures. Different approaches were adapted from river flood loss models, using both models with water depth as the only predictor of building structure loss and models incorporating additional variables from the preceding variable detection routine. The high predictive errors of all compared models showed that point predictions are not suitable for estimating losses on the building level, as they severely impair the reliability of the estimates. For that reason, a new probabilistic framework based on Bayesian inference was introduced that is able to provide predictive distributions instead of single loss estimates. These distributions not only give a range of probable losses, but also provide information on how likely a specific loss value is, representing the uncertainty in the loss estimate. Using probabilistic loss models, it was found that the certainty and reliability of a loss estimate on the building level are determined not only by the use of additional predictors, as shown in previous studies, but also by the choice of response distribution defining the shape of the predictive distribution. Here, a mixture of a beta and a Bernoulli distribution, accounting for households that are able to prevent losses to their building's structure, was found to provide significantly more certain and reliable estimates than previous approaches using Gaussian or non-parametric response distributions. The successful model transfer and post-event application to estimate building structure loss in Houston, TX, caused by pluvial flooding during Hurricane Harvey confirmed previous findings and demonstrated the potential of the newly developed multi-variable beta model for future risk assessments. The highly detailed input data set, constructed from openly available data sources and containing over 304,000 affected buildings in Harris County, further showed the potential of data-driven, building-level loss models for pluvial flood risk assessment. In conclusion, pluvial flood losses to private households are the result of complex interactions between impact and resistance variables, which should be represented in loss models. The local occurrence of pluvial floods requires loss estimates at high spatial resolution, i.e. at the building level, where losses are variable and uncertainties are high. Therefore, probabilistic loss estimates describing the uncertainty of the estimate should be used instead of point predictions. While the performance of probabilistic models on the building level is mainly driven by the choice of response distribution, multi-variable models are recommended for two reasons: First, additional resistance variables improve the detection of cases in which households were able to prevent structural losses.
Second, the added variability of additional predictors provides a better representation of the uncertainties when loss estimates from multiple buildings are aggregated. This leads to the conclusion that data-driven probabilistic loss models on the building level allow for reliable loss estimation at an unprecedented level of detail, with a consistent quantification of uncertainties on all aggregation levels. This makes the presented approach suitable for a wide range of applications, from decision support in spatial planning to impact-based early warning systems.}, language = {en} } @phdthesis{Zeitz2022, author = {Zeitz, Maria}, title = {Modeling the future resilience of the Greenland Ice Sheet}, doi = {10.25932/publishup-56883}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-568839}, school = {Universit{\"a}t Potsdam}, pages = {x, 189}, year = {2022}, abstract = {The Greenland Ice Sheet is the second-largest mass of ice on Earth. Being almost 2000 km long, more than 700 km wide, and more than 3 km thick at the summit, it holds enough ice to raise global sea levels by 7 m if melted completely. Despite its massive size, it is particularly vulnerable to anthropogenic climate change: temperatures over the Greenland Ice Sheet have increased by more than 2.7 °C in the past 30 years, twice the increase in the global mean temperature. Consequently, the ice sheet has been significantly losing mass since the 1980s, and the rate of loss has increased sixfold since then. Moreover, it is one of the potential tipping elements of the Earth System, which might undergo irreversible change once a warming threshold is exceeded. This thesis aims at extending the understanding of the resilience of the Greenland Ice Sheet against global warming by analyzing processes and feedbacks relevant to its centennial to multi-millennial stability using ice sheet modeling. One of these feedbacks, the melt-elevation feedback, is driven by the rise of temperature with decreasing altitude: as the ice sheet melts, its thickness and surface elevation decrease, exposing the ice surface to warmer air and thus increasing the melt rates even further. The glacial isostatic adjustment (GIA) can partly mitigate this melt-elevation feedback, as the bedrock lifts in response to a decrease in ice load, forming the negative GIA feedback. In my thesis, I show that the interaction between these two competing feedbacks can lead to qualitatively different dynamical responses of the Greenland Ice Sheet to warming - from permanent loss to incomplete recovery, depending on the feedback parameters. My research shows that the interaction of those feedbacks can initiate self-sustained oscillations of the ice volume while the climate forcing remains constant. Furthermore, the increased surface melt changes the optical properties of the snow or ice surface, e.g. by lowering its albedo, which in turn enhances melt rates - a process known as the melt-albedo feedback. Process-based ice sheet models often neglect this melt-albedo feedback. To close this gap, I implemented a simplified version of the diurnal Energy Balance Model, a computationally efficient approach that can capture the first-order effects of the melt-albedo feedback, into the Parallel Ice Sheet Model (PISM).
Using the coupled model, I show in warming experiments that the melt-albedo feedback almost doubles the ice loss until the year 2300 under the low greenhouse gas emission scenario RCP2.6, compared to simulations where the melt-albedo feedback is neglected, and adds up to 58\% additional ice loss under the high emission scenario RCP8.5. Moreover, I find that the melt-albedo feedback dominates the ice loss until 2300, compared to the melt-elevation feedback. Another process that could influence the resilience of the Greenland Ice Sheet is the warming-induced softening of the ice and the resulting increase in flow. In my thesis, I show with PISM how the uncertainty in Glen's flow law impacts the simulated response to warming. In a flow-line setup at fixed climatic mass balance, the uncertainty in flow parameters leads to a range of ice loss comparable to the range caused by different warming levels. While I focus on fundamental processes, feedbacks, and their interactions in the first three projects of my thesis, I also explore the impact of specific climate scenarios on the sea level rise contribution of the Greenland Ice Sheet. To increase the carbon budget flexibility, some warming scenarios - while still staying within the limits of the Paris Agreement - include a temporary overshoot of global warming. I show that an overshoot by 0.4 °C increases the short-term and long-term ice loss from Greenland by several centimeters of sea-level equivalent. The long-term increase is driven by the warming at high latitudes, which persists even when global warming is reversed. This leads to a substantial long-term commitment of the sea level rise contribution from the Greenland Ice Sheet. Overall, in my thesis I show that the melt-albedo feedback is most relevant for the ice loss of the Greenland Ice Sheet on centennial timescales. In contrast, the melt-elevation feedback and its interplay with the GIA feedback become increasingly relevant on millennial timescales. All of these influence the resilience of the Greenland Ice Sheet against global warming, both in the near future and over the long term.}, language = {en} }