@article{Thieme1995, author = {Thieme, Siegfried}, title = {Agrarstruktureller Wandel im Land Brandenburg}, year = {1995}, language = {de} } @article{Thieme1993, author = {Thieme, Siegfried}, title = {Agrarstruktureller Wandel im Havel{\"a}ndischen Obstbaugebiet}, year = {1993}, language = {de} } @phdthesis{Bryant2024, author = {Bryant, Seth}, title = {Aggregation and disaggregation in flood risk models}, doi = {10.25932/publishup-65095}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-650952}, school = {Universit{\"a}t Potsdam}, pages = {ix, 116}, year = {2024}, abstract = {Floods continue to be the leading cause of economic damages and fatalities among natural disasters worldwide. As future climate and exposure changes are projected to intensify these damages, the need for more accurate and scalable flood risk models is rising. Over the past decade, macro-scale flood risk models have evolved from initial proof-of-concepts to indispensable tools for decision-making at global-, national- and, increasingly, the local-level. This progress has been propelled by the advent of high-performance computing and the availability of global, space-based datasets. However, despite such advancements, these models are rarely validated and consistently fall short of the accuracy achieved by high-resolution local models. While capabilities have improved, significant gaps persist in understanding the behaviours of such macro-scale models, particularly their tendency to overestimate risk. This dissertation aims to address such gaps by examining the scale transfers inherent in the construction and application of coarse macro-scale models. To achieve this, four studies are presented that, collectively, address exposure, hazard, and vulnerability components of risk affected by upscaling or downscaling. The first study focuses on a type of downscaling where coarse flood hazard inundation grids are enhanced to a finer resolution. 
While such inundation downscaling has been employed in numerous global model chains, ours is the first study to focus specifically on this component, providing an evaluation of the state of the art and a novel algorithm. Findings demonstrate that our novel algorithm is eight times faster than existing methods, offers a slight improvement in accuracy, and generates more physically coherent flood maps in hydraulically challenging regions. When applied to a case study, the algorithm generated a 4m resolution inundation map from 30m hydrodynamic model outputs in 33 s, a 60-fold improvement in runtime with a 25\% increase in RMSE compared with direct hydrodynamic modelling. All evaluated downscaling algorithms yielded better accuracy than the coarse hydrodynamic model when compared to observations, demonstrating similar limits of coarse hydrodynamic models reported by others. The substitution of downscaling into flood risk model chains, in place of high-resolution modelling, can drastically improve the lead time of impact-based forecasts and the efficiency of hazard map production. With downscaling, local regions could obtain high resolution local inundation maps by post-processing a global model without the need for expensive modelling or expertise. The second study focuses on hazard aggregation and its implications for exposure, investigating implicit aggregations commonly used to intersect hazard grids with coarse exposure models. This research introduces a novel spatial classification framework to understand the effects of rescaling flood hazard grids to a coarser resolution. The study derives closed-form analytical solutions for the location and direction of bias from flood grid aggregation, showing that bias will always be present in regions near the edge of inundation. For example, inundation area will be positively biased when water depth grids are aggregated, while volume will be negatively biased when water elevation grids are aggregated. 
Extending the analysis to effects of hazard aggregation on building exposure, this study shows that exposure in regions at the edge of inundation are an order of magnitude more sensitive to aggregation errors than hazard alone. Among the two aggregation routines considered, averaging water surface elevation grids better preserved flood depths at buildings than averaging of water depth grids. The study provides the first mathematical proof and generalizeable treatment of flood hazard grid aggregation, demonstrating important mechanisms to help flood risk modellers understand and control model behaviour. The final two studies focus on the aggregation of vulnerability models or flood damage functions, investigating the practice of applying per-asset functions to aggregate exposure models. Both studies extend Jensen's inequality, a well-known 1906 mathematical proof, to demonstrate how the aggregation of flood damage functions leads to bias. Applying Jensen's proof in this new context, results show that typically concave flood damage functions will introduce a positive bias (overestimation) when aggregated. This behaviour was further investigated with a simulation experiment including 2 million buildings in Germany, four global flood hazard simulations and three aggregation scenarios. The results show that positive aggregation bias is not distributed evenly in space, meaning some regions identified as "hot spots of risk" in assessments may in fact just be hot spots of aggregation bias. This study provides the first application of Jensen's inequality to explain the overestimates reported elsewhere and advice for modellers to minimize such artifacts. In total, this dissertation investigates the complex ways aggregation and disaggregation influence the behaviour of risk models, focusing on the scale-transfers underpinning macro-scale flood risk assessments. 
Extending a key finding of the flood hazard literature to the broader context of flood risk, this dissertation concludes that all else equal, coarse models overestimate risk. This dissertation goes beyond previous studies by providing mathematical proofs for how and where such bias emerges in aggregation routines, offering a mechanistic explanation for coarse model overestimates. It shows that this bias is spatially heterogeneous, necessitating a deep understanding of how rescaling may bias models to effectively reduce or communicate uncertainties. Further, the dissertation offers specific recommendations to help modellers minimize scale transfers in problematic regions. In conclusion, I argue that such aggregation errors are epistemic, stemming from choices in model structure, and therefore hold greater potential and impetus for study and mitigation. This deeper understanding of uncertainties is essential for improving macro-scale flood risk models and their effectiveness in equitable, holistic, and sustainable flood management.}, language = {en} } @article{IshizukaHickeyVargasArculusetal.2017, author = {Ishizuka, Osamu and Hickey-Vargas, Rosemary and Arculus, Richard J. and Yogodzinski, Gene M. and Savov, Ivan P. and Kusano, Yuki and McCarthy, Anders and Brandl, Philipp A. and Sudo, Masafumi}, title = {Age of Izu-Bonin-Mariana arc basement}, series = {Earth \& planetary science letters}, volume = {481}, journal = {Earth \& planetary science letters}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0012-821X}, doi = {10.1016/j.epsl.2017.10.023}, pages = {80--90}, year = {2017}, abstract = {Documenting the early tectonic and magmatic evolution of the Izu-Bonin-Mariana (IBM) arc system in the Western Pacific is critical for understanding the process and cause of subduction initiation along the current convergent margin between the Pacific and Philippine Sea plates. 
Forearc igneous sections provide firm evidence for seafloor spreading at the time of subduction initiation (52 Ma) and production of "forearc basalt". Ocean floor drilling (International Ocean Discovery Program Expedition 351) recovered basement-forming, low-Ti tholeiitic basalt crust formed shortly after subduction initiation but distal from the convergent margin (nominally reararc) of the future IBM arc (Amami Sankaku Basin: ASB). Radiometric dating of this basement gives an age range (49.3-46.8 Ma with a weighted average of 48.7 Ma) that overlaps that of basalt in the present-day IBM forearc, but up to 3.3 m.y. younger than the onset of forearc basalt activity. Similarity in age range and geochemical character between the reararc and forearc basalts implies that the ocean crust newly formed by seafloor spreading during subduction initiation extends from fore- to reararc of the present-day IBM arc. Given the age difference between the oldest forearc basalt and the ASB crust, asymmetric spreading caused by ridge migration might have taken place. This scenario for the formation of the ASB implies that the Mesozoic remnant arc terrane of the Daito Ridges comprised the overriding plate at subduction initiation. The juxtaposition of a relatively buoyant remnant arc terrane adjacent to an oceanic plate was more favourable for subduction initiation than would have been the case if both downgoing and overriding plates had been oceanic. (C) 2017 The Authors. 
Published by Elsevier B.V.}, language = {en} } @article{RamachandranRupakhetiLawrence2020, author = {Ramachandran, Srikanthan and Rupakheti, Maheswar and Lawrence, Mark}, title = {Aerosol-induced atmospheric heating rate decreases over South and East Asia as a result of changing content and composition}, series = {Scientific reports}, volume = {10}, journal = {Scientific reports}, number = {1}, publisher = {Macmillan Publishers Limited, part of Springer Nature}, address = {[London]}, issn = {2045-2322}, doi = {10.1038/s41598-020-76936-z}, pages = {17}, year = {2020}, abstract = {Aerosol emissions from human activities are extensive and changing rapidly over Asia. Model simulations and satellite observations indicate a dipole pattern in aerosol emissions and loading between South Asia and East Asia, two of the most heavily polluted regions of the world. We examine the previously unexplored diverging trends in the existing dipole pattern of aerosols between East and South Asia using the high quality, two-decade long ground-based time series of observations of aerosol properties from the Aerosol Robotic Network (AERONET), from satellites (Moderate Resolution Imaging Spectroradiometer (MODIS) and Ozone Monitoring Instrument (OMI)), and from model simulations (Modern-Era Retrospective Analysis for Research and Applications, version 2 (MERRA-2)). The data cover the period since 2001 for Kanpur (South Asia) and Beijing (East Asia), two locations taken as being broadly representative of the respective regions. Since 2010 a dipole in aerosol optical depth (AOD) is maintained, but the trend is reversed-the decrease in AOD over Beijing (East Asia) is rapid since 2010, being 17\% less in current decade compared to first decade of twenty-first century, while the AOD over South Asia increased by 12\% during the same period. Furthermore, we find that the aerosol composition is also changing over time. 
The single scattering albedo (SSA), a measure of aerosol's absorption capacity and related to aerosol composition, is slightly higher over Beijing than Kanpur, and has increased from 0.91 in 2002 to 0.93 in 2017 over Beijing and from 0.89 to 0.92 during the same period over Kanpur, confirming that aerosols in this region have on an average become more scattering in nature. These changes have led to a notable decrease in aerosol-induced atmospheric heating rate (HR) over both regions between the two decades, decreasing considerably more over East Asia (- 31\%) than over South Asia (- 9\%). The annual mean HR is lower now, it is still large (>= 0.6 K per day), which has significant climate implications. The seasonal trends in AOD, SSA and HR are more pronounced than their respective annual trends over both regions. The seasonal trends are caused mainly by the increase/decrease in anthropogenic aerosol emissions (sulfate, black carbon and organic carbon) while the natural aerosols (dust and sea salt) did not change significantly over South and East Asia during the last two decades. The MERRA-2 model is able to simulate the observed trends in AODs well but not the magnitude, while it also did not simulate the SSA values or trends well. These robust findings based on observations of key aerosol parameters and previously unrecognized diverging trends over South and East Asia need to be accounted for in current state-of-the-art climate models to ensure accurate quantification of the complex and evolving impact of aerosols on the regional climate over Asia.}, language = {en} } @article{WengLuedekeZempetal.2018, author = {Weng, Wei and L{\"u}deke, Matthias K. B. 
and Zemp, Delphine Clara and Lakes, Tobia and Kropp, J{\"u}rgen}, title = {Aerial and surface rivers}, series = {Hydrology and earth system sciences : HESS}, volume = {22}, journal = {Hydrology and earth system sciences : HESS}, number = {1}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1027-5606}, doi = {10.5194/hess-22-911-2018}, pages = {911 -- 927}, year = {2018}, abstract = {The abundant evapotranspiration provided by the Amazon forests is an important component of the hydrological cycle, both regionally and globally. Since the last century, deforestation and expanding agricultural activities have been changing the ecosystem and its provision of moisture to the atmosphere. However, it remains uncertain how the ongoing land use change will influence rainfall, runoff, and water availability as findings from previous studies differ. Using moisture tracking experiments based on observational data, we provide a spatially detailed analysis recognizing potential teleconnection between source and sink regions of atmospheric moisture. We apply land use scenarios in upwind moisture sources and quantify the corresponding rainfall and runoff changes in downwind moisture sinks. We find spatially varying responses of water regimes to land use changes, which may explain the diverse results from previous studies. Parts of the Peruvian Amazon and western Bolivia are identified as the sink areas most sensitive to land use change in the Amazon and we highlight the current water stress by Amazonian land use change on these areas in terms of the water availability. Furthermore, we also identify the influential source areas where land use change may considerably reduce a given target sink's water reception (from our example of the Ucayali River basin outlet, rainfall by 5-12 \% and runoff by 19-50 \% according to scenarios). 
Sensitive sinks and influential sources are therefore suggested as hotspots for achieving sustainable land-water management.}, language = {en} } @phdthesis{Ayzel2021, author = {Ayzel, Georgy}, title = {Advancing radar-based precipitation nowcasting}, doi = {10.25932/publishup-50426}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504267}, school = {Universit{\"a}t Potsdam}, pages = {xx, 68}, year = {2021}, abstract = {Precipitation forecasting has an important place in everyday life - during the day we may have tens of small talks discussing the likelihood that it will rain this evening or weekend. Should you take an umbrella for a walk? Or should you invite your friends for a barbecue? It will certainly depend on what your weather application shows. While for years people were guided by the precipitation forecasts issued for a particular region or city several times a day, the widespread availability of weather radars allowed us to obtain forecasts at much higher spatiotemporal resolution of minutes in time and hundreds of meters in space. Hence, radar-based precipitation nowcasting, that is, very-short-range forecasting (typically up to 1-3 h), has become an essential technique, also in various professional application contexts, e.g., early warning, sewage control, or agriculture. There are two major components comprising a system for precipitation nowcasting: radar-based precipitation estimates, and models to extrapolate that precipitation to the imminent future. While acknowledging the fundamental importance of radar-based precipitation retrieval for precipitation nowcasts, this thesis focuses only on the model development: the establishment of open and competitive benchmark models, the investigation of the potential of deep learning, and the development of procedures for nowcast errors diagnosis and isolation that can guide model development. 
The present landscape of computational models for precipitation nowcasting still struggles with the availability of open software implementations that could serve as benchmarks for measuring progress. Focusing on this gap, we have developed and extensively benchmarked a stack of models based on different optical flow algorithms for the tracking step and a set of parsimonious extrapolation procedures based on image warping and advection. We demonstrate that these models provide skillful predictions comparable with or even superior to state-of-the-art operational software. We distribute the corresponding set of models as a software library, rainymotion, which is written in the Python programming language and openly available at GitHub (https://github.com/hydrogo/rainymotion). That way, the library acts as a tool for providing fast, open, and transparent solutions that could serve as a benchmark for further model development and hypothesis testing. One of the promising directions for model development is to challenge the potential of deep learning - a subfield of machine learning that refers to artificial neural networks with deep architectures, which may consist of many computational layers. Deep learning showed promising results in many fields of computer science, such as image and speech recognition, or natural language processing, where it started to dramatically outperform reference methods. The high benefit of using "big data" for training is among the main reasons for that. Hence, the emerging interest in deep learning in atmospheric sciences is also caused and concerted with the increasing availability of data - both observational and model-based. The large archives of weather radar data provide a solid basis for investigation of deep learning potential in precipitation nowcasting: one year of national 5-min composites for Germany comprises around 85 billion data points. 
To this aim, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. RainNet was trained to predict continuous precipitation intensities at a lead time of 5 min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900 km x 900 km and has a resolution of 1 km in space and 5 min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In these experiments, RainNet was applied recursively in order to achieve lead times of up to 1 h. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the previously developed rainymotion library. RainNet significantly outperformed the benchmark models at all lead times up to 60 min for the routine verification metrics mean absolute error (MAE) and critical success index (CSI) at intensity thresholds of 0.125, 1, and 5 mm/h. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15 mm/h). The limited ability of RainNet to predict high rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5 min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16 km and below. Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5 min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. 
Beyond the lead time of 5 min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times. Hence, we propose several options to address this issue in prospective research on model development for precipitation nowcasting, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance. The model development together with the verification experiments for both conventional and deep learning model predictions also revealed the need to better understand the source of forecast errors. Understanding the dominant sources of error in specific situations should help in guiding further model improvement. The total error of a precipitation nowcast consists of an error in the predicted location of a precipitation feature and an error in the change of precipitation intensity over lead time. So far, verification measures did not allow to isolate the location error, making it difficult to specifically improve nowcast models with regard to location prediction. To fill this gap, we introduced a framework to directly quantify the location error. To that end, we detect and track scale-invariant precipitation features (corners) in radar images. We then consider these observed tracks as the true reference in order to evaluate the performance (or, inversely, the error) of any model that aims to predict the future location of a precipitation feature. Hence, the location error of a forecast at any lead time ahead of the forecast time corresponds to the Euclidean distance between the observed and the predicted feature location at the corresponding lead time. 
Based on this framework, we carried out a benchmarking case study using one year worth of weather radar composites of the DWD. We evaluated the performance of four extrapolation models, two of which are based on the linear extrapolation of corner motion; and the remaining two are based on the Dense Inverse Search (DIS) method: motion vectors obtained from DIS are used to predict feature locations by linear and Semi-Lagrangian extrapolation. For all competing models, the mean location error exceeds a distance of 5 km after 60 min, and 10 km after 110 min. At least 25\% of all forecasts exceed an error of 5 km after 50 min, and of 10 km after 90 min. Even for the best models in our experiment, at least 5 percent of the forecasts will have a location error of more than 10 km after 45 min. When we relate such errors to application scenarios that are typically suggested for precipitation nowcasting, e.g., early warning, it becomes obvious that location errors matter: the order of magnitude of these errors is about the same as the typical extent of a convective cell. Hence, the uncertainty of precipitation nowcasts at such length scales - just as a result of locational errors - can be substantial already at lead times of less than 1 h. Being able to quantify the location error should hence guide any model development that is targeted towards its minimization. To that aim, we also consider the high potential of using deep learning architectures specific to the assimilation of sequential (track) data. Last but not least, the thesis demonstrates the benefits of a general movement towards open science for model development in the field of precipitation nowcasting. All the presented models and frameworks are distributed as open repositories, thus enhancing transparency and reproducibility of the methodological approach. 
Furthermore, they are readily available to be used for further research studies, as well as for practical applications.}, language = {en} } @article{BronstertMenzel2002, author = {Bronstert, Axel and Menzel, Lucas}, title = {Advances in Flood Research}, year = {2002}, language = {en} } @article{WutzlerHudsonThieken2022, author = {Wutzler, Bianca and Hudson, Paul and Thieken, Annegret}, title = {Adaptation strategies of flood-damaged businesses in Germany}, series = {Frontiers in Water}, journal = {Frontiers in Water}, publisher = {Frontiers Media SA}, address = {Lausanne, Schweiz}, issn = {2624-9375}, doi = {10.3389/frwa.2022.932061}, pages = {13}, year = {2022}, abstract = {Flood risk management in Germany follows an integrative approach in which both private households and businesses can make an important contribution to reducing flood damage by implementing property-level adaptation measures. While the flood adaptation behavior of private households has already been widely researched, comparatively less attention has been paid to the adaptation strategies of businesses. However, their ability to cope with flood risk plays an important role in the social and economic development of a flood-prone region. Therefore, using quantitative survey data, this study aims to identify different strategies and adaptation drivers of 557 businesses damaged by a riverine flood in 2013 and 104 businesses damaged by pluvial or flash floods between 2014 and 2017. Our results indicate that a low perceived self-efficacy may be an important factor that can reduce the motivation of businesses to adapt to flood risk. Furthermore, property-owners tended to act more proactively than tenants. In addition, high experience with previous flood events and low perceived response costs could strengthen proactive adaptation behavior. 
These findings should be considered in business-tailored risk communication.}, language = {en} } @misc{WutzlerHudsonThieken2022a, author = {Wutzler, Bianca and Hudson, Paul and Thieken, Annegret}, title = {Adaptation strategies of flood-damaged businesses in Germany}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1304}, issn = {1866-8372}, doi = {10.25932/publishup-57735}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-577350}, pages = {13}, year = {2022}, abstract = {Flood risk management in Germany follows an integrative approach in which both private households and businesses can make an important contribution to reducing flood damage by implementing property-level adaptation measures. While the flood adaptation behavior of private households has already been widely researched, comparatively less attention has been paid to the adaptation strategies of businesses. However, their ability to cope with flood risk plays an important role in the social and economic development of a flood-prone region. Therefore, using quantitative survey data, this study aims to identify different strategies and adaptation drivers of 557 businesses damaged by a riverine flood in 2013 and 104 businesses damaged by pluvial or flash floods between 2014 and 2017. Our results indicate that a low perceived self-efficacy may be an important factor that can reduce the motivation of businesses to adapt to flood risk. Furthermore, property-owners tended to act more proactively than tenants. In addition, high experience with previous flood events and low perceived response costs could strengthen proactive adaptation behavior. These findings should be considered in business-tailored risk communication.}, language = {en} }