@article{AbonKneisCrisologoetal.2016, author = {Abon, Catherine Cristobal and Kneis, David and Crisologo, Irene and Bronstert, Axel and David, Carlos Primo Constantino and Heistermann, Maik}, title = {Evaluating the potential of radar-based rainfall estimates for streamflow and flood simulations in the Philippines}, series = {Geomatics, Natural Hazards and Risk}, volume = {7}, journal = {Geomatics, Natural Hazards and Risk}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1947-5705}, doi = {10.1080/19475705.2015.1058862}, pages = {1390 -- 1405}, year = {2016}, abstract = {This case study evaluates the suitability of radar-based quantitative precipitation estimates (QPEs) for the simulation of streamflow in the Marikina River Basin (MRB), the Philippines. Hourly radar-based QPEs were produced from reflectivity that had been observed by an S-band radar located about 90 km from the MRB. Radar data processing and precipitation estimation were carried out using the open-source library wradlib. To assess the added value of the radar-based QPE, we used spatially interpolated rain gauge observations (gauge-only (GO) product) as a benchmark. Rain gauge observations were also used to quantify rainfall estimation errors at the point scale. At the point scale, the radar-based QPE outperformed the GO product in 2012, while for 2013, the performance was similar. For both periods, estimation errors increased substantially from daily to hourly accumulation intervals. Despite this, both rainfall estimation methods allowed for a good representation of observed streamflow when used to force a hydrological simulation model of the MRB. Furthermore, the results of the hydrological simulation were consistent with rainfall verification at the point scale: the radar-based QPE performed better than the GO product in 2012, and equivalently in 2013. Altogether, we could demonstrate that, in terms of streamflow simulation, the radar-based QPE can perform as well as or even better than the GO product - even for a basin such as the MRB, which has a comparatively dense rain gauge network.
This suggests good prospects for using radar-based QPE to simulate and forecast streamflow in other parts of the Philippines where rain gauge networks are not as dense.}, language = {en} } @article{AyzelHeistermann2021, author = {Ayzel, Georgy and Heistermann, Maik}, title = {The effect of calibration data length on the performance of a conceptual hydrological model versus LSTM and GRU}, series = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, volume = {149}, journal = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0098-3004}, doi = {10.1016/j.cageo.2021.104708}, pages = {12}, year = {2021}, abstract = {We systematically explore the effect of calibration data length on the performance of a conceptual hydrological model, GR4H, in comparison to two Artificial Neural Network (ANN) architectures: Long Short-Term Memory Networks (LSTM) and Gated Recurrent Units (GRU), which have just recently been introduced to the field of hydrology. We implemented a case study for six river basins across the contiguous United States, with 25 years of meteorological and discharge data. Nine years were reserved for independent validation; two years were used as warm-up periods, one for each of the calibration and validation periods; from the remaining 14 years, we sampled increasing amounts of data for model calibration and found pronounced differences in model performance. While GR4H required less data to converge, LSTM and GRU caught up at a remarkable rate, considering their number of parameters. LSTM and GRU also exhibited a higher calibration instability in comparison to GR4H. These findings confirm the potential of modern deep-learning architectures in rainfall-runoff modelling, but also highlight the noticeable differences between them with regard to the effect of calibration data length.}, language = {en} } @article{AyzelHeistermannWinterrath2019, author = {Ayzel, Georgy and Heistermann, Maik and Winterrath, Tanja}, title = {Optical flow models as an open benchmark for radar-based precipitation nowcasting (rainymotion v0.1)}, series = {Geoscientific Model Development}, volume = {12}, journal = {Geoscientific Model Development}, publisher = {Copernicus Publications}, address = {G{\"o}ttingen}, issn = {1991-9603}, doi = {10.5194/gmd-12-1387-2019}, pages = {1387 -- 1402}, year = {2019}, abstract = {Quantitative precipitation nowcasting (QPN) has become an essential technique in various application contexts, such as early warning or urban sewage control. A common heuristic prediction approach is to track the motion of precipitation features from a sequence of weather radar images and then to displace the precipitation field to the imminent future (minutes to hours) based on that motion, assuming that the intensity of the features remains constant ("Lagrangian persistence"). In that context, "optical flow" has become one of the most popular tracking techniques. Yet the present landscape of computational QPN models still struggles with producing open software implementations.
Focusing on this gap, we have developed and extensively benchmarked a stack of models based on different optical flow algorithms for the tracking step and a set of parsimonious extrapolation procedures based on image warping and advection. We demonstrate that these models provide skillful predictions comparable with or even superior to state-of-the-art operational software. Our software library ("rainymotion") for precipitation nowcasting is written in the Python programming language and openly available at GitHub (https://github.com/hydrogo/rainymotion, Ayzel et al., 2019). That way, the library may serve as a tool for providing fast, free, and transparent solutions that could act as a benchmark for further model development and hypothesis testing - a benchmark that is far more advanced than the conventional benchmark of Eulerian persistence commonly used in QPN verification experiments.}, language = {en} } @misc{AyzelHeistermannWinterrath2019a, author = {Ayzel, Georgy and Heistermann, Maik and Winterrath, Tanja}, title = {Optical flow models as an open benchmark for radar-based precipitation nowcasting (rainymotion v0.1)}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {709}, issn = {1866-8372}, doi = {10.25932/publishup-42933}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429333}, pages = {16}, year = {2019}, abstract = {Quantitative precipitation nowcasting (QPN) has become an essential technique in various application contexts, such as early warning or urban sewage control. A common heuristic prediction approach is to track the motion of precipitation features from a sequence of weather radar images and then to displace the precipitation field to the imminent future (minutes to hours) based on that motion, assuming that the intensity of the features remains constant ("Lagrangian persistence"). In that context, "optical flow" has become one of the most popular tracking techniques. Yet the present landscape of computational QPN models still struggles with producing open software implementations. Focusing on this gap, we have developed and extensively benchmarked a stack of models based on different optical flow algorithms for the tracking step and a set of parsimonious extrapolation procedures based on image warping and advection. We demonstrate that these models provide skillful predictions comparable with or even superior to state-of-the-art operational software. Our software library ("rainymotion") for precipitation nowcasting is written in the Python programming language and openly available at GitHub (https://github.com/hydrogo/rainymotion, Ayzel et al., 2019).
That way, the library may serve as a tool for providing fast, free, and transparent solutions that could act as a benchmark for further model development and hypothesis testing - a benchmark that is far more advanced than the conventional benchmark of Eulerian persistence commonly used in QPN verification experiments.}, language = {en} } @misc{AyzelSchefferHeistermann2020a, author = {Ayzel, Georgy and Scheffer, Tobias and Heistermann, Maik}, title = {RainNet v1.0}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {964}, issn = {1866-8372}, doi = {10.25932/publishup-47294}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472942}, pages = {16}, year = {2020}, abstract = {In this study, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. Its design was inspired by the U-Net and SegNet families of deep learning models, which were originally designed for binary segmentation tasks. RainNet was trained to predict continuous precipitation intensities at a lead time of 5 min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900 km × 900 km and has a resolution of 1 km in space and 5 min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In order to achieve a lead time of 1 h, a recursive approach was implemented by using RainNet predictions at 5 min lead times as model inputs for longer lead times. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the rainymotion library and had previously been shown to outperform DWD's operational nowcasting model for the same set of verification events. RainNet significantly outperforms the benchmark models at all lead times up to 60 min for the routine verification metrics mean absolute error (MAE) and the critical success index (CSI) at intensity thresholds of 0.125, 1, and 5 mm h⁻¹. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15 mm h⁻¹). The limited ability of RainNet to predict heavy rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5 min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16 km and below. Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5 min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5 min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times.
Hence, we propose several options to address this issue in prospective research, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance in terms of a binary segmentation task. Furthermore, we suggest additional input data that could help to better identify situations with imminent precipitation dynamics. The model code, pretrained weights, and training data are provided in open repositories as an input for such future studies.}, language = {en} } @article{AyzelSchefferHeistermann2020, author = {Ayzel, Georgy and Scheffer, Tobias and Heistermann, Maik}, title = {RainNet v1.0}, series = {Geoscientific Model Development}, volume = {13}, journal = {Geoscientific Model Development}, number = {6}, publisher = {Copernicus Publications}, address = {G{\"o}ttingen}, issn = {1991-959X}, doi = {10.5194/gmd-13-2631-2020}, pages = {2631 -- 2644}, year = {2020}, abstract = {In this study, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. Its design was inspired by the U-Net and SegNet families of deep learning models, which were originally designed for binary segmentation tasks. RainNet was trained to predict continuous precipitation intensities at a lead time of 5 min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900 km × 900 km and has a resolution of 1 km in space and 5 min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In order to achieve a lead time of 1 h, a recursive approach was implemented by using RainNet predictions at 5 min lead times as model inputs for longer lead times. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the rainymotion library and had previously been shown to outperform DWD's operational nowcasting model for the same set of verification events. RainNet significantly outperforms the benchmark models at all lead times up to 60 min for the routine verification metrics mean absolute error (MAE) and the critical success index (CSI) at intensity thresholds of 0.125, 1, and 5 mm h⁻¹. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15 mm h⁻¹). The limited ability of RainNet to predict heavy rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5 min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16 km and below. Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5 min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5 min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times.
Hence, we propose several options to address this issue in prospective research, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance in terms of a binary segmentation task. Furthermore, we suggest additional input data that could help to better identify situations with imminent precipitation dynamics. The model code, pretrained weights, and training data are provided in open repositories as an input for such future studies.}, language = {en} } @article{BoessenkoolBruegerHeistermann2017, author = {Boessenkool, Berry and B{\"u}rger, Gerd and Heistermann, Maik}, title = {Effects of sample size on estimation of rainfall extremes at high temperatures}, series = {Natural hazards and earth system sciences}, volume = {17}, journal = {Natural hazards and earth system sciences}, number = {9}, publisher = {Copernicus Publications}, address = {G{\"o}ttingen}, issn = {1561-8633}, doi = {10.5194/nhess-17-1623-2017}, pages = {1623 -- 1629}, year = {2017}, abstract = {High precipitation quantiles tend to rise with temperature, following the so-called Clausius-Clapeyron (CC) scaling. It is often reported that the CC-scaling relation breaks down and even reverts for very high temperatures. In our study, we investigate this reversal using observational climate data from 142 stations across Germany. One of the suggested meteorological explanations for the breakdown is limited moisture supply. Here we argue that, instead, it could simply originate from undersampling. As rainfall frequency generally decreases with higher temperatures, rainfall intensities as dictated by CC scaling are less likely to be recorded than for moderate temperatures. Empirical quantiles are conventionally estimated from order statistics via various forms of plotting position formulas. They have in common that their largest representable return period is given by the sample size. In small samples, high quantiles are underestimated accordingly. The small-sample effect is weaker, or disappears completely, when using parametric quantile estimates from a generalized Pareto distribution (GPD) fitted with L-moments. For those, we obtain quantiles of rainfall intensities that continue to rise with temperature.}, language = {en} } @misc{BoessenkoolBruegerHeistermann2017a, author = {Boessenkool, Berry and B{\"u}rger, Gerd and Heistermann, Maik}, title = {Effects of sample size on estimation of rainfall extremes at high temperatures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403897}, pages = {7}, year = {2017}, abstract = {High precipitation quantiles tend to rise with temperature, following the so-called Clausius-Clapeyron (CC) scaling. It is often reported that the CC-scaling relation breaks down and even reverts for very high temperatures. In our study, we investigate this reversal using observational climate data from 142 stations across Germany. One of the suggested meteorological explanations for the breakdown is limited moisture supply. Here we argue that, instead, it could simply originate from undersampling. As rainfall frequency generally decreases with higher temperatures, rainfall intensities as dictated by CC scaling are less likely to be recorded than for moderate temperatures. Empirical quantiles are conventionally estimated from order statistics via various forms of plotting position formulas. They have in common that their largest representable return period is given by the sample size.
In small samples, high quantiles are underestimated accordingly. The small-sample effect is weaker, or disappears completely, when using parametric quantile estimates from a generalized Pareto distribution (GPD) fitted with L-moments. For those, we obtain quantiles of rainfall intensities that continue to rise with temperature.}, language = {en} } @article{BronstertAgarwalBoessenkooletal.2018, author = {Bronstert, Axel and Agarwal, Ankit and Boessenkool, Berry and Crisologo, Irene and Fischer, Madlen and Heistermann, Maik and Koehn-Reich, Lisei and Lopez-Tarazon, Jose Andres and Moran, Thomas and Ozturk, Ugur and Reinhardt-Imjela, Christian and Wendi, Dadiyorto}, title = {Forensic hydro-meteorological analysis of an extreme flash flood}, series = {The science of the total environment : an international journal for scientific research into the environment and its relationship with man}, volume = {630}, journal = {The science of the total environment : an international journal for scientific research into the environment and its relationship with man}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0048-9697}, doi = {10.1016/j.scitotenv.2018.02.241}, pages = {977 -- 991}, year = {2018}, abstract = {The flash flood in Braunsbach, in the north-eastern part of Baden-W{\"u}rttemberg, Germany, was a particularly strong and concise event which took place during the floods in southern Germany at the end of May and early June 2016. This article presents a detailed analysis of the hydro-meteorological forcing and the hydrological consequences of this event. A specific approach, the "forensic hydrological analysis", was followed in order to retrospectively include and combine a variety of data from different disciplines. Such an approach investigates the origins, mechanisms and course of such natural events, if possible in a "near real time" mode, in order to follow the most recent traces of the event. The results show that it was a very rare rainfall event with extreme intensities which, in combination with catchment properties, led to extreme runoff and severe geomorphological hazards, i.e. large debris flows, which together resulted in immense damage in the small rural town of Braunsbach. It was clearly a record-breaking event, which exceeded existing design guidelines for extreme flood discharge in this region by a factor of about 10. Being such a rare or even unique event, it cannot reliably be put into a crisp probabilistic context; however, one can conclude that a return period clearly above 100 years can be assigned to all event components: rainfall, peak discharge and sediment transport. Due to the complex and interacting processes, no single cause of the flood or reason for the very high damage can be identified, since only the interplay and the cascading characteristics of these processes led to such an event. The roles of different human activities in the origin and/or intensification of such an extreme event are finally discussed.
}, language = {en} } @article{BronstertCreutzfeldtGraeffetal.2012, author = {Bronstert, Axel and Creutzfeldt, Benjamin and Gr{\"a}ff, Thomas and Hajnsek, Irena and Heistermann, Maik and Itzerott, Sibylle and Jagdhuber, Thomas and Kneis, David and Lueck, Erika and Reusser, Dominik and Zehe, Erwin}, title = {Potentials and constraints of different types of soil moisture observations for flood simulations in headwater catchments}, series = {Natural hazards : journal of the International Society for the Prevention and Mitigation of Natural Hazards}, volume = {60}, journal = {Natural hazards : journal of the International Society for the Prevention and Mitigation of Natural Hazards}, number = {3}, publisher = {Springer}, address = {New York}, issn = {0921-030X}, doi = {10.1007/s11069-011-9874-9}, pages = {879 -- 914}, year = {2012}, abstract = {Flood generation in mountainous headwater catchments is governed by rainfall intensities, by the spatial distribution of rainfall and by the state of the catchment prior to the rainfall, e.g. by the spatial pattern of soil moisture, groundwater conditions and possibly snow. The work presented here explores the limits and potentials of measuring soil moisture with different methods and at different scales, and the potential use of such measurements for flood simulation. These measurements were obtained in 2007 and 2008 within a comprehensive multi-scale experiment in the Weisseritz headwater catchment in the Ore Mountains, Germany. The following technologies were applied jointly: the thermogravimetric method, frequency domain reflectometry (FDR) sensors, a spatial time domain reflectometry (STDR) cluster, ground-penetrating radar (GPR), airborne polarimetric synthetic aperture radar (polarimetric SAR) and advanced synthetic aperture radar (ASAR) aboard the Envisat satellite. We present exemplary soil moisture measurement results, with spatial scales ranging from the point scale, via the hillslope and field scales, to the catchment scale. Only the spatial TDR cluster was able to record continuous data; the other methods are limited to the dates of overflights (airplane and satellite) or of measurement campaigns on the ground. For possible use in flood simulation, the observation of soil moisture at multiple scales has to be combined with suitable hydrological modelling, here using the hydrological model WaSiM-ETH. Therefore, several simulation experiments were conducted in order to test both the usability of the recorded soil moisture data and the suitability of a distributed hydrological model to make use of this information. The measurement results show that airborne and satellite-based systems in particular provide information on the near-surface spatial distribution. However, there are still various limitations, such as the need for parallel ground measurements (Envisat ASAR), uncertainties in polarimetric decomposition techniques (polarimetric SAR), very limited information from remote sensing methods about vegetated surfaces, and the non-availability of continuous measurements. The model experiments showed the importance of soil moisture as an initial condition for physically based flood modelling. However, the observed moisture data reflect the surface or near-surface soil moisture only. Hence, only saturated overland flow might be related to these data. Other flood generation processes influenced by catchment wetness in the subsurface, such as subsurface storm flow or quick groundwater drainage, cannot be assessed by these data.
One has to acknowledge that, in spite of innovative measuring techniques at all spatial scales, soil moisture data for entire vegetated catchments are still not operationally available today. Therefore, observations of soil moisture should primarily be used to improve the quality of continuous, distributed hydrological catchment models that simulate the spatial distribution of moisture internally. Thus, when and where soil moisture data are available, they should be compared with their simulated equivalents in order to improve the parameter estimates and possibly the structure of the hydrological model.}, language = {en} }