@article{EngbertHainzlZoelleretal.1998, author = {Engbert, Ralf and Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Testing for unstable periodic orbits to characterize spatiotemporal dynamics}, year = {1998}, language = {en} } @article{FiedlerHainzlZoelleretal.2018, author = {Fiedler, Bernhard and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Detection of Gutenberg-Richter b-Value Changes in Earthquake Time Series}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {5A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120180091}, pages = {2778 -- 2787}, year = {2018}, abstract = {The Gutenberg-Richter relation for earthquake magnitudes is the most famous empirical law in seismology. It states that the frequency of earthquake magnitudes follows an exponential distribution; this has been found to be a robust feature of seismicity above the completeness magnitude, and it is independent of whether global, regional, or local seismicity is analyzed. However, the exponent b of the distribution varies significantly in space and time, which is important for process understanding and seismic hazard assessment; this is particularly true because of the fact that the Gutenberg-Richter b-value acts as a proxy for the stress state and quantifies the ratio of large-to-small earthquakes. In our work, we focus on the automatic detection of statistically significant temporal changes of the b-value in seismicity data. In our approach, we use Bayes factors for model selection and estimate multiple change-points of the frequency-magnitude distribution in time. The method is first applied to synthetic data, showing its capability to detect change-points as function of the size of the sample and the b-value contrast. 
Finally, we apply this approach to examples of observational data sets for which b-value changes have previously been stated. Our analysis of foreshock and after-shock sequences related to mainshocks, as well as earthquake swarms, shows that only a portion of the b-value changes is statistically significant.}, language = {en} } @article{FiedlerZoellerHolschneideretal.2018, author = {Fiedler, Bernhard and Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {Multiple Change-Point Detection in Spatiotemporal Seismicity Data}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {3A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170236}, pages = {1147 -- 1159}, year = {2018}, abstract = {Earthquake rates are driven by tectonic stress buildup, earthquake-induced stress changes, and transient aseismic processes. Although the origin of the first two sources is known, transient aseismic processes are more difficult to detect. However, the knowledge of the associated changes of the earthquake activity is of great interest, because it might help identify natural aseismic deformation patterns such as slow-slip events, as well as the occurrence of induced seismicity related to human activities. For this goal, we develop a Bayesian approach to identify change-points in seismicity data automatically. Using the Bayes factor, we select a suitable model, estimate possible change-points, and we additionally use a likelihood ratio test to calculate the significance of the change of the intensity. The approach is extended to spatiotemporal data to detect the area in which the changes occur. The method is first applied to synthetic data showing its capability to detect real change-points. 
Finally, we apply this approach to observational data from Oklahoma and observe statistical significant changes of seismicity in space and time.}, language = {en} } @article{HainzlBrietzkeZoeller2010, author = {Hainzl, Sebastian and Brietzke, Gilbert B. and Z{\"o}ller, Gert}, title = {Quantitative earthquake forecasts resulting from static stress triggering}, issn = {0148-0227}, doi = {10.1029/2010jb007473}, year = {2010}, abstract = {In recent years, the triggering of earthquakes has been discussed controversially with respect to the underlying mechanisms and the capability to evaluate the resulting seismic hazard. Apart from static stress interactions, other mechanisms including dynamic stress transfer have been proposed to be part of a complex triggering process. Exploiting the theoretical relation between long-term earthquake rates and stressing rate, we demonstrate that static stress changes resulting from an earthquake rupture allow us to predict quantitatively the aftershock activity without tuning specific model parameters. These forecasts are found to be in excellent agreement with all first-order characteristics of aftershocks, in particular, (1) the total number, (2) the power law distance decay, (3) the scaling of the productivity with the main shock magnitude, (4) the foreshock probability, and (5) the empirical Bath law providing the maximum aftershock magnitude, which supports the conclusion that static stress transfer is the major mechanism of earthquake triggering.}, language = {en} } @article{HainzlZoellerBrietzkeetal.2013, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Brietzke, Gilbert B. and Hinzen, Klaus-G.}, title = {Comparison of deterministic and stochastic earthquake simulators for fault interactions in the Lower Rhine Embayment, Germany}, series = {Geophysical journal international}, volume = {195}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. 
Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggt271}, pages = {684 -- 694}, year = {2013}, abstract = {Time-dependent probabilistic seismic hazard assessment requires a stochastic description of earthquake occurrences. While short-term seismicity models are well-constrained by observations, the recurrences of characteristic on-fault earthquakes are only derived from theoretical considerations, uncertain palaeo-events or proxy data. Despite the involved uncertainties and complexity, simple statistical models for a quasi-period recurrence of on-fault events are implemented in seismic hazard assessments. To test the applicability of statistical models, such as the Brownian relaxation oscillator or the stress release model, we perform a systematic comparison with deterministic simulations based on rate- and state-dependent friction, high-resolution representations of fault systems and quasi-dynamic rupture propagation. For the specific fault network of the Lower Rhine Embayment, Germany, we run both stochastic and deterministic model simulations based on the same fault geometries and stress interactions. Our results indicate that the stochastic simulators are able to reproduce the first-order characteristics of the major earthquakes on isolated faults as well as for coupled faults with moderate stress interactions. However, we find that all tested statistical models fail to reproduce the characteristics of strongly coupled faults, because multisegment rupturing resulting from a spatiotemporally correlated stress field is underestimated in the stochastic simulators. 
Our results suggest that stochastic models have to be extended by multirupture probability distributions to provide more reliable results.}, language = {en} } @article{HainzlZoellerKurths2000, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Self-organization of spatio-temporal earthquake clusters}, year = {2000}, language = {en} } @article{HainzlZoellerKurths1999, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Self-organized criticality model for earthquakes : Quiescence, foreshocks and aftershocks}, year = {1999}, language = {en} } @article{HainzlZoellerKurths1999a, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Similar power laws for foreshock and aftershock sequences in a spring block model for earthquakes}, year = {1999}, language = {en} } @article{HainzlZoellerKurthsetal.2000, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen and Zschau, Jochen}, title = {Seismic quiescence as an indicator for large earthquakes in a system of self-organized criticality}, year = {2000}, language = {en} } @article{HainzlZoellerMain2006, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Main, Ian}, title = {Introduction to special issue: Dynamics of seismicity patterns and earthquake triggering - Preface}, series = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, volume = {424}, journal = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, number = {Special issue}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0040-1951}, doi = {10.1016/j.tecto.2006.03.034}, pages = {135 -- 138}, year = {2006}, language = {en} } @article{HainzlZoellerWang2010, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Wang, Rongjiang}, title = {Impact of the receiver fault distribution on aftershock activity}, issn = {0148-0227}, doi = 
{10.1029/2008jb006224}, year = {2010}, abstract = {Aftershock models are usually based either on purely empirical relations ignoring the physical mechanism or on deterministic calculations of stress changes on a predefined receiver fault orientation. Here we investigate the effect of considering more realistic fault systems in models based on static Coulomb stress changes. For that purpose, we perform earthquake simulations with elastic half-space stress interactions, rate-and-state dependent frictional earthquake nucleation, and extended ruptures with heterogeneous (fractal) slip distributions. We find that the consideration of earthquake nucleation on multiple receiver fault orientations does not influence the shape of the temporal Omori-type aftershock decay, but changes significantly the predicted spatial patterns and the total number of triggered events. So-called stress shadows with decreased activity almost vanish, and activation decays continuously with increasing distance from the main shock rupture. The total aftershock productivity, which is shown to be almost independent of the assumed background rate, increases significantly if multiple receiver fault planes exist. The application to the 1992 M7.3 Landers, California, aftershock sequence indicates a good agreement with the locations and the total productivity of the observed directly triggered aftershocks.}, language = {en} } @article{HolschneiderZoellerClementsetal.2014, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Clements, R. 
and Schorlemmer, Danijel}, title = {Can we test for the maximum possible earthquake magnitude?}, series = {Journal of geophysical research : Solid earth}, volume = {119}, journal = {Journal of geophysical research : Solid earth}, number = {3}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2013JB010319}, pages = {2019 -- 2028}, year = {2014}, language = {en} } @article{HolschneiderZoellerHainzl2011, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Estimation of the maximum possible magnitude in the framework of a doubly truncated Gutenberg-Richter Model}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100289}, pages = {1649 -- 1659}, year = {2011}, abstract = {We discuss to what extent a given earthquake catalog and the assumption of a doubly truncated Gutenberg-Richter distribution for the earthquake magnitudes allow for the calculation of confidence intervals for the maximum possible magnitude M. We show that, without further assumptions such as the existence of an upper bound of M, only very limited information may be obtained. In a frequentist formulation, for each confidence level alpha the confidence interval diverges with finite probability. In a Bayesian formulation, the posterior distribution of the upper magnitude is not normalizable. We conclude that the common approach to derive confidence intervals from the variance of a point estimator fails. Technically, this problem can be overcome by introducing an upper bound (M) over tilde for the maximum magnitude. Then the Bayesian posterior distribution can be normalized, and its variance decreases with the number of observed events. 
However, because the posterior depends significantly on the choice of the unknown value of (M) over tilde, the resulting confidence intervals are essentially meaningless. The use of an informative prior distribution accounting for pre-knowledge of M is also of little use, because the prior is only modified in the case of the occurrence of an extreme event. Our results suggest that the maximum possible magnitude M should be better replaced by M(T), the maximum expected magnitude in a given time interval T, for which the calculation of exact confidence intervals becomes straightforward. From a physical point of view, numerical models of the earthquake process adjusted to specific fault regions may be a powerful alternative to overcome the shortcomings of purely statistical inference.}, language = {en} } @article{LaudanZoellerThieken2020, author = {Laudan, Jonas and Z{\"o}ller, Gert and Thieken, Annegret}, title = {Flash floods versus river floods}, series = {Natural Hazards and Earth System Sciences}, volume = {20}, journal = {Natural Hazards and Earth System Sciences}, publisher = {European Geophysical Society}, address = {Katlenburg-Lindau}, issn = {1684-9981}, doi = {10.5194/nhess-20-999-2020}, pages = {999 -- 1023}, year = {2020}, abstract = {River floods are among the most damaging natural hazards that frequently occur in Germany. Flooding causes high economic losses and impacts many residents. In 2016, several southern German municipalities were hit by flash floods after unexpectedly severe heavy rainfall, while in 2013 widespread river flooding had occurred. This study investigates and compares the psychological impacts of river floods and flash floods and potential consequences for precautionary behaviour. Data were collected using computer-aided telephone interviews that were conducted among flood-affected households around 9 months after each damaging event. 
This study applies Bayesian statistics and negative binomial regressions to test the suitability of psychological indicators to predict the precaution motivation of individuals. The results show that it is not the particular flood type but rather the severity and local impacts of the event that are crucial for the different, and potentially negative, impacts on mental health. According to the used data, however, predictions of the individual precaution motivation should not be based on the derived psychological indicators - i.e. coping appraisal, threat appraisal, burden and evasion - since their explanatory power was generally low and results are, for the most part, non-significant. Only burden reveals a significant positive relation to planned precaution regarding weak flash floods. In contrast to weak flash floods and river floods, the perceived threat of strong flash floods is significantly lower although feelings of burden and lower coping appraisals are more pronounced. Further research is needed to better include psychological assessment procedures and to focus on alternative data sources regarding floods and the connected precaution motivation of affected residents.}, language = {en} } @article{MaghsoudiCescaHainzletal.2015, author = {Maghsoudi, Samira and Cesca, Simone and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert and Kaiser, Diethelm}, title = {Maximum Magnitude of Completeness in a Salt Mine}, series = {Bulletin of the Seismological Society of America}, volume = {105}, journal = {Bulletin of the Seismological Society of America}, number = {3}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140039}, pages = {1491 -- 1501}, year = {2015}, abstract = {In this study, we analyze acoustic emission (AE) data recorded at the Morsleben salt mine, Germany, to assess the catalog completeness, which plays an important role in any seismicity analysis. 
We introduce the new concept of a magnitude completeness interval consisting of a maximum magnitude of completeness (M-c(max)) in addition to the well-known minimum magnitude of completeness. This is required to describe the completeness of the catalog, both for the smallest events (for which the detection performance may be low) and for the largest ones (which may be missed because of sensors saturation). We suggest a method to compute the maximum magnitude of completeness and calculate it for a spatial grid based on (1) the prior estimation of saturation magnitude at each sensor, (2) the correction of the detection probability function at each sensor, including a drop in the detection performance when it saturates, and (3) the combination of detection probabilities of all sensors to obtain the network detection performance. The method is tested using about 130,000 AE events recorded in a period of five weeks, with sources confined within a small depth interval, and an example of the spatial distribution of M-c(max) is derived. The comparison between the spatial distribution of M-c(max) and of the maximum possible magnitude (M-max), which is here derived using a recently introduced Bayesian approach, indicates that M-max exceeds M-c(max) in some parts of the mine. This suggests that some large and important events may be missed in the catalog, which could lead to a bias in the hazard evaluation.}, language = {en} } @article{RichterHainzlDahmetal.2020, author = {Richter, Gudrun and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert}, title = {Stress-based, statistical modeling of the induced seismicity at the Groningen gas field}, series = {Environmental earth sciences}, volume = {79}, journal = {Environmental earth sciences}, number = {11}, publisher = {Springer}, address = {New York}, issn = {1866-6280}, doi = {10.1007/s12665-020-08941-4}, pages = {15}, year = {2020}, abstract = {Groningen is the largest onshore gas field under production in Europe. 
The pressure depletion of the gas field started in 1963. In 1991, the first induced micro-earthquakes have been located at reservoir level with increasing rates in the following decades. Most of these events are of magnitude less than 2.0 and cannot be felt. However, maximum observed magnitudes continuously increased over the years until the largest, significant event with ML=3.6 was recorded in 2014, which finally led to the decision to reduce the production. This causal sequence displays the crucial role of understanding and modeling the relation between production and induced seismicity for economic planing and hazard assessment. Here we test whether the induced seismicity related to gas exploration can be modeled by the statistical response of fault networks with rate-and-state-dependent frictional behavior. We use the long and complete local seismic catalog and additionally detailed information on production-induced changes at the reservoir level to test different seismicity models. Both the changes of the fluid pressure and of the reservoir compaction are tested as input to approximate the Coulomb stress changes. We find that the rate-and-state model with a constant tectonic background seismicity rate can reproduce the observed long delay of the seismicity onset. In contrast, so-called Coulomb failure models with instantaneous earthquake nucleation need to assume that all faults are initially far from a critical state of stress to explain the delay. Our rate-and-state model based on the fluid pore pressure fits the spatiotemporal pattern of the seismicity best, where the fit further improves by taking the fault density and orientation into account. 
Despite its simplicity with only three free parameters, the rate-and-state model can reproduce the main statistical features of the observed activity.}, language = {en} } @article{SalamatZareHolschneideretal.2016, author = {Salamat, Mona and Zare, Mehdi and Holschneider, Matthias and Z{\"o}ller, Gert}, title = {Calculation of Confidence Intervals for the Maximum Magnitude of Earthquakes in Different Seismotectonic Zones of Iran}, series = {Pure and applied geophysics}, volume = {174}, journal = {Pure and applied geophysics}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-016-1418-5}, pages = {763 -- 777}, year = {2016}, abstract = {The problem of estimating the maximum possible earthquake magnitude m(max) has attracted growing attention in recent years. Due to sparse data, the role of uncertainties becomes crucial. In this work, we determine the uncertainties related to the maximum magnitude in terms of confidence intervals. Using an earthquake catalog of Iran, m(max) is estimated for different predefined levels of confidence in six seismotectonic zones. Assuming the doubly truncated Gutenberg-Richter distribution as a statistical model for earthquake magnitudes, confidence intervals for the maximum possible magnitude of earthquakes are calculated in each zone. While the lower limit of the confidence interval is the magnitude of the maximum observed event, the upper limit is calculated from the catalog and the statistical model. For this aim, we use the original catalog which no declustering methods applied on as well as a declustered version of the catalog. Based on the study by Holschneider et al. (Bull Seismol Soc Am 101(4): 1649-1659, 2011), the confidence interval for m(max) is frequently unbounded, especially if high levels of confidence are required. In this case, no information is gained from the data. Therefore, we elaborate for which settings finite confidence levels are obtained. 
In this work, Iran is divided into six seismotectonic zones, namely Alborz, Azerbaijan, Zagros, Makran, Kopet Dagh, Central Iran. Although calculations of the confidence interval in Central Iran and Zagros seismotectonic zones are relatively acceptable for meaningful levels of confidence, results in Kopet Dagh, Alborz, Azerbaijan and Makran are not that much promising. The results indicate that estimating m(max) from an earthquake catalog for reasonable levels of confidence alone is almost impossible.}, language = {en} } @article{SalamatZoellerAmini2019, author = {Salamat, Mona and Z{\"o}ller, Gert and Amini, Morteza}, title = {Prediction of the Maximum Expected Earthquake Magnitude in Iran}, series = {Pure and applied geophysics}, volume = {176}, journal = {Pure and applied geophysics}, number = {8}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-019-02141-3}, pages = {3425 -- 3438}, year = {2019}, abstract = {This paper concerns the problem of predicting the maximum expected earthquake magnitude $\mu$ in a future time interval $T_f$ given a catalog covering a time period $T$ in the past. Different studies show the divergence of the confidence interval of the maximum possible earthquake magnitude $m_{max}$ for high levels of confidence (Salamat et al. 2017). Therefore, $m_{max}$ should be better replaced by $\mu$ (Holschneider et al. 2011). In a previous study (Salamat et al. 2018), $\mu$ is estimated for an instrumental earthquake catalog of Iran from 1900 onwards with a constant level of completeness ($m_0 = 5.5$). In the current study, the Bayesian methodology developed by Z{\"o}ller et al. (2014, 2015) is applied for the purpose of predicting $\mu$ based on the catalog consisting of both historical and instrumental parts. The catalog is first subdivided into six subcatalogs corresponding to six seismotectonic zones, and each of those zone catalogs is subsequently subdivided according to changes in completeness level and magnitude uncertainty. 
For this, broad and small error distributions are considered for historical and instrumental earthquakes, respectively. We assume that earthquakes follow a Poisson process in time and Gutenberg-Richter law in the magnitude domain with a priori unknown a and b values which are first estimated by Bayes' theorem and subsequently used to estimate $\mu$. Imposing different values of $m_{max}$ for different seismotectonic zones namely Alborz, Azerbaijan, Central Iran, Zagros, Kopet Dagh and Makran, the results show considerable probabilities for the occurrence of earthquakes with $M_w \geq 7.5$ in short $T_f$, whereas for long $T_f$, $\mu$ is almost equal to $m_{max}$.}, language = {en} } @article{SalamatZoellerZareetal.2018, author = {Salamat, Mona and Z{\"o}ller, Gert and Zare, Mehdi and Amini, Mortaza}, title = {The maximum expected earthquake magnitudes in different future time intervals of six seismotectonic zones of Iran and its surroundings}, series = {Journal of seismology}, volume = {22}, journal = {Journal of seismology}, number = {6}, publisher = {Springer}, address = {Dordrecht}, issn = {1383-4649}, doi = {10.1007/s10950-018-9780-7}, pages = {1485 -- 1498}, year = {2018}, abstract = {One of the crucial components in seismic hazard analysis is the estimation of the maximum earthquake magnitude and associated uncertainty. In the present study, the uncertainty related to the maximum expected magnitude mu is determined in terms of confidence intervals for an imposed level of confidence. Previous work by Salamat et al. (Pure Appl Geophys 174:763-777, 2017) shows the divergence of the confidence interval of the maximum possible magnitude m(max) for high levels of confidence in six seismotectonic zones of Iran. In this work, the maximum expected earthquake magnitude mu is calculated in a predefined finite time interval and imposed level of confidence. 
For this, we use a conceptual model based on a doubly truncated Gutenberg-Richter law for magnitudes with constant b-value and calculate the posterior distribution of mu for the time interval T-f in future. We assume a stationary Poisson process in time and a Gutenberg-Richter relation for magnitudes. The upper bound of the magnitude confidence interval is calculated for different time intervals of 30, 50, and 100 years and imposed levels of confidence alpha = 0.5, 0.1, 0.05, and 0.01. The posterior distribution of waiting times T-f to the next earthquake with a given magnitude equal to 6.5, 7.0, and 7.5 are calculated in each zone. In order to find the influence of declustering, we use the original and declustered version of the catalog. The earthquake catalog of the territory of Iran and surroundings are subdivided into six seismotectonic zones Alborz, Azerbaijan, Central Iran, Zagros, Kopet Dagh, and Makran. We assume the maximum possible magnitude m(max) = 8.5 and calculate the upper bound of the confidence interval of mu in each zone. The results indicate that for short time intervals equal to 30 and 50 years and imposed levels of confidence 1 - alpha = 0.95 and 0.90, the probability distribution of mu is around mu = 7.16-8.23 in all seismic zones.}, language = {en} } @article{SchoppaSiegVogeletal.2020, author = {Schoppa, Lukas and Sieg, Tobias and Vogel, Kristin and Z{\"o}ller, Gert and Kreibich, Heidi}, title = {Probabilistic flood loss models for companies}, series = {Water resources research}, volume = {56}, journal = {Water resources research}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2020WR027649}, pages = {19}, year = {2020}, abstract = {Flood loss modeling is a central component of flood risk analysis. Conventionally, this involves univariable and deterministic stage-damage functions. 
Recent advancements in the field promote the use of multivariable and probabilistic loss models, which consider variables beyond inundation depth and account for prediction uncertainty. Although companies contribute significantly to total loss figures, novel modeling approaches for companies are lacking. Scarce data and the heterogeneity among companies impede the development of company flood loss models. We present three multivariable flood loss models for companies from the manufacturing, commercial, financial, and service sector that intrinsically quantify prediction uncertainty. Based on object-level loss data (n = 1,306), we comparatively evaluate the predictive capacity of Bayesian networks, Bayesian regression, and random forest in relation to deterministic and probabilistic stage-damage functions, serving as benchmarks. The company loss data stem from four postevent surveys in Germany between 2002 and 2013 and include information on flood intensity, company characteristics, emergency response, private precaution, and resulting loss to building, equipment, and goods and stock. We find that the multivariable probabilistic models successfully identify and reproduce essential relationships of flood damage processes in the data. The assessment of model skill focuses on the precision of the probabilistic predictions and reveals that the candidate models outperform the stage-damage functions, while differences among the proposed models are negligible. Although the combination of multivariable and probabilistic loss estimation improves predictive accuracy over the entire data set, wide predictive distributions stress the necessity for the quantification of uncertainty.}, language = {en} }