@article{EngbertHainzlZoelleretal.1998, author = {Engbert, Ralf and Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Testing for unstable periodic orbits to characterize spatiotemporal dynamics}, year = {1998}, language = {en} } @article{FiedlerHainzlZoelleretal.2018, author = {Fiedler, Bernhard and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Detection of Gutenberg-Richter b-Value Changes in Earthquake Time Series}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {5A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120180091}, pages = {2778 -- 2787}, year = {2018}, abstract = {The Gutenberg-Richter relation for earthquake magnitudes is the most famous empirical law in seismology. It states that the frequency of earthquake magnitudes follows an exponential distribution; this has been found to be a robust feature of seismicity above the completeness magnitude, and it is independent of whether global, regional, or local seismicity is analyzed. However, the exponent b of the distribution varies significantly in space and time, which is important for process understanding and seismic hazard assessment, particularly because the Gutenberg-Richter b-value acts as a proxy for the stress state and quantifies the ratio of large to small earthquakes. In our work, we focus on the automatic detection of statistically significant temporal changes of the b-value in seismicity data. In our approach, we use Bayes factors for model selection and estimate multiple change-points of the frequency-magnitude distribution in time. The method is first applied to synthetic data, showing its capability to detect change-points as a function of the sample size and the b-value contrast. Finally, we apply this approach to examples of observational data sets for which b-value changes have previously been stated. Our analysis of foreshock and aftershock sequences related to mainshocks, as well as of earthquake swarms, shows that only a portion of the b-value changes is statistically significant.}, language = {en} } @article{FiedlerZoellerHolschneideretal.2018, author = {Fiedler, Bernhard and Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {Multiple Change-Point Detection in Spatiotemporal Seismicity Data}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {3A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170236}, pages = {1147 -- 1159}, year = {2018}, abstract = {Earthquake rates are driven by tectonic stress buildup, earthquake-induced stress changes, and transient aseismic processes. Although the origin of the first two sources is known, transient aseismic processes are more difficult to detect. However, knowledge of the associated changes in earthquake activity is of great interest, because it might help identify natural aseismic deformation patterns such as slow-slip events, as well as the occurrence of induced seismicity related to human activities. For this goal, we develop a Bayesian approach to identify change-points in seismicity data automatically.
Using the Bayes factor, we select a suitable model, estimate possible change-points, and additionally use a likelihood ratio test to calculate the significance of the change in intensity. The approach is extended to spatiotemporal data to detect the area in which the changes occur. The method is first applied to synthetic data, showing its capability to detect real change-points. Finally, we apply this approach to observational data from Oklahoma and observe statistically significant changes of seismicity in space and time.}, language = {en} } @article{HainzlBrietzkeZoeller2010, author = {Hainzl, Sebastian and Brietzke, Gilbert B. and Z{\"o}ller, Gert}, title = {Quantitative earthquake forecasts resulting from static stress triggering}, issn = {0148-0227}, doi = {10.1029/2010jb007473}, year = {2010}, abstract = {In recent years, the triggering of earthquakes has been discussed controversially with respect to the underlying mechanisms and the capability to evaluate the resulting seismic hazard. Apart from static stress interactions, other mechanisms including dynamic stress transfer have been proposed to be part of a complex triggering process. Exploiting the theoretical relation between long-term earthquake rates and stressing rate, we demonstrate that static stress changes resulting from an earthquake rupture allow us to predict quantitatively the aftershock activity without tuning specific model parameters. These forecasts are found to be in excellent agreement with all first-order characteristics of aftershocks, in particular, (1) the total number, (2) the power-law distance decay, (3) the scaling of the productivity with the main shock magnitude, (4) the foreshock probability, and (5) the empirical B{\aa}th law providing the maximum aftershock magnitude, which supports the conclusion that static stress transfer is the major mechanism of earthquake triggering.}, language = {en} } @article{HainzlZoellerBrietzkeetal.2013, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Brietzke, Gilbert B. and Hinzen, Klaus-G.}, title = {Comparison of deterministic and stochastic earthquake simulators for fault interactions in the Lower Rhine Embayment, Germany}, series = {Geophysical journal international}, volume = {195}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggt271}, pages = {684 -- 694}, year = {2013}, abstract = {Time-dependent probabilistic seismic hazard assessment requires a stochastic description of earthquake occurrences. While short-term seismicity models are well-constrained by observations, the recurrences of characteristic on-fault earthquakes are only derived from theoretical considerations, uncertain palaeo-events or proxy data. Despite the involved uncertainties and complexity, simple statistical models for a quasi-periodic recurrence of on-fault events are implemented in seismic hazard assessments. To test the applicability of statistical models, such as the Brownian relaxation oscillator or the stress release model, we perform a systematic comparison with deterministic simulations based on rate- and state-dependent friction, high-resolution representations of fault systems and quasi-dynamic rupture propagation. For the specific fault network of the Lower Rhine Embayment, Germany, we run both stochastic and deterministic model simulations based on the same fault geometries and stress interactions.
Our results indicate that the stochastic simulators are able to reproduce the first-order characteristics of the major earthquakes on isolated faults as well as on coupled faults with moderate stress interactions. However, we find that all tested statistical models fail to reproduce the characteristics of strongly coupled faults, because multisegment rupturing resulting from a spatiotemporally correlated stress field is underestimated in the stochastic simulators. Our results suggest that stochastic models have to be extended by multirupture probability distributions to provide more reliable results.}, language = {en} } @article{HainzlZoellerKurths2000, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Self-organization of spatio-temporal earthquake clusters}, year = {2000}, language = {en} } @article{HainzlZoellerKurths1999, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Self-organized criticality model for earthquakes : Quiescence, foreshocks and aftershocks}, year = {1999}, language = {en} } @article{HainzlZoellerKurths1999a, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Similar power laws for foreshock and aftershock sequences in a spring block model for earthquakes}, year = {1999}, language = {en} } @article{HainzlZoellerKurthsetal.2000, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen and Zschau, Jochen}, title = {Seismic quiescence as an indicator for large earthquakes in a system of self-organized criticality}, year = {2000}, language = {en} } @article{HainzlZoellerMain2006, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Main, Ian}, title = {Introduction to special issue: Dynamics of seismicity patterns and earthquake triggering - Preface}, series = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, volume = {424}, journal = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, number = {Special issue}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0040-1951}, doi = {10.1016/j.tecto.2006.03.034}, pages = {135 -- 138}, year = {2006}, language = {en} } @article{HainzlZoellerWang2010, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Wang, Rongjiang}, title = {Impact of the receiver fault distribution on aftershock activity}, issn = {0148-0227}, doi = {10.1029/2008jb006224}, year = {2010}, abstract = {Aftershock models are usually based either on purely empirical relations ignoring the physical mechanism or on deterministic calculations of stress changes on a predefined receiver fault orientation. Here we investigate the effect of considering more realistic fault systems in models based on static Coulomb stress changes. For that purpose, we perform earthquake simulations with elastic half-space stress interactions, rate-and-state dependent frictional earthquake nucleation, and extended ruptures with heterogeneous (fractal) slip distributions. We find that the consideration of earthquake nucleation on multiple receiver fault orientations does not influence the shape of the temporal Omori-type aftershock decay, but significantly changes the predicted spatial patterns and the total number of triggered events. So-called stress shadows with decreased activity almost vanish, and activation decays continuously with increasing distance from the main shock rupture.
The total aftershock productivity, which is shown to be almost independent of the assumed background rate, increases significantly if multiple receiver fault planes exist. The application to the 1992 M7.3 Landers, California, aftershock sequence indicates good agreement with the locations and the total productivity of the observed directly triggered aftershocks.}, language = {en} } @article{HolschneiderZoellerClementsetal.2014, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Clements, R. and Schorlemmer, Danijel}, title = {Can we test for the maximum possible earthquake magnitude?}, series = {Journal of geophysical research : Solid earth}, volume = {119}, journal = {Journal of geophysical research : Solid earth}, number = {3}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2013JB010319}, pages = {2019 -- 2028}, year = {2014}, language = {en} } @article{HolschneiderZoellerHainzl2011, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Estimation of the maximum possible magnitude in the framework of a doubly truncated Gutenberg-Richter Model}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100289}, pages = {1649 -- 1659}, year = {2011}, abstract = {We discuss to what extent a given earthquake catalog and the assumption of a doubly truncated Gutenberg-Richter distribution for the earthquake magnitudes allow for the calculation of confidence intervals for the maximum possible magnitude M. We show that, without further assumptions such as the existence of an upper bound of M, only very limited information may be obtained. In a frequentist formulation, for each confidence level $\alpha$ the confidence interval diverges with finite probability. In a Bayesian formulation, the posterior distribution of the upper magnitude is not normalizable. We conclude that the common approach to derive confidence intervals from the variance of a point estimator fails. Technically, this problem can be overcome by introducing an upper bound $\tilde{M}$ for the maximum magnitude. Then the Bayesian posterior distribution can be normalized, and its variance decreases with the number of observed events. However, because the posterior depends significantly on the choice of the unknown value of $\tilde{M}$, the resulting confidence intervals are essentially meaningless. The use of an informative prior distribution accounting for pre-knowledge of M is also of little use, because the prior is only modified in the case of the occurrence of an extreme event. Our results suggest that the maximum possible magnitude M should better be replaced by $M_T$, the maximum expected magnitude in a given time interval T, for which the calculation of exact confidence intervals becomes straightforward.
From a physical point of view, numerical models of the earthquake process adjusted to specific fault regions may be a powerful alternative to overcome the shortcomings of purely statistical inference.}, language = {en} } @article{LaudanZoellerThieken2020, author = {Laudan, Jonas and Z{\"o}ller, Gert and Thieken, Annegret}, title = {Flash floods versus river floods}, series = {Natural Hazards and Earth System Sciences}, volume = {20}, journal = {Natural Hazards and Earth System Sciences}, publisher = {European Geophysical Society}, address = {Katlenburg-Lindau}, issn = {1684-9981}, doi = {10.5194/nhess-20-999-2020}, pages = {999 -- 1023}, year = {2020}, abstract = {River floods are among the most damaging natural hazards that frequently occur in Germany. Flooding causes high economic losses and impacts many residents. In 2016, several southern German municipalities were hit by flash floods after unexpectedly severe heavy rainfall, while in 2013 widespread river flooding had occurred. This study investigates and compares the psychological impacts of river floods and flash floods and potential consequences for precautionary behaviour. Data were collected using computer-aided telephone interviews that were conducted among flood-affected households around 9 months after each damaging event. This study applies Bayesian statistics and negative binomial regressions to test the suitability of psychological indicators to predict the precaution motivation of individuals. The results show that it is not the particular flood type but rather the severity and local impacts of the event that are crucial for the different, and potentially negative, impacts on mental health. According to the data used, however, predictions of the individual precaution motivation should not be based on the derived psychological indicators - i.e. coping appraisal, threat appraisal, burden and evasion - since their explanatory power was generally low and results are, for the most part, non-significant. Only burden reveals a significant positive relation to planned precaution regarding weak flash floods. In contrast to weak flash floods and river floods, the perceived threat of strong flash floods is significantly lower although feelings of burden and lower coping appraisals are more pronounced. Further research is needed to better include psychological assessment procedures and to focus on alternative data sources regarding floods and the connected precaution motivation of affected residents.}, language = {en} } @article{MaghsoudiCescaHainzletal.2015, author = {Maghsoudi, Samira and Cesca, Simone and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert and Kaiser, Diethelm}, title = {Maximum Magnitude of Completeness in a Salt Mine}, series = {Bulletin of the Seismological Society of America}, volume = {105}, journal = {Bulletin of the Seismological Society of America}, number = {3}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140039}, pages = {1491 -- 1501}, year = {2015}, abstract = {In this study, we analyze acoustic emission (AE) data recorded at the Morsleben salt mine, Germany, to assess the catalog completeness, which plays an important role in any seismicity analysis. We introduce the new concept of a magnitude completeness interval consisting of a maximum magnitude of completeness ($M_c^{max}$) in addition to the well-known minimum magnitude of completeness.
This is required to describe the completeness of the catalog, both for the smallest events (for which the detection performance may be low) and for the largest ones (which may be missed because of sensor saturation). We suggest a method to compute the maximum magnitude of completeness and calculate it for a spatial grid based on (1) the prior estimation of the saturation magnitude at each sensor, (2) the correction of the detection probability function at each sensor, including a drop in the detection performance when it saturates, and (3) the combination of the detection probabilities of all sensors to obtain the network detection performance. The method is tested using about 130,000 AE events recorded in a period of five weeks, with sources confined within a small depth interval, and an example of the spatial distribution of $M_c^{max}$ is derived. The comparison between the spatial distributions of $M_c^{max}$ and of the maximum possible magnitude ($M_{max}$), which is here derived using a recently introduced Bayesian approach, indicates that $M_{max}$ exceeds $M_c^{max}$ in some parts of the mine. This suggests that some large and important events may be missed in the catalog, which could lead to a bias in the hazard evaluation.}, language = {en} } @article{RichterHainzlDahmetal.2020, author = {Richter, Gudrun and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert}, title = {Stress-based, statistical modeling of the induced seismicity at the Groningen gas field}, series = {Environmental earth sciences}, volume = {79}, journal = {Environmental earth sciences}, number = {11}, publisher = {Springer}, address = {New York}, issn = {1866-6280}, doi = {10.1007/s12665-020-08941-4}, pages = {15}, year = {2020}, abstract = {Groningen is the largest onshore gas field under production in Europe. The pressure depletion of the gas field started in 1963. In 1991, the first induced micro-earthquakes were located at reservoir level, with rates increasing in the following decades. Most of these events are of magnitude less than 2.0 and cannot be felt. However, the maximum observed magnitudes continuously increased over the years until the largest, significant event with $M_L$ = 3.6 was recorded in 2014, which finally led to the decision to reduce production. This causal sequence illustrates the crucial role of understanding and modeling the relation between production and induced seismicity for economic planning and hazard assessment. Here we test whether the induced seismicity related to gas exploration can be modeled by the statistical response of fault networks with rate-and-state-dependent frictional behavior. We use the long and complete local seismic catalog and additionally detailed information on production-induced changes at the reservoir level to test different seismicity models. Both the changes of the fluid pressure and of the reservoir compaction are tested as input to approximate the Coulomb stress changes. We find that the rate-and-state model with a constant tectonic background seismicity rate can reproduce the observed long delay of the seismicity onset. In contrast, so-called Coulomb failure models with instantaneous earthquake nucleation need to assume that all faults are initially far from a critical state of stress to explain the delay. Our rate-and-state model based on the fluid pore pressure fits the spatiotemporal pattern of the seismicity best, where the fit further improves by taking the fault density and orientation into account.
Despite its simplicity, with only three free parameters, the rate-and-state model can reproduce the main statistical features of the observed activity.}, language = {en} } @article{SalamatZareHolschneideretal.2016, author = {Salamat, Mona and Zare, Mehdi and Holschneider, Matthias and Z{\"o}ller, Gert}, title = {Calculation of Confidence Intervals for the Maximum Magnitude of Earthquakes in Different Seismotectonic Zones of Iran}, series = {Pure and applied geophysics}, volume = {174}, journal = {Pure and applied geophysics}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-016-1418-5}, pages = {763 -- 777}, year = {2016}, abstract = {The problem of estimating the maximum possible earthquake magnitude $m_{max}$ has attracted growing attention in recent years. Due to sparse data, the role of uncertainties becomes crucial. In this work, we determine the uncertainties related to the maximum magnitude in terms of confidence intervals. Using an earthquake catalog of Iran, $m_{max}$ is estimated for different predefined levels of confidence in six seismotectonic zones. Assuming the doubly truncated Gutenberg-Richter distribution as a statistical model for earthquake magnitudes, confidence intervals for the maximum possible magnitude of earthquakes are calculated in each zone. While the lower limit of the confidence interval is the magnitude of the maximum observed event, the upper limit is calculated from the catalog and the statistical model. For this aim, we use the original catalog, to which no declustering method was applied, as well as a declustered version of the catalog. Based on the study by Holschneider et al. (Bull Seismol Soc Am 101(4): 1649-1659, 2011), the confidence interval for $m_{max}$ is frequently unbounded, especially if high levels of confidence are required. In this case, no information is gained from the data. Therefore, we elaborate for which settings finite confidence intervals are obtained. In this work, Iran is divided into six seismotectonic zones, namely Alborz, Azerbaijan, Zagros, Makran, Kopet Dagh, and Central Iran. Although calculations of the confidence interval in the Central Iran and Zagros seismotectonic zones are relatively acceptable for meaningful levels of confidence, the results in Kopet Dagh, Alborz, Azerbaijan, and Makran are less promising. The results indicate that estimating $m_{max}$ from an earthquake catalog alone for reasonable levels of confidence is almost impossible.}, language = {en} } @article{SalamatZoellerAmini2019, author = {Salamat, Mona and Z{\"o}ller, Gert and Amini, Morteza}, title = {Prediction of the Maximum Expected Earthquake Magnitude in Iran}, series = {Pure and applied geophysics}, volume = {176}, journal = {Pure and applied geophysics}, number = {8}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-019-02141-3}, pages = {3425 -- 3438}, year = {2019}, abstract = {This paper concerns the problem of predicting the maximum expected earthquake magnitude $\mu$ in a future time interval $T_f$ given a catalog covering a time period T in the past. Different studies show the divergence of the confidence interval of the maximum possible earthquake magnitude $m_{max}$ for high levels of confidence (Salamat et al. 2017). Therefore, $m_{max}$ should better be replaced by $\mu$ (Holschneider et al. 2011). In a previous study (Salamat et al. 2018), $\mu$ was estimated for an instrumental earthquake catalog of Iran from 1900 onwards with a constant level of completeness ($m_0$ = 5.5).
In the current study, the Bayesian methodology developed by Z{\"o}ller et al. (2014, 2015) is applied for the purpose of predicting $\mu$ based on the catalog consisting of both historical and instrumental parts. The catalog is first subdivided into six subcatalogs corresponding to six seismotectonic zones, and each of those zone catalogs is subsequently subdivided according to changes in completeness level and magnitude uncertainty. For this, broad and narrow error distributions are considered for historical and instrumental earthquakes, respectively. We assume that earthquakes follow a Poisson process in time and the Gutenberg-Richter law in the magnitude domain, with a priori unknown a- and b-values, which are first estimated by Bayes' theorem and subsequently used to estimate $\mu$. Imposing different values of $m_{max}$ for the different seismotectonic zones, namely Alborz, Azerbaijan, Central Iran, Zagros, Kopet Dagh, and Makran, the results show considerable probabilities for the occurrence of earthquakes with $M_w \geq 7.5$ in short $T_f$, whereas for long $T_f$, $\mu$ is almost equal to $m_{max}$.}, language = {en} } @article{SalamatZoellerZareetal.2018, author = {Salamat, Mona and Z{\"o}ller, Gert and Zare, Mehdi and Amini, Mortaza}, title = {The maximum expected earthquake magnitudes in different future time intervals of six seismotectonic zones of Iran and its surroundings}, series = {Journal of seismology}, volume = {22}, journal = {Journal of seismology}, number = {6}, publisher = {Springer}, address = {Dordrecht}, issn = {1383-4649}, doi = {10.1007/s10950-018-9780-7}, pages = {1485 -- 1498}, year = {2018}, abstract = {One of the crucial components in seismic hazard analysis is the estimation of the maximum earthquake magnitude and associated uncertainty. In the present study, the uncertainty related to the maximum expected magnitude $\mu$ is determined in terms of confidence intervals for an imposed level of confidence. Previous work by Salamat et al. (Pure Appl Geophys 174:763-777, 2017) shows the divergence of the confidence interval of the maximum possible magnitude $m_{max}$ for high levels of confidence in six seismotectonic zones of Iran. In this work, the maximum expected earthquake magnitude $\mu$ is calculated in a predefined finite time interval and for an imposed level of confidence. For this, we use a conceptual model based on a doubly truncated Gutenberg-Richter law for magnitudes with constant b-value and calculate the posterior distribution of $\mu$ for the time interval $T_f$ in the future. We assume a stationary Poisson process in time and a Gutenberg-Richter relation for magnitudes. The upper bound of the magnitude confidence interval is calculated for different time intervals of 30, 50, and 100 years and imposed levels of confidence $\alpha$ = 0.5, 0.1, 0.05, and 0.01. The posterior distributions of the waiting times $T_f$ to the next earthquake with a given magnitude equal to 6.5, 7.0, and 7.5 are calculated in each zone. In order to find the influence of declustering, we use the original and a declustered version of the catalog. The earthquake catalog of the territory of Iran and its surroundings is subdivided into six seismotectonic zones: Alborz, Azerbaijan, Central Iran, Zagros, Kopet Dagh, and Makran. We assume the maximum possible magnitude $m_{max}$ = 8.5 and calculate the upper bound of the confidence interval of $\mu$ in each zone.
The results indicate that for short time intervals of 30 and 50 years and imposed levels of confidence $1-\alpha$ = 0.95 and 0.90, the probability distribution of $\mu$ is around $\mu$ = 7.16-8.23 in all seismic zones.}, language = {en} } @article{SchoppaSiegVogeletal.2020, author = {Schoppa, Lukas and Sieg, Tobias and Vogel, Kristin and Z{\"o}ller, Gert and Kreibich, Heidi}, title = {Probabilistic flood loss models for companies}, series = {Water resources research}, volume = {56}, journal = {Water resources research}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2020WR027649}, pages = {19}, year = {2020}, abstract = {Flood loss modeling is a central component of flood risk analysis. Conventionally, this involves univariable and deterministic stage-damage functions. Recent advancements in the field promote the use of multivariable and probabilistic loss models, which consider variables beyond inundation depth and account for prediction uncertainty. Although companies contribute significantly to total loss figures, novel modeling approaches for companies are lacking. Scarce data and the heterogeneity among companies impede the development of company flood loss models. We present three multivariable flood loss models for companies from the manufacturing, commercial, financial, and service sectors that intrinsically quantify prediction uncertainty. Based on object-level loss data (n = 1,306), we comparatively evaluate the predictive capacity of Bayesian networks, Bayesian regression, and random forest in relation to deterministic and probabilistic stage-damage functions, serving as benchmarks. The company loss data stem from four postevent surveys in Germany between 2002 and 2013 and include information on flood intensity, company characteristics, emergency response, private precaution, and resulting loss to building, equipment, and goods and stock. We find that the multivariable probabilistic models successfully identify and reproduce essential relationships of flood damage processes in the data. The assessment of model skill focuses on the precision of the probabilistic predictions and reveals that the candidate models outperform the stage-damage functions, while differences among the proposed models are negligible. Although the combination of multivariable and probabilistic loss estimation improves predictive accuracy over the entire data set, wide predictive distributions stress the necessity for the quantification of uncertainty.}, language = {en} } @article{SharmaHainzlZoelleretal.2020, author = {Sharma, Shubham and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Is Coulomb stress the best choice for aftershock forecasting?}, series = {Journal of geophysical research : Solid earth}, volume = {125}, journal = {Journal of geophysical research : Solid earth}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2020JB019553}, pages = {12}, year = {2020}, abstract = {The Coulomb failure stress (CFS) criterion is the most commonly used method for predicting spatial distributions of aftershocks following large earthquakes. However, large uncertainties are always associated with the calculation of Coulomb stress change. The uncertainties mainly arise due to nonunique slip inversions and unknown receiver faults; especially for the latter, results are highly dependent on the choice of the assumed receiver mechanism.
Based on binary tests (aftershocks yes/no), recent studies suggest that alternative stress quantities, a distance-slip probabilistic model, as well as deep neural network (DNN) approaches are all superior to CFS with a predefined receiver mechanism. To challenge this conclusion, which might have large implications, we use 289 slip inversions from the SRCMOD database to calculate more realistic CFS values for a layered half-space and variable receiver mechanisms. We also analyze the effect of the magnitude cutoff, grid size variation, and aftershock duration to verify the use of receiver operating characteristic (ROC) analysis for the ranking of stress metrics. The observations suggest that introducing a layered half-space does not improve the stress maps and ROC curves. However, results improve significantly for larger aftershocks and shorter time periods, but without changing the ranking. We also go beyond binary testing and apply alternative statistics to test the ability to estimate aftershock numbers; these confirm that simple stress metrics perform better than the classic Coulomb failure stress calculations and also better than the distance-slip probabilistic model.}, language = {en} } @article{ShcherbakovZhuangZoelleretal.2019, author = {Shcherbakov, Robert and Zhuang, Jiancang and Z{\"o}ller, Gert and Ogata, Yosihiko}, title = {Forecasting the magnitude of the largest expected earthquake}, series = {Nature Communications}, volume = {10}, journal = {Nature Communications}, publisher = {Nature Publishing Group}, address = {London}, issn = {2041-1723}, doi = {10.1038/s41467-019-11958-4}, pages = {11}, year = {2019}, abstract = {The majority of earthquakes occur unexpectedly and can trigger subsequent sequences of events that can culminate in more powerful earthquakes. This self-exciting nature of seismicity generates complex clustering of earthquakes in space and time. Therefore, the problem of constraining the magnitude of the largest expected earthquake during a future time interval is of critical importance in mitigating earthquake hazard. We address this problem by developing a methodology to compute the probabilities for such extreme earthquakes to be above certain magnitudes. We combine Bayesian methods with extreme value theory and assume that the occurrence of earthquakes can be described by the Epidemic Type Aftershock Sequence process. We analyze in detail the application of this methodology to the 2016 Kumamoto, Japan, earthquake sequence.
We are able to retrospectively estimate the probabilities of having large subsequent earthquakes during several stages of the evolution of this sequence.}, language = {en} } @article{ShinZoellerHolschneideretal.2011, author = {Shin, Seoleun and Z{\"o}ller, Gert and Holschneider, Matthias and Reich, Sebastian}, title = {A multigrid solver for modeling complex interseismic stress fields}, series = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, volume = {37}, journal = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, number = {8}, publisher = {Elsevier}, address = {Oxford}, issn = {0098-3004}, doi = {10.1016/j.cageo.2010.11.011}, pages = {1075 -- 1082}, year = {2011}, abstract = {We develop a multigrid, multiple time stepping scheme to reduce computational efforts for calculating complex stress interactions in a strike-slip 2D planar fault for the simulation of seismicity. The key elements of the multilevel solver are separation of length scale, grid-coarsening, and hierarchy. In this study the complex stress interactions are split into two parts: the first, with a small contribution, is computed on a coarse level, and the rest, for strong interactions, on a fine level. This partition leads to a significant reduction of the number of computations. The reduction of complexity is even enhanced by combining the multigrid with multiple time stepping. Computational efficiency is enhanced by a factor of 10 while retaining reasonable accuracy, compared to the original full matrix-vector multiplication. The accuracy of the solution and the computational efficiency depend on a given cut-off radius that splits the multiplications into the two parts. The multigrid scheme is constructed in such a way that it conserves stress in the entire half-space.}, language = {en} } @article{WangHainzlZoeller2014, author = {Wang, Lifeng and Hainzl, Sebastian and Z{\"o}ller, Gert}, title = {Assessment of stress coupling among the inter-, co- and post-seismic phases related to the 2004 M6 Parkfield earthquake}, series = {Geophysical journal international}, volume = {197}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggu102}, pages = {1858 -- 1868}, year = {2014}, abstract = {Due to large uncertainties and non-uniqueness in fault slip inversion, the investigation of stress coupling based on the direct comparison of independent slip inversions, for example, between the coseismic slip distribution and the interseismic slip deficit, may lead to ambiguous conclusions. In this study, we therefore adopt the stress-constrained joint inversion in the Bayesian approach of Wang et al., and implement the physical hypothesis of stress coupling as a prior. We test the hypothesis that interseismic locking is coupled with the coseismic rupture, and that the early post-seismic deformation is a stress relaxation process in response to the coseismic stress perturbation. We characterize the role of stress coupling in the seismic cycle by evaluating the efficiency of the model to explain the available data.
Taking the 2004 M6 Parkfield earthquake as a case study, we find that the stress coupling hypothesis is in agreement with the data. The coseismic rupture zone is found to be strongly locked during the interseismic phase, and the post-seismic slip zone is indicated to be weakly creeping. The post-seismic deformation plays an important role in rebuilding stress in the coseismic rupture zone. Based on our results for the stress accumulation during both the inter- and post-seismic phases in the coseismic rupture zone, together with the coseismic stress drop, we estimate a recurrence time of M6 earthquakes in Parkfield of around 23-41 yr, suggesting that the duration of 38 yr between the two recent M6 events in Parkfield is not a surprise.}, language = {en} } @article{WangHainzlZoelleretal.2012, author = {Wang, Lifeng and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Stress- and aftershock-constrained joint inversions for coseismic and postseismic slip applied to the 2004 M6.0 Parkfield earthquake}, series = {Journal of geophysical research : Solid earth}, volume = {117}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2011JB009017}, pages = {18}, year = {2012}, abstract = {Both aftershocks and geodetically measured postseismic displacements are important markers of the stress relaxation process following large earthquakes. Postseismic displacements can be related to creep-like relaxation in the vicinity of the coseismic rupture by means of inversion methods. However, the results of slip inversions are typically non-unique and subject to large uncertainties. Therefore, we explore the possibility to improve inversions by mechanical constraints. In particular, we take into account the physical understanding that postseismic deformation is stress-driven and occurs in the coseismically stressed zone. We perform joint inversions for coseismic and postseismic slip in a Bayesian framework for the case of the 2004 M6.0 Parkfield earthquake. We perform a number of inversions with different constraints and calculate their statistical significance. According to information criteria, the best result corresponds to a physically reasonable model constrained by the stress condition (namely, postseismic creep is driven by coseismic stress) and the condition that coseismic slip and large aftershocks are disjunct. This model explains 97\% of the coseismic displacements and 91\% of the postseismic displacements during days 1-5 following the Parkfield event, respectively. It indicates that the major postseismic deformation can be generally explained by a stress relaxation process for the Parkfield case. This result also indicates that the data to constrain the coseismic slip model could be enriched postseismically.
For the 2004 Parkfield event, we additionally observe an asymmetric relaxation process on the two sides of the fault, which can be explained by a material contrast across the fault with a ratio of $\sim$1.15 in seismic velocity.}, language = {en} } @article{WangZoellerHainzl2015, author = {Wang, Lifeng and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Joint Determination of Slip and Stress Drop in a Bayesian Inversion Approach: A Case Study for the 2010 M8.8 Maule Earthquake}, series = {Pure and applied geophysics}, volume = {172}, journal = {Pure and applied geophysics}, number = {2}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-014-0868-x}, pages = {375 -- 388}, year = {2015}, abstract = {Stress drop is a key factor in earthquake mechanics and engineering seismology. However, stress drop calculations based on fault slip can be significantly biased, particularly due to subjectively determined smoothing conditions in the traditional least-square slip inversion. In this study, we introduce a mechanically constrained Bayesian approach to simultaneously invert for fault slip and stress drop based on geodetic measurements. A Gaussian distribution for stress drop is implemented in the inversion as a prior. We have performed several synthetic tests to evaluate the stability and reliability of the inversion approach, considering different fault discretizations, fault geometries, utilized datasets, and variability of the slip direction. We finally apply the approach to the 2010 M8.8 Maule earthquake and invert for the coseismic slip and stress drop simultaneously. Two fault geometries from the literature are tested. Our results indicate that the derived slip models based on both fault geometries are similar, showing major slip north of the hypocenter and relatively weak slip in the south, as indicated in the slip models of other studies. The derived mean stress drop is 5-6 MPa, which is close to the stress drop of $\sim$7 MPa that was independently determined according to force balance in this region by Luttrell et al. (J Geophys Res, 2011). These findings indicate that stress drop values can be consistently extracted from geodetic data.}, language = {en} } @article{Zoeller2022, author = {Z{\"o}ller, Gert}, title = {A note on the estimation of the maximum possible earthquake magnitude based on extreme value theory for the Groningen Gas Field}, series = {The bulletin of the Seismological Society of America : BSSA}, volume = {112}, journal = {The bulletin of the Seismological Society of America : BSSA}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerito, Calif.}, issn = {0037-1106}, doi = {10.1785/0120210307}, pages = {1825 -- 1831}, year = {2022}, abstract = {Extreme value statistics is a popular and frequently used tool to model the occurrence of large earthquakes. The problem of poor statistics arising from rare events is addressed by taking advantage of the validity of general statistical properties in asymptotic regimes. In this note, I argue that the use of extreme value statistics for the purpose of practically modeling the tail of the frequency-magnitude distribution of earthquakes can produce biased and thus misleading results, because it is unknown to what degree the tail of the true distribution is sampled by data. Using synthetic data allows one to quantify this bias in detail.
The implicit assumption that the true $M_{max}$ is close to the maximum observed magnitude $M_{max,observed}$ restricts the class of potential models a priori to those with $M_{max} = M_{max,observed} + \Delta M$ with an increment $\Delta M \approx 0.5 \ldots 1.2$. This corresponds to the simple heuristic method suggested by Wheeler (2009) and labeled ``$M_{max}$ equals $M_{obs}$ plus an increment''. The incomplete consideration of the entire model family for the frequency-magnitude distribution neglects, however, the scenario of a large, so far unobserved earthquake.}, language = {en} } @article{Zoeller2018, author = {Z{\"o}ller, Gert}, title = {A statistical model for earthquake recurrence based on the assimilation of paleoseismicity, historic seismicity, and instrumental seismicity}, series = {Journal of geophysical research : Solid earth}, volume = {123}, journal = {Journal of geophysical research : Solid earth}, number = {6}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2017JB015099}, pages = {4906 -- 4921}, year = {2018}, abstract = {Paleoearthquakes and historic earthquakes are the most important source of information for the estimation of long-term earthquake recurrence intervals in fault zones, because the corresponding sequences cover more than one seismic cycle. However, these events are often rare, dating uncertainties are enormous, and missing or misinterpreted events lead to additional problems. In the present study, I assume that the time to the next major earthquake depends on the rate of small and intermediate events between the large ones in terms of a clock change model. Mathematically, this leads to a Brownian passage time distribution for recurrence intervals. I take advantage of an earlier finding that under certain assumptions the aperiodicity of this distribution can be related to the Gutenberg-Richter b-value, which can be estimated easily from instrumental seismicity in the region under consideration. In this way, both parameters of the Brownian passage time distribution can be attributed to accessible seismological quantities. This makes it possible to reduce the uncertainties in the estimation of the mean recurrence interval, especially for short paleoearthquake sequences and high dating errors. Using a Bayesian framework for parameter estimation results in a statistical model for earthquake recurrence intervals that assimilates paleoearthquake sequences and instrumental data in a simple way. I present illustrative case studies from Southern California and compare the method with the commonly used approach of exponentially distributed recurrence times based on a stationary Poisson process.}, language = {en} } @article{Zoeller2013, author = {Z{\"o}ller, Gert}, title = {Convergence of the frequency-magnitude distribution of global earthquakes - maybe in 200 years}, series = {Geophysical research letters}, volume = {40}, journal = {Geophysical research letters}, number = {15}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0094-8276}, doi = {10.1002/grl.50779}, pages = {3873 -- 3877}, year = {2013}, abstract = {I study the ability to estimate the tail of the frequency-magnitude distribution of global earthquakes. While power-law scaling for small earthquakes is supported by data, the tail remains speculative. In a recent study, Bell et al. (2013) claim that the frequency-magnitude distribution of global earthquakes converges to a tapered Pareto distribution.
I show that this finding results from data-fitting errors, namely from the biased maximum-likelihood estimation of the corner magnitude $\theta$ in strongly undersampled models. In particular, the estimation of $\theta$ depends solely on the few largest events in the catalog. Taking this into account, I compare various state-of-the-art models for the global frequency-magnitude distribution. After discarding undersampled models, the remaining ones, including the unbounded Gutenberg-Richter distribution, all perform equally well and are therefore indistinguishable. Convergence to a specific distribution, if it ever takes place, requires at least about 200 years of homogeneous recording of global seismicity.}, language = {en} } @article{ZoellerBenZion2014, author = {Z{\"o}ller, Gert and Ben-Zion, Yehuda}, title = {Large earthquake hazard of the San Jacinto fault zone, CA, from long record of simulated seismicity assimilating the available instrumental and paleoseismic data}, series = {Pure and applied geophysics}, volume = {171}, journal = {Pure and applied geophysics}, number = {11}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-014-0783-1}, pages = {2955 -- 2965}, year = {2014}, abstract = {We investigate spatio-temporal properties of earthquake patterns in the San Jacinto fault zone (SJFZ), California, between Cajon Pass and the Superstition Hill Fault, using a long record of simulated seismicity constrained by available seismological and geological data. The model provides an effective realization of a large segmented strike-slip fault zone in a 3D elastic half-space, with a heterogeneous distribution of static friction chosen to represent several clear step-overs at the surface. The simulated synthetic catalog reproduces well the basic statistical features of the instrumental seismicity recorded at the SJFZ area since 1981. The model also produces events larger than those included in the short instrumental record, consistent with paleo-earthquakes documented at sites along the SJFZ for the last 1,400 years. The general agreement between the synthetic and observed data allows us to address with the long-simulated seismicity questions related to large earthquakes and expected seismic hazard. The interaction between $m \geq 7$ events on different sections of the SJFZ is found to be close to random. The hazard associated with $m \geq 7$ events on the SJFZ increases significantly if the long record of simulated seismicity is taken into account. The model simulations indicate that the recent increased number of observed intermediate SJFZ earthquakes is a robust statistical feature heralding the occurrence of $m \geq 7$ earthquakes. The hypocenters of the $m \geq 5$ events in the simulation results move progressively towards the hypocenter of the upcoming $m \geq 7$ earthquake.}, language = {en} } @article{ZoellerHainzlHolschneider2010, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Holschneider, Matthias}, title = {Recurrence of Large Earthquakes : Bayesian inference from catalogs in the presence of magnitude uncertainties}, issn = {0033-4553}, doi = {10.1007/s00024-010-0078-0}, year = {2010}, abstract = {We present a Bayesian method that allows continuous updating of the aperiodicity of the recurrence time distribution of large earthquakes based on a catalog with magnitudes above a completeness threshold.
The approach uses a recently proposed renewal model for seismicity and allows the inclusion of magnitude uncertainties in a straightforward manner. Errors accounting for grouped magnitudes and random errors are studied and discussed. The results indicate that a stable and realistic value of the aperiodicity can be predicted at an early state of seismicity evolution, even though only a small number of large earthquakes have occurred to date. Furthermore, we demonstrate that magnitude uncertainties can drastically influence the results and can therefore not be neglected. We show how to correct for the bias caused by magnitude errors. For the region of Parkfield we find that the aperiodicity, or the coefficient of variation, is clearly higher than in studies that are solely based on the large earthquakes.}, language = {en} } @article{ZoellerHainzlKurths2001, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Kurths, J{\"u}rgen}, title = {Observation of growing correlation length as an indicator for critical point behavior prior to large earthquakes}, year = {2001}, language = {en} } @article{ZoellerHainzlTilmannetal.2020, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Tilmann, Frederik and Woith, Heiko and Dahm, Torsten}, title = {Comment on: Wikelski, Martin; M{\"u}ller, Uschi; Scocco, Paola; Catorci, Andrea; Desinov, Lev V.; Belyaev, Mikhail Y.; Keim, Daniel A.; Pohlmeier, Winfried; Fechteler, Gerhard; Mai, Martin P. : Potential short-term earthquake forecasting by farm animal monitoring. - Ethology. - 126 (2020), 9. - S. 931 - 941. -ISSN 0179-1613. - eISSN 1439-0310. - doi 10.1111/eth.13078}, series = {Ethology}, volume = {127}, journal = {Ethology}, number = {3}, publisher = {Wiley}, address = {Hoboken}, issn = {0179-1613}, doi = {10.1111/eth.13105}, pages = {302 -- 306}, year = {2020}, abstract = {Based on an analysis of continuous monitoring of farm animal behavior in the region of the 2016 M6.6 Norcia earthquake in Italy, Wikelski et al. (2020; Ethology, 126, 931-941) conclude that anomalies in animal activity anticipate subsequent seismic activity and that this finding might help to design a ``short-term earthquake forecasting method''. We show that this result is based on an incomplete analysis and misleading interpretations. Applying state-of-the-art methods of statistics, we demonstrate that the proposed anticipatory patterns cannot be distinguished from random patterns, and consequently, the observed anomalies in animal activity do not have any forecasting power.}, language = {en} } @article{ZoellerHolschneider2014, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Induced seismicity: What is the size of the largest expected earthquake?}, series = {The bulletin of the Seismological Society of America}, volume = {104}, journal = {The bulletin of the Seismological Society of America}, number = {6}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140195}, pages = {3153 -- 3158}, year = {2014}, abstract = {The injection of fluids is a well-known origin for the triggering of earthquake sequences. The growing number of projects related to enhanced geothermal systems, fracking, and others has led to the question of which maximum earthquake magnitude can be expected as a consequence of fluid injection. This question is addressed from the perspective of statistical analysis.
Using basic empirical laws of earthquake statistics, we estimate the magnitude $M_T$ of the maximum expected earthquake in a predefined future time window $T_f$. A case study of the fluid injection site at Paradox Valley, Colorado, demonstrates that the magnitude m 4.3 of the largest observed earthquake on 27 May 2000 lies well within the expectation from past seismicity, without adjusting any parameters. Vice versa, for a given maximum tolerable earthquake at an injection site, we can constrain the corresponding amount of injected fluids that must not be exceeded within predefined confidence bounds.}, language = {en} } @article{ZoellerHolschneider2016, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Maximum Possible and the Maximum Expected Earthquake Magnitude for Production-Induced Earthquakes at the Gas Field in Groningen, The Netherlands}, series = {Bulletin of the Seismological Society of America}, volume = {106}, journal = {Bulletin of the Seismological Society of America}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120160220}, pages = {2917 -- 2921}, year = {2016}, abstract = {The Groningen gas field serves as a natural laboratory for production-induced earthquakes, because no earthquakes were observed before the beginning of gas production. Increasing gas production rates resulted in growing earthquake activity and eventually in the occurrence of the 2012 $M_w$ 3.6 Huizinge earthquake. At least since this event, a detailed seismic hazard and risk assessment including estimation of the maximum earthquake magnitude is considered to be necessary to decide on the future gas production. In this short note, we first apply state-of-the-art methods of mathematical statistics to derive confidence intervals for the maximum possible earthquake magnitude $m_{max}$. Second, we calculate the maximum expected magnitude $M_T$ in the time between 2016 and 2024 for three assumed gas-production scenarios. Using broadly accepted physical assumptions and a 90\% confidence level, we suggest a value of $m_{max}$ = 4.4, whereas $M_T$ varies between 3.9 and 4.3, depending on the production scenario.}, language = {en} } @article{ZoellerHolschneider2016a, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Earthquake History in a Fault Zone Tells Us Almost Nothing about $m_{max}$}, series = {Seismological research letters}, volume = {87}, journal = {Seismological research letters}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0895-0695}, doi = {10.1785/0220150176}, pages = {132 -- 137}, year = {2016}, abstract = {In the present study, we summarize and evaluate the endeavors from recent years to estimate the maximum possible earthquake magnitude $m_{max}$ from observed data. In particular, we use basic and physically motivated assumptions to identify best cases and worst cases in terms of the lowest and highest degree of uncertainty of $m_{max}$. In a general framework, we demonstrate that earthquake data and earthquake proxy data recorded in a fault zone provide almost no information about $m_{max}$ unless reliable and homogeneous data covering a long time interval, including several earthquakes with magnitudes close to $m_{max}$, are available. Even if detailed earthquake information from some centuries, including historic and paleoearthquakes, is given, only very few events, namely the largest, will contribute at all to the estimation of $m_{max}$, and this results in unacceptably high uncertainties.
As a consequence, estimators of $m_{max}$ in a fault zone that are based solely on earthquake-related information from this region have to be dismissed.}, language = {en} } @article{ZoellerHolschneiderHainzl2013, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {The Maximum Earthquake Magnitude in a Time Horizon: Theory and Case Studies}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {2A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120120013}, pages = {860 -- 875}, year = {2013}, abstract = {We show how the maximum magnitude within a predefined future time horizon may be estimated from an earthquake catalog within the context of Gutenberg-Richter statistics. The aim is to carry out a rigorous uncertainty assessment and calculate precise confidence intervals based on an imposed level of confidence $\alpha$. In detail, we present a model for the estimation of the maximum magnitude to occur in a time interval $T_f$ in the future, given a complete earthquake catalog for a time period T in the past and, if available, paleoseismic events. For this goal, we solely assume that earthquakes follow a stationary Poisson process in time with unknown productivity $\Lambda$ and obey the Gutenberg-Richter law in the magnitude domain with unknown b-value. The random variables $\Lambda$ and b are estimated by means of Bayes' theorem with noninformative prior distributions. Results based on synthetic catalogs and on retrospective calculations of historic catalogs from the highly active area of Japan and the low-seismicity but high-risk Lower Rhine Embayment (LRE) in Germany indicate that the estimated magnitudes are close to the true values. Finally, we discuss whether the techniques can be extended to meet the safety requirements for critical facilities such as nuclear power plants. For this aim, the maximum magnitude for all times has to be considered. In agreement with earlier work, we find that this parameter is not a useful quantity from the viewpoint of statistical inference.}, language = {en} } @article{ZoellerHolschneiderHainzletal.2014, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian and Zhuang, Jiancang}, title = {The largest expected earthquake magnitudes in Japan: The statistical perspective}, series = {Bulletin of the Seismological Society of America}, volume = {104}, journal = {Bulletin of the Seismological Society of America}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120130103}, pages = {769 -- 779}, year = {2014}, abstract = {Earthquake catalogs are probably the most informative data source about spatiotemporal seismicity evolution. The catalog quality in one of the most active seismogenic zones in the world, Japan, is excellent, although changes in quality arising, for example, from an evolving network are clearly present. Here, we seek the best estimate for the largest expected earthquake in a given future time interval from a combination of historic and instrumental earthquake catalogs. We extend the technique introduced by Z{\"o}ller et al. (2013) to estimate the maximum magnitude in a time window of length $T_f$ for earthquake catalogs with a varying level of completeness. In particular, we consider the case in which two types of catalogs are available: a historic catalog and an instrumental catalog.
This leads to competing interests with respect to the estimation of the two parameters of the Gutenberg-Richter law: the b-value and the event rate $\lambda$ above a given lower-magnitude threshold (the a-value). The b-value is estimated most precisely from the frequently occurring small earthquakes; however, the tendency of small events to cluster in aftershocks, swarms, etc., violates the assumption of a Poisson process that is used for the estimation of $\lambda$. We suggest addressing this conflict by estimating b solely from instrumental seismicity and using large-magnitude events from historic catalogs for the earthquake rate estimation. Applying the method to Japan, there is a probability of about 20\% that the maximum expected magnitude during any future time interval of length $T_f$ = 30 years is $m \geq 9.0$. Studies of different subregions in Japan indicate high probabilities for M 8 earthquakes along the Tohoku arc and relatively low probabilities in the Tokai, Tonankai, and Nankai regions. Finally, for scenarios related to long time horizons and high confidence levels, the maximum expected magnitude will be around 10.}, language = {en} } @article{ZoellerUllahBindietal.2017, author = {Z{\"o}ller, Gert and Ullah, Shahid and Bindi, Dino and Parolai, Stefano and Mikhailova, Natalya}, title = {The largest expected earthquake magnitudes in Central Asia}, series = {Seismicity, fault rupture and earthquake hazards in slowly deforming regions}, volume = {432}, journal = {Seismicity, fault rupture and earthquake hazards in slowly deforming regions}, publisher = {The Geological Society}, address = {London}, isbn = {978-1-86239-745-3}, issn = {0305-8719}, doi = {10.1144/SP432.3}, pages = {29 -- 40}, year = {2017}, abstract = {The knowledge of the largest expected earthquake magnitude in a region is one of the key issues in probabilistic seismic hazard calculations and the estimation of worst-case scenarios. Earthquake catalogues are the most informative source of information for the inference of earthquake magnitudes. We analysed the earthquake catalogue for Central Asia with respect to the largest expected magnitudes $m_T$ in a pre-defined time horizon $T_f$ using a recently developed statistical methodology, extended by the explicit probabilistic consideration of magnitude errors. For this aim, we assumed broad error distributions for historical events, whereas the magnitudes of recently recorded instrumental earthquakes had smaller errors. The results indicate high probabilities for the occurrence of large events ($M \geq 8$), even in short time intervals of a few decades. The expected magnitudes relative to the assumed maximum possible magnitude are generally higher for intermediate-depth earthquakes (51-300 km) than for shallow events (0-50 km). For long future time horizons, for example, a few hundred years, earthquakes with $M \geq 8.5$ have to be taken into account, although, apart from the 1889 Chilik earthquake, it is probable that no such event occurred during the observation period of the catalogue.}, language = {en} }