@article{WangHainzlZoeller2014, author = {Wang, Lifeng and Hainzl, Sebastian and Z{\"o}ller, Gert}, title = {Assessment of stress coupling among the inter-, co- and post-seismic phases related to the 2004 M6 Parkfield earthquake}, series = {Geophysical journal international}, volume = {197}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggu102}, pages = {1858 -- 1868}, year = {2014}, abstract = {Due to large uncertainties and non-uniqueness in fault slip inversion, investigating stress coupling through the direct comparison of independent slip inversions, for example between the coseismic slip distribution and the interseismic slip deficit, may lead to ambiguous conclusions. In this study, we therefore adopt the stress-constrained joint inversion in the Bayesian approach of Wang et al. and implement the physical hypothesis of stress coupling as a prior. We test the hypothesis that interseismic locking is coupled with the coseismic rupture and that the early post-seismic deformation is a stress relaxation process in response to the coseismic stress perturbation. We characterize the role of stress coupling in the seismic cycle by evaluating how efficiently the model explains the available data. Taking the 2004 M6 Parkfield earthquake as a study case, we find that the stress coupling hypothesis is in agreement with the data. The coseismic rupture zone is found to be strongly locked during the interseismic phase, and the post-seismic slip zone is indicated to be weakly creeping. The post-seismic deformation plays an important role in rebuilding stress in the coseismic rupture zone. Based on our results for the stress accumulation during both the inter- and post-seismic phases in the coseismic rupture zone, together with the coseismic stress drop, we estimate a recurrence time of M6 earthquakes in Parkfield of around 23-41 yr, suggesting that the interval of 38 yr between the two most recent M6 events in Parkfield is not surprising.}, language = {en} }
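The recurrence-time estimate at the end of this abstract amounts to a simple stress budget: the coseismic stress drop divided by the rate at which inter- and post-seismic deformation reload the rupture zone. In schematic form (our notation, not taken from the paper),

\[
T_{\mathrm{rec}} \;\approx\; \frac{\Delta\tau_{\mathrm{co}}}{\dot{\tau}_{\mathrm{inter}} + \dot{\tau}_{\mathrm{post}}},
\]

so that, purely as an illustration, a stress drop of order 3 MPa reloaded at roughly 0.1 MPa/yr gives about 30 yr, on the order of the quoted 23-41 yr range.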
@article{SalamatZareHolschneideretal.2016, author = {Salamat, Mona and Zare, Mehdi and Holschneider, Matthias and Z{\"o}ller, Gert}, title = {Calculation of Confidence Intervals for the Maximum Magnitude of Earthquakes in Different Seismotectonic Zones of Iran}, series = {Pure and applied geophysics}, volume = {174}, journal = {Pure and applied geophysics}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-016-1418-5}, pages = {763 -- 777}, year = {2016}, abstract = {The problem of estimating the maximum possible earthquake magnitude m(max) has attracted growing attention in recent years. Because the data are sparse, the role of uncertainties becomes crucial. In this work, we determine the uncertainties related to the maximum magnitude in terms of confidence intervals. Using an earthquake catalog of Iran, m(max) is estimated for different predefined levels of confidence in six seismotectonic zones. Assuming the doubly truncated Gutenberg-Richter distribution as a statistical model for earthquake magnitudes, confidence intervals for the maximum possible magnitude of earthquakes are calculated in each zone. While the lower limit of the confidence interval is the magnitude of the largest observed event, the upper limit is calculated from the catalog and the statistical model. For this purpose, we use both the original catalog, to which no declustering method has been applied, and a declustered version of the catalog. Based on the study by Holschneider et al. (Bull Seismol Soc Am 101(4): 1649-1659, 2011), the confidence interval for m(max) is frequently unbounded, especially if high levels of confidence are required. In this case, no information is gained from the data. Therefore, we elaborate for which settings finite confidence intervals are obtained. In this work, Iran is divided into six seismotectonic zones, namely Alborz, Azerbaijan, Zagros, Makran, Kopet Dagh, and Central Iran. While the confidence intervals in the Central Iran and Zagros zones are acceptable for meaningful levels of confidence, the results in Kopet Dagh, Alborz, Azerbaijan and Makran are far less promising. The results indicate that estimating m(max) from an earthquake catalog alone is almost impossible at reasonable levels of confidence.}, language = {en} }
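The confidence intervals in Salamat et al. follow the test-inversion construction of Holschneider et al. (2011): under the doubly truncated Gutenberg-Richter law the sample maximum of n events has CDF F(m)^n, and a candidate m(max) is rejected once the observed maximum becomes too improbable under it. The Python sketch below is a minimal illustration of the upper bound with purely hypothetical parameters (not the Iranian catalog), and it also shows why the interval is frequently unbounded at high confidence:

import numpy as np
from scipy.optimize import brentq

def trunc_gr_cdf(m, beta, m_c, m_max):
    # CDF of the doubly truncated Gutenberg-Richter (exponential) law.
    return (1.0 - np.exp(-beta * (m - m_c))) / (1.0 - np.exp(-beta * (m_max - m_c)))

def ci_upper_bound(mu_obs, n, beta, m_c, alpha):
    # Upper end of the one-sided confidence interval at level 1 - alpha:
    # a candidate m_max is rejected once observing a sample maximum no
    # larger than mu_obs has probability below alpha (test inversion).
    # As m_max -> infinity that probability tends to G(mu_obs)^n; if the
    # limit still exceeds alpha, no finite upper bound exists.
    limit = (1.0 - np.exp(-beta * (mu_obs - m_c))) ** n
    if limit > alpha:
        return np.inf  # unbounded interval, the case stressed in the abstract
    f = lambda m_max: trunc_gr_cdf(mu_obs, beta, m_c, m_max) ** n - alpha
    return brentq(f, mu_obs + 1e-9, mu_obs + 20.0)

# Illustrative values only (b = 1, i.e. beta = ln 10):
print(ci_upper_bound(mu_obs=7.4, n=200, beta=np.log(10), m_c=4.5, alpha=0.05))

With these illustrative numbers the limiting probability exceeds alpha, so the function returns inf: exactly the uninformative, unbounded case the abstract describes for high confidence levels and modest sample sizes.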
@article{FiedlerHainzlZoelleretal.2018, author = {Fiedler, Bernhard and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Detection of Gutenberg-Richter b-Value Changes in Earthquake Time Series}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {5A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120180091}, pages = {2778 -- 2787}, year = {2018}, abstract = {The Gutenberg-Richter relation for earthquake magnitudes is the most famous empirical law in seismology. It states that the frequency of earthquake magnitudes follows an exponential distribution; this has been found to be a robust feature of seismicity above the completeness magnitude, independent of whether global, regional, or local seismicity is analyzed. However, the exponent b of the distribution varies significantly in space and time, which is important for process understanding and seismic hazard assessment, particularly because the Gutenberg-Richter b-value acts as a proxy for the stress state and quantifies the ratio of large to small earthquakes. In our work, we focus on the automatic detection of statistically significant temporal changes of the b-value in seismicity data. In our approach, we use Bayes factors for model selection and estimate multiple change-points of the frequency-magnitude distribution in time. The method is first applied to synthetic data, showing its capability to detect change-points as a function of sample size and b-value contrast. Finally, we apply this approach to examples of observational data sets for which b-value changes have previously been reported. Our analysis of foreshock and aftershock sequences related to mainshocks, as well as earthquake swarms, shows that only a portion of the b-value changes is statistically significant.}, language = {en} }
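The core of the Fiedler et al. method is Bayes-factor model selection between frequency-magnitude models with and without change points. Because magnitudes above completeness are exponential under the Gutenberg-Richter law, a conjugate Gamma prior on the exponential rate makes the marginal likelihoods analytic. The sketch below implements a single-change-point variant of this idea; it is a minimal illustration with an assumed Gamma(1, 1) prior and synthetic data, not the authors' code (which handles multiple change-points):

import numpy as np
from scipy.special import gammaln

def log_marginal(x, a0=1.0, b0=1.0):
    # Log marginal likelihood of x_i = m_i - m_c ~ Exponential(beta)
    # under a conjugate Gamma(a0, b0) prior on beta (analytic integral).
    n = len(x)
    return (a0 * np.log(b0) - gammaln(a0)
            + gammaln(a0 + n) - (a0 + n) * np.log(b0 + np.sum(x)))

def log_bayes_factor(x):
    # Single change-point model (uniform prior over split positions)
    # versus one exponential law for the whole series.
    n = len(x)
    terms = [log_marginal(x[:k]) + log_marginal(x[k:]) for k in range(2, n - 1)]
    log_m1 = np.logaddexp.reduce(terms) - np.log(len(terms))
    return log_m1 - log_marginal(x)

rng = np.random.default_rng(1)
# Synthetic magnitudes above completeness: b jumps from 1.0 to 0.7
# halfway through the series (beta = b * ln 10).
x = np.concatenate([rng.exponential(1.0 / (1.0 * np.log(10)), 500),
                    rng.exponential(1.0 / (0.7 * np.log(10)), 500)])
print(log_bayes_factor(x))  # clearly positive: the change point is detected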
@article{HainzlZoellerMain2006, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Main, Ian}, title = {Introduction to special issue: Dynamics of seismicity patterns and earthquake triggering - Preface}, series = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, volume = {424}, journal = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, number = {Special issue}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0040-1951}, doi = {10.1016/j.tecto.2006.03.034}, pages = {135 -- 138}, year = {2006}, language = {en} }

@article{SharmaHainzlZoelleretal.2020, author = {Sharma, Shubham and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Is Coulomb stress the best choice for aftershock forecasting?}, series = {Journal of geophysical research : Solid earth}, volume = {125}, journal = {Journal of geophysical research : Solid earth}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2020JB019553}, pages = {12}, year = {2020}, abstract = {The Coulomb failure stress (CFS) criterion is the most commonly used method for predicting spatial distributions of aftershocks following large earthquakes. However, large uncertainties are always associated with the calculation of Coulomb stress change. These uncertainties mainly arise from nonunique slip inversions and unknown receiver faults; for the latter in particular, results depend strongly on the assumed receiver mechanism. Based on binary tests (aftershocks yes/no), recent studies suggest that alternative stress quantities, a distance-slip probabilistic model, and deep-neural-network (DNN) approaches are all superior to CFS with a predefined receiver mechanism. To challenge this conclusion, which might have large implications, we use 289 slip inversions from the SRCMOD database to calculate more realistic CFS values for a layered half-space and variable receiver mechanisms. We also analyze the effects of the magnitude cutoff, grid-size variation, and aftershock duration to verify the use of receiver operating characteristic (ROC) analysis for ranking the stress metrics. The observations suggest that introducing a layered half-space does not improve the stress maps and ROC curves. However, results improve significantly for larger aftershocks and shorter time periods, but without changing the ranking. Going beyond binary testing, we apply alternative statistics to test the ability to estimate aftershock numbers; these confirm that simple stress metrics perform better than both the classic Coulomb failure stress calculations and the distance-slip probabilistic model.}, language = {en} }
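Two ingredients of the Sharma et al. testing setup are easy to state: the Coulomb failure stress change on an assumed receiver, \(\Delta \mathrm{CFS} = \Delta\tau + \mu' \Delta\sigma_n\), and ROC analysis of binary aftershock occurrence on a grid. The Python sketch below shows both on synthetic data; the effective friction mu_eff = 0.4, the grid size, and the occurrence rule are illustrative assumptions, not values from the paper:

import numpy as np

def delta_cfs(d_tau, d_sigma_n, mu_eff=0.4):
    # Coulomb failure stress change on a given receiver orientation:
    # shear stress change plus effective friction times the normal
    # stress change (unclamping taken as positive).
    return d_tau + mu_eff * d_sigma_n

def roc_curve(metric, occurred):
    # Sweep a decision threshold over the metric and accumulate
    # true/false positive rates for the binary aftershock flags.
    order = np.argsort(-metric)
    hits = np.cumsum(occurred[order])
    false_alarms = np.cumsum(1.0 - occurred[order])
    return false_alarms / false_alarms[-1], hits / hits[-1]

rng = np.random.default_rng(0)
stress = rng.normal(size=5000)                    # stand-in stress metric per grid cell
occurred = (stress + rng.normal(size=5000) > 1.0).astype(float)
fpr, tpr = roc_curve(stress, occurred)
auc = np.sum(np.diff(fpr) * 0.5 * (tpr[1:] + tpr[:-1]))  # trapezoidal AUC
print("AUC:", auc)                                # 0.5 = no skill, 1.0 = perfect

Ranking competing stress metrics then reduces to comparing their AUC values on the same occurrence grid, which is the comparison the paper scrutinizes.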
@article{SchoppaSiegVogeletal.2020, author = {Schoppa, Lukas and Sieg, Tobias and Vogel, Kristin and Z{\"o}ller, Gert and Kreibich, Heidi}, title = {Probabilistic flood loss models for companies}, series = {Water resources research}, volume = {56}, journal = {Water resources research}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2020WR027649}, pages = {19}, year = {2020}, abstract = {Flood loss modeling is a central component of flood risk analysis. Conventionally, this involves univariable and deterministic stage-damage functions. Recent advancements in the field promote the use of multivariable and probabilistic loss models, which consider variables beyond inundation depth and account for prediction uncertainty. Although companies contribute significantly to total loss figures, novel modeling approaches for companies are lacking. Scarce data and the heterogeneity among companies impede the development of company flood loss models. We present three multivariable flood loss models for companies from the manufacturing, commercial, financial, and service sectors that intrinsically quantify prediction uncertainty. Based on object-level loss data (n = 1,306), we comparatively evaluate the predictive capacity of Bayesian networks, Bayesian regression, and random forest in relation to deterministic and probabilistic stage-damage functions, serving as benchmarks. The company loss data stem from four post-event surveys in Germany between 2002 and 2013 and include information on flood intensity, company characteristics, emergency response, private precaution, and resulting loss to building, equipment, and goods and stock. We find that the multivariable probabilistic models successfully identify and reproduce essential relationships of flood damage processes in the data. The assessment of model skill focuses on the precision of the probabilistic predictions and reveals that the candidate models outperform the stage-damage functions, while differences among the proposed models are negligible. Although the combination of multivariable and probabilistic loss estimation improves predictive accuracy over the entire data set, wide predictive distributions stress the necessity for the quantification of uncertainty.}, language = {en} }

@article{RichterHainzlDahmetal.2020, author = {Richter, Gudrun and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert}, title = {Stress-based, statistical modeling of the induced seismicity at the Groningen gas field}, series = {Environmental earth sciences}, volume = {79}, journal = {Environmental earth sciences}, number = {11}, publisher = {Springer}, address = {New York}, issn = {1866-6280}, doi = {10.1007/s12665-020-08941-4}, pages = {15}, year = {2020}, abstract = {Groningen is the largest onshore gas field under production in Europe. The pressure depletion of the gas field started in 1963. In 1991, the first induced micro-earthquakes were located at reservoir level, with rates increasing over the following decades. Most of these events are of magnitude less than 2.0 and cannot be felt. However, the maximum observed magnitudes increased continuously over the years until the largest event (ML = 3.6) was recorded in 2014, which finally led to the decision to reduce production. This causal sequence illustrates the crucial role of understanding and modeling the relation between production and induced seismicity for economic planning and hazard assessment. Here we test whether the induced seismicity related to gas production can be modeled by the statistical response of fault networks with rate-and-state-dependent frictional behavior. We use the long and complete local seismic catalog, together with detailed information on production-induced changes at the reservoir level, to test different seismicity models. Both changes in fluid pressure and in reservoir compaction are tested as inputs to approximate the Coulomb stress changes. We find that the rate-and-state model with a constant tectonic background seismicity rate can reproduce the observed long delay of the seismicity onset. In contrast, so-called Coulomb failure models with instantaneous earthquake nucleation need to assume that all faults are initially far from a critical state of stress to explain the delay. Our rate-and-state model based on the fluid pore pressure fits the spatiotemporal pattern of the seismicity best, and the fit further improves when fault density and orientation are taken into account. Despite its simplicity, with only three free parameters, the rate-and-state model can reproduce the main statistical features of the observed activity.}, language = {en} }
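The rate-and-state seismicity model referred to by Richter et al. goes back to Dieterich (1994): the seismicity rate R responds to a Coulomb stressing history through a state variable gamma, with R = r / (gamma * sdot_bg) and d(gamma)/dt = (1 - gamma * sdot(t)) / (A * sigma). The Euler-integration sketch below reproduces the characteristic delayed onset under a slowly ramping stressing rate; all parameter values are illustrative assumptions, not the Groningen calibration:

import numpy as np

def dieterich_rate(t, stressing_rate, r_bg, a_sigma, sdot_bg):
    # Dieterich (1994) seismicity response: R(t) = r_bg / (gamma * sdot_bg),
    # with the state variable gamma integrated by explicit Euler:
    # d(gamma)/dt = (1 - gamma * sdot(t)) / (a_sigma).
    gamma = 1.0 / sdot_bg            # steady state under background stressing
    rates = np.empty_like(t)
    dts = np.diff(t, prepend=t[0])
    for i in range(len(t)):
        gamma += dts[i] * (1.0 - gamma * stressing_rate[i]) / a_sigma
        rates[i] = r_bg / (gamma * sdot_bg)
    return rates

# Illustrative stressing history: constant tectonic loading, then a slowly
# ramping production-induced component after t = 10 (units: MPa/yr and yr).
t = np.linspace(0.0, 50.0, 2001)
sdot = np.where(t > 10.0, 0.01 + 0.002 * (t - 10.0), 0.01)
rates = dieterich_rate(t, sdot, r_bg=1.0, a_sigma=0.05, sdot_bg=0.01)
print(rates[0], rates[-1])  # near-background rate at first, delayed rise later

Because gamma relaxes toward 1/sdot(t) only on the timescale a_sigma/sdot, the modeled rate lags the stressing history, which is the mechanism behind the long delay of the seismicity onset discussed in the abstract.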