@article{ZoellerHolschneiderHainzl2013, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {The Maximum Earthquake Magnitude in a Time Horizon: Theory and Case Studies}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {2A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120120013}, pages = {860 -- 875}, year = {2013}, abstract = {We show how the maximum magnitude within a predefined future time horizon may be estimated from an earthquake catalog within the context of Gutenberg-Richter statistics. The aim is to carry out a rigorous uncertainty assessment, and calculate precise confidence intervals based on an imposed level of confidence alpha. In detail, we present a model for the estimation of the maximum magnitude to occur in a time interval T-f in the future, given a complete earthquake catalog for a time period T in the past and, if available, paleoseismic events. For this goal, we solely assume that earthquakes follow a stationary Poisson process in time with unknown productivity Lambda and obey the Gutenberg-Richter law in the magnitude domain with unknown b-value. The random variables Lambda and b are estimated by means of Bayes theorem with noninformative prior distributions. Results based on synthetic catalogs and on retrospective calculations of historic catalogs from the highly active area of Japan and the low-seismicity, but high-risk region lower Rhine embayment (LRE) in Germany indicate that the estimated magnitudes are close to the true values. Finally, we discuss whether the techniques can be extended to meet the safety requirements for critical facilities such as nuclear power plants. For this aim, the maximum magnitude for all times has to be considered. In agreement with earlier work, we find that this parameter is not a useful quantity from the viewpoint of statistical inference.}, language = {en} } @article{ZoellerHolschneiderHainzletal.2014, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian and Zhuang, Jiancang}, title = {The largest expected earthquake magnitudes in Japan: The statistical perspective}, series = {Bulletin of the Seismological Society of America}, volume = {104}, journal = {Bulletin of the Seismological Society of America}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120130103}, pages = {769 -- 779}, year = {2014}, abstract = {Earthquake catalogs are probably the most informative data source about spatiotemporal seismicity evolution. The catalog quality in one of the most active seismogenic zones in the world, Japan, is excellent, although changes in quality arising, for example, from an evolving network are clearly present. Here, we seek the best estimate for the largest expected earthquake in a given future time interval from a combination of historic and instrumental earthquake catalogs. We extend the technique introduced by Zoller et al. (2013) to estimate the maximum magnitude in a time window of length T-f for earthquake catalogs with varying level of completeness. In particular, we consider the case in which two types of catalogs are available: a historic catalog and an instrumental catalog.
This leads to competing interests with respect to the estimation of the two parameters from the Gutenberg-Richter law, the b-value and the event rate lambda above a given lower-magnitude threshold (the a-value). The b-value is estimated most precisely from the frequently occurring small earthquakes; however, the tendency of small events to cluster in aftershocks, swarms, etc. violates the assumption of a Poisson process that is used for the estimation of lambda. We suggest addressing this conflict by estimating b solely from instrumental seismicity and using large magnitude events from historic catalogs for the earthquake rate estimation. Applying the method to Japan, there is a probability of about 20\% that the maximum expected magnitude during any future time interval of length T-f = 30 years is m >= 9.0. Studies of different subregions in Japan indicate high probabilities for M 8 earthquakes along the Tohoku arc and relatively low probabilities in the Tokai, Tonankai, and Nankai region. Finally, for scenarios related to long-time horizons and high-confidence levels, the maximum expected magnitude will be around 10.}, language = {en} } @article{BeauvalHainzlScherbaum2006, author = {Beauval, C{\´e}line and Hainzl, Sebastian and Scherbaum, Frank}, title = {The impact of the spatial uniform distribution of seismicity on probabilistic seismic-hazard estimation}, series = {Bulletin of the Seismological Society of America}, volume = {96}, journal = {Bulletin of the Seismological Society of America}, number = {6}, publisher = {GeoScienceWorld}, address = {Alexandria, Va.}, issn = {0037-1106}, doi = {10.1785/0120060073}, pages = {2465 -- 2471}, year = {2006}, abstract = {The first step in the estimation of probabilistic seismic hazard in a region commonly consists of the definition and characterization of the relevant seismic sources. Because in low-seismicity regions seismicity is often rather diffuse and faults are difficult to identify, large areal source zones are mostly used. The corresponding hypothesis is that seismicity is uniformly distributed inside each areal seismic source zone. In this study, the impact of this hypothesis on the probabilistic hazard estimation is quantified through the generation of synthetic spatial seismicity distributions. Fractal seismicity distributions are generated inside a given source zone and probabilistic hazard is computed for a set of sites located inside this zone. In our study, the impact of the spatial seismicity distribution is defined as the deviation from the hazard value obtained for a spatially uniform seismicity distribution. From the generation of a large number of synthetic distributions, the correlation between the fractal dimension D and the impact is derived. The results show that the assumption of spatially uniform seismicity tends to bias the hazard to higher values. The correlation can be used to determine the systematic biases and uncertainties for hazard estimations in real cases, where the fractal dimension has been determined.
We apply the technique in Germany (Cologne area) and in France (Alps).}, language = {en} } @article{EngbertHainzlZoelleretal.1998, author = {Engbert, Ralf and Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Testing for unstable periodic orbits to characterize spatiotemporal dynamics}, year = {1998}, language = {en} } @article{RichterHainzlDahmetal.2020, author = {Richter, Gudrun and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert}, title = {Stress-based, statistical modeling of the induced seismicity at the Groningen gas field}, series = {Environmental earth sciences}, volume = {79}, journal = {Environmental earth sciences}, number = {11}, publisher = {Springer}, address = {New York}, issn = {1866-6280}, doi = {10.1007/s12665-020-08941-4}, pages = {15}, year = {2020}, abstract = {Groningen is the largest onshore gas field under production in Europe. The pressure depletion of the gas field started in 1963. In 1991, the first induced micro-earthquakes were located at reservoir level, with increasing rates in the following decades. Most of these events are of magnitude less than 2.0 and cannot be felt. However, maximum observed magnitudes continuously increased over the years until the largest, significant event with ML=3.6 was recorded in 2014, which finally led to the decision to reduce the production. This causal sequence displays the crucial role of understanding and modeling the relation between production and induced seismicity for economic planning and hazard assessment. Here we test whether the induced seismicity related to gas exploration can be modeled by the statistical response of fault networks with rate-and-state-dependent frictional behavior. We use the long and complete local seismic catalog and additionally detailed information on production-induced changes at the reservoir level to test different seismicity models. Both the changes of the fluid pressure and of the reservoir compaction are tested as input to approximate the Coulomb stress changes. We find that the rate-and-state model with a constant tectonic background seismicity rate can reproduce the observed long delay of the seismicity onset. In contrast, so-called Coulomb failure models with instantaneous earthquake nucleation need to assume that all faults are initially far from a critical state of stress to explain the delay. Our rate-and-state model based on the fluid pore pressure fits the spatiotemporal pattern of the seismicity best, where the fit further improves by taking the fault density and orientation into account. Despite its simplicity with only three free parameters, the rate-and-state model can reproduce the main statistical features of the observed activity.}, language = {en} } @article{WangHainzlZoelleretal.2012, author = {Wang, Lifeng and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Stress- and aftershock-constrained joint inversions for coseismic and postseismic slip applied to the 2004 M6.0 Parkfield earthquake}, series = {Journal of geophysical research : Solid earth}, volume = {117}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2011JB009017}, pages = {18}, year = {2012}, abstract = {Both aftershocks and geodetically measured postseismic displacements are important markers of the stress relaxation process following large earthquakes.
Postseismic displacements can be related to creep-like relaxation in the vicinity of the coseismic rupture by means of inversion methods. However, the results of slip inversions are typically non-unique and subject to large uncertainties. Therefore, we explore the possibility of improving inversions through mechanical constraints. In particular, we take into account the physical understanding that postseismic deformation is stress-driven, and occurs in the coseismically stressed zone. We carry out joint inversions for coseismic and postseismic slip in a Bayesian framework in the case of the 2004 M6.0 Parkfield earthquake. We perform a number of inversions with different constraints, and calculate their statistical significance. According to information criteria, the preferred result is a physically reasonable model constrained by the stress condition (namely, that postseismic creep is driven by coseismic stress) and the condition that coseismic slip and large aftershocks are disjunct. This model explains 97\% of the coseismic displacements and 91\% of the postseismic displacements during days 1-5 following the Parkfield event, respectively. It indicates that the major postseismic deformation can be generally explained by a stress relaxation process for the Parkfield case. This result also indicates that the data to constrain the coseismic slip model could be enriched postseismically. For the 2004 Parkfield event, we additionally observe an asymmetric relaxation process on the two sides of the fault, which can be explained by a material contrast ratio across the fault of about 1.15 in seismic velocity.}, language = {en} } @article{FaenzaHainzlScherbaum2009, author = {Faenza, Licia and Hainzl, Sebastian and Scherbaum, Frank}, title = {Statistical analysis of the Central-Europe seismicity}, issn = {0040-1951}, doi = {10.1016/j.tecto.2008.04.030}, year = {2009}, abstract = {The aim of this paper is to characterize the spatio-temporal distribution of Central-Europe seismicity. Specifically, by using a non-parametric statistical approach, the proportional hazard model, leading to an empirical estimation of the hazard function, we provide some constraints on the time behavior of earthquake generation mechanisms. The results indicate that the most conspicuous characteristic of M-w 4.0+ earthquakes is a temporal clustering lasting a couple of years. This suggests that the probability of occurrence increases immediately after a previous event. After a few years, the process becomes almost time independent. Furthermore, we investigate the cluster properties of the seismicity of Central-Europe, by comparing the obtained result with that of synthetic catalogs generated by the epidemic-type aftershock sequence (ETAS) model, which has previously been successfully applied for short-term clustering. Our results indicate that the ETAS model is not well suited to describe the seismicity as a whole, while it is able to capture the features of the short-term behaviour.
Remarkably, similar results have been previously found for Italy using a higher magnitude threshold.}, language = {en} } @article{HainzlZoellerKurths1999, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Similar power laws for foreshock and aftershock sequences in a spring block model for earthquakes}, year = {1999}, language = {en} } @article{HainzlZoellerKurths1999, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Self-organized criticality model for earthquakes : Quiescence, foreshocks and aftershocks}, year = {1999}, language = {en} } @article{HainzlZoellerKurths2000, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Self-organization of spatio-temporal earthquake clusters}, year = {2000}, language = {en} } @article{Hainzl2004, author = {Hainzl, Sebastian}, title = {Seismicity patterns of earthquake swarms due to fluid intrusion and stress triggering}, issn = {0956-540X}, year = {2004}, abstract = {Earthquake swarms are often assumed to result from an intrusion of fluids into the seismogenic zone, causing seismicity patterns which significantly differ from aftershock sequences. But neither the temporal evolution nor the energy release of earthquake swarms is generally well understood. Because of the lack of descriptive empirical laws, the comparison with model simulations is typically restricted to aspects of the overall behaviour such as the frequency- magnitude distribution. However, previous investigations into a large earthquake swarm which occurred in the year 2000 in Vogtland/northwest Bohemia, Central Europe, revealed some well-defined characteristics which allow a rigorous test of model assumptions. In this study, simulations are performed of a discretized fault plane embedded in a 3-D elastic half- space. Earthquakes are triggered by fluid intrusion as well as by co-seismic and post-seismic stress changes. The model is able to reproduce the main observations, such as the fractal temporal occurrence of earthquakes, embedded aftershock sequences, and a power-law increase of the average seismic moment release. All these characteristics are found to result from stress triggering, whereas fluid diffusion is manifested in the spatiotemporal spreading of the hypocentres}, language = {en} } @article{HainzlZoellerKurthsetal.2000, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen and Zschau, Jochen}, title = {Seismic quiescence as an indicator for large earthquakes in a system of self-organized criticality}, year = {2000}, language = {en} } @article{ZoellerHainzlHolschneider2010, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Holschneider, Matthias}, title = {Recurrence of Large Earthquakes : bayesian inference from catalogs in the presence of magnitude uncertainties}, issn = {0033-4553}, doi = {10.1007/s00024-010-0078-0}, year = {2010}, abstract = {We present a Bayesian method that allows continuous updating the aperiodicity of the recurrence time distribution of large earthquakes based on a catalog with magnitudes above a completeness threshold. The approach uses a recently proposed renewal model for seismicity and allows the inclusion of magnitude uncertainties in a straightforward manner. Errors accounting for grouped magnitudes and random errors are studied and discussed. 
The results indicate that a stable and realistic value of the aperiodicity can be predicted in an early state of seismicity evolution, even though only a small number of large earthquakes has occurred to date. Furthermore, we demonstrate that magnitude uncertainties can drastically influence the results and can therefore not be neglected. We show how to correct for the bias caused by magnitude errors. For the region of Parkfield we find that the aperiodicity, or the coefficient of variation, is clearly higher than in studies which are solely based on the large earthquakes.}, language = {en} } @article{HainzlBrietzkeZoeller2010, author = {Hainzl, Sebastian and Brietzke, Gilbert B. and Z{\"o}ller, Gert}, title = {Quantitative earthquake forecasts resulting from static stress triggering}, issn = {0148-0227}, doi = {10.1029/2010jb007473}, year = {2010}, abstract = {In recent years, the triggering of earthquakes has been discussed controversially with respect to the underlying mechanisms and the capability to evaluate the resulting seismic hazard. Apart from static stress interactions, other mechanisms including dynamic stress transfer have been proposed to be part of a complex triggering process. Exploiting the theoretical relation between long-term earthquake rates and stressing rate, we demonstrate that static stress changes resulting from an earthquake rupture allow us to predict quantitatively the aftershock activity without tuning specific model parameters. These forecasts are found to be in excellent agreement with all first-order characteristics of aftershocks, in particular, (1) the total number, (2) the power law distance decay, (3) the scaling of the productivity with the main shock magnitude, (4) the foreshock probability, and (5) the empirical Bath law providing the maximum aftershock magnitude, which supports the conclusion that static stress transfer is the major mechanism of earthquake triggering.}, language = {en} } @article{BeauvalHainzlScherbaum2006, author = {Beauval, C{\´e}line and Hainzl, Sebastian and Scherbaum, Frank}, title = {Probabilistic seismic hazard estimation in low-seismicity regions considering non-Poissonian seismic occurrence}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2006.02863.x}, year = {2006}, abstract = {In low-seismicity regions, such as France or Germany, the estimation of probabilistic seismic hazard must cope with the difficult identification of active faults and with the low amount of seismic data available. Since the probabilistic hazard method was initiated, most studies assume a Poissonian occurrence of earthquakes. Here we propose a method that enables the inclusion of time and space dependences between earthquakes into the probabilistic estimation of hazard. Combining the seismicity model Epidemic Type Aftershocks-Sequence (ETAS) with a Monte Carlo technique, aftershocks are naturally accounted for in the hazard determination. The method is applied to the Pyrenees region in Southern France. 
The impact on hazard of declustering and of the usual assumption that earthquakes occur according to a Poisson process is quantified, showing that aftershocks contribute on average less than 5 per cent to the probabilistic hazard, with an upper bound around 18 per cent}, language = {en} } @article{ZoellerHainzlKurths2001, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Kurths, J{\"u}rgen}, title = {Observation of growing correlation length as an indicator for critical point behavior prior to large earthquakes}, year = {2001}, language = {en} } @article{FiedlerZoellerHolschneideretal.2018, author = {Fiedler, Bernhard and Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {Multiple Change-Point Detection in Spatiotemporal Seismicity Data}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {3A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170236}, pages = {1147 -- 1159}, year = {2018}, abstract = {Earthquake rates are driven by tectonic stress buildup, earthquake-induced stress changes, and transient aseismic processes. Although the origin of the first two sources is known, transient aseismic processes are more difficult to detect. However, the knowledge of the associated changes of the earthquake activity is of great interest, because it might help identify natural aseismic deformation patterns such as slow-slip events, as well as the occurrence of induced seismicity related to human activities. For this goal, we develop a Bayesian approach to identify change-points in seismicity data automatically. Using the Bayes factor, we select a suitable model, estimate possible change-points, and we additionally use a likelihood ratio test to calculate the significance of the change of the intensity. The approach is extended to spatiotemporal data to detect the area in which the changes occur. The method is first applied to synthetic data showing its capability to detect real change-points. Finally, we apply this approach to observational data from Oklahoma and observe statistical significant changes of seismicity in space and time.}, language = {en} } @article{SchmedesHainzlReameretal.2005, author = {Schmedes, J. and Hainzl, Sebastian and Reamer, S. K. and Scherbaum, Frank and Hinzen, K. G.}, title = {Moment release in the Lower Rhine Embayment, Germany : seismological perspective of the deformation process}, issn = {0956-540X}, year = {2005}, abstract = {An important task of seismic hazard assessment consists of estimating the rate of seismic moment release which is correlated to the rate of tectonic deformation and the seismic coupling. However, the estimations of deformation depend on the type of information utilized (e.g. geodetic, geological, seismic) and include large uncertainties. We therefore estimate the deformation rate in the Lower Rhine Embayment (LRE), Germany, using an integrated approach where the uncertainties have been systematically incorporated. On the basis of a new homogeneous earthquake catalogue we initially determine the frequency-magnitude distribution by statistical methods. In particular, we focus on an adequate estimation of the upper bound of the Gutenberg-Richter relation and demonstrate the importance of additional palaeoseis- mological information. The integration of seismological and geological information yields a probability distribution of the upper bound magnitude. 
Using this distribution together with the distribution of Gutenberg-Richter a and b values, we perform Monte Carlo simulations to derive the seismic moment release as a function of the observation time. The seismic moment release estimated from synthetic earthquake catalogues with short catalogue length is found to systematically underestimate the long-term moment rate which can be analytically determined. The moment release recorded in the LRE over the last 250 yr is found to be in good agreement with the probability distribution resulting from the Monte Carlo simulations. Furthermore, the long-term distribution is within its uncertainties consistent with the moment rate derived by geological measurements, indicating an almost complete seismic coupling in this region. By means of Kostrov's formula, we additionally calculate the full deformation rate tensor using the distribution of known focal mechanisms in LRE. Finally, we use the same approach to calculate the seismic moment and the deformation rate for two subsets of the catalogue corresponding to the east- and west-dipping faults, respectively}, language = {en} } @article{MaghsoudiCescaHainzletal.2015, author = {Maghsoudi, Samira and Cesca, Simone and Hainzl, Sebastian and Dahm, Torsten and Z{\"o}ller, Gert and Kaiser, Diethelm}, title = {Maximum Magnitude of Completeness in a Salt Mine}, series = {Bulletin of the Seismological Society of America}, volume = {105}, journal = {Bulletin of the Seismological Society of America}, number = {3}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140039}, pages = {1491 -- 1501}, year = {2015}, abstract = {In this study, we analyze acoustic emission (AE) data recorded at the Morsleben salt mine, Germany, to assess the catalog completeness, which plays an important role in any seismicity analysis. We introduce the new concept of a magnitude completeness interval consisting of a maximum magnitude of completeness (M-c(max)) in addition to the well-known minimum magnitude of completeness. This is required to describe the completeness of the catalog, both for the smallest events (for which the detection performance may be low) and for the largest ones (which may be missed because of sensors saturation). We suggest a method to compute the maximum magnitude of completeness and calculate it for a spatial grid based on (1) the prior estimation of saturation magnitude at each sensor, (2) the correction of the detection probability function at each sensor, including a drop in the detection performance when it saturates, and (3) the combination of detection probabilities of all sensors to obtain the network detection performance. The method is tested using about 130,000 AE events recorded in a period of five weeks, with sources confined within a small depth interval, and an example of the spatial distribution of M-c(max) is derived. The comparison between the spatial distribution of M-c(max) and of the maximum possible magnitude (M-max), which is here derived using a recently introduced Bayesian approach, indicates that M-max exceeds M-c(max) in some parts of the mine. 
This suggests that some large and important events may be missed in the catalog, which could lead to a bias in the hazard evaluation.}, language = {en} } @article{PirliHainzlSchweitzeretal.2018, author = {Pirli, Myrto and Hainzl, Sebastian and Schweitzer, Johannes and K{\"o}hler, Andreas and Dahm, Torsten}, title = {Localised thickening and grounding of an Antarctic ice shelf from tidal triggering and sizing of cryoseismicity}, series = {Earth \& planetary science letters}, volume = {503}, journal = {Earth \& planetary science letters}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0012-821X}, doi = {10.1016/j.epsl.2018.09.024}, pages = {78 -- 87}, year = {2018}, abstract = {We observe remarkably periodic patterns of seismicity rates and magnitudes at the Fimbul Ice Shelf, East Antarctica, correlating with the cycles of the ocean tide. Our analysis covers 19 years of continuous seismic recordings from Antarctic broadband stations. Seismicity commences abruptly during austral summer 2011 at a location near the ocean front in a shallow water region. Dozens of highly repetitive events occur in semi-diurnal cycles, with magnitudes and rates fluctuating steadily with the tide. In contrast to the common unpredictability of earthquake magnitudes, the event magnitudes show deterministic trends within single cycles and strong correlations with spring tides and tide height. The events occur quasi-periodically and the highly constrained event sources migrate landwards during rising tide. We show that a simple, mechanical model can explain most of the observations. Our model assumes stick-slip motion on a patch of grounded ice shelf, which is forced by the variations of the ocean-tide height and ice flow. The well fitted observations give new insights into the general process of frictional triggering of earthquakes, while providing independent evidence of variations in ice shelf thickness and grounding.}, language = {en} } @article{WangZoellerHainzl2015, author = {Wang, Lifeng and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Joint Determination of Slip and Stress Drop in a Bayesian Inversion Approach: A Case Study for the 2010 M8.8 Maule Earthquake}, series = {Pure and applied geophysics}, volume = {172}, journal = {Pure and applied geophysics}, number = {2}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-014-0868-x}, pages = {375 -- 388}, year = {2015}, abstract = {Stress drop is a key factor in earthquake mechanics and engineering seismology. However, stress drop calculations based on fault slip can be significantly biased, particularly due to subjectively determined smoothing conditions in the traditional least-square slip inversion. In this study, we introduce a mechanically constrained Bayesian approach to simultaneously invert for fault slip and stress drop based on geodetic measurements. A Gaussian distribution for stress drop is implemented in the inversion as a prior. We have done several synthetic tests to evaluate the stability and reliability of the inversion approach, considering different fault discretization, fault geometries, utilized datasets, and variability of the slip direction, respectively. We finally apply the approach to the 2010 M8.8 Maule earthquake and invert for the coseismic slip and stress drop simultaneously. Two fault geometries from the literature are tested. 
Our results indicate that the derived slip models based on both fault geometries are similar, showing major slip north of the hypocenter and relatively weak slip in the south, as indicated in the slip models of other studies. The derived mean stress drop is 5-6 MPa, which is close to the stress drop of about 7 MPa that was independently determined according to force balance in this region by Luttrell et al. (J Geophys Res, 2011). These findings indicate that stress drop values can be consistently extracted from geodetic data.}, language = {en} } @article{SharmaHainzlZoelleretal.2020, author = {Sharma, Shubham and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Is Coulomb stress the best choice for aftershock forecasting?}, series = {Journal of geophysical research : Solid earth}, volume = {125}, journal = {Journal of geophysical research : Solid earth}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2020JB019553}, pages = {12}, year = {2020}, abstract = {The Coulomb failure stress (CFS) criterion is the most commonly used method for predicting spatial distributions of aftershocks following large earthquakes. However, large uncertainties are always associated with the calculation of Coulomb stress change. The uncertainties mainly arise due to nonunique slip inversions and unknown receiver faults; especially for the latter, results are highly dependent on the choice of the assumed receiver mechanism. Based on binary tests (aftershocks yes/no), recent studies suggest that alternative stress quantities, a distance-slip probabilistic model as well as deep neural network (DNN) approaches, all are superior to CFS with predefined receiver mechanism. To challenge this conclusion, which might have large implications, we use 289 slip inversions from the SRCMOD database to calculate more realistic CFS values for a layered half-space and variable receiver mechanisms. We also analyze the effect of the magnitude cutoff, grid size variation, and aftershock duration to verify the use of receiver operating characteristic (ROC) analysis for the ranking of stress metrics. The observations suggest that introducing a layered half-space does not improve the stress maps and ROC curves. However, results significantly improve for larger aftershocks and shorter time periods but without changing the ranking.
We also go beyond binary testing and apply alternative statistics to test the ability to estimate aftershock numbers, which confirm that simple stress metrics perform better than the classic Coulomb failure stress calculations and are also better than the distance-slip probabilistic model.}, language = {en} } @article{HainzlZoellerMain2006, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Main, Ian}, title = {Introduction to special issue: Dynamics of seismicity patterns and earthquake triggering - Preface}, series = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, volume = {424}, journal = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, number = {Special issue}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0040-1951}, doi = {10.1016/j.tecto.2006.03.034}, pages = {135 -- 138}, year = {2006}, language = {en} } @article{MaghsoudiCescaHainzletal.2013, author = {Maghsoudi, Samira and Cesca, Simone and Hainzl, Sebastian and Kaiser, Diethelm and Becker, Dirk and Dahm, Torsten}, title = {Improving the estimation of detection probability and magnitude of completeness in strongly heterogeneous media, an application to acoustic emission (AE)}, series = {Geophysical journal international}, volume = {193}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggt049}, pages = {1556 -- 1569}, year = {2013}, abstract = {Reliable estimations of magnitude of completeness (M-c) are essential for a correct interpretation of seismic catalogues. The spatial distribution of M-c may be strongly variable and difficult to assess in mining environments, owing to the presence of galleries, cavities, fractured regions, porous media and different mineralogical bodies, as well as in consequence of inhomogeneous spatial distribution of the seismicity. We apply a 3-D modification of the probabilistic magnitude of completeness (PMC) method, which relies on the analysis of network detection capabilities. In our approach, the probability to detect an event depends on its magnitude, source receiver Euclidean distance and source receiver direction. The suggested method is proposed for the study of the spatial distribution of the magnitude of completeness in a mining environment and here is applied to a 2-month acoustic emission (AE) data set recorded at the Morsleben salt mine, Germany. The dense seismic network and the large data set, which includes more than one million events, enable a detailed testing of the method. This method is proposed specifically for strongly heterogeneous media. Besides, it can also be used for specific network installations with sensors whose sensitivity depends on the direction of the incoming wave (e.g. some piezoelectric sensors). In the absence of strong heterogeneities, the standard PMC approach should be used. We show that the PMC estimations in mines strongly depend on the source receiver direction, and cannot be correctly accounted for using a standard PMC approach. However, results can be improved when adopting the proposed 3-D modification of the PMC method.
Our analysis of one central horizontal and vertical section yields a magnitude of completeness of about M-c approximate to 1 (AE magnitude) at the centre of the network, which increases up to M-c approximate to 4 at further distances outside the network; the best detection performance is estimated for a NNE-SSE elongated region, which corresponds to the strike direction of the low-attenuating salt body. Our approach provides us with small-scale details about the capability of sensors to detect an earthquake, which can be linked to the presence of heterogeneities in specific directions. Reduced detection performance in presence of strong structural heterogeneities (cavities) is confirmed by synthetic waveform modelling in heterogeneous media.}, language = {en} } @article{HainzlZoellerWang2010, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Wang, Rongjiang}, title = {Impact of the receiver fault distribution on aftershock activity}, issn = {0148-0227}, doi = {10.1029/2008jb006224}, year = {2010}, abstract = {Aftershock models are usually based either on purely empirical relations ignoring the physical mechanism or on deterministic calculations of stress changes on a predefined receiver fault orientation. Here we investigate the effect of considering more realistic fault systems in models based on static Coulomb stress changes. For that purpose, we perform earthquake simulations with elastic half-space stress interactions, rate-and-state dependent frictional earthquake nucleation, and extended ruptures with heterogeneous (fractal) slip distributions. We find that the consideration of earthquake nucleation on multiple receiver fault orientations does not influence the shape of the temporal Omori-type aftershock decay, but changes significantly the predicted spatial patterns and the total number of triggered events. So-called stress shadows with decreased activity almost vanish, and activation decays continuously with increasing distance from the main shock rupture. The total aftershock productivity, which is shown to be almost independent of the assumed background rate, increases significantly if multiple receiver fault planes exist. The application to the 1992 M7.3 Landers, California, aftershock sequence indicates a good agreement with the locations and the total productivity of the observed directly triggered aftershocks.}, language = {en} } @article{MaghsoudiHainzlCescaetal.2014, author = {Maghsoudi, Samira and Hainzl, Sebastian and Cesca, Simone and Dahm, Torsten and Kaiser, Diethelm}, title = {Identification and characterization of growing large-scale en-echelon fractures in a salt mine}, series = {Geophysical journal international}, volume = {196}, journal = {Geophysical journal international}, number = {2}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggt443}, pages = {1092 -- 1105}, year = {2014}, abstract = {The spatiotemporal seismicity of acoustic emission (AE) events recorded in the Morsleben salt mine is investigated. Almost a year after backfilling of the cavities from 2003, microevents are distributed with distinctive stripe shapes above cavities at different depth levels. The physical forces driving the creation of these stripes are still unknown. This study aims to find the active stripes and track fracture developments over time by combining two different temporal and spatial clustering techniques into a single methodological approach. 
Anomalous seismicity parameter values, such as sharp b-value changes for two active stripes, are good indicators of possible stress accumulation at the stripe tips. We identify the formation of two new seismicity stripes and show that the AE activity in active clusters migrates mostly unidirectionally eastward and upward. This indicates that the growth of underlying macrofractures is controlled by the gradient of extensional stress. Studying the size distribution characteristics in terms of the frequency-magnitude distribution and b-value in the active phase and in the phase with constant seismicity rate shows that deviations from the Gutenberg-Richter power law can be explained by the inclusion of different activity phases: (1) the inactive period before the formation of macrofractures, which is characterized by a deficit of larger events (higher b-values) and (2) the period of fracture growth characterized by the occurrence of larger events (smaller b-values).}, language = {en} } @article{HolschneiderZoellerHainzl2011, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Estimation of the maximum possible magnitude in the framework of a doubly truncated Gutenberg-Richter Model}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100289}, pages = {1649 -- 1659}, year = {2011}, abstract = {We discuss to what extent a given earthquake catalog and the assumption of a doubly truncated Gutenberg-Richter distribution for the earthquake magnitudes allow for the calculation of confidence intervals for the maximum possible magnitude M. We show that, without further assumptions such as the existence of an upper bound of M, only very limited information may be obtained. In a frequentist formulation, for each confidence level alpha the confidence interval diverges with finite probability. In a Bayesian formulation, the posterior distribution of the upper magnitude is not normalizable. We conclude that the common approach to derive confidence intervals from the variance of a point estimator fails. Technically, this problem can be overcome by introducing an upper bound M-tilde for the maximum magnitude. Then the Bayesian posterior distribution can be normalized, and its variance decreases with the number of observed events. However, because the posterior depends significantly on the choice of the unknown value of M-tilde, the resulting confidence intervals are essentially meaningless. The use of an informative prior distribution accounting for pre-knowledge of M is also of little use, because the prior is only modified in the case of the occurrence of an extreme event. Our results suggest that the maximum possible magnitude M should better be replaced by M(T), the maximum expected magnitude in a given time interval T, for which the calculation of exact confidence intervals becomes straightforward.
From a physical point of view, numerical models of the earthquake process adjusted to specific fault regions may be a powerful alternative to overcome the shortcomings of purely statistical inference.}, language = {en} } @article{HainzlScherbaumBeauval2006, author = {Hainzl, Sebastian and Scherbaum, Frank and Beauval, C{\´e}line}, title = {Estimating background activity based on interevent-time distribution}, issn = {0037-1106}, doi = {10.1785/0120050053}, year = {2006}, abstract = {The statistics of time delays between successive earthquakes has recently been claimed to be universal and to show the existence of clustering beyond the duration of aftershock bursts. We demonstrate that these claims are unjustified. Stochastic simulations with Poissonian background activity and triggered Omori-type aftershock sequences are shown to reproduce the interevent-time distributions observed on different spatial and magnitude scales in California. Thus the empirical distribution can be explained without any additional long-term clustering. Furthermore, we find that the shape of the interevent-time distribution, which can be approximated by the gamma distribution, is determined by the percentage of main-shocks in the catalog. This percentage can be calculated by the mean and variance of the interevent times and varies between 5\% and 90\% for different regions in California. Our investigation of stochastic simulations indicates that the interevent-time distribution provides a nonparametric reconstruction of the mainshock magnitude-frequency distribution that is superior to standard declustering algorithm}, language = {en} } @article{EiblHainzlVeselyetal.2019, author = {Eibl, Eva P. S. and Hainzl, Sebastian and Vesely, Nele I. K. and Walter, Thomas R. and Jousset, Philippe and Hersir, Gylfi Pall and Dahm, Torsten}, title = {Eruption interval monitoring at strokkur Geyser, Iceland}, series = {Geophysical research letters}, volume = {47}, journal = {Geophysical research letters}, number = {1}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0094-8276}, doi = {10.1029/2019GL085266}, pages = {10}, year = {2019}, abstract = {Geysers are hot springs whose frequency of water eruptions remain poorly understood. We set up a local broadband seismic network for 1 year at Strokkur geyser, Iceland, and developed an unprecedented catalog of 73,466 eruptions. We detected 50,135 single eruptions but find that the geyser is also characterized by sets of up to six eruptions in quick succession. The number of single to sextuple eruptions exponentially decreased, while the mean waiting time after an eruption linearly increased (3.7 to 16.4 min). While secondary eruptions within double to sextuple eruptions have a smaller mean seismic amplitude, the amplitude of the first eruption is comparable for all eruption types. We statistically model the eruption frequency assuming discharges proportional to the eruption multiplicity and a constant probability for subsequent events within a multituple eruption. The waiting time after an eruption is predictable but not the type or amplitude of the next one.
Plain Language Summary Geysers are springs that often erupt in hot water fountains. They erupt more often than volcanoes but are quite similar. Nevertheless, it is poorly understood how often volcanoes and also geysers erupt. We created a list of 73,466 eruption times of Strokkur geyser, Iceland, from 1 year of seismic data. The geyser erupted one to six times in quick succession. We found 50,135 single eruptions but only 1 sextuple eruption, while the mean waiting time increased from 3.7 min after single eruptions to 16.4 min after sextuple eruptions. Mean amplitudes of each eruption type were higher for single eruptions, but all first eruptions in a succession were similar in height. Assuming a constant heat inflow at depth, we can predict the waiting time after an eruption but not the type or amplitude of the next one.}, language = {en} } @article{SudibyoEiblHainzletal.2022, author = {Sudibyo, Maria R. P. and Eibl, Eva P. S. and Hainzl, Sebastian and Hersir, Gylfi P{\´a}ll}, title = {Eruption Forecasting of Strokkur Geyser, Iceland, Using Permutation Entropy}, series = {Journal of geophysical research : Solid earth}, volume = {127}, journal = {Journal of geophysical research : Solid earth}, number = {10}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2022JB024840}, pages = {15}, year = {2022}, abstract = {A volcanic eruption is usually preceded by seismic precursors, but their interpretation and use for forecasting the eruption onset time remain a challenge. A part of the eruptive processes in open conduits of volcanoes may be similar to those encountered in geysers. Since geysers erupt more often, they are useful sites for testing new forecasting methods. We tested the application of Permutation Entropy (PE) as a robust method to assess the complexity in seismic recordings of the Strokkur geyser, Iceland. Strokkur features several minute-long eruptive cycles, enabling us to verify in 63 recorded cycles whether PE behaves consistently from one eruption to the next one. We performed synthetic tests to understand the effect of different parameter settings in the PE calculation. Our application to Strokkur shows a distinct, repeating PE pattern consistent with previously identified phases in the eruptive cycle. We find a systematic increase in PE within the last 15 s before the eruption, indicating that an eruption will occur. We quantified the predictive power of PE, showing that PE performs better than seismic signal strength or quiescence when it comes to forecasting eruptions.}, language = {en} } @article{DahmCescaHainzletal.2015, author = {Dahm, Torsten and Cesca, Simone and Hainzl, Sebastian and Braun, Thomas and Kr{\"u}ger, Frank}, title = {Discrimination between induced, triggered, and natural earthquakes close to hydrocarbon reservoirs: A probabilistic approach based on the modeling of depletion-induced stress changes and seismological source parameters}, series = {Journal of geophysical research : Solid earth}, volume = {120}, journal = {Journal of geophysical research : Solid earth}, number = {4}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2014JB011778}, pages = {2491 -- 2509}, year = {2015}, abstract = {Earthquakes occurring close to hydrocarbon fields under production are often under critical view of being induced or triggered. However, clear and testable rules to discriminate the different events have rarely been developed and tested. 
The unresolved scientific problem may lead to lengthy public disputes with unpredictable impact on the local acceptance of the exploitation and field operations. We propose a quantitative approach to discriminate induced, triggered, and natural earthquakes, which is based on testable input parameters. Maxima of occurrence probabilities are compared for the cases under question, and a single probability of being triggered or induced is reported. The uncertainties of earthquake location and other input parameters are considered in terms of the integration over probability density functions. The probability that events have been human triggered/induced is derived from the modeling of Coulomb stress changes and a rate and state-dependent seismicity model. In our case a 3-D boundary element method has been adapted for the nuclei of strain approach to estimate the stress changes outside the reservoir, which are related to pore pressure changes in the field formation. The predicted rate of natural earthquakes is either derived from the background seismicity or, in case of rare events, from an estimate of the tectonic stress rate. Instrumentally derived seismological information on the event location, source mechanism, and the size of the rupture plane is of advantage for the method. If the rupture plane has been estimated, the discrimination between induced or only triggered events is theoretically possible if probability functions are convolved with a rupture fault filter. We apply the approach to three recent main shock events: (1) the M-w 4.3 Ekofisk 2001, North Sea, earthquake close to the Ekofisk oil field; (2) the M-w 4.4 Rotenburg 2004, Northern Germany, earthquake in the vicinity of the Sohlingen gas field; and (3) the M-w 6.1 Emilia 2012, Northern Italy, earthquake in the vicinity of a hydrocarbon reservoir. The three test cases cover the complete range of possible causes: clearly human induced, not even human triggered, and a third case in between both extremes.}, language = {en} } @article{FiedlerHainzlZoelleretal.2018, author = {Fiedler, Bernhard and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Detection of Gutenberg-Richter b-Value Changes in Earthquake Time Series}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {5A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120180091}, pages = {2778 -- 2787}, year = {2018}, abstract = {The Gutenberg-Richter relation for earthquake magnitudes is the most famous empirical law in seismology. It states that the frequency of earthquake magnitudes follows an exponential distribution; this has been found to be a robust feature of seismicity above the completeness magnitude, and it is independent of whether global, regional, or local seismicity is analyzed. However, the exponent b of the distribution varies significantly in space and time, which is important for process understanding and seismic hazard assessment; this is particularly true because of the fact that the Gutenberg-Richter b-value acts as a proxy for the stress state and quantifies the ratio of large-to-small earthquakes. In our work, we focus on the automatic detection of statistically significant temporal changes of the b-value in seismicity data. In our approach, we use Bayes factors for model selection and estimate multiple change-points of the frequency-magnitude distribution in time. 
The method is first applied to synthetic data, showing its capability to detect change-points as a function of the size of the sample and the b-value contrast. Finally, we apply this approach to examples of observational data sets for which b-value changes have previously been stated. Our analysis of foreshock and aftershock sequences related to mainshocks, as well as earthquake swarms, shows that only a portion of the b-value changes is statistically significant.}, language = {en} } @article{HainzlOgata2005, author = {Hainzl, Sebastian and Ogata, Y.}, title = {Detecting fluid signals in seismicity data through statistical earthquake modeling}, issn = {0148-0227}, year = {2005}, abstract = {According to the well-known Coulomb failure criterion, the variation of either stress or pore pressure can result in earthquake rupture. Aftershock sequences characterized by the Omori law are often assumed to be the consequence of varying stress, whereas earthquake swarms are thought to be triggered by fluid intrusions. The role of stress triggering can be analyzed by modeling solely three-dimensional (3-D) elastic stress changes in the crust, but fluid flows which initiate seismicity cannot be investigated without considering complex seismicity patterns resulting from both pore pressure variations and earthquake-connected stress field changes. We show that the epidemic-type aftershock sequence (ETAS) model is an appropriate tool to extract the primary fluid signal from such complex seismicity patterns. We analyze a large earthquake swarm that occurred in 2000 in Vogtland/NW Bohemia, central Europe. By fitting the stochastic ETAS model, we find that stress triggering is dominant in creating the observed seismicity patterns and explains the observed fractal interevent time distribution. External forcing, identified with pore pressure changes due to fluid intrusion, is found to directly trigger only a few percent of the total activity. However, temporal deconvolution indicates that a pronounced fluid signal initiated the swarm. These results are confirmed by our analogous investigation of model simulations in which earthquakes are triggered by fluid intrusion as well as stress transfers on a fault plane embedded in a 3-D elastic half-space. The deconvolution procedure based on the ETAS model is able to reveal the underlying pore pressure variations.}, language = {en} } @article{HainzlZoellerBrietzkeetal.2013, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Brietzke, Gilbert B. and Hinzen, Klaus-G.}, title = {Comparison of deterministic and stochastic earthquake simulators for fault interactions in the Lower Rhine Embayment, Germany}, series = {Geophysical journal international}, volume = {195}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggt271}, pages = {684 -- 694}, year = {2013}, abstract = {Time-dependent probabilistic seismic hazard assessment requires a stochastic description of earthquake occurrences. While short-term seismicity models are well-constrained by observations, the recurrences of characteristic on-fault earthquakes are only derived from theoretical considerations, uncertain palaeo-events or proxy data. Despite the involved uncertainties and complexity, simple statistical models for a quasi-periodic recurrence of on-fault events are implemented in seismic hazard assessments.
To test the applicability of statistical models, such as the Brownian relaxation oscillator or the stress release model, we perform a systematic comparison with deterministic simulations based on rate- and state-dependent friction, high-resolution representations of fault systems and quasi-dynamic rupture propagation. For the specific fault network of the Lower Rhine Embayment, Germany, we run both stochastic and deterministic model simulations based on the same fault geometries and stress interactions. Our results indicate that the stochastic simulators are able to reproduce the first-order characteristics of the major earthquakes on isolated faults as well as for coupled faults with moderate stress interactions. However, we find that all tested statistical models fail to reproduce the characteristics of strongly coupled faults, because multisegment rupturing resulting from a spatiotemporally correlated stress field is underestimated in the stochastic simulators. Our results suggest that stochastic models have to be extended by multirupture probability distributions to provide more reliable results.}, language = {en} } @article{ZoellerHainzlTilmannetal.2020, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Tilmann, Frederik and Woith, Heiko and Dahm, Torsten}, title = {Comment on: Wikelski, Martin; M{\"u}ller, Uschi; Scocco, Paola; Catorci, Andrea; Desinov, Lev V.; Belyaev, Mikhail Y.; Keim, Daniel A.; Pohlmeier, Winfried; Fechteler, Gerhard; Mai, Martin P. : Potential short-term earthquake forecasting by farm animal monitoring. - Ethology. - 126 (2020), 9. - S. 931 - 941. -ISSN 0179-1613. - eISSN 1439-0310. - doi 10.1111/eth.13078}, series = {Ethology}, volume = {127}, journal = {Ethology}, number = {3}, publisher = {Wiley}, address = {Hoboken}, issn = {0179-1613}, doi = {10.1111/eth.13105}, pages = {302 -- 306}, year = {2020}, abstract = {Based on an analysis of continuous monitoring of farm animal behavior in the region of the 2016 M6.6 Norcia earthquake in Italy, Wikelski et al., 2020; (Seismol Res Lett, 89, 2020, 1238) conclude that animal activity can be anticipated with subsequent seismic activity and that this finding might help to design a "short-term earthquake forecasting method." We show that this result is based on an incomplete analysis and misleading interpretations. Applying state-of-the-art methods of statistics, we demonstrate that the proposed anticipatory patterns cannot be distinguished from random patterns, and consequently, the observed anomalies in animal activity do not have any forecasting power.}, language = {en} } @article{DanielPronoRenardetal.2011, author = {Daniel, G. and Prono, E. and Renard, F. and Thouvenot, F. and Hainzl, Sebastian and Marsan, D. and Helmstetter, A. and Traversa, P. and Got, J. L. and Jenatton, L. and Guiguet, R.}, title = {Changes in effective stress during the 2003-2004 Ubaye seismic swarm, France}, series = {Journal of geophysical research : Solid earth}, volume = {116}, journal = {Journal of geophysical research : Solid earth}, number = {4}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2010JB007551}, pages = {13}, year = {2011}, abstract = {We study changes in effective stress (normal stress minus pore pressure) that occurred in the French Alps during the 2003-2004 Ubaye earthquake swarm. Two complementary data sets are used. 
First, a set of 974 relocated events allows us to finely characterize the shape of the seismogenic area and the spatial migration of seismicity during the crisis. Relocations are performed by a double-difference algorithm. We compute differences in travel times at stations both from absolute picking times and from cross-correlation delays of multiplets. The resulting catalog reveals a swarm alignment along a single planar structure striking N130 degrees E and dipping 80 degrees W. This relocated activity displays migration properties consistent with triggering by a diffusive fluid overpressure front. This observation argues in favor of a deep-seated fluid circulation responsible for a significant part of the seismic activity in Ubaye. Second, we analyze time series of earthquake detections at a single seismological station located just above the swarm. This time series forms a dense chronicle of more than 16,000 events. We use it to estimate the history of effective stress changes during this sequence. For this purpose we model the rate of events by a stochastic epidemic-type aftershock sequence model with a nonstationary background seismic rate lambda_0(t). This background rate is estimated in discrete time windows. Window lengths are determined optimally by a new change-point method based on the interevent time distribution. We propose that background events are triggered directly by a transient fluid circulation at depth. Then, using rate-and-state constitutive friction laws, we estimate changes in effective stress for the observed rate of background events. We assume that changes in effective stress occurred under constant shear stressing rate conditions. We finally obtain a maximum change in effective stress close to -8 MPa, which corresponds to a maximum fluid overpressure of about 8 MPa under constant normal stress conditions. This estimate is in good agreement with values obtained from numerical modeling of fluid flow at depth, or with direct measurements reported from fluid injection experiments.}, language = {en} } @article{WangHainzlZoeller2014, author = {Wang, Lifeng and Hainzl, Sebastian and Z{\"o}ller, Gert}, title = {Assessment of stress coupling among the inter-, co- and post-seismic phases related to the 2004 M6 Parkfield earthquake}, series = {Geophysical journal international}, volume = {197}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggu102}, pages = {1858 -- 1868}, year = {2014}, abstract = {Due to large uncertainties and non-uniqueness in fault slip inversion, the investigation of stress coupling based on the direct comparison of independent slip inversions, for example, between the coseismic slip distribution and the interseismic slip deficit, may lead to ambiguous conclusions. In this study, we therefore adopt the stress-constrained joint inversion in the Bayesian approach of Wang et al. and implement the physical hypothesis of stress coupling as a prior. We test the hypothesis that interseismic locking is coupled with the coseismic rupture, and that the early post-seismic deformation is a stress relaxation process in response to the coseismic stress perturbation. We characterize the role of stress coupling in the seismic cycle by evaluating the efficiency of the model to explain the available data. Taking the 2004 M6 Parkfield earthquake as a study case, we find that the stress coupling hypothesis is in agreement with the data.
The coseismic rupture zone is found to be strongly locked during the interseismic phase, and the post-seismic slip zone is inferred to be weakly creeping. The post-seismic deformation plays an important role in rebuilding stress in the coseismic rupture zone. Based on our results for the stress accumulation during both the inter- and post-seismic phases in the coseismic rupture zone, together with the coseismic stress drop, we estimate a recurrence time of M6 earthquakes in Parkfield of around 23-41 yr, suggesting that the duration of 38 yr between the two recent M6 events in Parkfield is not a surprise.}, language = {en} } @article{PassarelliHainzlCescaetal.2015, author = {Passarelli, Luigi and Hainzl, Sebastian and Cesca, Simone and Maccaferri, Francesco and Mucciarelli, Marco and R{\"o}ßler, Dirk and Corbi, Fabio and Dahm, Torsten and Rivalta, Eleonora}, title = {Aseismic transient driving the swarm-like seismic sequence in the Pollino range, Southern Italy}, series = {Geophysical journal international}, volume = {201}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggv111}, pages = {1553 -- 1567}, year = {2015}, abstract = {Tectonic earthquake swarms challenge our understanding of earthquake processes since it is difficult to link observations to the underlying physical mechanisms and to assess the hazard they pose. Transient forcing is thought to initiate and drive the spatio-temporal release of energy during swarms. The nature of the transient forcing may vary across sequences and range from aseismic creeping or transient slip to diffusion of pore pressure pulses to fluid redistribution and migration within the seismogenic crust. Distinguishing between such forcing mechanisms may be critical to reduce epistemic uncertainties in the assessment of hazard due to seismic swarms, because it can provide information on the frequency-magnitude distribution of the earthquakes (often deviating from the assumed Gutenberg-Richter relation) and on the expected source parameters influencing the ground motion (for example the stress drop). Here we study the ongoing Pollino range (Southern Italy) seismic swarm, a long-lasting seismic sequence with more than five thousand events recorded and located since October 2010. The two largest shocks (magnitude M-w = 4.2 and M-w = 5.1) are among the largest earthquakes ever recorded in an area which represents a seismic gap in the Italian historical earthquake catalogue. We investigate the geometrical, mechanical and statistical characteristics of the largest earthquakes and of the entire swarm. We calculate the focal mechanisms of the M-l > 3 events in the sequence and the transfer of Coulomb stress on nearby known faults and analyse the statistics of the earthquake catalogue. We find that only 25 per cent of the earthquakes in the sequence can be explained as aftershocks, and the remaining 75 per cent may be attributed to a transient forcing. The b-values change in time throughout the sequence, with low b-values correlated with the period of highest rate of activity and with the occurrence of the largest shock.
In the light of recent studies on the palaeoseismic and historical activity in the Pollino area, we identify two scenarios consistent with the observations and our analysis: this and past seismic swarms may have been 'passive' features, with small fault patches failing on largely locked faults, or may have been accompanied by an 'active', largely aseismic, release of a large portion of the accumulated tectonic strain. These scenarios have very different implications for the seismic hazard of the area.}, language = {en} } @article{ZollerHainzlHolschneideretal.2005, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Holschneider, Matthias and Ben-Zion, Yehuda}, title = {Aftershocks resulting from creeping sections in a heterogeneous fault}, issn = {0094-8276}, year = {2005}, abstract = {We show that realistic aftershock sequences with space-time characteristics compatible with observations are generated by a model consisting of brittle fault segments separated by creeping zones. The dynamics of the brittle regions is governed by static/kinetic friction, 3D elastic stress transfer and small creep deformation. The creeping parts are characterized by high ongoing creep velocities. These regions store stress during earthquake failures and then release it in the interseismic periods. The resulting postseismic deformation leads to aftershock sequences following the modified Omori law. The ratio of creep coefficients in the brittle and creeping sections determines the duration of the postseismic transients and the exponent p of the modified Omori law.}, language = {en} } @article{KuehnHainzlDahmetal.2022, author = {K{\"u}hn, Daniela and Hainzl, Sebastian and Dahm, Torsten and Richter, Gudrun and Vera Rodriguez, Ismael}, title = {A review of source models to further the understanding of the seismicity of the Groningen field}, series = {Netherlands journal of geosciences : NJG}, volume = {101}, journal = {Netherlands journal of geosciences : NJG}, publisher = {Cambridge Univ. Press}, address = {Cambridge}, issn = {0016-7746}, doi = {10.1017/njg.2022.7}, pages = {12}, year = {2022}, abstract = {The occurrence of felt earthquakes due to gas production in Groningen has initiated numerous studies and modelling attempts to understand and quantify induced seismicity in this region. The available models span the whole range from fully deterministic to purely empirical and stochastic approaches. In this article, we summarise the most important model approaches, describing their main achievements and limitations.
In addition, we discuss remaining open questions and potential future directions of development.}, language = {en} } @article{BayonaViverosvonSpechtStraderetal.2019, author = {Bayona Viveros, Jose Antonio and von Specht, Sebastian and Strader, Anne and Hainzl, Sebastian and Cotton, Fabrice Pierre and Schorlemmer, Danijel}, title = {A Regionalized Seismicity Model for Subduction Zones Based on Geodetic Strain Rates, Geomechanical Parameters, and Earthquake-Catalog Data}, series = {Bulletin of the Seismological Society of America}, volume = {109}, journal = {Bulletin of the Seismological Society of America}, number = {5}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120190034}, pages = {2036 -- 2049}, year = {2019}, abstract = {The Seismic Hazard Inferred from Tectonics based on the Global Strain Rate Map (SHIFT_GSRM) earthquake forecast was designed to provide high-resolution estimates of global shallow seismicity to be used in seismic hazard assessment. This model combines geodetic strain rates with global earthquake parameters to characterize long-term rates of seismic moment and earthquake activity. Although SHIFT_GSRM properly computes seismicity rates in seismically active continental regions, it underestimates earthquake rates in subduction zones by an average factor of approximately 3. We present a complementary method to SHIFT_GSRM to more accurately forecast earthquake rates in 37 subduction segments, based on the conservation of moment principle and the use of regional interface seismicity parameters, such as subduction dip angles, corner magnitudes, and coupled seismogenic thicknesses. In seven progressive steps, we find that SHIFT_GSRM earthquake-rate underpredictions are mainly due to the utilization of a global probability function of seismic moment release that poorly captures the great variability among subduction megathrust interfaces. Retrospective test results show that the forecast is consistent with the observations during the 1 January 1977 to 31 December 2014 period. Moreover, successful pseudoprospective evaluations for the 1 January 2015 to 31 December 2018 period demonstrate the power of the regionalized earthquake model to properly estimate subduction-zone seismicity.}, language = {en} }
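For orientation, the statistical relations that recur throughout the abstracts above (the Gutenberg-Richter law and its b-value, the modified Omori law with exponent p, and the ETAS conditional intensity with background rate lambda_0(t)) are written out below in their standard textbook form. This is an illustrative sketch only, not reproduced from any single entry; the symbols m_c (completeness magnitude), \bar{m} (mean magnitude above m_c), \Delta m (binning width), K, c, p and \alpha are conventional choices rather than notation quoted from the cited papers.

% Illustrative sketch: standard forms assumed, not taken from the cited articles.
\begin{align}
  \log_{10} N(\geq m) &= a - b\,m
    && \text{Gutenberg-Richter frequency-magnitude law} \\
  \hat{b} &= \frac{\log_{10} e}{\bar{m} - \left(m_c - \Delta m/2\right)}
    && \text{Aki-Utsu maximum-likelihood $b$-value estimate} \\
  n(t) &= \frac{K}{(t + c)^{p}}
    && \text{modified Omori law for the aftershock rate} \\
  \lambda(t) &= \lambda_0(t) + \sum_{i:\, t_i < t} K\, e^{\alpha (m_i - m_c)}\, (t - t_i + c)^{-p}
    && \text{ETAS intensity with time-varying background rate } \lambda_0(t)
\end{align}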