@article{HainzlZoellerBrietzkeetal.2013, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Brietzke, Gilbert B. and Hinzen, Klaus-G.}, title = {Comparison of deterministic and stochastic earthquake simulators for fault interactions in the Lower Rhine Embayment, Germany}, series = {Geophysical journal international}, volume = {195}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggt271}, pages = {684 -- 694}, year = {2013}, abstract = {Time-dependent probabilistic seismic hazard assessment requires a stochastic description of earthquake occurrences. While short-term seismicity models are well constrained by observations, the recurrences of characteristic on-fault earthquakes are only derived from theoretical considerations, uncertain palaeo-events or proxy data. Despite the uncertainties and complexity involved, simple statistical models for a quasi-periodic recurrence of on-fault events are implemented in seismic hazard assessments. To test the applicability of statistical models, such as the Brownian relaxation oscillator or the stress release model, we perform a systematic comparison with deterministic simulations based on rate- and state-dependent friction, high-resolution representations of fault systems and quasi-dynamic rupture propagation. For the specific fault network of the Lower Rhine Embayment, Germany, we run both stochastic and deterministic model simulations based on the same fault geometries and stress interactions. Our results indicate that the stochastic simulators are able to reproduce the first-order characteristics of the major earthquakes on isolated faults as well as on coupled faults with moderate stress interactions. However, we find that all tested statistical models fail to reproduce the characteristics of strongly coupled faults, because multisegment rupturing resulting from a spatiotemporally correlated stress field is underestimated in the stochastic simulators. Our results suggest that stochastic models have to be extended by multirupture probability distributions to provide more reliable results.}, language = {en} }

@article{Zoeller2013, author = {Z{\"o}ller, Gert}, title = {Convergence of the frequency-magnitude distribution of global earthquakes - maybe in 200 years}, series = {Geophysical research letters}, volume = {40}, journal = {Geophysical research letters}, number = {15}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0094-8276}, doi = {10.1002/grl.50779}, pages = {3873 -- 3877}, year = {2013}, abstract = {I study the ability to estimate the tail of the frequency-magnitude distribution of global earthquakes. While power-law scaling for small earthquakes is well supported by data, the tail remains speculative. In a recent study, Bell et al. (2013) claim that the frequency-magnitude distribution of global earthquakes converges to a tapered Pareto distribution. I show that this finding results from data-fitting errors, namely from the biased maximum likelihood estimation of the corner magnitude $\theta$ in strongly undersampled models. In particular, the estimation of $\theta$ depends solely on the few largest events in the catalog. Taking this into account, I compare various state-of-the-art models for the global frequency-magnitude distribution.
After discarding undersampled models, the remaining ones, including the unbounded Gutenberg-Richter distribution, all perform equally well and are, therefore, indistinguishable. Convergence to a specific distribution, if it ever takes place, requires at least about 200 years of homogeneous recording of global seismicity.}, language = {en} }

@phdthesis{Zoeller2005, author = {Z{\"o}ller, Gert}, title = {Critical states of seismicity : modeling and data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7427}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {The occurrence of earthquakes is characterized by a high degree of spatiotemporal complexity. Although numerous patterns, e.g. fore- and aftershock sequences, are well known, the underlying mechanisms are not observable and thus not understood. Because the recurrence times of large earthquakes are usually decades or centuries, the number of such events in corresponding data sets is too small to draw conclusions with reasonable statistical significance. Therefore, the present study combines numerical modeling and the analysis of real data in order to unveil the relationships between physical mechanisms and observational quantities. The key hypothesis is the validity of the so-called "critical point concept" for earthquakes, which assumes large earthquakes to occur as phase transitions in a spatially extended many-particle system, similar to percolation models. New concepts are developed to detect critical states in simulated and in natural data sets. The results indicate that important features of seismicity, like the frequency-size distribution and the temporal clustering of earthquakes, depend on frictional and structural fault parameters. In particular, the degree of quenched spatial disorder (the "roughness") of a fault zone determines whether large earthquakes occur quasiperiodically or in a more clustered fashion. This illustrates the power of numerical models to identify regions in parameter space that are relevant for natural seismicity. The critical point concept is verified for both synthetic and natural seismicity in terms of a critical state which precedes a large earthquake: a gradual roughening of the (unobservable) stress field leads to a scale-free (observable) frequency-size distribution. Furthermore, the growth of the spatial correlation length and the acceleration of the seismic energy release prior to large events are found. The predictive power of these precursors is, however, limited. Rather than forecasting the time, location, and magnitude of individual events, their use as a contribution to a broad multiparameter approach appears encouraging.}, subject = {Seismizit{\"a}t}, language = {en} }

@article{FiedlerHainzlZoelleretal.2018, author = {Fiedler, Bernhard and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Detection of Gutenberg-Richter b-Value Changes in Earthquake Time Series}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {5A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120180091}, pages = {2778 -- 2787}, year = {2018}, abstract = {The Gutenberg-Richter relation for earthquake magnitudes is the most famous empirical law in seismology.
It states that the frequency of earthquake magnitudes follows an exponential distribution; this has been found to be a robust feature of seismicity above the completeness magnitude, and it is independent of whether global, regional, or local seismicity is analyzed. However, the exponent b of the distribution varies significantly in space and time, which is important for process understanding and seismic hazard assessment, particularly because the Gutenberg-Richter b-value acts as a proxy for the stress state and quantifies the ratio of large to small earthquakes. In our work, we focus on the automatic detection of statistically significant temporal changes of the b-value in seismicity data. In our approach, we use Bayes factors for model selection and estimate multiple change-points of the frequency-magnitude distribution in time. The method is first applied to synthetic data, showing its capability to detect change-points as a function of the sample size and the b-value contrast. Finally, we apply this approach to examples of observational data sets for which b-value changes have previously been reported. Our analysis of foreshock and aftershock sequences related to mainshocks, as well as earthquake swarms, shows that only a portion of the b-value changes is statistically significant.}, language = {en} }

@article{HolschneiderZoellerHainzl2011, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Estimation of the maximum possible magnitude in the framework of a doubly truncated Gutenberg-Richter Model}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100289}, pages = {1649 -- 1659}, year = {2011}, abstract = {We discuss to what extent a given earthquake catalog and the assumption of a doubly truncated Gutenberg-Richter distribution for the earthquake magnitudes allow for the calculation of confidence intervals for the maximum possible magnitude M. We show that, without further assumptions such as the existence of an upper bound of M, only very limited information may be obtained. In a frequentist formulation, for each confidence level $\alpha$ the confidence interval diverges with finite probability. In a Bayesian formulation, the posterior distribution of the upper magnitude is not normalizable. We conclude that the common approach to derive confidence intervals from the variance of a point estimator fails. Technically, this problem can be overcome by introducing an upper bound $\tilde{M}$ for the maximum magnitude. Then the Bayesian posterior distribution can be normalized, and its variance decreases with the number of observed events. However, because the posterior depends significantly on the choice of the unknown value of $\tilde{M}$, the resulting confidence intervals are essentially meaningless. The use of an informative prior distribution accounting for prior knowledge of M is also of little use, because the prior is only modified in the case of the occurrence of an extreme event. Our results suggest that the maximum possible magnitude M is better replaced by M(T), the maximum expected magnitude in a given time interval T, for which the calculation of exact confidence intervals becomes straightforward.
From a physical point of view, numerical models of the earthquake process adjusted to specific fault regions may be a powerful alternative to overcome the shortcomings of purely statistical inference.}, language = {en} }

@article{LaudanZoellerThieken2020, author = {Laudan, Jonas and Z{\"o}ller, Gert and Thieken, Annegret}, title = {Flash floods versus river floods}, series = {Natural Hazards and Earth System Sciences}, volume = {20}, journal = {Natural Hazards and Earth System Sciences}, publisher = {European Geophysical Society}, address = {Katlenburg-Lindau}, issn = {1684-9981}, doi = {10.5194/nhess-20-999-2020}, pages = {999 -- 1023}, year = {2020}, abstract = {River floods are among the most damaging natural hazards that frequently occur in Germany. Flooding causes high economic losses and impacts many residents. In 2016, several southern German municipalities were hit by flash floods after unexpectedly severe heavy rainfall, while in 2013 widespread river flooding had occurred. This study investigates and compares the psychological impacts of river floods and flash floods and potential consequences for precautionary behaviour. Data were collected using computer-aided telephone interviews that were conducted among flood-affected households around 9 months after each damaging event. This study applies Bayesian statistics and negative binomial regressions to test the suitability of psychological indicators to predict the precaution motivation of individuals. The results show that it is not the particular flood type but rather the severity and local impacts of the event that are crucial for the different, and potentially negative, impacts on mental health. According to the data used, however, predictions of the individual precaution motivation should not be based on the derived psychological indicators - i.e. coping appraisal, threat appraisal, burden and evasion - since their explanatory power was generally low and results are, for the most part, non-significant. Only burden reveals a significant positive relation to planned precaution regarding weak flash floods. In contrast to weak flash floods and river floods, the perceived threat of strong flash floods is significantly lower, although feelings of burden and lower coping appraisals are more pronounced. Further research is needed to better include psychological assessment procedures and to focus on alternative data sources regarding floods and the connected precaution motivation of affected residents.}, language = {en} }

@misc{LaudanZoellerThieken2020a, author = {Laudan, Jonas and Z{\"o}ller, Gert and Thieken, Annegret}, title = {Flash floods versus river floods}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {968}, issn = {1866-8372}, doi = {10.25932/publishup-47397}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-473974}, pages = {999 -- 1023}, year = {2020}, abstract = {River floods are among the most damaging natural hazards that frequently occur in Germany. Flooding causes high economic losses and impacts many residents. In 2016, several southern German municipalities were hit by flash floods after unexpectedly severe heavy rainfall, while in 2013 widespread river flooding had occurred. This study investigates and compares the psychological impacts of river floods and flash floods and potential consequences for precautionary behaviour.
Data were collected using computer-aided telephone interviews that were conducted among flood-affected households around 9 months after each damaging event. This study applies Bayesian statistics and negative binomial regressions to test the suitability of psychological indicators to predict the precaution motivation of individuals. The results show that it is not the particular flood type but rather the severity and local impacts of the event that are crucial for the different, and potentially negative, impacts on mental health. According to the data used, however, predictions of the individual precaution motivation should not be based on the derived psychological indicators - i.e. coping appraisal, threat appraisal, burden and evasion - since their explanatory power was generally low and results are, for the most part, non-significant. Only burden reveals a significant positive relation to planned precaution regarding weak flash floods. In contrast to weak flash floods and river floods, the perceived threat of strong flash floods is significantly lower, although feelings of burden and lower coping appraisals are more pronounced. Further research is needed to better include psychological assessment procedures and to focus on alternative data sources regarding floods and the connected precaution motivation of affected residents.}, language = {en} }

@article{ShcherbakovZhuangZoelleretal.2019, author = {Shcherbakov, Robert and Zhuang, Jiancang and Z{\"o}ller, Gert and Ogata, Yosihiko}, title = {Forecasting the magnitude of the largest expected earthquake}, series = {Nature Communications}, volume = {10}, journal = {Nature Communications}, publisher = {Nature Publishing Group}, address = {London}, issn = {2041-1723}, doi = {10.1038/s41467-019-11958-4}, pages = {11}, year = {2019}, abstract = {The majority of earthquakes occur unexpectedly and can trigger subsequent sequences of events that can culminate in more powerful earthquakes. This self-exciting nature of seismicity generates complex clustering of earthquakes in space and time. Therefore, the problem of constraining the magnitude of the largest expected earthquake during a future time interval is of critical importance in mitigating earthquake hazard. We address this problem by developing a methodology to compute the probabilities for such extreme earthquakes to be above certain magnitudes. We combine Bayesian methods with extreme value theory and assume that the occurrence of earthquakes can be described by the Epidemic Type Aftershock Sequence (ETAS) process. We analyze in detail the application of this methodology to the 2016 Kumamoto, Japan, earthquake sequence. We are able to retrospectively estimate the probabilities of having large subsequent earthquakes during several stages of the evolution of this sequence.}, language = {en} }

@article{HainzlZoellerWang2010, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Wang, Rongjiang}, title = {Impact of the receiver fault distribution on aftershock activity}, issn = {0148-0227}, doi = {10.1029/2008jb006224}, year = {2010}, abstract = {Aftershock models are usually based either on purely empirical relations ignoring the physical mechanism or on deterministic calculations of stress changes on a predefined receiver fault orientation. Here we investigate the effect of considering more realistic fault systems in models based on static Coulomb stress changes.
For that purpose, we perform earthquake simulations with elastic half-space stress interactions, rate- and state-dependent frictional earthquake nucleation, and extended ruptures with heterogeneous (fractal) slip distributions. We find that the consideration of earthquake nucleation on multiple receiver fault orientations does not influence the shape of the temporal Omori-type aftershock decay, but significantly changes the predicted spatial patterns and the total number of triggered events. So-called stress shadows with decreased activity almost vanish, and activation decays continuously with increasing distance from the main shock rupture. The total aftershock productivity, which is shown to be almost independent of the assumed background rate, increases significantly if multiple receiver fault planes exist. The application to the 1992 M7.3 Landers, California, aftershock sequence indicates a good agreement with the locations and the total productivity of the observed directly triggered aftershocks.}, language = {en} }

@article{ZoellerHolschneider2014, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Induced seismicity: What is the size of the largest expected earthquake?}, series = {Bulletin of the Seismological Society of America}, volume = {104}, journal = {Bulletin of the Seismological Society of America}, number = {6}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140195}, pages = {3153 -- 3158}, year = {2014}, abstract = {The injection of fluids is a well-known origin for the triggering of earthquake sequences. The growing number of projects related to enhanced geothermal systems, fracking, and others has led to the question of which maximum earthquake magnitude can be expected as a consequence of fluid injection. This question is addressed from the perspective of statistical analysis. Using basic empirical laws of earthquake statistics, we estimate the magnitude $M_T$ of the maximum expected earthquake in a predefined future time window $T_f$. A case study of the fluid injection site at Paradox Valley, Colorado, demonstrates that the magnitude m 4.3 of the largest observed earthquake on 27 May 2000 lies very well within the expectation from past seismicity without adjusting any parameters. Conversely, for a given maximum tolerable earthquake at an injection site, we can constrain the corresponding amount of injected fluids that must not be exceeded within predefined confidence bounds.}, language = {en} }