@misc{Zoeller2017, author = {Z{\"o}ller, Gert}, title = {Comment on "Estimation of Earthquake Hazard Parameters from Incomplete Data Files. Part III. Incorporation of Uncertainty of Earthquake-Occurrence Model" by Andrzej Kijko, Ansie Smit, and Markvard A. Sellevoll}, series = {Bulletin of the Seismological Society of America}, volume = {107}, journal = {Bulletin of the Seismological Society of America}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120160193}, pages = {1975 -- 1978}, year = {2017}, abstract = {Kijko et al. (2016) present various methods to estimate parameters that are relevant for probabilistic seismic-hazard assessment. One of these parameters, although not the most influential, is the maximum possible earthquake magnitude m(max). I show that the proposed estimation of m(max) is based on an erroneous equation related to a misuse of the estimator in Cooke (1979) and leads to unstable results. So far, reported finite estimations of m(max) arise from data selection, because the estimator in Kijko et al. (2016) diverges with finite probability. This finding is independent of the assumed distribution of earthquake magnitudes. For the specific choice of the doubly truncated Gutenberg-Richter distribution, I illustrate the problems by deriving explicit equations. 
Finally, I conclude that point estimators are generally not a suitable approach to constrain m(max).}, language = {en} } @article{ShcherbakovZhuangZoelleretal.2019, author = {Shcherbakov, Robert and Zhuang, Jiancang and Z{\"o}ller, Gert and Ogata, Yosihiko}, title = {Forecasting the magnitude of the largest expected earthquake}, series = {Nature Communications}, volume = {10}, journal = {Nature Communications}, publisher = {Nature Publishing Group}, address = {London}, issn = {2041-1723}, doi = {10.1038/s41467-019-11958-4}, pages = {11}, year = {2019}, abstract = {The majority of earthquakes occur unexpectedly and can trigger subsequent sequences of events that can culminate in more powerful earthquakes. This self-exciting nature of seismicity generates complex clustering of earthquakes in space and time. Therefore, the problem of constraining the magnitude of the largest expected earthquake during a future time interval is of critical importance in mitigating earthquake hazard. We address this problem by developing a methodology to compute the probabilities for such extreme earthquakes to be above certain magnitudes. We combine the Bayesian methods with the extreme value theory and assume that the occurrence of earthquakes can be described by the Epidemic Type Aftershock Sequence process. We analyze in detail the application of this methodology to the 2016 Kumamoto, Japan, earthquake sequence. 
We are able to estimate retrospectively the probabilities of having large subsequent earthquakes during several stages of the evolution of this sequence.}, language = {en} } @article{SalamatZoellerAmini2019, author = {Salamat, Mona and Z{\"o}ller, Gert and Amini, Morteza}, title = {Prediction of the Maximum Expected Earthquake Magnitude in Iran:}, series = {Pure and applied geophysics}, volume = {176}, journal = {Pure and applied geophysics}, number = {8}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-019-02141-3}, pages = {3425 -- 3438}, year = {2019}, abstract = {This paper concerns the problem of predicting the maximum expected earthquake magnitude μ in a future time interval Tf given a catalog covering a time period T in the past. Different studies show the divergence of the confidence interval of the maximum possible earthquake magnitude m_{ max } for high levels of confidence (Salamat et al. 2017). Therefore, m_{ max } should be better replaced by μ (Holschneider et al. 2011). In a previous study (Salamat et al. 2018), μ is estimated for an instrumental earthquake catalog of Iran from 1900 onwards with a constant level of completeness ( {m0 = 5.5} ). In the current study, the Bayesian methodology developed by Z{\"o}ller et al. (2014, 2015) is applied for the purpose of predicting μ based on the catalog consisting of both historical and instrumental parts. The catalog is first subdivided into six subcatalogs corresponding to six seismotectonic zones, and each of those zone catalogs is subsequently subdivided according to changes in completeness level and magnitude uncertainty. For this, broad and small error distributions are considered for historical and instrumental earthquakes, respectively. We assume that earthquakes follow a Poisson process in time and Gutenberg-Richter law in the magnitude domain with a priori unknown a and b values which are first estimated by Bayes' theorem and subsequently used to estimate μ. 
Imposing different values of m_{ max } for different seismotectonic zones namely Alborz, Azerbaijan, Central Iran, Zagros, Kopet Dagh and Makran, the results show considerable probabilities for the occurrence of earthquakes with Mw ≥ 7.5 in short Tf , whereas for long Tf, μ is almost equal to m_{ max }}, language = {en} } @article{HainzlZoellerMain2006, author = {Hainzl, Sebastian and Z{\"o}ller, Gert and Main, Ian}, title = {Introduction to special issue: Dynamics of seismicity patterns and earthquake triggering - Preface}, series = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, volume = {424}, journal = {Tectonophysics : international journal of geotectonics and the geology and physics of the interior of the earth}, number = {Special issue}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0040-1951}, doi = {10.1016/j.tecto.2006.03.034}, pages = {135 -- 138}, year = {2006}, language = {en} } @article{Zoeller2022, author = {Z{\"o}ller, Gert}, title = {A note on the estimation of the maximum possible earthquake magnitude based on extreme value theory for the Groningen Gas Field}, series = {The bulletin of the Seismological Society of America : BSSA}, volume = {112}, journal = {The bulletin of the Seismological Society of America : BSSA}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerito, Calif.}, issn = {0037-1106}, doi = {10.1785/0120210307}, pages = {1825 -- 1831}, year = {2022}, abstract = {Extreme value statistics is a popular and frequently used tool to model the occurrence of large earthquakes. The problem of poor statistics arising from rare events is addressed by taking advantage of the validity of general statistical properties in asymptotic regimes. 
In this note, I argue that the use of extreme value statistics for the purpose of practically modeling the tail of the frequency-magnitude distribution of earthquakes can produce biased and thus misleading results because it is unknown to what degree the tail of the true distribution is sampled by data. Using synthetic data allows one to quantify this bias in detail. The implicit assumption that the true M-max is close to the maximum observed magnitude M-max,M-observed restricts the class of the potential models a priori to those with M-max = M-max,M-observed + Delta M with an increment Delta M approximate to 0.5... 1.2. This corresponds to the simple heuristic method suggested by Wheeler (2009) and labeled "M-max equals M-obs plus an increment." The incomplete consideration of the entire model family for the frequency-magnitude distribution neglects, however, the scenario of a large so far unobserved earthquake.}, language = {en} } @misc{ZoellerHolschneider2018, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Reply to "Comment on 'The Maximum Possible and the Maximum Expected Earthquake Magnitude for Production-Induced Earthquakes at the Gas Field in Groningen, The Netherlands' by Gert Z{\"o}ller and Matthias Holschneider" by Mathias Raschke}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170131}, pages = {1029 -- 1030}, year = {2018}, language = {en} } @article{ZollerHolschneiderBenZion2005, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Ben-Zion, Yehuda}, title = {The role of heterogeneities as a tuning parameter of earthquake dynamics}, issn = {0033-4553}, year = {2005}, abstract = {We investigate the influence of spatial heterogeneities on various aspects of brittle failure and seismicity in a model of a large strike-slip fault. 
The model dynamics is governed by realistic boundary conditions consisting of constant velocity motion of regions around the fault, static/kinetic friction laws, creep with depth-dependent coefficients, and 3-D elastic stress transfer. The dynamic rupture is approximated on a continuous time scale using a finite stress propagation velocity ("quasidynamic model''). The model produces a "brittle- ductile'' transition at a depth of about 12.5 km, realistic hypocenter distributions, and other features of seismicity compatible with observations. Previous work suggested that the range of size scales in the distribution of strength-stress heterogeneities acts as a tuning parameter of the dynamics. Here we test this hypothesis by performing a systematic parameter-space study with different forms of heterogeneities. In particular, we analyze spatial heterogeneities that can be tuned by a single parameter in two distributions: ( 1) high stress drop barriers in near- vertical directions and ( 2) spatial heterogeneities with fractal properties and variable fractal dimension. The results indicate that the first form of heterogeneities provides an effective means of tuning the behavior while the second does not. In relatively homogeneous cases, the fault self-organizes to large-scale patches and big events are associated with inward failure of individual patches and sequential failures of different patches. The frequency-size event statistics in such cases are compatible with the characteristic earthquake distribution and large events are quasi-periodic in time. In strongly heterogeneous or near-critical cases, the rupture histories are highly discontinuous and consist of complex migration patterns of slip on the fault. 
In such cases, the frequency-size and temporal statistics follow approximately power-law relations.}, language = {en} } @article{ZollerHainzlHolschneideretal.2005, author = {Z{\"o}ller, Gert and Hainzl, Sebastian and Holschneider, Matthias and Ben-Zion, Yehuda}, title = {Aftershocks resulting from creeping sections in a heterogeneous fault}, issn = {0094-8276}, year = {2005}, abstract = {We show that realistic aftershock sequences with space-time characteristics compatible with observations are generated by a model consisting of brittle fault segments separated by creeping zones. The dynamics of the brittle regions is governed by static/kinetic friction, 3D elastic stress transfer and small creep deformation. The creeping parts are characterized by high ongoing creep velocities. These regions store stress during earthquake failures and then release it in the interseismic periods. The resulting postseismic deformation leads to aftershock sequences following the modified Omori law. The ratio of creep coefficients in the brittle and creeping sections determines the duration of the postseismic transients and the exponent p of the modified Omori law.}, language = {en} } @article{ZollerHolschneiderBenZion2004, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Ben-Zion, Yehuda}, title = {Quasi-static and quasi-dynamic modeling of earthquake failure at intermediate scales}, year = {2004}, abstract = {We present a model for earthquake failure at intermediate scales (space: 100 m-100 km, time: 100 m/nu(shear}, language = {en} } @phdthesis{Zoeller2005, author = {Z{\"o}ller, Gert}, title = {Critical states of seismicity : modeling and data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7427}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {The occurrence of earthquakes is characterized by a high degree of spatiotemporal complexity. Although numerous patterns, e.g. 
fore- and aftershock sequences, are well-known, the underlying mechanisms are not observable and thus not understood. Because the recurrence times of large earthquakes are usually decades or centuries, the number of such events in corresponding data sets is too small to draw conclusions with reasonable statistical significance. Therefore, the present study combines both, numerical modeling and analysis of real data in order to unveil the relationships between physical mechanisms and observational quantities. The key hypothesis is the validity of the so-called "critical point concept" for earthquakes, which assumes large earthquakes to occur as phase transitions in a spatially extended many-particle system, similar to percolation models. New concepts are developed to detect critical states in simulated and in natural data sets. The results indicate that important features of seismicity like the frequency-size distribution and the temporal clustering of earthquakes depend on frictional and structural fault parameters. In particular, the degree of quenched spatial disorder (the "roughness") of a fault zone determines whether large earthquakes occur quasiperiodically or more clustered. This illustrates the power of numerical models in order to identify regions in parameter space, which are relevant for natural seismicity. The critical point concept is verified for both, synthetic and natural seismicity, in terms of a critical state which precedes a large earthquake: a gradual roughening of the (unobservable) stress field leads to a scale-free (observable) frequency-size distribution. Furthermore, the growth of the spatial correlation length and the acceleration of the seismic energy release prior to large events is found. The predictive power of these precursors is, however, limited. 
Instead of forecasting time, location, and magnitude of individual events, a contribution to a broad multiparameter approach is encouraging.}, subject = {Seismizit{\"a}t}, language = {en} } @inproceedings{HainzlScherbaumZoeller2006, author = {Hainzl, Sebastian and Scherbaum, Frank and Z{\"o}ller, Gert}, title = {Spatiotemporal earthquake patterns}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7267}, year = {2006}, abstract = {Interdisziplin{\"a}res Zentrum f{\"u}r Musterdynamik und Angewandte Fernerkundung Workshop vom 9. - 10. Februar 2006}, language = {en} } @phdthesis{Zoeller1999, author = {Z{\"o}ller, Gert}, title = {Analyse raumzeitlicher Muster in Erdbebendaten}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000122}, school = {Universit{\"a}t Potsdam}, year = {1999}, abstract = {Die vorliegende Arbeit besch{\"a}ftigt sich mit der Charakterisierung von Seismizit{\"a}t anhand von Erdbebenkatalogen. Es werden neue Verfahren der Datenanalyse entwickelt, die Aufschluss dar{\"u}ber geben sollen, ob der seismischen Dynamik ein stochastischer oder ein deterministischer Prozess zugrunde liegt und was daraus f{\"u}r die Vorhersagbarkeit starker Erdbeben folgt. Es wird gezeigt, dass seismisch aktive Regionen h{\"a}ufig durch nichtlinearen Determinismus gekennzeichent sind. Dies schließt zumindest die M{\"o}glichkeit einer Kurzzeitvorhersage ein. Das Auftreten seismischer Ruhe wird h{\"a}ufig als Vorl{\"a}uferphaenomen f{\"u}r starke Erdbeben gedeutet. Es wird eine neue Methode pr{\"a}sentiert, die eine systematische raumzeitliche Kartierung seismischer Ruhephasen erm{\"o}glicht. Die statistische Signifikanz wird mit Hilfe des Konzeptes der Ersatzdaten bestimmt. Als Resultat erh{\"a}lt man deutliche Korrelationen zwischen seismischen Ruheperioden und starken Erdbeben. 
Gleichwohl ist die Signifikanz daf{\"u}r nicht hoch genug, um eine Vorhersage im Sinne einer Aussage {\"u}ber den Ort, die Zeit und die St{\"a}rke eines zu erwartenden Hauptbebens zu erm{\"o}glichen.}, language = {de} } @article{SalamatZareHolschneideretal.2016, author = {Salamat, Mona and Zare, Mehdi and Holschneider, Matthias and Z{\"o}ller, Gert}, title = {Calculation of Confidence Intervals for the Maximum Magnitude of Earthquakes in Different Seismotectonic Zones of Iran}, series = {Pure and applied geophysics}, volume = {174}, journal = {Pure and applied geophysics}, publisher = {Springer}, address = {Basel}, issn = {0033-4553}, doi = {10.1007/s00024-016-1418-5}, pages = {763 -- 777}, year = {2016}, abstract = {The problem of estimating the maximum possible earthquake magnitude m(max) has attracted growing attention in recent years. Due to sparse data, the role of uncertainties becomes crucial. In this work, we determine the uncertainties related to the maximum magnitude in terms of confidence intervals. Using an earthquake catalog of Iran, m(max) is estimated for different predefined levels of confidence in six seismotectonic zones. Assuming the doubly truncated Gutenberg-Richter distribution as a statistical model for earthquake magnitudes, confidence intervals for the maximum possible magnitude of earthquakes are calculated in each zone. While the lower limit of the confidence interval is the magnitude of the maximum observed event, the upper limit is calculated from the catalog and the statistical model. For this aim, we use the original catalog, to which no declustering methods were applied, as well as a declustered version of the catalog. Based on the study by Holschneider et al. (Bull Seismol Soc Am 101(4): 1649-1659, 2011), the confidence interval for m(max) is frequently unbounded, especially if high levels of confidence are required. In this case, no information is gained from the data. 
Therefore, we elaborate for which settings finite confidence levels are obtained. In this work, Iran is divided into six seismotectonic zones, namely Alborz, Azerbaijan, Zagros, Makran, Kopet Dagh, Central Iran. Although calculations of the confidence interval in Central Iran and Zagros seismotectonic zones are relatively acceptable for meaningful levels of confidence, results in Kopet Dagh, Alborz, Azerbaijan and Makran are not that much promising. The results indicate that estimating mmax from an earthquake catalog for reasonable levels of confidence alone is almost impossible.}, language = {en} } @article{Zoeller2018, author = {Z{\"o}ller, Gert}, title = {A statistical model for earthquake recurrence based on the assimilation of paleoseismicity, historic seismicity, and instrumental seismicity}, series = {Journal of geophysical research : Solid earth}, volume = {123}, journal = {Journal of geophysical research : Solid earth}, number = {6}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2017JB015099}, pages = {4906 -- 4921}, year = {2018}, abstract = {Paleoearthquakes and historic earthquakes are the most important source of information for the estimation of long-term earthquake recurrence intervals in fault zones, because corresponding sequences cover more than one seismic cycle. However, these events are often rare, dating uncertainties are enormous, and missing or misinterpreted events lead to additional problems. In the present study, I assume that the time to the next major earthquake depends on the rate of small and intermediate events between the large ones in terms of a clock change model. Mathematically, this leads to a Brownian passage time distribution for recurrence intervals. 
I take advantage of an earlier finding that under certain assumptions the aperiodicity of this distribution can be related to the Gutenberg-Richter b value, which can be estimated easily from instrumental seismicity in the region under consideration. In this way, both parameters of the Brownian passage time distribution can be attributed with accessible seismological quantities. This allows to reduce the uncertainties in the estimation of the mean recurrence interval, especially for short paleoearthquake sequences and high dating errors. Using a Bayesian framework for parameter estimation results in a statistical model for earthquake recurrence intervals that assimilates in a simple way paleoearthquake sequences and instrumental data. I present illustrative case studies from Southern California and compare the method with the commonly used approach of exponentially distributed recurrence times based on a stationary Poisson process.}, language = {en} } @article{FiedlerZoellerHolschneideretal.2018, author = {Fiedler, Bernhard and Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {Multiple Change-Point Detection in Spatiotemporal Seismicity Data}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {3A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170236}, pages = {1147 -- 1159}, year = {2018}, abstract = {Earthquake rates are driven by tectonic stress buildup, earthquake-induced stress changes, and transient aseismic processes. Although the origin of the first two sources is known, transient aseismic processes are more difficult to detect. 
However, the knowledge of the associated changes of the earthquake activity is of great interest, because it might help identify natural aseismic deformation patterns such as slow-slip events, as well as the occurrence of induced seismicity related to human activities. For this goal, we develop a Bayesian approach to identify change-points in seismicity data automatically. Using the Bayes factor, we select a suitable model, estimate possible change-points, and we additionally use a likelihood ratio test to calculate the significance of the change of the intensity. The approach is extended to spatiotemporal data to detect the area in which the changes occur. The method is first applied to synthetic data showing its capability to detect real change-points. Finally, we apply this approach to observational data from Oklahoma and observe statistically significant changes of seismicity in space and time.}, language = {en} } @misc{WangZoellerHainzl2014, author = {Wang, Lifeng and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Joint determination of slip and stress drop in a Bayesian inversion approach:}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {888}, issn = {1866-8372}, doi = {10.25932/publishup-43551}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-435511}, pages = {375 -- 388}, year = {2014}, abstract = {Stress drop is a key factor in earthquake mechanics and engineering seismology. However, stress drop calculations based on fault slip can be significantly biased, particularly due to subjectively determined smoothing conditions in the traditional least-square slip inversion. In this study, we introduce a mechanically constrained Bayesian approach to simultaneously invert for fault slip and stress drop based on geodetic measurements. 
A Gaussian distribution for stress drop is implemented in the inversion as a prior. We have done several synthetic tests to evaluate the stability and reliability of the inversion approach, considering different fault discretization, fault geometries, utilized datasets, and variability of the slip direction, respectively. We finally apply the approach to the 2010 M8.8 Maule earthquake and invert for the coseismic slip and stress drop simultaneously. Two fault geometries from the literature are tested. Our results indicate that the derived slip models based on both fault geometries are similar, showing major slip north of the hypocenter and relatively weak slip in the south, as indicated in the slip models of other studies. The derived mean stress drop is 5-6 MPa, which is close to the stress drop of similar to 7 MPa that was independently determined according to force balance in this region Luttrell et al. (J Geophys Res, 2011). These findings indicate that stress drop values can be consistently extracted from geodetic data.}, language = {en} } @article{SchoppaSiegVogeletal.2020, author = {Schoppa, Lukas and Sieg, Tobias and Vogel, Kristin and Z{\"o}ller, Gert and Kreibich, Heidi}, title = {Probabilistic flood loss models for companies}, series = {Water resources research}, volume = {56}, journal = {Water resources research}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2020WR027649}, pages = {19}, year = {2020}, abstract = {Flood loss modeling is a central component of flood risk analysis. Conventionally, this involves univariable and deterministic stage-damage functions. Recent advancements in the field promote the use of multivariable and probabilistic loss models, which consider variables beyond inundation depth and account for prediction uncertainty. Although companies contribute significantly to total loss figures, novel modeling approaches for companies are lacking. 
Scarce data and the heterogeneity among companies impede the development of company flood loss models. We present three multivariable flood loss models for companies from the manufacturing, commercial, financial, and service sector that intrinsically quantify prediction uncertainty. Based on object-level loss data (n = 1,306), we comparatively evaluate the predictive capacity of Bayesian networks, Bayesian regression, and random forest in relation to deterministic and probabilistic stage-damage functions, serving as benchmarks. The company loss data stem from four postevent surveys in Germany between 2002 and 2013 and include information on flood intensity, company characteristics, emergency response, private precaution, and resulting loss to building, equipment, and goods and stock. We find that the multivariable probabilistic models successfully identify and reproduce essential relationships of flood damage processes in the data. The assessment of model skill focuses on the precision of the probabilistic predictions and reveals that the candidate models outperform the stage-damage functions, while differences among the proposed models are negligible. Although the combination of multivariable and probabilistic loss estimation improves predictive accuracy over the entire data set, wide predictive distributions stress the necessity for the quantification of uncertainty.}, language = {en} } @article{HolschneiderZoellerClementsetal.2014, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Clements, R. 
and Schorlemmer, Danijel}, title = {Can we test for the maximum possible earthquake magnitude?}, series = {Journal of geophysical research : Solid earth}, volume = {119}, journal = {Journal of geophysical research : Solid earth}, number = {3}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2013JB010319}, pages = {2019 -- 2028}, year = {2014}, language = {en} } @article{EngbertHainzlZoelleretal.1998, author = {Engbert, Ralf and Hainzl, Sebastian and Z{\"o}ller, Gert and Kurths, J{\"u}rgen}, title = {Testing for unstable periodic orbits to characterize spatiotemporal dynamics}, year = {1998}, language = {en} } @article{ZoellerHolschneiderHainzl2013, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {The Maximum Earthquake Magnitude in a Time Horizon: Theory and Case Studies}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {2A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120120013}, pages = {860 -- 875}, year = {2013}, abstract = {We show how the maximum magnitude within a predefined future time horizon may be estimated from an earthquake catalog within the context of Gutenberg-Richter statistics. The aim is to carry out a rigorous uncertainty assessment, and calculate precise confidence intervals based on an imposed level of confidence alpha. In detail, we present a model for the estimation of the maximum magnitude to occur in a time interval T-f in the future, given a complete earthquake catalog for a time period T in the past and, if available, paleoseismic events. For this goal, we solely assume that earthquakes follow a stationary Poisson process in time with unknown productivity Lambda and obey the Gutenberg-Richter law in magnitude domain with unknown b-value. The random variables Lambda 
and b are estimated by means of Bayes theorem with noninformative prior distributions. Results based on synthetic catalogs and on retrospective calculations of historic catalogs from the highly active area of Japan and the low-seismicity, but high-risk region lower Rhine embayment (LRE) in Germany indicate that the estimated magnitudes are close to the true values. Finally, we discuss whether the techniques can be extended to meet the safety requirements for critical facilities such as nuclear power plants. For this aim, the maximum magnitude for all times has to be considered. In agreement with earlier work, we find that this parameter is not a useful quantity from the viewpoint of statistical inference.}, language = {en} }