@article{StraderSchneiderSchorlemmer2017, author = {Strader, Anne and Schneider, Max and Schorlemmer, Danijel}, title = {Prospective and retrospective evaluation of five-year earthquake forecast models for {California}}, series = {Geophysical journal international}, volume = {211}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggx268}, pages = {239--251}, year = {2017}, language = {en} } @article{StraderSchneiderSchorlemmer2017Erratum, author = {Strader, Anne and Schneider, Max and Schorlemmer, Danijel}, title = {Erratum zu: Strader, Anne; Schneider, Max; Schorlemmer, Danijel: Prospective and retrospective evaluation of five-year earthquake forecast models for {California} (Geophysical Journal International, 211 (2017) 1, S. 239 - 251, https://doi.org/10.1093/gji/ggx268)}, series = {Geophysical journal international}, volume = {212}, journal = {Geophysical journal international}, number = {2}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggx496}, pages = {1314}, year = {2017}, abstract = {S-test results for the USGS and RELM forecasts. The differences between the simulated log-likelihoods and the observed log-likelihood are labelled on the horizontal axes, with scaling adjustments for the 40year.retro experiment. The horizontal lines represent the confidence intervals, within the 0.05 significance level, for each forecast and experiment. If this range contains a log-likelihood difference of zero, the forecasted log-likelihoods are consistent with the observed, and the forecast passes the S-test (denoted by thin lines). If the minimum difference within this range does not contain zero, the forecast fails the S-test for that particular experiment, denoted by thick lines. Colours distinguish between experiments (see Table 2 for explanation of experiment durations). 
Due to anomalously large likelihood differences, S-test results for Wiemer-Schorlemmer.ALM during the 10year.retro and 40year.retro experiments are not displayed. The range of log-likelihoods for the Holliday-et-al.PI forecast is lower than for the other forecasts due to relatively homogeneous forecasted seismicity rates and use of a small fraction of the RELM testing region.}, language = {en} } @article{ShebalinNarteauHolschneideretal.2011, author = {Shebalin, Peter and Narteau, Clement and Holschneider, Matthias and Schorlemmer, Danijel}, title = {Short-Term earthquake forecasting using early aftershock statistics}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {1}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100119}, pages = {297--312}, year = {2011}, abstract = {We present an alarm-based earthquake forecast model that uses the early aftershock statistics (EAST). This model is based on the hypothesis that the time delay before the onset of the power-law aftershock decay rate decreases as the level of stress and the seismogenic potential increase. Here, we estimate this time delay from $\langle t_g \rangle$, the time constant of the Omori-Utsu law. To isolate space-time regions with a relative high level of stress, the single local variable of our forecast model is the $E_a$ value, the ratio between the long-term and short-term estimations of $\langle t_g \rangle$. When and where the $E_a$ value exceeds a given threshold (i.e., the c value is abnormally small), an alarm is issued, and an earthquake is expected to occur during the next time step. Retrospective tests show that the EAST model has better predictive power than a stationary reference model based on smoothed extrapolation of past seismicity. 
The official prospective test for California started on 1 July 2009 in the testing center of the Collaboratory for the Study of Earthquake Predictability (CSEP). During the first nine months, 44 $M \geq 4$ earthquakes occurred in the testing area. For this time period, the EAST model has better predictive power than the reference model at a 1\% level of significance. Because the EAST model has also a better predictive power than several time-varying clustering models tested in CSEP at a 1\% level of significance, we suggest that our successful prospective results are not due only to the space-time clustering of aftershocks.}, language = {en} } @article{NievasPilzPrehnetal.2022, author = {Nievas, Cecilia and Pilz, Marco and Prehn, Karsten and Schorlemmer, Danijel and Weatherill, Graeme and Cotton, Fabrice}, title = {Calculating earthquake damage building by building}, series = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, volume = {20}, journal = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, number = {3}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-761X}, doi = {10.1007/s10518-021-01303-w}, pages = {1519--1565}, year = {2022}, abstract = {The creation of building exposure models for seismic risk assessment is frequently challenging due to the lack of availability of detailed information on building structures. Different strategies have been developed in recent years to overcome this, including the use of census data, remote sensing imagery and volunteered graphic information (VGI). This paper presents the development of a building-by-building exposure model based exclusively on openly available datasets, including both VGI and census statistics, which are defined at different levels of spatial resolution and for different moments in time. 
The initial model stemming purely from building-level data is enriched with statistics aggregated at the neighbourhood and city level by means of a Monte Carlo simulation that enables the generation of full realisations of damage estimates when using the exposure model in the context of an earthquake scenario calculation. Though applicable to any other region of interest where analogous datasets are available, the workflow and approach followed are explained by focusing on the case of the German city of Cologne, for which a scenario earthquake is defined and the potential damage is calculated. The resulting exposure model and damage estimates are presented, and it is shown that the latter are broadly consistent with damage data from the 1978 Albstadt earthquake, notwithstanding the differences in the scenario. Through this real-world application we demonstrate the potential of VGI and open data to be used for exposure modelling for natural risk assessment, when combined with suitable knowledge on building fragility and accounting for the inherent uncertainties.}, language = {en} } @article{HolschneiderZoellerClementsetal.2014, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Clements, R. and Schorlemmer, Danijel}, title = {Can we test for the maximum possible earthquake magnitude?}, series = {Journal of geophysical research : Solid earth}, volume = {119}, journal = {Journal of geophysical research : Solid earth}, number = {3}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2013JB010319}, pages = {2019--2028}, year = {2014}, language = {en} } @article{HolschneiderNarteauShebalinetal.2012, author = {Holschneider, Matthias and Narteau, C. and Shebalin, P. and Peng, Z. 
and Schorlemmer, Danijel}, title = {Bayesian analysis of the modified {Omori} law}, series = {Journal of geophysical research : Solid earth}, volume = {117}, journal = {Journal of geophysical research : Solid earth}, number = {6089}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2011JB009054}, pages = {12}, year = {2012}, abstract = {In order to examine variations in aftershock decay rate, we propose a Bayesian framework to estimate the {K, c, p}-values of the modified Omori law (MOL), $\lambda(t) = K(c + t)^{-p}$. The Bayesian setting allows not only to produce a point estimator of these three parameters but also to assess their uncertainties and posterior dependencies with respect to the observed aftershock sequences. Using a new parametrization of the MOL, we identify the trade-off between the c and p-value estimates and discuss its dependence on the number of aftershocks. Then, we analyze the influence of the catalog completeness interval $[t_{start}, t_{stop}]$ on the various estimates. To test this Bayesian approach on natural aftershock sequences, we use two independent and non-overlapping aftershock catalogs of the same earthquakes in Japan. Taking into account the posterior uncertainties, we show that both the handpicked (short times) and the instrumental (long times) catalogs predict the same ranges of parameter values. 
We therefore conclude that the same MOL may be valid over short and long times.}, language = {en} } @article{BayonaViverosvonSpechtStraderetal.2019, author = {Bayona Viveros, Jose Antonio and von Specht, Sebastian and Strader, Anne and Hainzl, Sebastian and Cotton, Fabrice and Schorlemmer, Danijel}, title = {A Regionalized Seismicity Model for Subduction Zones Based on Geodetic Strain Rates, Geomechanical Parameters, and Earthquake-Catalog Data}, series = {Bulletin of the Seismological Society of America}, volume = {109}, journal = {Bulletin of the Seismological Society of America}, number = {5}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120190034}, pages = {2036--2049}, year = {2019}, abstract = {The Seismic Hazard Inferred from Tectonics based on the Global Strain Rate Map (SHIFT\_GSRM) earthquake forecast was designed to provide high-resolution estimates of global shallow seismicity to be used in seismic hazard assessment. This model combines geodetic strain rates with global earthquake parameters to characterize long-term rates of seismic moment and earthquake activity. Although SHIFT\_GSRM properly computes seismicity rates in seismically active continental regions, it underestimates earthquake rates in subduction zones by an average factor of approximately 3. We present a complementary method to SHIFT\_GSRM to more accurately forecast earthquake rates in 37 subduction segments, based on the conservation of moment principle and the use of regional interface seismicity parameters, such as subduction dip angles, corner magnitudes, and coupled seismogenic thicknesses. In seven progressive steps, we find that SHIFT\_GSRM earthquake-rate underpredictions are mainly due to the utilization of a global probability function of seismic moment release that poorly captures the great variability among subduction megathrust interfaces. 
Retrospective test results show that the forecast is consistent with the observations during the 1 January 1977 to 31 December 2014 period. Moreover, successful pseudoprospective evaluations for the 1 January 2015 to 31 December 2018 period demonstrate the power of the regionalized earthquake model to properly estimate subduction-zone seismicity.}, language = {en} }