@article{KummerowKindOnckenetal.2004, author = {Kummerow, J. and Kind, Rainer and Oncken, Onno and Giese, Peter and Ryberg, Trond and Wylegalla, Kurt and Scherbaum, Frank}, title = {A natural and controlled source seismic profile through the Eastern Alps : TRANSALP}, year = {2004}, abstract = {The combined passive and active seismic TRANSALP experiment produced an unprecedented high-resolution crustal image of the Eastern Alps between Munich and Venice. The European and Adriatic Mohos (EM and AM, respectively) are clearly imaged with different seismic techniques: near-vertical incidence reflections and receiver functions (RFs). The European Moho dips gently southward from 35 km beneath the northern foreland to a maximum depth of 55 km beneath the central part of the Eastern Alps, whereas the Adriatic Moho is imaged primarily by receiver functions at a relatively constant depth of about 40 km. In both data sets, we have also detected first-order Alpine shear zones, such as the Helvetic detachment, Inntal fault and SubTauern ramp in the north. Apart from the Valsugana thrust, receiver functions in the southern part of the Eastern Alps have also observed a north dipping interface, which may penetrate the entire Adriatic crust [Adriatic Crust Interface (ACI)]. Deep crustal seismicity may be related to the ACI. We interpret the ACI as the currently active retroshear zone in the doubly vergent Alpine collisional belt. (C) 2004 Elsevier B.V. All rights reserved}, language = {en} } @article{HinzenWeberScherbaum2004, author = {Hinzen, K. G. and Weber, B. and Scherbaum, Frank}, title = {On the resolution of H/V measurements to determine sediment thickness, a case study across a normal fault in the Lower Rhine Embayment, Germany}, issn = {1363-2469}, year = {2004}, abstract = {In recent years, H/V measurements have been increasingly used to map the thickness of sediment fill in sedimentary basins in the context of seismic hazard assessment. 
This parameter is believed to be an important proxy for the site effects in sedimentary basins (e.g. in the Los Angeles basin). Here we present the results of a test using this approach across an active normal fault in a structurally well known situation. Measurements on a 50 km long profile with 1 km station spacing clearly show a change in the frequency of the fundamental peak of H/V ratios with increasing thickness of the sediment layer in the eastern part of the Lower Rhine Embayment. Subsequently, a section of 10 km length across the Erft-Sprung system, a normal fault with ca. 750 m vertical offset, was measured with a station distance of 100 m. Frequencies of the first and second peaks and the first trough in the H/V spectra are used in a simple resonance model to estimate depths of the bedrock. While the frequency of the first peak shows a large scatter for sediment depths larger than ca. 500 m, the frequency of the first trough follows the changing thickness of the sediments across the fault. The lateral resolution is in the range of the station distance of 100 m. A power law for the depth dependence of the S-wave velocity derived from down hole measurements in an earlier study [Budny, 1984] and power laws inverted from dispersion analysis of micro array measurements [Scherbaum et al., 2002] agree with the results from the H/V ratios of this study}, language = {en} } @article{MalischewskyScherbaum2004, author = {Malischewsky, Peter G. and Scherbaum, Frank}, title = {Love's formula and {H/V}-ratio (ellipticity) of {Rayleigh} waves}, issn = {0165-2125}, year = {2004}, abstract = {The ellipticity of Rayleigh surface waves, which is an important parameter characterizing the propagation medium, is studied for several models with increasing complexity. While the main focus lies on theory, practical implications of the use of the horizontal to vertical component ratio (H/V-ratio) to study the subsurface structure are considered as well.
Love's approximation of the ellipticity for an incompressible layer over an incompressible half-space is critically discussed especially concerning its applicability for different impedance contrasts. The main result is an analytically exact formula of H/V for a 2-layer model of compressible media, which is a generalization of Love's formula. It turns out that for a limited range of models Love's approximation can be used also in the general case. (C) 2003 Elsevier B.V. All rights reserved}, language = {en} } @article{FalsaperlaWassermannScherbaum2002, author = {Falsaperla, Susanna and Wassermann, Joachim and Scherbaum, Frank}, title = {Polarization analyses of broadband seismic data recorded on {Stromboli} Volcano ({Italy}) from 1996 to 1999}, doi = {10.1029/2001GL014300}, year = {2002}, language = {en} } @article{VogelRiggelsenKorupetal.2014, author = {Vogel, Kristin and Riggelsen, Carsten and Korup, Oliver and Scherbaum, Frank}, title = {{Bayesian} network learning for natural hazard analyses}, series = {Natural Hazards and Earth System Sciences}, volume = {14}, journal = {Natural Hazards and Earth System Sciences}, number = {9}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1561-8633}, doi = {10.5194/nhess-14-2605-2014}, pages = {2605--2626}, year = {2014}, abstract = {Modern natural hazards research requires dealing with several uncertainties that arise from limited process knowledge, measurement errors, censored and incomplete observations, and the intrinsic randomness of the governing processes. Nevertheless, deterministic analyses are still widely used in quantitative hazard assessments despite the pitfall of misestimating the hazard and any ensuing risks. In this paper we show that Bayesian networks offer a flexible framework for capturing and expressing a broad range of uncertainties encountered in natural hazard assessments.
Although Bayesian networks are well studied in theory, their application to real-world data is far from straightforward, and requires specific tailoring and adaptation of existing algorithms. We offer suggestions as to how to tackle frequently arising problems in this context and mainly concentrate on the handling of continuous variables, incomplete data sets, and the interaction of both. By way of three case studies from earthquake, flood, and landslide research, we demonstrate the method of data-driven Bayesian network learning, and showcase the flexibility, applicability, and benefits of this approach. Our results offer fresh and partly counterintuitive insights into well-studied multivariate problems of earthquake-induced ground motion prediction, accurate flood damage quantification, and spatially explicit landslide prediction at the regional scale. In particular, we highlight how Bayesian networks help to express information flow and independence assumptions between candidate predictors. Such knowledge is pivotal in providing scientists and decision makers with well-informed strategies for selecting adequate predictor variables for quantitative natural hazard assessments.}, language = {en} } @article{HinzenReamerScherbaum2013, author = {Hinzen, Klaus-G. and Reamer, Sharon K. and Scherbaum, Frank}, title = {Slow {Fourier} transform}, series = {Seismological Research Letters}, volume = {84}, journal = {Seismological Research Letters}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0895-0695}, doi = {10.1785/0220120139}, pages = {251--257}, year = {2013}, language = {en} } @article{KuehnScherbaumRiggelsen2009, author = {K{\"u}hn, Nicolas M.
and Scherbaum, Frank and Riggelsen, Carsten}, title = {Deriving empirical ground-motion models : balancing data constraints and physical assumptions to optimize prediction capability}, issn = {0037-1106}, doi = {10.1785/0120080136}, year = {2009}, abstract = {Empirical ground-motion models used in seismic hazard analysis are commonly derived by regression of observed ground motions against a chosen set of predictor variables. Commonly, the model building process is based on residual analysis and/or expert knowledge and/or opinion, while the quality of the model is assessed by the goodness-of-fit to the data. Such an approach, however, bears no immediate relation to the predictive power of the model and with increasing complexity of the models is increasingly susceptible to the danger of overfitting. Here, a different, primarily data-driven method for the development of ground-motion models is proposed that makes use of the notion of generalization error to counteract the problem of overfitting. Generalization error directly estimates the average prediction error on data not used for the model generation and, thus, is a good criterion to assess the predictive capabilities of a model. The approach taken here makes only few a priori assumptions. At first, peak ground acceleration and response spectrum values are modeled by flexible, nonphysical functions (polynomials) of the predictor variables. The inclusion of a particular predictor and the order of the polynomials are based on minimizing generalization error. The approach is illustrated for the next generation of ground-motion attenuation dataset. The resulting model is rather complex, comprising 48 parameters, but has considerably lower generalization error than functional forms commonly used in ground-motion models. The model parameters have no physical meaning, but a visual interpretation is possible and can reveal relevant characteristics of the data, for example, the Moho bounce in the distance scaling. 
In a second step, the regression model is approximated by an equivalent stochastic model, making it physically interpretable. The resulting resolvable stochastic model parameters are comparable to published models for western North America. In general, for large datasets generalization error minimization provides a viable method for the development of empirical ground-motion models.}, language = {en} } @article{DelavaudScherbaumKuehnetal.2009, author = {Delavaud, Elise and Scherbaum, Frank and K{\"u}hn, Nicolas and Riggelsen, Carsten}, title = {Information-theoretic selection of ground-motion prediction equations for seismic hazard analysis : an applicability study using {Californian} data}, issn = {0037-1106}, doi = {10.1785/0120090055}, year = {2009}, abstract = {Considering the increasing number and complexity of ground-motion prediction equations available for seismic hazard assessment, there is a definite need for an efficient, quantitative, and robust method to select and rank these models for a particular region of interest. In a recent article, Scherbaum et al. (2009) have suggested an information-theoretic approach for this purpose that overcomes several shortcomings of earlier attempts at using data-driven ground-motion prediction equation selection procedures. The results of their theoretical study provide evidence that in addition to observed response spectra, macroseismic intensity data might be useful for model selection and ranking. We present here an applicability study for this approach using response spectra and macroseismic intensities from eight Californian earthquakes. A total of 17 ground-motion prediction equations, from different regions, for response spectra, combined with the equation of Atkinson and Kaka (2007) for macroseismic intensities are tested for their relative performance. The resulting data-driven rankings show that the models that best estimate ground motion in California are, as one would expect, Californian and western U. S.
models, while some European models also perform fairly well. Moreover, the model performance appears to be strongly dependent on both distance and frequency. The relative information of intensity versus response spectral data is also explored. The strong correlation we obtain between intensity-based rankings and spectral-based ones demonstrates the great potential of macroseismic intensity data for model selection in the context of seismic hazard assessment.}, language = {en} } @article{FaenzaHainzlScherbaum2009, author = {Faenza, Licia and Hainzl, Sebastian and Scherbaum, Frank}, title = {Statistical analysis of the {Central-Europe} seismicity}, issn = {0040-1951}, doi = {10.1016/j.tecto.2008.04.030}, year = {2009}, abstract = {The aim of this paper is to characterize the spatio-temporal distribution of Central-Europe seismicity. Specifically, by using a non-parametric statistical approach, the proportional hazard model, leading to an empirical estimation of the hazard function, we provide some constraints on the time behavior of earthquake generation mechanisms. The results indicate that the most conspicuous characteristic of M-w 4.0+ earthquakes is a temporal clustering lasting a couple of years. This suggests that the probability of occurrence increases immediately after a previous event. After a few years, the process becomes almost time independent. Furthermore, we investigate the cluster properties of the seismicity of Central-Europe, by comparing the obtained result with the one of synthetic catalogs generated by the epidemic type aftershock sequences (ETAS) model, which previously has been successfully applied for short term clustering. Our results indicate that the ETAS is not well suited to describe the seismicity as a whole, while it is able to capture the features of the short-term behaviour.
Remarkably, similar results have been previously found for Italy using a higher magnitude threshold.}, language = {en} } @article{WassermannOhrnbergerScherbaumetal.1998, author = {Wassermann, Joachim and Ohrnberger, Matthias and Scherbaum, Frank and Gossler, J. and Zschau, Jochen}, title = {Kontinuierliche seismologische Netz- und Arraymessungen am Dekadenvulkan Merapi (Java, Indonesien) : ein Zwischenres{\"u}mee = Continuous measurements at Merapi volcano (Java, Indonesia) using a network of small-scale seismograph arrays}, issn = {0947-1944}, year = {1998}, language = {de} }