@misc{ZaliReinKruegeretal.2023, author = {Zali, Zahra and Rein, Teresa and Kr{\"u}ger, Frank and Ohrnberger, Matthias and Scherbaum, Frank}, title = {Ocean bottom seismometer (OBS) noise reduction from horizontal and vertical components using harmonic-percussive separation algorithms}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1320}, issn = {1866-8372}, doi = {10.25932/publishup-58882}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-588828}, pages = {15}, year = {2023}, abstract = {Records from ocean bottom seismometers (OBSs) are highly contaminated by noise, which is much stronger compared to data from most land stations, especially on the horizontal components. As a consequence, the high energy of the oceanic noise at frequencies below 1 Hz considerably complicates the analysis of the teleseismic earthquake signals recorded by OBSs. Previous studies suggested different approaches to remove low-frequency noises from OBS recordings but mainly focused on the vertical component. The records of horizontal components, which are crucial for the application of many methods in passive seismological analysis of body and surface waves, could not be much improved in the teleseismic frequency band. Here we introduce a noise reduction method, which is derived from the harmonic-percussive separation algorithms used in Zali et al. (2021), in order to separate long-lasting narrowband signals from broadband transients in the OBS signal. This leads to significant noise reduction of OBS records on both the vertical and horizontal components and increases the earthquake signal-to-noise ratio (SNR) without distortion of the broadband earthquake waveforms. This is demonstrated through tests with synthetic data. Both SNR and cross-correlation coefficients showed significant improvements for different realistic noise realizations. The application of denoised signals in surface wave analysis and receiver functions is discussed through tests with synthetic and real data.}, language = {en} } @article{ZaliReinKruegeretal.2023, author = {Zali, Zahra and Rein, Teresa and Kr{\"u}ger, Frank and Ohrnberger, Matthias and Scherbaum, Frank}, title = {Ocean bottom seismometer (OBS) noise reduction from horizontal and vertical components using harmonic-percussive separation algorithms}, series = {Solid earth}, volume = {14}, journal = {Solid earth}, number = {2}, publisher = {Copernicus Publ.}, address = {G{\"o}ttingen}, issn = {1869-9529}, doi = {10.5194/se-14-181-2023}, pages = {181 -- 195}, year = {2023}, abstract = {Records from ocean bottom seismometers (OBSs) are highly contaminated by noise, which is much stronger compared to data from most land stations, especially on the horizontal components. As a consequence, the high energy of the oceanic noise at frequencies below 1 Hz considerably complicates the analysis of the teleseismic earthquake signals recorded by OBSs. Previous studies suggested different approaches to remove low-frequency noises from OBS recordings but mainly focused on the vertical component. The records of horizontal components, which are crucial for the application of many methods in passive seismological analysis of body and surface waves, could not be much improved in the teleseismic frequency band. 
Here we introduce a noise reduction method, which is derived from the harmonic-percussive separation algorithms used in Zali et al. (2021), in order to separate long-lasting narrowband signals from broadband transients in the OBS signal. This leads to significant noise reduction of OBS records on both the vertical and horizontal components and increases the earthquake signal-to-noise ratio (SNR) without distortion of the broadband earthquake waveforms. This is demonstrated through tests with synthetic data. Both SNR and cross-correlation coefficients showed significant improvements for different realistic noise realizations. The application of denoised signals in surface wave analysis and receiver functions is discussed through tests with synthetic and real data.}, language = {en} } @article{ZaliOhrnbergerScherbaumetal.2021, author = {Zali, Zahra and Ohrnberger, Matthias and Scherbaum, Frank and Cotton, Fabrice and Eibl, Eva P. S.}, title = {Volcanic tremor extraction and earthquake detection using music information retrieval algorithms}, series = {Seismological research letters}, volume = {92}, journal = {Seismological research letters}, number = {6}, publisher = {Seismological Society of America}, address = {Boulder, Colo.}, issn = {0895-0695}, doi = {10.1785/0220210016}, pages = {3668 -- 3681}, year = {2021}, abstract = {Volcanic tremor signals are usually observed before or during volcanic eruptions and must be monitored to evaluate the volcanic activity. A challenge in studying seismic signals of volcanic origin is the coexistence of transient signal swarms and long-lasting volcanic tremor signals. Separating transient events from volcanic tremors can, therefore, contribute to improving upon our understanding of the underlying physical processes. Exploiting the idea of harmonic-percussive separation in musical signal processing, we develop a method to extract the harmonic volcanic tremor signals and to detect transient events from seismic recordings. Based on the similarity properties of spectrogram frames in the time-frequency domain, we decompose the signal into two separate spectrograms representing repeating (harmonic) and nonrepeating (transient) patterns, which correspond to volcanic tremor signals and earthquake signals, respectively. We reconstruct the harmonic tremor signal in the time domain from the complex spectrogram of the repeating pattern by only considering the phase components for the frequency range in which the tremor amplitude spectrum is significantly contributing to the energy of the signal. The reconstructed signal is, therefore, a clean tremor signal without transient events. Furthermore, we derive a characteristic function suitable for the detection of transient events (e.g., earthquakes) by integrating amplitudes of the nonrepeating spectrogram over frequency at each time frame. Considering transient events like earthquakes, 78\% of the events are detected for signal-to-noise ratio = 0.1 in our semisynthetic tests. In addition, we compared the number of detected earthquakes using our method for one month of continuous data recorded during the Holuhraun 2014-2015 eruption in Iceland with the bulletin presented in Agustsdottir et al. (2019). Our single station event detection algorithm identified 84\% of the bulletin events. Moreover, we detected a total of 12,619 events, which is more than twice the number of the bulletin events.}, language = {en} } @article{WeberAbuAyyashAbueladasetal.2004, author = {Weber, Michael H. 
and Abu-Ayyash, Khalil and Abueladas, Abdel-Rahman and Agnon, Amotz and Al-Amoush, H. and Babeyko, Andrey and Bartov, Yosef and Baumann, M. and Ben-Avraham, Zvi and Bock, G{\"u}nter and Bribach, Jens and El-Kelani, R. and Forster, A. and F{\"o}rster, Hans-J{\"u}rgen and Frieslander, U. and Garfunkel, Zvi and Grunewald, Steffen and Gotze, Hans-J{\"u}rgen and Haak, Volker and Haberland, Christian and Hassouneh, Mohammed and Helwig, S. and Hofstetter, Alfons and Jackel, K. H. and Kesten, Dagmar and Kind, Rainer and Maercklin, Nils and Mechie, James and Mohsen, Amjad and Neubauer, F. M. and Oberh{\"a}nsli, Roland and Qabbani, I. and Ritter, O. and Rumpker, G. and Rybakov, M. and Ryberg, Trond and Scherbaum, Frank and Schmidt, J. and Schulze, A. and Sobolev, Stephan Vladimir and Stiller, M. and Th,}, title = {The crustal structure of the Dead Sea Transform}, year = {2004}, abstract = {To address one of the central questions of plate tectonics-How do large transform systems work and what are their typical features?-seismic investigations across the Dead Sea Transform (DST), the boundary between the African and Arabian plates in the Middle East, were conducted for the first time. A major component of these investigations was a combined reflection/ refraction survey across the territories of Palestine, Israel and Jordan. The main results of this study are: (1) The seismic basement is offset by 3-5 km under the DST, (2) The DST cuts through the entire crust, broadening in the lower crust, (3) Strong lower crustal reflectors are imaged only on one side of the DST, (4) The seismic velocity sections show a steady increase in the depth of the crust-mantle transition (Moho) from 26 km at the Mediterranean to 39 km under the Jordan highlands, with only a small but visible, asymmetric topography of the Moho under the DST. These observations can be linked to the left-lateral movement of 105 km of the two plates in the last 17 Myr, accompanied by strong deformation within a narrow zone cutting through the entire crust. Comparing the DST and the San Andreas Fault (SAF) system, a strong asymmetry in subhorizontal lower crustal reflectors and a deep reaching deformation zone both occur around the DST and the SAF. The fact that such lower crustal reflectors and deep deformation zones are observed in such different transform systems suggests that these structures are possibly fundamental features of large transform plate boundaries}, language = {en} } @article{VogelRiggelsenKorupetal.2014, author = {Vogel, Kristin and Riggelsen, Carsten and Korup, Oliver and Scherbaum, Frank}, title = {Bayesian network learning for natural hazard analyses}, series = {Natural hazards and earth system sciences}, volume = {14}, journal = {Natural hazards and earth system sciences}, number = {9}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1561-8633}, doi = {10.5194/nhess-14-2605-2014}, pages = {2605 -- 2626}, year = {2014}, abstract = {Modern natural hazards research requires dealing with several uncertainties that arise from limited process knowledge, measurement errors, censored and incomplete observations, and the intrinsic randomness of the governing processes. Nevertheless, deterministic analyses are still widely used in quantitative hazard assessments despite the pitfall of misestimating the hazard and any ensuing risks. In this paper we show that Bayesian networks offer a flexible framework for capturing and expressing a broad range of uncertainties encountered in natural hazard assessments. 
Although Bayesian networks are well studied in theory, their application to real-world data is far from straightforward, and requires specific tailoring and adaptation of existing algorithms. We offer suggestions as to how to tackle frequently arising problems in this context and mainly concentrate on the handling of continuous variables, incomplete data sets, and the interaction of both. By way of three case studies from earthquake, flood, and landslide research, we demonstrate the method of data-driven Bayesian network learning, and showcase the flexibility, applicability, and benefits of this approach. Our results offer fresh and partly counterintuitive insights into well-studied multivariate problems of earthquake-induced ground motion prediction, accurate flood damage quantification, and spatially explicit landslide prediction at the regional scale. In particular, we highlight how Bayesian networks help to express information flow and independence assumptions between candidate predictors. Such knowledge is pivotal in providing scientists and decision makers with well-informed strategies for selecting adequate predictor variables for quantitative natural hazard assessments.}, language = {en} } @article{TranThanhTuanScherbaumMalischewsky2011, author = {Tran Thanh Tuan, and Scherbaum, Frank and Malischewsky, Peter G.}, title = {On the relationship of peaks and troughs of the ellipticity (H/V) of Rayleigh waves and the transmission response of single layer over half-space models}, series = {Geophysical journal international}, volume = {184}, journal = {Geophysical journal international}, number = {2}, publisher = {Wiley-Blackwell}, address = {Malden}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2010.04863.x}, pages = {793 -- 800}, year = {2011}, abstract = {One of the key challenges in the context of local site effect studies is the determination of frequencies where the shakeability of the ground is enhanced. In this context, the H/V technique has become increasingly popular and peak frequencies of H/V spectral ratio are sometimes interpreted as resonance frequencies of the transmission response. In the present study, assuming that Rayleigh surface wave is dominant in H/V spectral ratio, we analyse theoretically under which conditions this may be justified and when not. We focus on 'layer over half-space' models which, although seemingly simple, capture many aspects of local site effects in real sedimentary structures. Our starting point is the ellipticity of Rayleigh waves. We use the exact formula of the H/V-ratio presented by Malischewsky \& Scherbaum (2004) to investigate the main characteristics of peak and trough frequencies. We present a simple formula illustrating if and where H/V-ratio curves have sharp peaks in dependence of model parameters. In addition, we have constructed a map, which demonstrates the relation between the H/V-peak frequency and the peak frequency of the transmission response in the domain of the layer's Poisson ratio and the impedance contrast. Finally, we have derived maps showing the relationship between the H/V-peak and trough frequency and key parameters of the model such as impedance contrast. These maps are seen as diagnostic tools, which can help to guide the interpretation of H/V spectral ratio diagrams in the context of site effect studies.}, language = {en} } @article{ThomasWeberWicksetal.1999, author = {Thomas, Ch. and Weber, Michael H. 
and Wicks, Chuck and Scherbaum, Frank}, title = {Small scatterers in the lower mantle observed at German broadband arrays}, year = {1999}, language = {en} } @article{ThomasIgelWeberetal.2000, author = {Thomas, Ch. and Igel, Heiner and Weber, Michael H. and Scherbaum, Frank}, title = {Acoustic simulation of P-wave propagation in a heterogeneous spherical earth : numerical method and application to precursor waves to PKPdf}, year = {2000}, language = {en} } @article{SuryantoIgelWassermannetal.2006, author = {Suryanto, Wiwit and Igel, Heiner and Wassermann, Joachim and Cochard, Alain and Schuberth, B. S. A. and Vollmer, Daniel and Scherbaum, Frank and Schreiber, U. and Velikoseltsev, A.}, title = {First comparison of array-derived rotational ground motions with direct ring laser measurements}, series = {Bulletin of the Seismological Society of America}, volume = {96}, journal = {Bulletin of the Seismological Society of America}, number = {6}, publisher = {GeoScienceWorld}, address = {Alexandria, Va.}, issn = {0037-1106}, doi = {10.1785/0120060004}, pages = {2059 -- 2071}, year = {2006}, abstract = {Recently, ring laser technology has provided the first consistent observations of rotational ground motions around a vertical axis induced by earthquakes. "Consistent," in this context, implies that the observed waveforms and amplitudes are compatible with collocated recordings of translational ground motions. In particular, transverse accelerations should be in phase with rotation rate and their ratio proportional to local horizontal phase velocity assuming plane-wave propagation. The ring laser installed at the Fundamental station Wettzell in the Bavarian Forest, Southeast Germany, is recording the rotation rate around a vertical axis, theoretically a linear combination of the space derivatives of the horizontal components of motion. This suggests that, in principle, rotation can be derived from seismic-array experiments by "finite differencing." This has been attempted previously in several studies; however, the accuracy of these observations could never be tested in the absence of direct measurements. We installed a double cross-shaped array of nine stations from December 2003 to March 2004 around the ring laser instrument and observed several large earthquakes on both the ring laser and the seismic array. Here we present for the first time a comparison of array-derived rotations with direct measurements of rotations for ground motions induced by the M 6.3 Al Hoceima, Morocco, earthquake of 24 February 2004. With complete 3D synthetic seismograms calculated for this event we show that even low levels of noise may considerably influence the accuracy of the array-derived rotations when the minimum number of required stations (three) is used. 
Nevertheless, when using all nine stations, the overall fit between direct and array-derived measurements is surprisingly good (maximum correlation coefficient of 0.94).}, language = {en} } @article{SchroeterKreibichVogeletal.2014, author = {Schroeter, Kai and Kreibich, Heidi and Vogel, Kristin and Riggelsen, Carsten and Scherbaum, Frank and Merz, Bruno}, title = {How useful are complex flood damage models?}, series = {Water resources research}, volume = {50}, journal = {Water resources research}, number = {4}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1002/2013WR014396}, pages = {3378 -- 3395}, year = {2014}, abstract = {We investigate the usefulness of complex flood damage models for predicting relative damage to residential buildings in a spatial and temporal transfer context. We apply eight different flood damage models to predict relative building damage for five historic flood events in two different regions of Germany. Model complexity is measured in terms of the number of explanatory variables which varies from 1 variable up to 10 variables which are singled out from 28 candidate variables. Model validation is based on empirical damage data, whereas observation uncertainty is taken into consideration. The comparison of model predictive performance shows that additional explanatory variables besides the water depth improve the predictive capability in a spatial and temporal transfer context, i.e., when the models are transferred to different regions and different flood events. Concerning the trade-off between predictive capability and reliability, the model structure seems more important than the number of explanatory variables. Among the models considered, the reliability of Bayesian network-based predictions in space-time transfer is larger than for the remaining models, and the uncertainties associated with damage predictions are reflected more completely.}, language = {en} } @article{SchmelzbachScherbaumTronickeetal.2011, author = {Schmelzbach, C. and Scherbaum, Frank and Tronicke, Jens and Dietrich, P.}, title = {Bayesian frequency-domain blind deconvolution of ground-penetrating radar data}, series = {Journal of applied geophysics}, volume = {75}, journal = {Journal of applied geophysics}, number = {4}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-9851}, doi = {10.1016/j.jappgeo.2011.08.010}, pages = {615 -- 630}, year = {2011}, abstract = {Enhancing the resolution and accuracy of surface ground-penetrating radar (GPR) reflection data by inverse filtering to recover a zero-phased band-limited reflectivity image requires a deconvolution technique that takes the mixed-phase character of the embedded wavelet into account. In contrast, standard stochastic deconvolution techniques assume that the wavelet is minimum phase and, hence, often meet with limited success when applied to GPR data. We present a new general-purpose blind deconvolution algorithm for mixed-phase wavelet estimation and deconvolution that (1) uses the parametrization of a mixed-phase wavelet as the convolution of the wavelet's minimum-phase equivalent with a dispersive all-pass filter, (2) includes prior information about the wavelet to be estimated in a Bayesian framework, and (3) relies on the assumption of a sparse reflectivity. 
Solving the normal equations using the data autocorrelation function provides an inverse filter that optimally removes the minimum-phase equivalent of the wavelet from the data, which leaves traces with a balanced amplitude spectrum but distorted phase. To compensate for the remaining phase errors, we invert in the frequency domain for an all-pass filter thereby taking advantage of the fact that the action of the all-pass filter is exclusively contained in its phase spectrum. A key element of our algorithm and a novelty in blind deconvolution is the inclusion of prior information that allows resolving ambiguities in polarity and timing that cannot be resolved using the sparseness measure alone. We employ a global inversion approach for non-linear optimization to find the all-pass filter phase values for each signal frequency. We tested the robustness and reliability of our algorithm on synthetic data with different wavelets, 1-D reflectivity models of different complexity, varying levels of added noise, and different types of prior information. When applied to realistic synthetic 2-D data and 2-D field data, we obtain images with increased temporal resolution compared to the results of standard processing.}, language = {en} } @article{SchmedesHainzlReameretal.2005, author = {Schmedes, J. and Hainzl, Sebastian and Reamer, S. K. and Scherbaum, Frank and Hinzen, K. G.}, title = {Moment release in the Lower Rhine Embayment, Germany : seismological perspective of the deformation process}, issn = {0956-540X}, year = {2005}, abstract = {An important task of seismic hazard assessment consists of estimating the rate of seismic moment release which is correlated to the rate of tectonic deformation and the seismic coupling. However, the estimations of deformation depend on the type of information utilized (e.g. geodetic, geological, seismic) and include large uncertainties. We therefore estimate the deformation rate in the Lower Rhine Embayment (LRE), Germany, using an integrated approach where the uncertainties have been systematically incorporated. On the basis of a new homogeneous earthquake catalogue we initially determine the frequency-magnitude distribution by statistical methods. In particular, we focus on an adequate estimation of the upper bound of the Gutenberg-Richter relation and demonstrate the importance of additional palaeoseismological information. The integration of seismological and geological information yields a probability distribution of the upper bound magnitude. Using this distribution together with the distribution of Gutenberg-Richter a and b values, we perform Monte Carlo simulations to derive the seismic moment release as a function of the observation time. The seismic moment release estimated from synthetic earthquake catalogues with short catalogue length is found to systematically underestimate the long-term moment rate which can be analytically determined. The moment release recorded in the LRE over the last 250 yr is found to be in good agreement with the probability distribution resulting from the Monte Carlo simulations. Furthermore, the long-term distribution is within its uncertainties consistent with the moment rate derived by geological measurements, indicating an almost complete seismic coupling in this region. By means of Kostrov's formula, we additionally calculate the full deformation rate tensor using the distribution of known focal mechanisms in LRE. 
Finally, we use the same approach to calculate the seismic moment and the deformation rate for two subsets of the catalogue corresponding to the east- and west-dipping faults, respectively.}, language = {en} } @article{ScherbaumWeberBorm2000, author = {Scherbaum, Frank and Weber, Michael H. and Borm, G.}, title = {The deep seismological lab in the KTB borehole: Status 1999}, year = {2000}, language = {en} } @article{ScherbaumSchmidtke2001, author = {Scherbaum, Frank and Schmidtke, E.}, title = {Digital seismology tutor}, year = {2001}, language = {en} } @article{ScherbaumSchmedesCotton2004, author = {Scherbaum, Frank and Schmedes, J. and Cotton, Fabrice}, title = {On the conversion of source-to-site distance measures for extended earthquake source models}, issn = {0037-1106}, year = {2004}, abstract = {One of the major challenges in engineering seismology is the reliable prediction of site-specific ground motion for particular earthquakes, observed at specific distances. For larger events, a special problem arises, at short distances, with the source-to-site distance measure, because distance metrics based on a point-source model are no longer appropriate. As a consequence, different attenuation relations differ in the distance metric that they use. In addition to being a source of confusion, this causes problems to quantitatively compare or combine different ground-motion models; for example, in the context of Probabilistic Seismic Hazard Assessment, in cases where ground-motion models with different distance metrics occupy neighboring branches of a logic tree. In such a situation, very crude assumptions about source sizes and orientations often have to be used to be able to derive an estimate of the particular metric required. Even if this solves the problem of providing a number to put into the attenuation relation, a serious problem remains. When converting distance measures, the corresponding uncertainties map onto the estimated ground motions according to the laws of error propagation. To make matters worse, conversion of distance metrics can cause the uncertainties of the adapted ground-motion model to become magnitude and distance dependent, even if they are not in the original relation. To be able to treat this problem quantitatively, the variability increase caused by the distance metric conversion has to be quantified. For this purpose, we have used well established scaling laws to determine explicit distance conversion relations using regression analysis on simulated data. We demonstrate that, for all practical purposes, most popular distance metrics can be related to the Joyner-Boore distance using models based on gamma distributions to express the shape of some "residual function." The functional forms are magnitude and distance dependent and are expressed as polynomials. 
We compare the performance of these relations with manually derived individual distance estimates for the Landers, the Imperial Valley, and the Chi-Chi earthquakes.}, language = {en} } @book{ScherbaumMzhavanadzeArometal.2020, author = {Scherbaum, Frank and Mzhavanadze, Nana and Arom, Simha and Rosenzweig, Sebastian and M{\"u}ller, Meinard}, title = {Tonal Organization of the Erkomaishvili Dataset: Pitches, Scales, Melodies and Harmonies}, series = {Computational Analysis Of Traditional Georgian Vocal Music}, journal = {Computational Analysis Of Traditional Georgian Vocal Music}, number = {1}, editor = {Scherbaum, Frank}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2702-2641}, doi = {10.25932/publishup-47614}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476141}, publisher = {Universit{\"a}t Potsdam}, pages = {64}, year = {2020}, abstract = {In this study we examine the tonal organization of a series of recordings of liturgical chants, sung in 1966 by the Georgian master singer Artem Erkomaishvili. This dataset is the oldest corpus of Georgian chants from which the time-synchronous F0-trajectories for all three voices have been reliably determined (M{\"u}ller et al. 2017). It is therefore of outstanding importance for the understanding of the tuning principles of traditional Georgian vocal music. The aim of the present study is to use various computational methods to analyze what these recordings can contribute to the ongoing scientific dispute about traditional Georgian tuning systems. Starting point for the present analysis is the re-release of the original audio data together with estimated fundamental frequency (F0) trajectories for each of the three voices, beat annotations, and digital scores (Rosenzweig et al. 2020). We present synoptic models for the pitch and the harmonic interval distributions, which are the first of such models for which the complete Erkomaishvili dataset was used. We show that these distributions can be expressed very compactly as Gaussian mixture models, anchored on discrete sets of pitch or interval values for the pitch and interval distributions, respectively. As part of our study we demonstrate that these pitch values, which we refer to as scale pitches, and which are determined as the mean values of the Gaussian mixture elements, define the scale degrees of the melodic sound scales which build the skeleton of Artem Erkomaishvili's intonation. The observation of consistent pitch bending of notes in melodic phrases, which appear in identical form in a group of chants, as well as the observation of harmonically driven intonation adjustments, which are clearly documented for all pure harmonic intervals, demonstrate that Artem Erkomaishvili intentionally deviates from the scale pitch skeleton quite freely. As a central result of our study, we prove that this melodic freedom is always constrained by the attracting influence of the scale pitches. Deviations of the F0-values of individual note events from the scale pitches at one instance of time are compensated for in the subsequent melodic steps. This suggests a deviation-compensation mechanism at the core of Artem Erkomaishvili's melody generation, which clearly honors the scales but still allows for a large degree of melodic flexibility. This model, which summarizes all partial aspects of our analysis, is consistent with the melodic scale models derived from the observed pitch distributions, as well as with the melodic and harmonic interval distributions. 
In addition to the tangible results of our work, we believe that our work has general implications for the determination of tuning models from audio data, in particular for non-tempered music.}, language = {en} } @article{ScherbaumKuehn2011, author = {Scherbaum, Frank and K{\"u}hn, Nicolas M.}, title = {Logic tree branch weights and probabilities summing up to one is not enough}, series = {Earthquake spectra : the professional journal of the Earthquake Engineering Research Institute}, volume = {27}, journal = {Earthquake spectra : the professional journal of the Earthquake Engineering Research Institute}, number = {4}, publisher = {Earthquake Engineering Research Institute}, address = {Oakland}, issn = {8755-2930}, doi = {10.1193/1.3652744}, pages = {1237 -- 1251}, year = {2011}, abstract = {Logic trees have become the most popular tool for the quantification of epistemic uncertainties in probabilistic seismic hazard assessment (PSHA). In a logic-tree framework, epistemic uncertainty is expressed in a set of branch weights, by which an expert or an expert group assigns degree-of-belief values to the applicability of the corresponding branch models. Despite the popularity of logic-trees, however, one finds surprisingly few clear commitments to what logic-tree branch weights are assumed to be (even by hazard analysts designing logic trees). In the present paper we argue that it is important for hazard analysts to accept the probabilistic framework from the beginning for assigning logic-tree branch weights. In other words, to accept that logic-tree branch weights are probabilities in the axiomatic sense, independent of one's preference for the philosophical interpretation of probabilities. We demonstrate that interpreting logic-tree branch weights merely as a numerical measure of "model quality," which are then subsequently normalized to sum up to unity, will with increasing number of models inevitably lead to an apparent insensitivity of hazard curves on the logic-tree branch weights, which may even be mistaken for robustness of the results. Finally, we argue that assigning logic-tree branch weights in a sequential fashion may improve their logical consistency.}, language = {en} } @article{ScherbaumKruegerWeber1997, author = {Scherbaum, Frank and Kr{\"u}ger, Frank and Weber, Michael H.}, title = {Double beam imaging : mapping lower mantle heterogeneities using combinations of source and receiver arrays}, year = {1997}, language = {en} } @article{ScherbaumDelavaudRiggelsen2009, author = {Scherbaum, Frank and Delavaud, Elise and Riggelsen, Carsten}, title = {Model selection in seismic hazard analysis : an information-theoretic perspective}, issn = {0037-1106}, doi = {10.1785/0120080347}, year = {2009}, abstract = {Although the methodological framework of probabilistic seismic hazard analysis is well established, the selection of models to predict the ground motion at the sites of interest remains a major challenge. Information theory provides a powerful theoretical framework that can guide this selection process in a consistent way. From an information- theoretic perspective, the appropriateness of models can be expressed in terms of their relative information loss (Kullback-Leibler distance) and hence in physically meaningful units (bits). In contrast to hypothesis testing, information-theoretic model selection does not require ad hoc decisions regarding significance levels nor does it require the models to be mutually exclusive and collectively exhaustive. 
The key ingredient, the Kullback-Leibler distance, can be estimated from the statistical expectation of log-likelihoods of observations for the models under consideration. In the present study, data-driven ground-motion model selection based on Kullback-Leibler-distance differences is illustrated for a set of simulated observations of response spectra and macroseismic intensities. Information theory allows for a unified treatment of both quantities. The application of Kullback-Leibler-distance based model selection to real data using the model generating data set for the Abrahamson and Silva (1997) ground-motion model demonstrates the superior performance of the information-theoretic perspective in comparison to earlier attempts at data-driven model selection (e.g., Scherbaum et al., 2004).}, language = {en} } @article{ScherbaumCottonStaedtke2006, author = {Scherbaum, Frank and Cotton, Fabrice and Staedtke, Helmut}, title = {The estimation of minimum-misfit stochastic models from empirical ground-motion prediction equations}, doi = {10.1785/0120050015}, year = {2006}, abstract = {In areas of moderate to low seismic activity there is commonly a lack of recorded strong ground motion. As a consequence, the prediction of ground motion expected for hypothetical future earthquakes is often performed by employing empirical models from other regions. In this context, Campbell's hybrid empirical approach (Campbell, 2003, 2004) provides a methodological framework to adapt ground-motion prediction equations to arbitrary target regions by using response spectral host-to-target-region-conversion filters. For this purpose, the empirical ground-motion prediction equation has to be quantified in terms of a stochastic model. The problem we address here is how to do this in a systematic way and how to assess the corresponding uncertainties. For the determination of the model parameters we use a genetic algorithm search. The stochastic model spectra were calculated by using a speed-optimized version of SMSIM (Boore, 2000). For most of the empirical ground-motion models, we obtain sets of stochastic models that match the empirical models within the full magnitude and distance ranges of their generating data sets fairly well. The overall quality of fit and the resulting model parameter sets strongly depend on the particular choice of the distance metric used for the stochastic model. We suggest the use of the hypocentral distance metric for the stochastic simulation of strong ground motion because it provides the lowest-misfit stochastic models for most empirical equations. This is in agreement with the results of two recent studies of hypocenter locations in finite-source models which indicate that hypocenters are often located close to regions of large slip (Mai et al., 2005; Manighetti et al., 2005). Because essentially all empirical ground-motion prediction equations contain data from different geographical regions, the model parameters corresponding to the lowest-misfit stochastic models cannot necessarily be expected to represent single, physically realizable host regions but to model the generating data sets in an average way. In addition, the differences between the lowest-misfit stochastic models and the empirical ground-motion prediction equation are strongly distance, magnitude, and frequency dependent, which, according to the laws of uncertainty propagation, will increase the variance of the corresponding hybrid empirical model predictions (Scherbaum et al., 2005). 
As a consequence, the selection of empirical ground-motion models for host-to-target-region conversions requires considerable judgment of the ground-motion analyst.}, language = {en} } @article{ScherbaumCottonSmit2004, author = {Scherbaum, Frank and Cotton, Fabrice and Smit, P.}, title = {On the use of response spectral-reference data for the selection and ranking of ground-motion models for seismic-hazard analysis in regions of moderate seismicity : the case of rock motion}, issn = {0037-1106}, year = {2004}, abstract = {The use of ground-motion-prediction equations to estimate ground shaking has become a very popular approach for seismic-hazard assessment, especially in the framework of a logic-tree approach. Owing to the large number of existing published ground-motion models, however, the selection and ranking of appropriate models for a particular target area often pose serious practical problems. Here we show how observed ground-motion records can help to guide this process in a systematic and comprehensible way. A key element in this context is a new, likelihood-based goodness-of-fit measure that has the property not only to quantify the model fit but also to measure in some degree how well the underlying statistical model assumptions are met. By design, this measure naturally scales between 0 and 1, with a value of 0.5 for a situation in which the model perfectly matches the sample distribution both in terms of mean and standard deviation. We have used it in combination with other goodness-of-fit measures to derive a simple classification scheme to quantify how well a candidate ground-motion-prediction equation models a particular set of observed-response spectra. This scheme is demonstrated to perform well in recognizing a number of popular ground-motion models from their rock-site-recording subsets. This indicates its potential for aiding the assignment of logic-tree weights in a consistent and reproducible way. We have applied our scheme to the border region of France, Germany, and Switzerland where the M-w 4.8 St. Die earthquake of 22 February 2003 in eastern France recently provided a small set of observed-response spectra. These records are best modeled by the ground-motion-prediction equation of Berge-Thierry et al. (2003), which is based on the analysis of predominantly European data. The fact that the Swiss model of Bay et al. (2003) is not able to model the observed records in an acceptable way may indicate general problems arising from the use of weak-motion data for strong-motion prediction.}
Furthermore, from this end-branch model perspective even the designers of the logic tree cannot directly tell what ground-motion scenarios most likely would result from their logic trees for a given earthquake at a particular distance, nor how uncertain these scenarios might be or how they would be affected by the choices of the hazard analyst. On the other hand, all this information is already implicitly present in the logic tree. Therefore, with the ground-motion perspective that we propose in the present article, we treat the ground-motion sections of a complete logic tree for seismic hazard as a single composite model representing the complete state-of-knowledge-and-belief of a particular analyst on ground motion in a particular target region. We implement this view by resampling the ground-motion models represented in the ground-motion sections of the logic tree by Monte Carlo simulation (separately for the median values and the sigma values) and then recombining the sets of simulated values in proportion to their logic-tree branch weights. The quantiles of this resampled composite model provide the hazard analyst and the decision maker with a simple, clear, and quantitative representation of the overall physical meaning of the ground-motion section of a logic tree and the accompanying epistemic uncertainty. Quantiles of the composite model also provide an easy way to analyze the sensitivities and uncertainties related to a given logic-tree model. We illustrate this for a composite ground-motion model for central Europe. Further potential fields of applications are seen wherever individual best estimates of ground motion have to be derived from a set of candidate models, for example, for hazard maps, sensitivity studies, or for modeling scenario earthquakes.}, language = {en} } @article{Scherbaum1997, author = {Scherbaum, Frank}, title = {Zero Phase FIR filters in digital seismic acquisition systems : blessing or curse}, year = {1997}, language = {en} } @book{Scherbaum2001, author = {Scherbaum, Frank}, title = {Of poles and zeros : fundamentals of digital seismology}, series = {Modern approaches in geophysics}, volume = {15}, journal = {Modern approaches in geophysics}, edition = {Rev. 2. ed., reprint with corr}, publisher = {Springer}, address = {Dordrecht}, isbn = {0-7923-6834-7}, pages = {265 p.}, year = {2001}, language = {en} } @misc{RoesslerHiemerBachetal.2009, author = {R{\"o}ßler, Dirk and Hiemer, Stephan and Bach, Christoph and Delavaud, Elise and Kr{\"u}ger, Frank and Ohrnberger, Matthias and Sauer, David and Scherbaum, Frank and Vollmer, Daniel}, title = {Small-aperture seismic array monitors Vogtland earthquake swarm in 2008/09}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29185}, year = {2009}, abstract = {The most recent intense earthquake swarm in the Vogtland lasted from 6 October 2008 until January 2009. Greatest magnitudes exceeded M3.5 several times in October, making it the greatest swarm since 1985/86. In contrast to the swarms in 1985 and 2000, seismic moment release was concentrated near swarm onset. Focal area and temporal evolution are similar to the swarm in 2000. Working hypothesis: uprising upper-mantle fluids trigger swarm earthquakes at a low stress level. To monitor the seismicity, the University of Potsdam operated a small-aperture seismic array at 10 km epicentral distance between 18 October 2008 and 18 March 2009. 
Consisting of 12 seismic stations and 3 additional microphones, the array is capable of detecting earthquakes from larger to very low magnitudes (M<-1) as well as associated air waves. We use array techniques to determine properties of the incoming wavefield: noise, direct P and S waves, and converted phases.}, language = {en} } @article{RungeScherbaumCurtisetal.2013, author = {Runge, Antonia K. and Scherbaum, Frank and Curtis, Andrew and Riggelsen, Carsten}, title = {An interactive tool for the elicitation of subjective probabilities in probabilistic seismic-hazard analysis}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {5}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120130026}, pages = {2862 -- 2874}, year = {2013}, abstract = {In probabilistic seismic-hazard analysis, epistemic uncertainties are commonly treated within a logic-tree framework in which the branch weights express the degree of belief of an expert in a set of models. For the calculation of the distribution of hazard curves, these branch weights represent subjective probabilities. A major challenge for experts is to provide logically consistent weight estimates (in the sense of Kolmogorov's axioms), to be aware of the multitude of heuristics, and to minimize the biases which affect human judgment under uncertainty. We introduce a platform-independent, interactive program enabling us to quantify, elicit, and transfer expert knowledge into a set of subjective probabilities by applying experimental design theory, following the approach of Curtis and Wood (2004). Instead of determining the set of probabilities for all models in a single step, the computer-driven elicitation process is performed as a sequence of evaluations of relative weights for small subsets of models. From these, the probabilities for the whole model set are determined as a solution of an optimization problem. The result of this process is a set of logically consistent probabilities together with a measure of confidence determined from the amount of conflicting information which is provided by the expert during the relative weighting process. We experiment with different scenarios simulating likely expert behaviors in the context of knowledge elicitation and show the impact this has on the results. The overall aim is to provide a smart elicitation technique, and our findings serve as a guide for practical applications.}, language = {en} } @article{RodriguezMarekRathjeBommeretal.2014, author = {Rodriguez-Marek, A. and Rathje, E. M. and Bommer, Julian J. and Scherbaum, Frank and Stafford, P. J.}, title = {Application of single-station sigma and site-response characterization in a probabilistic seismic-hazard analysis for a new nuclear site}, series = {Bulletin of the Seismological Society of America}, volume = {104}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120130196}, pages = {1601 -- 1619}, year = {2014}, abstract = {Aleatory variability in ground-motion prediction, represented by the standard deviation (sigma) of a ground-motion prediction equation, exerts a very strong influence on the results of probabilistic seismic-hazard analysis (PSHA). 
This is especially so at the low annual exceedance frequencies considered for nuclear facilities; in these cases, even small reductions in sigma can have a marked effect on the hazard estimates. Proper separation and quantification of aleatory variability and epistemic uncertainty can lead to defensible reductions in sigma. One such approach is the single-station sigma concept, which removes that part of sigma corresponding to repeatable site-specific effects. However, the site-to-site component must then be constrained by site-specific measurements or else modeled as epistemic uncertainty and incorporated into the modeling of site effects. The practical application of the single-station sigma concept, including the characterization of the dynamic properties of the site and the incorporation of site-response effects into the hazard calculations, is illustrated for a PSHA conducted at a rock site under consideration for the potential construction of a nuclear power plant.}, language = {en} } @article{RietbrockScherbaum1998, author = {Rietbrock, Andreas and Scherbaum, Frank}, title = {Crustal scattering at the KTB from a combined microearthquake and receiver analysis}, year = {1998}, language = {en} } @article{RietbrockScherbaum1998, author = {Rietbrock, Andreas and Scherbaum, Frank}, title = {The GIANT analysis system (Graphical Interaktive Aftershock Network Toolbox)}, year = {1998}, language = {en} } @article{OhrnbergerWassermannScherbaumetal.1999, author = {Ohrnberger, Matthias and Wassermann, J{\"u}rgen and Scherbaum, Frank and Budi, E. N. and Gossler, J.}, title = {Detection and classification of seismic signals of volcanic origin at Mt. Merapi (Indonesia)}, year = {1999}, language = {en} } @article{MussonToroCoppersmithetal.2005, author = {Musson, R. M. W. and Toro, G. R. and Coppersmith, Kevin J. and Bommer, Julian J. and Deichmann, N. and Bungum, Hilmar and Cotton, Fabrice and Scherbaum, Frank and Slejko, Dario and Abrahamson, Norman A.}, title = {Evaluating hazard results for Switzerland and how not to do it : a discussion of "Problems in the application of the SSHAC probability method for assessing earthquake hazards at Swiss nuclear power plants" by J-U Klugel}, year = {2005}, abstract = {The PEGASOS project was a major international seismic hazard study, one of the largest ever conducted anywhere in the world, to assess seismic hazard at four nuclear power plant sites in Switzerland. Before the report of this project has become publicly available, a paper attacking both methodology and results has appeared. Since the general scientific readership may have difficulty in assessing this attack in the absence of the report being attacked, we supply a response in the present paper. The bulk of the attack, besides some misconceived arguments about the role of uncertainties in seismic hazard analysis, is carried by some exercises that purport to be validation exercises. In practice, they are no such thing; they are merely independent sets of hazard calculations based on varying assumptions and procedures, often rather questionable, which come up with various different answers which have no particular significance. (C) 2005 Elsevier B.V. 
All rights reserved}, language = {en} } @article{MolkenthinScherbaumGriewanketal.2017, author = {Molkenthin, Christian and Scherbaum, Frank and Griewank, Andreas and Leovey, Hernan and Kucherenko, Sergei and Cotton, Fabrice}, title = {Derivative-Based Global Sensitivity Analysis: Upper Bounding of Sensitivities in Seismic-Hazard Assessment Using Automatic Differentiation}, series = {Bulletin of the Seismological Society of America}, volume = {107}, journal = {Bulletin of the Seismological Society of America}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120160185}, pages = {984 -- 1004}, year = {2017}, abstract = {Seismic-hazard assessment is of great importance within the field of engineering seismology. Nowadays, it is common practice to define future seismic demands using probabilistic seismic-hazard analysis (PSHA). Often it is neither obvious nor transparent how PSHA responds to changes in its inputs. In addition, PSHA relies on many uncertain inputs. Sensitivity analysis (SA) is concerned with the assessment and quantification of how changes in the model inputs affect the model response and how input uncertainties influence the distribution of the model response. Sensitivity studies are challenging primarily for computational reasons; hence, the development of efficient methods is of major importance. Powerful local (deterministic) methods widely used in other fields can make SA feasible, even for complex models with a large number of inputs; for example, automatic/algorithmic differentiation (AD)-based adjoint methods. Recently developed derivative-based global sensitivity measures can combine the advantages of such local SA methods with efficient sampling strategies facilitating quantitative global sensitivity analysis (GSA) for complex models. In our study, we propose and implement exactly this combination. It allows an upper bounding of the sensitivities involved in PSHA globally and, therefore, an identification of the noninfluential and the most important uncertain inputs. To the best of our knowledge, it is the first time that derivative-based GSA measures are combined with AD in practice. In addition, we show that first-order uncertainty propagation using the delta method can give satisfactory approximations of global sensitivity measures and allow a rough characterization of the model output distribution in the case of PSHA. An illustrative example is shown for the suggested derivative-based GSA of a PSHA that uses stochastic ground-motion simulations.}, language = {en} } @article{MolkenthinScherbaumGriewanketal.2015, author = {Molkenthin, Christian and Scherbaum, Frank and Griewank, Andreas and K{\"u}hn, Nicolas and Stafford, Peter J. and Leovey, Hernan}, title = {Sensitivity of Probabilistic Seismic Hazard Obtained by Algorithmic Differentiation: A Feasibility Study}, series = {Bulletin of the Seismological Society of America}, volume = {105}, journal = {Bulletin of the Seismological Society of America}, number = {3}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140294}, pages = {1810 -- 1822}, year = {2015}, abstract = {Probabilistic seismic-hazard analysis (PSHA) is the current tool of the trade used to estimate the future seismic demands at a site of interest. A modern PSHA represents a complex framework that combines different models with numerous inputs. 
It is important to understand and assess the impact of these inputs on the model output in a quantitative way. Sensitivity analysis is a valuable tool for quantifying changes of a model output as inputs are perturbed, identifying critical input parameters, and obtaining insight about the model behavior. Differential sensitivity analysis relies on calculating first-order partial derivatives of the model output with respect to its inputs; however, obtaining the derivatives of complex models can be challenging. In this study, we show how differential sensitivity analysis of a complex framework such as PSHA can be carried out using algorithmic/automatic differentiation (AD). AD has already been successfully applied for sensitivity analyses in various domains such as oceanography and aerodynamics. First, we demonstrate the feasibility of the AD methodology by comparing AD-derived sensitivities with analytically derived sensitivities for a basic case of PSHA using a simple ground-motion prediction equation. Second, we derive sensitivities via AD for a more complex PSHA study using a stochastic simulation approach for the prediction of ground motions. The presented approach is general enough to accommodate more advanced PSHA studies of greater complexity.}, language = {en} } @article{MolkenthinScherbaumGriewanketal.2014, author = {Molkenthin, Christian and Scherbaum, Frank and Griewank, Andreas and Kuehn, Nicolas and Stafford, Peter}, title = {A Study of the sensitivity of response spectral amplitudes on seismological parameters using algorithmic differentiation}, series = {Bulletin of the Seismological Society of America}, volume = {104}, journal = {Bulletin of the Seismological Society of America}, number = {5}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140022}, pages = {2240 -- 2252}, year = {2014}, abstract = {Response spectra are of fundamental importance in earthquake engineering and represent a standard measure in seismic design for the assessment of structural performance. However, unlike Fourier spectral amplitudes, the relationship of response spectral amplitudes to seismological source, path, and site characteristics is not immediately obvious and might even be considered counterintuitive for high oscillator frequencies. The understanding of this relationship is nevertheless important for seismic-hazard analysis. The purpose of the present study is to comprehensively characterize the variation of response spectral amplitudes due to perturbations of the causative seismological parameters. This is done by calculating the absolute parameter sensitivities (sensitivity coefficients) defined as the partial derivatives of the model output with respect to its input parameters. To derive sensitivities, we apply algorithmic differentiation (AD). This powerful approach is extensively used for sensitivity analysis of complex models in meteorology or aerodynamics. To the best of our knowledge, AD has not been explored yet in the seismic-hazard context. Within the present study, AD was successfully implemented for a proven and extensively applied simulation program for response spectra (Stochastic Method SIMulation [SMSIM]) using the TAPENADE AD tool. We assess the effects and importance of input parameter perturbations on the shape of response spectra for different regional stochastic models in a quantitative way. 
Additionally, we perform sensitivity analysis regarding adjustment issues of ground-motion prediction equations.}, language = {en} } @article{MalischewskyScherbaum2004, author = {Malischewsky, Peter G. and Scherbaum, Frank}, title = {Love's formula and H/V-ratio (ellipticity) of Rayleigh waves}, issn = {0165-2125}, year = {2004}, abstract = {The ellipticity of Rayleigh surface waves, which is an important parameter characterizing the propagation medium, is studied for several models with increasing complexity. While the main focus lies on theory, practical implications of the use of the horizontal to vertical component ratio (H/V-ratio) to study the subsurface structure are considered as well. Love's approximation of the ellipticity for an incompressible layer over an incompressible half-space is critically discussed, especially concerning its applicability for different impedance contrasts. The main result is an analytically exact formula of H/V for a 2-layer model of compressible media, which is a generalization of Love's formula. It turns out that for a limited range of models Love's approximation can be used also in the general case. (C) 2003 Elsevier B.V. All rights reserved}, language = {en} } @book{LoosScherbaum1999, author = {Loos, Wolfgang and Scherbaum, Frank}, title = {Inner earth : a seismosonic symphony}, publisher = {Traumton (Indigo Vertrieb)}, address = {[s.l.]}, pages = {Audio-CD}, year = {1999}, language = {en} } @article{KuehnScherbaumRiggelsen2009, author = {K{\"u}hn, Nicolas M. and Scherbaum, Frank and Riggelsen, Carsten}, title = {Deriving empirical ground-motion models : balancing data constraints and physical assumptions to optimize prediction capability}, issn = {0037-1106}, doi = {10.1785/0120080136}, year = {2009}, abstract = {Empirical ground-motion models used in seismic hazard analysis are commonly derived by regression of observed ground motions against a chosen set of predictor variables. Commonly, the model building process is based on residual analysis and/or expert knowledge and/or opinion, while the quality of the model is assessed by the goodness-of-fit to the data. Such an approach, however, bears no immediate relation to the predictive power of the model and, with increasing complexity of the models, is increasingly susceptible to the danger of overfitting. Here, a different, primarily data-driven method for the development of ground-motion models is proposed that makes use of the notion of generalization error to counteract the problem of overfitting. Generalization error directly estimates the average prediction error on data not used for the model generation and, thus, is a good criterion to assess the predictive capabilities of a model. The approach taken here makes only a few a priori assumptions. At first, peak ground acceleration and response spectrum values are modeled by flexible, nonphysical functions (polynomials) of the predictor variables. The inclusion of a particular predictor and the order of the polynomials are based on minimizing generalization error. The approach is illustrated for the Next Generation of Ground-Motion Attenuation (NGA) dataset. The resulting model is rather complex, comprising 48 parameters, but has considerably lower generalization error than functional forms commonly used in ground-motion models. The model parameters have no physical meaning, but a visual interpretation is possible and can reveal relevant characteristics of the data, for example, the Moho bounce in the distance scaling.
In a second step, the regression model is approximated by an equivalent stochastic model, making it physically interpretable. The resulting resolvable stochastic model parameters are comparable to published models for western North America. In general, for large datasets, generalization error minimization provides a viable method for the development of empirical ground-motion models.}, language = {en} } @article{KuehnRiggelsenScherbaum2011, author = {K{\"u}hn, Nicolas M. and Riggelsen, Carsten and Scherbaum, Frank}, title = {Modeling the joint probability of earthquake, site, and ground-motion parameters using Bayesian networks}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {1}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100080}, pages = {235 -- 249}, year = {2011}, abstract = {Bayesian networks are a powerful and increasingly popular tool for reasoning under uncertainty, offering intuitive insight into (probabilistic) data-generating processes. They have been successfully applied to many different fields, including bioinformatics. In this paper, Bayesian networks are used to model the joint-probability distribution of selected earthquake, site, and ground-motion parameters. This provides a probabilistic representation of the independencies and dependencies between these variables. In particular, contrary to classical regression, Bayesian networks do not distinguish between target and predictors, treating each variable as a random variable. The capability of Bayesian networks to model the ground-motion domain in probabilistic seismic hazard analysis is shown for a generic situation. A Bayesian network is learned based on a subset of the Next Generation Attenuation (NGA) dataset, using 3342 records from 154 earthquakes. Because no prior assumptions about dependencies between particular parameters are made, the learned network displays the most probable model given the data. The learned network shows that the ground-motion parameter (horizontal peak ground acceleration, PGA) is directly connected only to the moment magnitude, Joyner-Boore distance, fault mechanism, source-to-site azimuth, and depth to a shear-wave horizon of 2.5 km/s (Z2.5). In particular, the effect of VS30 is mediated by Z2.5. Comparisons of the PGA distributions based on the Bayesian networks with the NGA model of Boore and Atkinson (2008) show a reasonable agreement in ranges of good data coverage.}, language = {en} } @article{KuehnScherbaum2015, author = {K{\"u}hn, Nico M. and Scherbaum, Frank}, title = {Ground-motion prediction model building: a multilevel approach}, series = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, volume = {13}, journal = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, number = {9}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-761X}, doi = {10.1007/s10518-015-9732-3}, pages = {2481 -- 2491}, year = {2015}, abstract = {A Bayesian ground-motion model is presented that directly estimates the coefficients of the model and the correlation between different ground-motion parameters of interest. The model is developed as a multi-level model with levels for earthquake, station and record terms.
This separation allows one to estimate residuals for each level and thus the associated aleatory variability. In particular, the usually estimated within-event variability is split into a between-station and a between-record variability. In addition, the covariance structure between different ground-motion parameters of interest is estimated for each level, i.e., the between-event, between-station and between-record correlation coefficients are directly available. All parameters of the model are estimated via Bayesian inference, which allows one to assess their epistemic uncertainty in a principled way. The model is developed using a recently compiled European strong-motion database. The target variables are peak ground velocity, peak ground acceleration and spectral acceleration at eight oscillator periods. The model performs well with respect to its residuals, and is similar to other ground-motion models using the same underlying database. The correlation coefficients are similar to those estimated for other parts of the world, with nearby periods having a high correlation. The between-station, between-event and between-record correlations generally follow a similar trend.}, language = {en} } @article{KummerowKindOnckenetal.2004, author = {Kummerow, J. and Kind, Rainer and Oncken, Onno and Giese, Peter and Ryberg, Trond and Wylegalla, Kurt and Scherbaum, Frank}, title = {A natural and controlled source seismic profile through the Eastern Alps : TRANSALP}, year = {2004}, abstract = {The combined passive and active seismic TRANSALP experiment produced an unprecedented high-resolution crustal image of the Eastern Alps between Munich and Venice. The European and Adriatic Mohos (EM and AM, respectively) are clearly imaged with different seismic techniques: near-vertical incidence reflections and receiver functions (RFs). The European Moho dips gently southward from 35 km beneath the northern foreland to a maximum depth of 55 km beneath the central part of the Eastern Alps, whereas the Adriatic Moho is imaged primarily by receiver functions at a relatively constant depth of about 40 km. In both data sets, we have also detected first-order Alpine shear zones, such as the Helvetic detachment, Inntal fault and SubTauern ramp in the north. Apart from the Valsugana thrust, receiver functions in the southern part of the Eastern Alps have also observed a north-dipping interface, which may penetrate the entire Adriatic crust [Adriatic Crust Interface (ACI)]. Deep crustal seismicity may be related to the ACI. We interpret the ACI as the currently active retroshear zone in the doubly vergent Alpine collisional belt. (C) 2004 Elsevier B.V. All rights reserved}, language = {en} } @article{KuleshHolschneiderDialloetal.2005, author = {Kulesh, Michail and Holschneider, Matthias and Diallo, Mamadou Sanou and Xie, Q. and Scherbaum, Frank}, title = {Modeling of wave dispersion using continuous wavelet transforms}, issn = {0033-4553}, year = {2005}, abstract = {In the estimate of dispersion with the help of wavelet analysis, considerable emphasis has been put on the extraction of the group velocity using the modulus of the wavelet transform. In this paper we give an asymptotic expression of the full propagator in wavelet space that comprises the phase velocity as well. This operator establishes a relationship between the observed signals at two different stations during wave propagation in a dispersive and attenuating medium.
Numerical and experimental examples are presented to show that the method accurately models seismic wave dispersion and attenuation.}, language = {en} } @article{KuehnScherbaum2016, author = {Kuehn, Nicolas M. and Scherbaum, Frank}, title = {A partially non-ergodic ground-motion prediction equation for Europe and the Middle East}, series = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, volume = {14}, journal = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-761X}, doi = {10.1007/s10518-016-9911-x}, pages = {2629 -- 2642}, year = {2016}, abstract = {A partially non-ergodic ground-motion prediction equation is estimated for Europe and the Middle East. Therefore, a hierarchical model is presented that accounts for regional differences. For this purpose, the scaling of ground-motion intensity measures is assumed to be similar, but not identical, in different regions. This is achieved by assuming a hierarchical model, where some coefficients are treated as random variables which are sampled from an underlying global distribution. The coefficients are estimated by Bayesian inference. This allows one to estimate the epistemic uncertainty in the coefficients, and consequently in model predictions, in a rigorous way. The model is estimated based on peak ground acceleration data from nine different European/Middle Eastern regions. There are large differences in the number of earthquakes and records in the different regions. However, due to the hierarchical nature of the model, regions with only few data points borrow strength from other regions with more data. This makes it possible to estimate a separate set of coefficients for all regions. Different regionalized models are compared, for which different coefficients are assumed to be regionally dependent. Results show that regionalizing the coefficients for magnitude and distance scaling leads to better performance of the models. The models for all regions are physically sound, even if only very few earthquakes comprise one region.}, language = {en} } @article{KruegerScherbaum2014, author = {Kr{\"u}ger, Frank and Scherbaum, Frank}, title = {The 29 September 1969, Ceres, South Africa, Earthquake: full waveform moment tensor inversion for point source and kinematic source parameters}, series = {Bulletin of the Seismological Society of America}, volume = {104}, journal = {Bulletin of the Seismological Society of America}, number = {1}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120130209}, pages = {576 -- 581}, year = {2014}, abstract = {The Ceres earthquake of 29 September 1969 is the largest known earthquake in southern Africa. Digitized analog recordings from Worldwide Standardized Seismographic Network stations (Powell and Fries, 1964) are used to retrieve the point source moment tensor and the most likely centroid depth of the event using full waveform modeling. A scalar seismic moment of 2.2-2.4 x 10^18 N·m, corresponding to a moment magnitude of 6.2-6.3, is found. The analysis confirms the pure strike-slip mechanism previously determined from onset polarities by Green and Bloch (1971). Overall good agreement with the fault orientation previously estimated from local aftershock recordings is found. The centroid depth can be constrained to be less than 15 km.
In a second analysis step, we use a higher-order moment-tensor-based inversion scheme for simple extended rupture models to constrain the lateral fault dimensions. We find that the rupture propagated unilaterally for 4.7 s from east-southwest to west-northwest for about 17 km (average rupture velocity of about 3.1 km/s).}, language = {en} } @article{KohlerOhrnbergerScherbaumetal.2004, author = {Kohler, A. and Ohrnberger, Matthias and Scherbaum, Frank and Stange, S. and Kind, F.}, title = {Ambient vibration measurements in the Southern Rhine Graben close to Basle}, issn = {1593-5213}, year = {2004}, abstract = {This study presents results of ambient noise measurements from temporary single-station and small-scale array deployments in the northeast of Basle. H/V spectral ratios were determined along various profiles crossing the eastern masterfault of the Rhine Rift Valley and the adjacent sedimentary rift fills. The fundamental H/V peak frequencies decrease along the profile towards the east, consistent with the dip of the Tertiary sediments within the rift. Using existing empirical relationships between H/V frequency peaks and the depth of the dominant seismic contrast, derived on the basis of the lambda/4-resonance hypothesis and a power-law depth dependence of the S-wave velocity, we obtain thicknesses of the rift fill from about 155 m in the west to 280 m in the east. This is in agreement with previous studies. The array analysis of the ambient noise wavefield yielded a stable dispersion relation consistent with Rayleigh wave propagation velocities. We conclude that a significant amount of surface waves is contained in the observed wavefield. The computed ellipticity for fundamental-mode Rayleigh waves for the velocity-depth models used for the estimation of the sediment thicknesses is in agreement with the observed H/V spectra over a large frequency band.}, language = {en} } @article{KoehlerOhrnbergerScherbaum2009, author = {Koehler, Andreas and Ohrnberger, Matthias and Scherbaum, Frank}, title = {Unsupervised feature selection and general pattern discovery using Self-Organizing Maps for gaining insights into the nature of seismic wavefields}, issn = {0098-3004}, doi = {10.1016/j.cageo.2009.02.004}, year = {2009}, abstract = {This study presents an unsupervised feature selection and learning approach for the discovery and intuitive imaging of significant temporal patterns in seismic single-station or network recordings. For this purpose, the data are parametrized by real-valued feature vectors for short time windows using standard analysis tools for seismic data, such as frequency-wavenumber, polarization, and spectral analysis. We use Self-Organizing Maps (SOMs) for a data-driven feature selection, visualization and clustering procedure, which is in particular suitable for high-dimensional data sets. Our feature selection method is based on significance testing using the Wald-Wolfowitz runs test for individual features and on correlation hunting with SOMs in feature subsets. Using synthetics composed of Rayleigh and Love waves and real-world data, we show the robustness and the improved discriminative power of that approach compared to feature subsets manually selected from individual wavefield parametrization methods.
Furthermore, the capability of the clustering and visualization techniques to investigate the discrimination of wave phases is shown by means of synthetic waveforms and regional earthquake recordings.}, language = {en} } @article{HaendelvonSpechtKuehnetal.2015, author = {H{\"a}ndel, Annabel and von Specht, Sebastian and Kuehn, Nicolas M. and Scherbaum, Frank}, title = {Mixtures of ground-motion prediction equations as backbone models for a logic tree: an application to the subduction zone in Northern Chile}, series = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, volume = {13}, journal = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-761X}, doi = {10.1007/s10518-014-9636-7}, pages = {483 -- 501}, year = {2015}, abstract = {In probabilistic seismic hazard analysis, different ground-motion prediction equations (GMPEs) are commonly combined within a logic tree framework. The selection of appropriate GMPEs, however, is a non-trivial task, especially for regions where strong-motion data are sparse and where no indigenous GMPE exists, because the set of models needs to capture the whole range of ground-motion uncertainty. In this study we investigate the aggregation of GMPEs into a mixture model with the aim of inferring a backbone model that is able to represent the center of the ground-motion distribution in a logic tree analysis. This central model can be scaled up and down to obtain the full range of ground-motion uncertainty. The combination of models into a mixture is inferred from observed ground-motion data. We tested the new approach for Northern Chile, a region for which no indigenous GMPE exists. Mixture models were calculated for interface and intraslab type events individually. For each source type we aggregated eight subduction zone GMPEs using mainly new strong-motion data that were recorded within the Plate Boundary Observatory Chile project and that were processed within this study. We can show that the mixture performs better than any of its component GMPEs, and that it performs comparably to a regression model that was derived for the same dataset. The mixture model seems to represent the median ground motions in that region fairly well. It is thus able to serve as a backbone model for the logic tree.}, language = {en} } @article{HinzenReamerScherbaum2013, author = {Hinzen, Klaus-G and Reamer, Sharon K. and Scherbaum, Frank}, title = {Slow Fourier transform}, series = {Seismological research letters}, volume = {84}, journal = {Seismological research letters}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0895-0695}, doi = {10.1785/0220120139}, pages = {251 -- 257}, year = {2013}, language = {en} } @article{HinzenWeberScherbaum2004, author = {Hinzen, K. G. and Weber, B. and Scherbaum, Frank}, title = {On the resolution of H/V measurements to determine sediment thickness, a case study across a normal fault in the Lower Rhine Embayment, Germany}, issn = {1363-2469}, year = {2004}, abstract = {In recent years, H/V measurements have been increasingly used to map the thickness of sediment fill in sedimentary basins in the context of seismic hazard assessment. This parameter is believed to be an important proxy for the site effects in sedimentary basins (e.g. in the Los Angeles basin).
Here we present the results of a test using this approach across an active normal fault in a structurally well-known situation. Measurements on a 50 km long profile with 1 km station spacing clearly show a change in the frequency of the fundamental peak of H/V ratios with increasing thickness of the sediment layer in the eastern part of the Lower Rhine Embayment. Subsequently, a section of 10 km length across the Erft-Sprung system, a normal fault with ca. 750 m vertical offset, was measured with a station distance of 100 m. Frequencies of the first and second peaks and the first trough in the H/V spectra are used in a simple resonance model to estimate depths of the bedrock. While the frequency of the first peak shows a large scatter for sediment depths larger than ca. 500 m, the frequency of the first trough follows the changing thickness of the sediments across the fault. The lateral resolution is in the range of the station distance of 100 m. A power law for the depth dependence of the S-wave velocity derived from downhole measurements in an earlier study [Budny, 1984] and power laws inverted from dispersion analysis of micro-array measurements [Scherbaum et al., 2002] agree with the results from the H/V ratios of this study.}, language = {en} } @article{HiemerScherbaumRoessleretal.2011, author = {Hiemer, Stefan and Scherbaum, Frank and R{\"o}ßler, Dirk and K{\"u}hn, Nicolas}, title = {Determination of $\tau_0$ and Rock Site $\kappa$ from Records of the 2008/2009 Earthquake Swarm in Western Bohemia}, series = {Seismological research letters}, volume = {82}, journal = {Seismological research letters}, number = {3}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0895-0695}, doi = {10.1785/gssrl.82.3.387}, pages = {387 -- 393}, year = {2011}, language = {en} }