@article{ShinZoellerHolschneideretal.2011, author = {Shin, Seoleun and Z{\"o}ller, Gert and Holschneider, Matthias and Reich, Sebastian}, title = {A multigrid solver for modeling complex interseismic stress fields}, series = {Computers \& Geosciences}, volume = {37}, journal = {Computers \& Geosciences}, number = {8}, publisher = {Elsevier}, address = {Oxford}, issn = {0098-3004}, doi = {10.1016/j.cageo.2010.11.011}, pages = {1075 -- 1082}, year = {2011}, abstract = {We develop a multigrid, multiple time stepping scheme to reduce the computational effort of calculating complex stress interactions on a strike-slip 2D planar fault for the simulation of seismicity. The key elements of the multilevel solver are separation of length scales, grid coarsening, and hierarchy. In this study the complex stress interactions are split into two parts: the first, which contributes only weakly, is computed on a coarse level, while the strong interactions are computed on a fine level. This partition leads to a significant reduction in the number of computations. The reduction of complexity is further enhanced by combining the multigrid with multiple time stepping. Computational efficiency is improved by a factor of 10 while retaining reasonable accuracy, compared to the original full matrix-vector multiplication. The accuracy of the solution and the computational efficiency depend on a given cut-off radius that splits the multiplications into the two parts. The multigrid scheme is constructed in such a way that it conserves stress in the entire half-space.}, language = {en} }

@article{MakaravaBenmehdiHolschneider2011, author = {Makarava, Natallia and Benmehdi, Sabah and Holschneider, Matthias}, title = {Bayesian estimation of self-similarity exponent}, series = {Physical review : E, Statistical, nonlinear and soft matter physics}, volume = {84}, journal = {Physical review : E, Statistical, nonlinear and soft matter physics}, number = {2}, publisher = {American Physical Society}, address = {College Park}, issn = {1539-3755}, doi = {10.1103/PhysRevE.84.021109}, pages = {9}, year = {2011}, abstract = {In this study we propose a Bayesian approach to the estimation of the Hurst exponent in terms of linear mixed models. Our method is applicable even to unevenly sampled signals and signals with gaps. We test the method on artificial fractional Brownian motion of different lengths and compare it with the detrended fluctuation analysis technique. The estimation of the Hurst exponent of a Rosenblatt process is shown as an example of an H-self-similar process with non-Gaussian dimensional distribution. Additionally, we analyze real data, the Dow-Jones Industrial Average closing values, and examine the temporal variation of its Hurst exponent.}, language = {en} }

@article{BenmehdiMakaravaBenhamidoucheetal.2011, author = {Benmehdi, Sabah and Makarava, Natallia and Benhamidouche, N.
and Holschneider, Matthias}, title = {Bayesian estimation of the self-similarity exponent of the Nile River fluctuation}, series = {Nonlinear processes in geophysics}, volume = {18}, journal = {Nonlinear processes in geophysics}, number = {3}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1023-5809}, doi = {10.5194/npg-18-441-2011}, pages = {441 -- 446}, year = {2011}, abstract = {The aim of this paper is to estimate the Hurst parameter of Fractional Gaussian Noise (FGN) using Bayesian inference. We propose an estimation technique that takes into account the full correlation structure of this process. Instead of using the integrated time series and then applying an estimator for its Hurst exponent, we propose to use the noise signal directly. As an application we analyze the time series of the Nile River, where we find a posterior distribution that is compatible with previous findings. In addition, our technique provides natural error bars for the Hurst exponent.}, language = {en} }

@article{SchuetzHolschneider2011, author = {Sch{\"u}tz, Nadine and Holschneider, Matthias}, title = {Detection of trend changes in time series using Bayesian inference}, series = {Physical review : E, Statistical, nonlinear and soft matter physics}, volume = {84}, journal = {Physical review : E, Statistical, nonlinear and soft matter physics}, number = {2}, publisher = {American Physical Society}, address = {College Park}, issn = {1539-3755}, doi = {10.1103/PhysRevE.84.021120}, pages = {10}, year = {2011}, abstract = {Change points in time series are perceived as isolated singularities where two regular trends of a given signal do not match. The detection of such transitions is of fundamental interest for the understanding of the system's internal dynamics or external forcings. In practice, observational noise makes it difficult to detect such change points in time series. In this work we elaborate on a Bayesian algorithm to estimate the location of the singularities and to quantify their credibility. We validate the performance and sensitivity of our inference method by estimating change points of synthetic data sets. As an application we use our algorithm to analyze the annual flow volume of the Nile River at Aswan from 1871 to 1970, where we confirm a well-established significant transition point within the time series.}, language = {en} }

@article{HolschneiderZoellerHainzl2011, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Estimation of the maximum possible magnitude in the framework of a doubly truncated Gutenberg-Richter model}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100289}, pages = {1649 -- 1659}, year = {2011}, abstract = {We discuss to what extent a given earthquake catalog and the assumption of a doubly truncated Gutenberg-Richter distribution for the earthquake magnitudes allow for the calculation of confidence intervals for the maximum possible magnitude M. We show that, without further assumptions such as the existence of an upper bound of M, only very limited information may be obtained. In a frequentist formulation, for each confidence level $\alpha$ the confidence interval diverges with finite probability. In a Bayesian formulation, the posterior distribution of the upper magnitude is not normalizable.
We conclude that the common approach of deriving confidence intervals from the variance of a point estimator fails. Technically, this problem can be overcome by introducing an upper bound $\tilde{M}$ for the maximum magnitude. Then the Bayesian posterior distribution can be normalized, and its variance decreases with the number of observed events. However, because the posterior depends significantly on the choice of the unknown value of $\tilde{M}$, the resulting confidence intervals are essentially meaningless. The use of an informative prior distribution accounting for pre-knowledge of M is also of little use, because the prior is only modified in the case of the occurrence of an extreme event. Our results suggest that the maximum possible magnitude M should better be replaced by M(T), the maximum expected magnitude in a given time interval T, for which the calculation of exact confidence intervals becomes straightforward. From a physical point of view, numerical models of the earthquake process adjusted to specific fault regions may be a powerful alternative to overcome the shortcomings of purely statistical inference.}, language = {en} }

@article{GaciZaourarBriqueuetal.2011, author = {Gaci, Said and Zaourar, Naima and Briqueu, Louis and Holschneider, Matthias}, title = {Regularity analysis applied to sonic logs data: a case study from KTB borehole site}, series = {Arabian journal of geosciences}, volume = {4}, journal = {Arabian journal of geosciences}, number = {1-2}, publisher = {Springer}, address = {Heidelberg}, issn = {1866-7511}, doi = {10.1007/s12517-010-0129-y}, pages = {221 -- 227}, year = {2011}, abstract = {Borehole logs provide in situ information about the fluctuations of petrophysical properties with depth and thus allow the characterization of crustal heterogeneities. A detailed investigation of these measurements may allow features of the geological media to be extracted. In this study, we suggest a regularity analysis based on the continuous wavelet transform to examine sonic logs data. The local behavior of the logs at each depth is described using the local Hurst exponent, estimated by two approaches: the local wavelet approach and the average-local wavelet approach. First, a synthetic log, generated using the random midpoint displacement algorithm, is processed by the regularity analysis. The obtained Hurst curves allow the different layers composing the simulated geological model to be distinguished. Next, this analysis is extended to real sonic logs data recorded at the Kontinentales Tiefbohrprogramm (KTB) pilot borehole (Continental Deep Drilling Program, Germany). The results show a significant correlation between the estimated Hurst exponents and the lithological discontinuities crossed by the well.
Hence, the Hurst exponent can be used as a tool to characterize underground heterogeneities.}, language = {en} }

@article{ShebalinNarteauHolschneideretal.2011, author = {Shebalin, Peter and Narteau, Clement and Holschneider, Matthias and Schorlemmer, Danijel}, title = {Short-term earthquake forecasting using early aftershock statistics}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {1}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100119}, pages = {297 -- 312}, year = {2011}, abstract = {We present an alarm-based earthquake forecast model that uses early aftershock statistics (EAST). This model is based on the hypothesis that the time delay before the onset of the power-law aftershock decay rate decreases as the level of stress and the seismogenic potential increase. Here, we estimate this time delay from $\langle t_g \rangle$, the time constant of the Omori-Utsu law. To isolate space-time regions with a relatively high level of stress, the single local variable of our forecast model is the $E_a$ value, the ratio between the long-term and short-term estimations of $\langle t_g \rangle$. When and where the $E_a$ value exceeds a given threshold (i.e., the c value is abnormally small), an alarm is issued, and an earthquake is expected to occur during the next time step. Retrospective tests show that the EAST model has better predictive power than a stationary reference model based on smoothed extrapolation of past seismicity. The official prospective test for California started on 1 July 2009 in the testing center of the Collaboratory for the Study of Earthquake Predictability (CSEP). During the first nine months, 44 earthquakes with $M \geq 4$ occurred in the testing area. For this time period, the EAST model has better predictive power than the reference model at a 1\% level of significance. Because the EAST model also has better predictive power than several time-varying clustering models tested in CSEP at a 1\% level of significance, we suggest that our successful prospective results are not due only to the space-time clustering of aftershocks.}, language = {en} }

@article{RochaVasseurHaynetal.2011, author = {Rocha, Marcia R. and Vasseur, David A. and Hayn, Michael and Holschneider, Matthias and Gaedke, Ursula}, title = {Variability patterns differ between standing stock and process rates}, series = {Oikos}, volume = {120}, journal = {Oikos}, number = {1}, publisher = {Wiley-Blackwell}, address = {Malden}, issn = {0030-1299}, doi = {10.1111/j.1600-0706.2010.18786.x}, pages = {17 -- 25}, year = {2011}, abstract = {Standing stocks are typically easier to measure than process rates such as production. Hence, stocks are often used as indicators of ecosystem functions, although the latter are generally more strongly related to rates than to stocks. The regulation of stocks and rates, and thus their variability over time, may differ, as stocks constitute the net result of production and losses. Based on long-term, high-frequency measurements in a large, deep lake, we explore the variability patterns in primary and bacterial production and relate them to those of the corresponding standing stocks, i.e. chlorophyll concentration, phytoplankton biomass, and bacterial biomass.
We employ different methods (coefficient of variation, spline fitting, and spectral analysis), which complement each other, to assess the variability present in the plankton data at different temporal scales. In phytoplankton, we found that the overall variability of primary production is dominated by fluctuations at low frequencies, such as the annual one, whereas in stocks, and chlorophyll in particular, higher frequencies contribute substantially to the overall variance. This suggests that using standing stocks instead of rate measures leads to an under- or overestimation of food shortage for consumers during distinct periods of the year. The range of annual variation in bacterial production is 8 times greater than that of biomass, showing that the variability of bacterial activity (e.g. oxygen consumption, remineralisation) would be underestimated if biomass were used. The P/B ratios were variable, and although clear trends are present in both bacteria and phytoplankton, no systematic relationship between stock and rate measures was found for the two groups. Hence, standing stock and process rate measures exhibit different variability patterns, and care is needed when interpreting the mechanisms and implications of the variability encountered.}, language = {en} }

@article{PanetKuroishiHolschneider2011, author = {Panet, Isabelle and Kuroishi, Yuki and Holschneider, Matthias}, title = {Wavelet modelling of the gravity field by domain decomposition methods: an example over Japan}, series = {Geophysical journal international}, volume = {184}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2010.04840.x}, pages = {203 -- 219}, year = {2011}, abstract = {With the advent of satellite gravity, large gravity data sets of unprecedented quality at low and medium resolution have become available. For local, high-resolution field modelling, they need to be combined with surface gravity data. Such models are then used for various applications, from the study of the Earth's interior to the determination of oceanic currents. Here we show how to realize such a combination in a flexible way using spherical wavelets and applying a domain decomposition approach. This iterative method, based on the Schwarz algorithms, allows a large problem to be split into smaller ones and avoids the calculation of the entire normal system, which may be huge if high resolution is sought over wide areas. A subdomain is defined as the harmonic space spanned by a subset of the wavelet family. Based on the localization properties of the wavelets in space and frequency, we define hierarchical subdomains of wavelets at different scales. On each scale, blocks of subdomains are defined by using a tailored spatial splitting of the area. The data weighting and regularization are iteratively adjusted for the subdomains, which makes it possible to handle heterogeneity in the data quality or in the gravity variations. Different levels of approximation of the subdomain normals are also introduced, corresponding to building local averages of the data at different resolution levels. We first provide the theoretical background on domain decomposition methods. Then, we validate the method with synthetic data, considering two kinds of noise: white noise and coloured noise.
We then apply the method to data over Japan, where we combine a satellite-based geopotential model, EIGEN-GL04S, and a local gravity model derived from a combination of land and marine gravity data and an altimetry-derived marine gravity model. A hybrid spherical harmonics/wavelet model of the geoid is obtained at about 15 km resolution, and a corrector grid for the surface model is derived.}, language = {en} }