@article{SchannerKorteHolschneider2022, author = {Schanner, Maximilian and Korte, Monika and Holschneider, Matthias}, title = {ArchKalmag14k: A Kalman-filter based global geomagnetic model for the Holocene}, series = {Journal of geophysical research : Solid earth}, volume = {127}, journal = {Journal of geophysical research : Solid earth}, number = {2}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2021JB023166}, pages = {17}, year = {2022}, abstract = {We propose a global geomagnetic field model for the last 14 thousand years, based on thermoremanent records. We call the model ArchKalmag14k. ArchKalmag14k is constructed by modifying recently proposed algorithms, based on space-time correlations. Due to the amount of data and complexity of the model, the full Bayesian posterior is numerically intractable. To tackle this, we sequentialize the inversion by implementing a Kalman filter with a fixed time step. Every step consists of a prediction, based on a degree-dependent temporal covariance, and a correction via Gaussian process regression. Dating errors are treated via a noisy input formulation. Cross correlations are reintroduced by a smoothing algorithm and model parameters are inferred from the data. Due to the specific statistical nature of the proposed algorithms, the model comes with space- and time-dependent uncertainty estimates. The new model ArchKalmag14k shows less variation in the large-scale degrees than comparable models. Local predictions represent the underlying data and agree with comparable models if the location is sampled well. Uncertainties are bigger for earlier times and in regions of sparse data coverage. We also use ArchKalmag14k to analyze the appearance and evolution of the South Atlantic anomaly together with reverse flux patches at the core-mantle boundary, considering the model uncertainties. While we find good agreement with earlier models for recent times, our model suggests a different evolution of intensity minima prior to 1650 CE. In general, our results suggest that prior to 6000 BCE the data is not sufficient to support global models.}, language = {en} } @article{MolkenthinDonnerReichetal.2022, author = {Molkenthin, Christian and Donner, Christian and Reich, Sebastian and Z{\"o}ller, Gert and Hainzl, Sebastian and Holschneider, Matthias and Opper, Manfred}, title = {GP-ETAS: semiparametric Bayesian inference for the spatio-temporal epidemic type aftershock sequence model}, series = {Statistics and Computing}, volume = {32}, journal = {Statistics and Computing}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {0960-3174}, doi = {10.1007/s11222-022-10085-3}, pages = {25}, year = {2022}, abstract = {The spatio-temporal epidemic type aftershock sequence (ETAS) model is widely used to describe the self-exciting nature of earthquake occurrences. While traditional inference methods provide only point estimates of the model parameters, we aim at a fully Bayesian treatment of model inference, which naturally allows us to incorporate prior knowledge and provides uncertainty quantification of the resulting estimates. Therefore, we introduce a highly flexible, non-parametric representation for the spatially varying ETAS background intensity through a Gaussian process (GP) prior. Combined with classical triggering functions, this results in a new model formulation, namely the GP-ETAS model. We enable tractable and efficient Gibbs sampling by deriving an augmented form of the GP-ETAS inference problem.
This novel sampling approach allows us to assess the posterior model variables conditioned on observed earthquake catalogues, i.e., the spatial background intensity and the parameters of the triggering function. Empirical results on two synthetic data sets indicate that GP-ETAS outperforms standard models and thus demonstrate the predictive power for observed earthquake catalogues, including uncertainty quantification for the estimated parameters. Finally, a case study for the L'Aquila region, Italy, with the devastating event on 6 April 2009, is presented.}, language = {en} } @article{SchannerMauerbergerKorteetal.2021, author = {Schanner, Maximilian Arthus and Mauerberger, Stefan and Korte, Monika and Holschneider, Matthias}, title = {Correlation based time evolution of the archeomagnetic field}, series = {Journal of geophysical research : JGR ; an international quarterly. B, Solid earth}, volume = {126}, journal = {Journal of geophysical research : JGR ; an international quarterly. B, Solid earth}, number = {7}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2020JB021548}, pages = {22}, year = {2021}, abstract = {In a previous study, a new snapshot modeling concept for the archeomagnetic field was introduced (Mauerberger et al., 2020). By assuming a Gaussian process for the geomagnetic potential, a correlation-based algorithm was presented, which incorporates a closed-form spatial correlation function. This work extends the suggested modeling strategy to the temporal domain. A space-time correlation kernel is constructed from the tensor product of the closed-form spatial correlation kernel with a squared exponential kernel in time. Dating uncertainties are incorporated into the modeling concept using a noisy input Gaussian process. All but one of the modeling hyperparameters are marginalized to reduce their influence on the outcome and to translate their variability to the posterior variance. The resulting distribution incorporates uncertainties related to dating, measurement, and the modeling process. Results from application to archeomagnetic data show less variation in the dipole than comparable models, but are in general agreement with previous findings.}, language = {en} } @article{SchindlerMoldenhawerStangeetal.2021, author = {Schindler, Daniel and Moldenhawer, Ted and Stange, Maike and Lepro, Valentino and Beta, Carsten and Holschneider, Matthias and Huisinga, Wilhelm}, title = {Analysis of protrusion dynamics in amoeboid cell motility by means of regularized contour flows}, series = {PLoS Computational Biology : a new community journal}, volume = {17}, journal = {PLoS Computational Biology : a new community journal}, number = {8}, publisher = {PLoS}, address = {San Francisco}, issn = {1553-734X}, doi = {10.1371/journal.pcbi.1009268}, pages = {33}, year = {2021}, abstract = {Amoeboid cell motility is essential for a wide range of biological processes including wound healing, embryonic morphogenesis, and cancer metastasis. It relies on complex dynamical patterns of cell shape changes that pose long-standing challenges to mathematical modeling and raise a need for automated and reproducible approaches to extract quantitative morphological features from image sequences. Here, we introduce a theoretical framework and a computational method for obtaining smooth representations of the spatiotemporal contour dynamics from stacks of segmented microscopy images.
Based on a Gaussian process regression we propose a one-parameter family of regularized contour flows that allows us to continuously track reference points (virtual markers) between successive cell contours. We use this approach to define a coordinate system on the moving cell boundary and to represent different local geometric quantities in this frame of reference. In particular, we introduce the local marker dispersion as a measure to identify localized membrane expansions and provide a fully automated way to extract the properties of such expansions, including their area and growth time. The methods are available as an open-source software package called AmoePy, a Python-based toolbox for analyzing amoeboid cell motility (based on time-lapse microscopy data), including a graphical user interface and detailed documentation. Due to the mathematical rigor of our framework, we envision it to be of use for the development of novel cell motility models. We mainly use experimental data of the social amoeba Dictyostelium discoideum to illustrate and validate our approach.
Author summary: Amoeboid motion is a crawling-like cell migration that plays a key role in multiple biological processes such as wound healing and cancer metastasis. This type of cell motility results from expanding and simultaneously contracting parts of the cell membrane. From fluorescence images, we obtain a sequence of points, representing the cell membrane, for each time step. By using regression analysis on these sequences, we derive smooth representations, so-called contours, of the membrane. Since the number of measurements is discrete and often limited, the question arises of how to link consecutive contours with each other. In this work, we present a novel mathematical framework in which these links are described by regularized flows allowing a certain degree of concentration or stretching of neighboring reference points on the same contour. This stretching rate, the so-called local dispersion, is used to identify expansions and contractions of the cell membrane, providing a fully automated way of extracting properties of these cell shape changes. We applied our methods to time-lapse microscopy data of the social amoeba Dictyostelium discoideum.}, language = {en} } @article{MauerbergerSchannerKorteetal.2020, author = {Mauerberger, Stefan and Schanner, Maximilian Arthus and Korte, Monika and Holschneider, Matthias}, title = {Correlation based snapshot models of the archeomagnetic field}, series = {Geophysical journal international}, volume = {223}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggaa336}, pages = {648 -- 665}, year = {2020}, abstract = {For the time-stationary global geomagnetic field, a new modelling concept is presented. A Bayesian non-parametric approach provides realistic location-dependent uncertainty estimates. Modelling-related variabilities are dealt with systematically by making few subjective a priori assumptions. Rather than parametrizing the model by Gauss coefficients, a functional analytic approach is applied. The geomagnetic potential is assumed to be a Gaussian process, describing a distribution over functions. A priori correlations are given by an explicit kernel function with a non-informative dipole contribution. A refined modelling strategy is proposed that accommodates non-linearities of archeomagnetic observables: First, a rough field estimate is obtained considering only sites that provide full field vector records. Subsequently, this estimate supports the linearization that incorporates the remaining incomplete records. The comparison of results for the archeomagnetic field over the past 1000 yr is in general agreement with previous models, while improved model uncertainty estimates are provided.}, language = {en} } @article{RoppLesurBaerenzungetal.2020, author = {Ropp, Guillaume and Lesur, Vincent and B{\"a}renzung, Julien and Holschneider, Matthias}, title = {Sequential modelling of the Earth's core magnetic field}, series = {Earth, Planets and Space}, volume = {72}, journal = {Earth, Planets and Space}, number = {1}, publisher = {Springer}, address = {New York}, issn = {1880-5981}, doi = {10.1186/s40623-020-01230-1}, pages = {15}, year = {2020}, abstract = {We describe a new, original approach to the modelling of the Earth's magnetic field. The overall objective of this study is to reliably render fast variations of the core field and its secular variation.
This method combines a sequential modelling approach, a Kalman filter, and a correlation-based modelling step. Sources that most significantly contribute to the field measured at the surface of the Earth are modelled. Their separation is based on strong prior information on their spatial and temporal behaviours. We obtain a time series of model distributions which display behaviours similar to those of recent models based on more classic approaches, particularly at large temporal and spatial scales. Interesting new features and periodicities are visible in our models at smaller time and spatial scales. An important aspect of our method is to yield reliable error bars for all model parameters. These errors, however, are reliable only insofar as the description of the different sources and the prior information used are realistic. Finally, we used a slightly different version of our method to produce candidate models for the thirteenth edition of the International Geomagnetic Reference Field.}, language = {en} } @article{BaerenzungHolschneiderWichtetal.2020, author = {Baerenzung, Julien and Holschneider, Matthias and Wicht, Johannes and Lesur, Vincent and Sanchez, Sabrina}, title = {The Kalmag model as a candidate for IGRF-13}, series = {Earth, planets and space}, volume = {72}, journal = {Earth, planets and space}, number = {1}, publisher = {Springer}, address = {New York}, issn = {1880-5981}, doi = {10.1186/s40623-020-01295-y}, pages = {13}, year = {2020}, abstract = {We present a new model of the geomagnetic field spanning the last 20 years and called Kalmag. Derived from the assimilation of CHAMP and Swarm vector field measurements, it separates the different contributions to the observable field through parameterized prior covariance matrices. To make the inverse problem numerically feasible, it has been sequentialized in time through the combination of a Kalman filter and a smoothing algorithm. The model provides reliable estimates of past, present and future mean fields and associated uncertainties. The version presented here is an update of our IGRF candidates; the amount of assimilated data has been doubled and the considered time window has been extended from [2000.5, 2019.74] to [2000.5, 2020.33].}, language = {en} } @article{SharmaHainzlZoelleretal.2020, author = {Sharma, Shubham and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Is Coulomb stress the best choice for aftershock forecasting?}, series = {Journal of geophysical research : Solid earth}, volume = {125}, journal = {Journal of geophysical research : Solid earth}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2020JB019553}, pages = {12}, year = {2020}, abstract = {The Coulomb failure stress (CFS) criterion is the most commonly used method for predicting spatial distributions of aftershocks following large earthquakes. However, large uncertainties are always associated with the calculation of Coulomb stress change. The uncertainties mainly arise due to nonunique slip inversions and unknown receiver faults; especially for the latter, results are highly dependent on the choice of the assumed receiver mechanism. Based on binary tests (aftershocks yes/no), recent studies suggest that alternative stress quantities, a distance-slip probabilistic model as well as deep neural network (DNN) approaches, are all superior to CFS with a predefined receiver mechanism.
To challenge this conclusion, which might have large implications, we use 289 slip inversions from the SRCMOD database to calculate more realistic CFS values for a layered half-space and variable receiver mechanisms. We also analyze the effect of the magnitude cutoff, grid size variation, and aftershock duration to verify the use of receiver operating characteristic (ROC) analysis for the ranking of stress metrics. The observations suggest that introducing a layered half-space does not improve the stress maps and ROC curves. However, results significantly improve for larger aftershocks and shorter time periods but without changing the ranking. We also go beyond binary testing and apply alternative statistics to test the ability to estimate aftershock numbers, which confirm that simple stress metrics perform better than the classic Coulomb failure stress calculations and are also better than the distance-slip probabilistic model.}, language = {en} } @article{SanchezWichtBaerenzungetal.2019, author = {Sanchez, S. and Wicht, J. and Baerenzung, Julien and Holschneider, Matthias}, title = {Sequential assimilation of geomagnetic observations}, series = {Geophysical journal international}, volume = {217}, journal = {Geophysical journal international}, number = {2}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggz090}, pages = {1434 -- 1450}, year = {2019}, abstract = {High-precision observations of the present-day geomagnetic field by ground-based observatories and satellites provide unprecedented conditions for unveiling the dynamics of the Earth's core. Combining geomagnetic observations with dynamo simulations in a data assimilation (DA) framework allows the reconstruction of past and present states of the internal core dynamics. The essential information that couples the internal state to the observations is provided by the statistical correlations from a numerical dynamo model in the form of a model covariance matrix. Here we test a sequential DA framework, working through a succession of forecast and analysis steps, that extracts the correlations from an ensemble of dynamo models. The primary correlations couple variables of the same azimuthal wave number, reflecting the predominant axial symmetry of the magnetic field. Synthetic tests show that the scheme becomes unstable when confronted with high-precision geomagnetic observations. Our study has identified spurious secondary correlations as the origin of the problem. Keeping only the primary correlations by localizing the covariance matrix with respect to the azimuthal wave number suffices to stabilize the assimilation. While the first analysis step is fundamental in constraining the large-scale interior state, further assimilation steps refine the smaller and more dynamical scales. This refinement turns out to be critical for long-term geomagnetic predictions. Increasing the assimilation steps from one to 18 roughly doubles the prediction horizon for the dipole from about three to six centuries, and from 30 to about 60 yr for smaller observable scales. This improvement is also reflected in the predictability of surface intensity features such as the South Atlantic Anomaly.
Intensity prediction errors are roughly halved when assimilating long observation sequences.}, language = {en} } @misc{ZoellerHolschneider2018, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Reply to "Comment on 'The Maximum Possible and the Maximum Expected Earthquake Magnitude for Production-Induced Earthquakes at the Gas Field in Groningen, The Netherlands' by Gert Z{\"o}ller and Matthias Holschneider" by Mathias Raschke}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170131}, pages = {1029 -- 1030}, year = {2018}, language = {en} } @article{FiedlerZoellerHolschneideretal.2018, author = {Fiedler, Bernhard and Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {Multiple Change-Point Detection in Spatiotemporal Seismicity Data}, series = {Bulletin of the Seismological Society of America}, volume = {108}, journal = {Bulletin of the Seismological Society of America}, number = {3A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120170236}, pages = {1147 -- 1159}, year = {2018}, abstract = {Earthquake rates are driven by tectonic stress buildup, earthquake-induced stress changes, and transient aseismic processes. Although the origin of the first two sources is known, transient aseismic processes are more difficult to detect. However, the knowledge of the associated changes of the earthquake activity is of great interest, because it might help identify natural aseismic deformation patterns such as slow-slip events, as well as the occurrence of induced seismicity related to human activities. For this goal, we develop a Bayesian approach to identify change-points in seismicity data automatically. Using the Bayes factor, we select a suitable model, estimate possible change-points, and additionally use a likelihood ratio test to calculate the significance of the change in intensity. The approach is extended to spatiotemporal data to detect the area in which the changes occur. The method is first applied to synthetic data, showing its capability to detect real change-points. Finally, we apply this approach to observational data from Oklahoma and observe statistically significant changes of seismicity in space and time.}, language = {en} } @article{LesurWardinskiBaerenzungetal.2017, author = {Lesur, Vincent and Wardinski, Ingo and B{\"a}renzung, Julien and Holschneider, Matthias}, title = {On the frequency spectra of the core magnetic field Gauss coefficients}, series = {Physics of the earth and planetary interiors}, volume = {276}, journal = {Physics of the earth and planetary interiors}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0031-9201}, doi = {10.1016/j.pepi.2017.05.017}, pages = {145 -- 158}, year = {2017}, abstract = {From monthly mean observatory data spanning 1957-2014, geomagnetic field secular variation values were calculated by annual differences. Estimates of the spherical harmonic Gauss coefficients of the core field secular variation were then derived by applying correlation-based modelling. Finally, a Fourier transform was applied to the time series of the Gauss coefficients.
This process led to reliable temporal spectra of the Gauss coefficients up to spherical harmonic degree 5 or 6, and down to periods as short as 1 or 2 years depending on the coefficient. We observed that a $k^{-2}$ slope, where k is the frequency, is an acceptable approximation for these spectra, with possibly an exception for the dipole field. The monthly estimates of the core field secular variation at the observatory sites also show that large and rapid variations of the latter occur. This is an indication that geomagnetic jerks are frequent phenomena and that significant secular variation signals at short time scales - i.e., less than 2 years - could still be extracted from data to reveal an unexplored part of the core dynamics.}, language = {en} } @article{CotroneiDiSalvoHolschneideretal.2017, author = {Cotronei, Mariantonia and Di Salvo, Rosa and Holschneider, Matthias and Puccio, Luigia}, title = {Interpolation in reproducing kernel Hilbert spaces based on random subdivision schemes}, series = {Journal of computational and applied mathematics}, volume = {311}, journal = {Journal of computational and applied mathematics}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0377-0427}, doi = {10.1016/j.cam.2016.08.002}, pages = {342 -- 353}, year = {2017}, abstract = {In this paper we present a Bayesian framework for interpolating data in a reproducing kernel Hilbert space associated with a random subdivision scheme, where not only approximations of the values of a function at some missing points can be obtained, but also uncertainty estimates for such predicted values. This random scheme generalizes the usual subdivision by taking into account, at each level, some uncertainty given in terms of suitably scaled noise sequences of i.i.d. Gaussian random variables with zero mean and given variance, and generating, in the limit, a Gaussian process whose correlation structure is characterized and used for computing realizations of the conditional posterior distribution. The hierarchical nature of the procedure may be exploited to reduce the computational cost compared to standard techniques in the case where many prediction points need to be considered.}, language = {en} } @article{HolschneiderLesurMauerbergeretal.2016, author = {Holschneider, Matthias and Lesur, Vincent and Mauerberger, Stefan and Baerenzung, Julien}, title = {Correlation-based modeling and separation of geomagnetic field components}, series = {Journal of geophysical research : Solid earth}, volume = {121}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2015JB012629}, pages = {3142 -- 3160}, year = {2016}, abstract = {We introduce a technique for the modeling and separation of geomagnetic field components that is based on an analysis of their correlation structures alone. The inversion is based on a Bayesian formulation, which allows the computation of uncertainties. The technique allows the incorporation of complex measurement geometries like observatory data in a simple way. We show how our technique is linked to other well-known inversion techniques.
A case study based on observational data is given.}, language = {en} } @article{ZoellerHolschneider2016, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Maximum Possible and the Maximum Expected Earthquake Magnitude for Production-Induced Earthquakes at the Gas Field in Groningen, The Netherlands}, series = {Bulletin of the Seismological Society of America}, volume = {106}, journal = {Bulletin of the Seismological Society of America}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120160220}, pages = {2917 -- 2921}, year = {2016}, abstract = {The Groningen gas field serves as a natural laboratory for production-induced earthquakes, because no earthquakes were observed before the beginning of gas production. Increasing gas production rates resulted in growing earthquake activity and eventually in the occurrence of the 2012 $M_w$ 3.6 Huizinge earthquake. At least since this event, a detailed seismic hazard and risk assessment, including estimation of the maximum earthquake magnitude, is considered to be necessary to decide on the future gas production. In this short note, we first apply state-of-the-art methods of mathematical statistics to derive confidence intervals for the maximum possible earthquake magnitude $m_{max}$. Second, we calculate the maximum expected magnitude $M_T$ in the time between 2016 and 2024 for three assumed gas-production scenarios. Using broadly accepted physical assumptions and a 90\% confidence level, we suggest a value of $m_{max} = 4.4$, whereas $M_T$ varies between 3.9 and 4.3, depending on the production scenario.}, language = {en} } @article{BaerenzungHolschneiderLesur2016, author = {B{\"a}renzung, Julien and Holschneider, Matthias and Lesur, Vincent}, title = {The flow at the Earth's core-mantle boundary under weak prior constraints}, series = {Journal of geophysical research : Solid earth}, volume = {121}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2015JB012464}, pages = {1343 -- 1364}, year = {2016}, abstract = {Prior information in ill-posed inverse problems is of critical importance because it conditions the posterior solution and its associated variability. The problem of determining the flow evolving at the Earth's core-mantle boundary through magnetic field models derived from satellite or observatory data is no exception to the rule. This study aims to estimate what information can be extracted about the velocity field at the core-mantle boundary, when the frozen flux equation is inverted under very weakly informative, but realistic, prior constraints. Instead of imposing a converging spectrum to the flow, we simply assume that its poloidal and toroidal energy spectra are characterized by power laws. The parameters of the spectra, namely their magnitudes and slopes, are unknown. The connection between the velocity field, its spectra parameters, and the magnetic field model is established through the Bayesian formulation of the problem. Working in two steps, we determined the time-averaged spectra of the flow within the 2001-2009.5 period, as well as the flow itself and its associated uncertainties in 2005.0. According to the spectra we obtained, we can conclude that the large-scale approximation of the velocity field is not an appropriate assumption within the time window we considered. For the flow itself, we show that although it is dominated by its equatorial symmetric component, it is very unlikely to be perfectly symmetric.
We also demonstrate that its geostrophic state is questioned in different locations of the outer core.}, language = {en} } @article{SchroeterRitterHolschneideretal.2016, author = {Schroeter, M-A and Ritter, M. and Holschneider, Matthias and Sturm, H.}, title = {Enhanced DySEM imaging of cantilever motion using artificial structures patterned by focused ion beam techniques}, series = {Journal of micromechanics and microengineering}, volume = {26}, journal = {Journal of micromechanics and microengineering}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {0960-1317}, doi = {10.1088/0960-1317/26/3/035010}, pages = {7}, year = {2016}, abstract = {We use a dynamic scanning electron microscope (DySEM) to map the spatial distribution of the vibration of a cantilever beam. The DySEM measurements are based on variations of the local secondary electron signal within the imaging electron beam diameter during an oscillation period of the cantilever. For this reason, the surface of a cantilever without topography or material variation does not allow any conclusions about the spatial distribution of vibration due to a lack of dynamic contrast. In order to overcome this limitation, artificial structures were added at defined positions on the cantilever surface using focused ion beam lithography patterning. The DySEM signal of such high-contrast structures is strongly improved, hence information about the surface vibration becomes accessible. Simulations of images of the vibrating cantilever have also been performed. The results of the simulation are in good agreement with the experimental images.}, language = {en} } @article{ZoellerHolschneider2016, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Earthquake History in a Fault Zone Tells Us Almost Nothing about $m_{max}$}, series = {Seismological research letters}, volume = {87}, journal = {Seismological research letters}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0895-0695}, doi = {10.1785/0220150176}, pages = {132 -- 137}, year = {2016}, abstract = {In the present study, we summarize and evaluate the endeavors from recent years to estimate the maximum possible earthquake magnitude $m_{max}$ from observed data. In particular, we use basic and physically motivated assumptions to identify best cases and worst cases in terms of lowest and highest degree of uncertainty of $m_{max}$. In a general framework, we demonstrate that earthquake data and earthquake proxy data recorded in a fault zone provide almost no information about $m_{max}$ unless reliable and homogeneous data of a long time interval, including several earthquakes with magnitude close to $m_{max}$, are available. Even if detailed earthquake information from some centuries, including historic and paleoearthquakes, is given, only very few, namely the largest events, will contribute at all to the estimation of $m_{max}$, and this results in unacceptably high uncertainties.
As a consequence, estimators of $m_{max}$ in a fault zone, which are based solely on earthquake-related information from this region, have to be dismissed.}, language = {en} } @article{BoehmHolschneiderLignieresetal.2015, author = {Boehm, Thorsten and Holschneider, Matthias and Lignieres, Frederic and Petit, Pascal and Rainer, Monica and Paletou, Francois and Wade, Gregg and Alecian, Evelyne and Carfantan, Herve and Blazere, Aurore and Mirouh, Giovanni M.}, title = {Discovery of starspots on Vega: First spectroscopic detection of surface structures on a normal A-type star}, series = {Astronomy and astrophysics : an international weekly journal}, volume = {577}, journal = {Astronomy and astrophysics : an international weekly journal}, publisher = {EDP Sciences}, address = {Les Ulis}, issn = {0004-6361}, doi = {10.1051/0004-6361/201425425}, pages = {12}, year = {2015}, abstract = {Context. The theoretically studied impact of rapid rotation on stellar evolution needs to be compared with the results of high-resolution spectroscopy-velocimetry observations. Early-type stars present a perfect laboratory for these studies. The prototype A0 star Vega has been extensively monitored in recent years in spectropolarimetry. A weak surface magnetic field was detected, implying that there might be a (still undetected) structured surface. First indications of the presence of small-amplitude stellar radial velocity variations have been reported recently, but confirmation and an in-depth study with the highly stabilized spectrograph SOPHIE/OHP were required. Aims. The goal of this article is to present a thorough analysis of the line profile variations and associated estimators in the early-type standard star Vega (A0) in order to reveal potential activity tracers, exoplanet companions, and stellar oscillations. Methods. Vega was monitored in quasi-continuous high-resolution echelle spectroscopy with the highly stabilized velocimeter SOPHIE/OHP. A total of 2588 high signal-to-noise spectra was obtained during 34.7 h on five nights (2 to 6 August 2012) in high-resolution mode at R = 75 000, covering the visible domain from 3895 to 6270 angstrom. For each reduced spectrum, least-squares deconvolved equivalent photospheric profiles were calculated with a $T_{eff}$ = 9500 and log g = 4.0 spectral line mask. Several methods were applied to study the dynamic behaviour of the profile variations (evolution of radial velocity, bisectors, vspan, 2D profiles, amongst others). Results. We present the discovery of a spotted stellar surface on an A-type standard star (Vega) with very faint spot amplitudes $\Delta F/F_c \sim 5 \times 10^{-4}$. A rotational modulation of spectral lines with a period of rotation P = 0.68 d has clearly been exhibited, unambiguously confirming the results of previous spectropolarimetric studies. Most of these brightness inhomogeneities seem to be located at lower equatorial latitudes. Either a very thin convective layer can be responsible for magnetic field generation at small amplitudes, or a new mechanism has to be invoked to explain the existence of activity-tracing starspots. At this stage it is difficult to disentangle a rotational from a stellar pulsational origin for the existing higher frequency periodic variations. Conclusions.
This first strong evidence that standard A-type stars can show surface structures opens a new field of research and raises the question of a potential link with the weak magnetic fields recently discovered in this category of stars.}, language = {en} } @article{HolschneiderZoellerClementsetal.2014, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Clements, R. and Schorlemmer, Danijel}, title = {Can we test for the maximum possible earthquake magnitude?}, series = {Journal of geophysical research : Solid earth}, volume = {119}, journal = {Journal of geophysical research : Solid earth}, number = {3}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2013JB010319}, pages = {2019 -- 2028}, year = {2014}, language = {en} } @article{BaerenzungHolschneiderLesur2014, author = {Baerenzung, Julien and Holschneider, Matthias and Lesur, Vincent}, title = {Bayesian inversion for the filtered flow at the Earth's core-mantle boundary}, series = {Journal of geophysical research : Solid earth}, volume = {119}, journal = {Journal of geophysical research : Solid earth}, number = {4}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2013JB010358}, pages = {2695 -- 2720}, year = {2014}, abstract = {The inverse problem of determining the flow at the Earth's core-mantle boundary according to an outer core magnetic field and secular variation model has been investigated through a Bayesian formalism. To circumvent the issue arising from the truncated nature of the available fields, we combined two modeling methods. In the first step, we applied a filter on the magnetic field to isolate its large scales by reducing the energy contained in its small scales; we then derived the dynamical equation, referred to as the filtered frozen flux equation, describing the spatiotemporal evolution of the filtered part of the field. In the second step, we proposed a statistical parametrization of the filtered magnetic field in order to account for both its remaining unresolved scales and its large-scale uncertainties. These two modeling techniques were then included in the Bayesian formulation of the inverse problem. To explore the complex posterior distribution of the velocity field resulting from this development, we numerically implemented an algorithm based on Markov chain Monte Carlo methods. After evaluating our approach on synthetic data and comparing it to previously introduced methods, we applied it to a magnetic field model derived from satellite data for the single epoch 2005.0. We could confirm the existence of specific features already observed in previous studies. In particular, we retrieved the planetary-scale eccentric gyre characteristic of flows evaluated under the compressible quasi-geostrophy assumption, although this hypothesis was not considered in our study. In addition, through the sampling of the velocity field posterior distribution, we could evaluate the reliability, at any spatial location and at any scale, of the flow we calculated.
The flow uncertainties we determined are nevertheless conditioned by the choice of the prior constraints we applied to the velocity field.}, language = {en} } @article{ZoellerHolschneider2014, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Induced seismicity: What is the size of the largest expected earthquake?}, series = {The bulletin of the Seismological Society of America}, volume = {104}, journal = {The bulletin of the Seismological Society of America}, number = {6}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120140195}, pages = {3153 -- 3158}, year = {2014}, abstract = {The injection of fluids is a well-known origin for the triggering of earthquake sequences. The growing number of projects related to enhanced geothermal systems, fracking, and others has led to the question of which maximum earthquake magnitude can be expected as a consequence of fluid injection. This question is addressed from the perspective of statistical analysis. Using basic empirical laws of earthquake statistics, we estimate the magnitude $M_T$ of the maximum expected earthquake in a predefined future time window $T_f$. A case study of the fluid injection site at Paradox Valley, Colorado, demonstrates that the magnitude m 4.3 of the largest observed earthquake on 27 May 2000 lies very well within the expectation from past seismicity without adjusting any parameters. Vice versa, for a given maximum tolerable earthquake at an injection site, we can constrain the corresponding amount of injected fluids that must not be exceeded within predefined confidence bounds.}, language = {en} } @article{ShebalinNarteauZecharetal.2014, author = {Shebalin, Peter N. and Narteau, Clement and Zechar, Jeremy Douglas and Holschneider, Matthias}, title = {Combining earthquake forecasts using differential probability gains}, series = {Earth, planets and space}, volume = {66}, journal = {Earth, planets and space}, publisher = {Springer}, address = {Heidelberg}, issn = {1880-5981}, doi = {10.1186/1880-5981-66-37}, pages = {14}, year = {2014}, abstract = {We describe an iterative method to combine seismicity forecasts. With this method, we produce the next generation of a starting forecast by incorporating predictive skill from one or more input forecasts. For a single iteration, we use the differential probability gain of an input forecast relative to the starting forecast. At each point in space and time, the rate in the next-generation forecast is the product of the starting rate and the local differential probability gain. The main advantage of this method is that it can produce high forecast rates using all types of numerical forecast models, even those that are not rate-based. Naturally, a limitation of this method is that the input forecast must have some information not already contained in the starting forecast. We illustrate this method using the Every Earthquake a Precursor According to Scale (EEPAS) and Early Aftershocks Statistics (EAST) models, which are currently being evaluated at the US testing center of the Collaboratory for the Study of Earthquake Predictability. During a testing period from July 2009 to December 2011 (with 19 target earthquakes), the combined model we produce has better predictive performance - in terms of Molchan diagrams and likelihood - than the starting model (EEPAS) and the input model (EAST). Many of the target earthquakes occur in regions where the combined model has high forecast rates.
Most importantly, the rates in these regions are substantially higher than if we had simply averaged the models.}, language = {en} } @article{ZoellerHolschneiderHainzl2013, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {The Maximum Earthquake Magnitude in a Time Horizon: Theory and Case Studies}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {2A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120120013}, pages = {860 -- 875}, year = {2013}, abstract = {We show how the maximum magnitude within a predefined future time horizon may be estimated from an earthquake catalog within the context of Gutenberg-Richter statistics. The aim is to carry out a rigorous uncertainty assessment, and calculate precise confidence intervals based on an imposed level of confidence $\alpha$. In detail, we present a model for the estimation of the maximum magnitude to occur in a time interval $T_f$ in the future, given a complete earthquake catalog for a time period T in the past and, if available, paleoseismic events. For this goal, we solely assume that earthquakes follow a stationary Poisson process in time with unknown productivity $\Lambda$ and obey the Gutenberg-Richter law in the magnitude domain with unknown b-value. The random variables $\Lambda$ and b are estimated by means of Bayes' theorem with noninformative prior distributions. Results based on synthetic catalogs and on retrospective calculations of historic catalogs from the highly active area of Japan and the low-seismicity but high-risk region of the lower Rhine embayment (LRE) in Germany indicate that the estimated magnitudes are close to the true values. Finally, we discuss whether the techniques can be extended to meet the safety requirements for critical facilities such as nuclear power plants. For this aim, the maximum magnitude for all times has to be considered. In agreement with earlier work, we find that this parameter is not a useful quantity from the viewpoint of statistical inference.}, language = {en} } @article{ZaourarHamoudiHolschneideretal.2013, author = {Zaourar, Naima and Hamoudi, Mohamed and Holschneider, Matthias and Mandea, Mioara}, title = {Fractal dynamics of geomagnetic storms}, series = {Arabian journal of geosciences}, volume = {6}, journal = {Arabian journal of geosciences}, number = {6}, publisher = {Springer}, address = {Heidelberg}, issn = {1866-7511}, doi = {10.1007/s12517-011-0487-0}, pages = {1693 -- 1702}, year = {2013}, abstract = {We explore fluctuations of the horizontal component of the Earth's magnetic field to identify scaling behaviour of the temporal variability in geomagnetic data recorded by the Intermagnet observatories during solar cycle 23 (years 1996 to 2005). In this work, we use the remarkable ability of scaling wavelet exponents to highlight the singularities associated with discontinuities present in the magnetograms obtained at two magnetic observatories for six intense magnetic storms, including the sudden storm commencements of 14 July 2000, 29-31 October and 20-21 November 2003. In the active intervals that occurred during geomagnetic storms, we observe a rapid and unidirectional change in the spectral scaling exponent at the time of storm onset. The corresponding fractal features suggest that the dynamics of the whole time series is similar to that of a fractional Brownian motion.
Our findings point to a relatively sudden change, related to the emergence of persistency in the fractal power exponent fluctuations, that precedes an intense magnetic storm. These first results could be useful in the framework of extreme event prediction studies.}, language = {en} } @article{SchroeterSturmHolschneider2013, author = {Schr{\"o}ter, M-A and Sturm, H. and Holschneider, Matthias}, title = {Phase and amplitude patterns in DySEM mappings of vibrating microstructures}, series = {Nanotechnology}, volume = {24}, journal = {Nanotechnology}, number = {21}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {0957-4484}, doi = {10.1088/0957-4484/24/21/215701}, pages = {10}, year = {2013}, abstract = {We use a dynamic scanning electron microscope (DySEM) to analyze the movement of oscillating micromechanical structures. A dynamic secondary electron (SE) signal is recorded and correlated to the oscillatory excitation of a scanning force microscope (SFM) cantilever by means of lock-in amplifiers. We show how the relative phase of the oscillations modulates the resulting real part and phase pictures of the DySEM mapping. This can be used to obtain information about the underlying oscillatory dynamics. We apply the theory to the case of a cantilever in oscillation, driven at different flexural and torsional resonance modes. This is an extension of a recent work (Schr{\"o}ter et al 2012 Nanotechnology 23 435501), where we reported on a general methodology to distinguish nonlinear features caused by the imaging process from those caused by cantilever motion.}, language = {en} } @article{CotroneiHolschneider2013, author = {Cotronei, Mariantonia and Holschneider, Matthias}, title = {Partial parameterization of orthogonal wavelet matrix filters}, series = {Journal of computational and applied mathematics}, volume = {243}, journal = {Journal of computational and applied mathematics}, number = {4}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0377-0427}, doi = {10.1016/j.cam.2012.11.016}, pages = {113 -- 125}, year = {2013}, abstract = {In this paper we propose a procedure which allows the construction of a large family of FIR $d \times d$ matrix wavelet filters by exploiting the one-to-one correspondence between QMF systems and orthogonal operators which commute with the shifts by two. A characterization of the class of filters of full rank type that can be obtained with such a procedure is given. In particular, we restrict our attention to a special construction based on the representation of SO(2d) in terms of the elements of its Lie algebra. Explicit expressions for the filters in the case d = 2 are given, as a result of a local analysis of the parameterization obtained from perturbing the Haar system.}, language = {en} } @article{BettenbuehlRusconiEngbertetal.2012, author = {Bettenb{\"u}hl, Mario and Rusconi, Marco and Engbert, Ralf and Holschneider, Matthias}, title = {Bayesian selection of Markov Models for symbol sequences: application to microsaccadic eye movements}, series = {PLoS one}, volume = {7}, journal = {PLoS one}, number = {9}, publisher = {PLoS}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0043388}, pages = {10}, year = {2012}, abstract = {Complex biological dynamics often generate sequences of discrete events which can be described as a Markov process. The order of the underlying Markovian stochastic process is fundamental for characterizing statistical dependencies within sequences.
As an example for this class of biological systems, we investigate the Markov order of sequences of microsaccadic eye movements from human observers. We calculate the integrated likelihood of a given sequence for various orders of the Markov process and use this in a Bayesian framework for statistical inference on the Markov order. Our analysis shows that data from most participants are best explained by a first-order Markov process. This is compatible with recent findings of a statistical coupling of subsequent microsaccade orientations. Our method might prove to be useful for a broad class of biological systems.}, language = {en} } @article{HolschneiderNarteauShebalinetal.2012, author = {Holschneider, Matthias and Narteau, C. and Shebalin, P. and Peng, Z. and Schorlemmer, Danijel}, title = {Bayesian analysis of the modified Omori law}, series = {Journal of geophysical research : Solid earth}, volume = {117}, journal = {Journal of geophysical research : Solid earth}, number = {6089}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2011JB009054}, pages = {12}, year = {2012}, abstract = {In order to examine variations in aftershock decay rate, we propose a Bayesian framework to estimate the {K, c, p}-values of the modified Omori law (MOL), $\lambda(t) = K(c + t)^{-p}$. The Bayesian setting allows us not only to produce a point estimator of these three parameters but also to assess their uncertainties and posterior dependencies with respect to the observed aftershock sequences. Using a new parametrization of the MOL, we identify the trade-off between the c and p-value estimates and discuss its dependence on the number of aftershocks. Then, we analyze the influence of the catalog completeness interval $[t_{start}, t_{stop}]$ on the various estimates. To test this Bayesian approach on natural aftershock sequences, we use two independent and non-overlapping aftershock catalogs of the same earthquakes in Japan. Taking into account the posterior uncertainties, we show that both the handpicked (short times) and the instrumental (long times) catalogs predict the same ranges of parameter values. We therefore conclude that the same MOL may be valid over short and long times.}, language = {en} } @article{ShebalinNarteauHolschneider2012, author = {Shebalin, Peter and Narteau, Clement and Holschneider, Matthias}, title = {From alarm-based to rate-based earthquake forecast models}, series = {Bulletin of the Seismological Society of America}, volume = {102}, journal = {Bulletin of the Seismological Society of America}, number = {1}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120110126}, pages = {64 -- 72}, year = {2012}, abstract = {We propose a conversion method from alarm-based to rate-based earthquake forecast models. A differential probability gain g(alarm)(ref) is the absolute value of the local slope of the Molchan trajectory that evaluates the performance of the alarm-based model with respect to the chosen reference model. We consider that this differential probability gain is constant over time. Its value at each point of the testing region depends only on the alarm function value. The rate-based model is the product of the event rate of the reference model at this point multiplied by the corresponding differential probability gain. Thus, we increase or decrease the initial rates of the reference model according to the additional amount of information contained in the alarm-based model.
Here, we apply this method to the Early Aftershock STatistics (EAST) model, an alarm-based model in which early aftershocks are used to identify space-time regions with a higher level of stress and, consequently, a higher seismogenic potential. The resulting rate-based model shows similar performance to the original alarm-based model for all ranges of earthquake magnitude in both retrospective and prospective tests. This conversion method offers the opportunity to perform all the standard evaluation tests of the earthquake testing centers on alarm-based models. In addition, we infer that it can also be used to consecutively combine independent forecast models and, with small modifications, seismic hazard maps with short- and medium-term forecasts.}, language = {en} } @article{WangHainzlZoelleretal.2012, author = {Wang, Lifeng and Hainzl, Sebastian and Z{\"o}ller, Gert and Holschneider, Matthias}, title = {Stress- and aftershock-constrained joint inversions for coseismic and postseismic slip applied to the 2004 M6.0 Parkfield earthquake}, series = {Journal of geophysical research : Solid earth}, volume = {117}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1029/2011JB009017}, pages = {18}, year = {2012}, abstract = {Both aftershocks and geodetically measured postseismic displacements are important markers of the stress relaxation process following large earthquakes. Postseismic displacements can be related to creep-like relaxation in the vicinity of the coseismic rupture by means of inversion methods. However, the results of slip inversions are typically non-unique and subject to large uncertainties. Therefore, we explore the possibility of improving inversions by mechanical constraints. In particular, we take into account the physical understanding that postseismic deformation is stress-driven and occurs in the coseismically stressed zone. We perform joint inversions for coseismic and postseismic slip in a Bayesian framework in the case of the 2004 M6.0 Parkfield earthquake. We perform a number of inversions with different constraints, and calculate their statistical significance. According to information criteria, the best result corresponds to a physically reasonable model constrained by the stress condition (namely, postseismic creep is driven by coseismic stress) and the condition that coseismic slip and large aftershocks are disjoint. This model explains 97\% of the coseismic displacements and 91\% of the postseismic displacements during days 1-5 following the Parkfield event, respectively. It indicates that the major postseismic deformation can be generally explained by a stress relaxation process for the Parkfield case. This result also indicates that the data to constrain the coseismic slip model could be enriched postseismically. For the 2004 Parkfield event, we additionally observe an asymmetric relaxation process on the two sides of the fault, which can be explained by a material contrast ratio across the fault of $\sim$1.15 in seismic velocity.}, language = {en} } @article{SchachtschneiderHolschneiderMandea2012, author = {Schachtschneider, R.
and Holschneider, Matthias and Mandea, M.}, title = {Error distribution in regional modelling of the geomagnetic field}, series = {Geophysical journal international}, volume = {191}, journal = {Geophysical journal international}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2012.05675.x}, pages = {1015 -- 1024}, year = {2012}, abstract = {In this study we analyse the error distribution in regional models of the geomagnetic field. Our main focus is to investigate the distribution of errors when combining two regional patches to obtain a global field from regional ones. To simulate errors in overlapping patches we choose two different data region shapes that resemble that scenario. First, we investigate the errors in elliptical regions, and second, we choose a region obtained from two overlapping circular spherical caps. We conduct a Monte-Carlo simulation using synthetic data to obtain the expected mean errors. For the elliptical regions the results are similar to the ones obtained for circular spherical caps: the maximum error at the boundary decreases towards the centre of the region. A new result emerges as errors at the boundary vary with azimuth, being largest in the major axis direction and minimal in the minor axis direction. Inside the region there is an error decay towards a minimum at the centre at a rate similar to the one in circular regions. In the case of two combined circular regions there is also an error decay from the boundary towards the centre. The minimum error occurs at the centre of the combined regions. The maximum error at the boundary occurs on the line containing the two cap centres, the minimum in the perpendicular direction where the two circular cap boundaries meet. The large errors at the boundary are eliminated by combining regional patches. We propose an algorithm for finding the boundary region that is applicable to irregularly shaped model regions.}, language = {en} } @article{KurtenbachEickerMayerGuerretal.2012, author = {Kurtenbach, E. and Eicker, A. and Mayer-Guerr, T. and Holschneider, Matthias and Hayn, M. and Fuhrmann, M. and Kusche, J.}, title = {Improved daily GRACE gravity field solutions using a Kalman smoother}, series = {Journal of geodynamics}, volume = {59}, journal = {Journal of geodynamics}, number = {3}, publisher = {Elsevier}, address = {Oxford}, issn = {0264-3707}, doi = {10.1016/j.jog.2012.02.006}, pages = {39 -- 48}, year = {2012}, abstract = {Different GRACE data analysis centers provide temporal variations of the Earth's gravity field as monthly, 10-daily or weekly solutions. These temporal mean fields cannot model the variations occurring during the respective time span. The aim of our approach is to extract as much temporal information as possible out of the given GRACE data. Therefore the temporal resolution shall be increased with the goal of deriving daily snapshots. Yet, such an increase in temporal resolution is accompanied by a loss of redundancy and therefore by a reduced accuracy if the daily solutions are calculated individually. The approach presented here therefore introduces spatial and temporal correlations of the expected gravity field signal derived from geophysical models in addition to the daily observations, thus effectively constraining the spatial and temporal evolution of the GRACE solution. The GRACE data processing is then performed within the framework of a Kalman filter and smoother estimation procedure.
The approach is first investigated in a closed-loop simulation scenario and then applied to the original GRACE observations (level-1B data) to calculate daily solutions as part of the gravity field model ITG-Grace2010. Finally, the daily models are compared to vertical GPS station displacements and ocean bottom pressure observations. From these comparisons it can be concluded that, particularly in higher latitudes, the daily solutions contain high-frequency temporal gravity field information and represent an improvement over existing geophysical models.}, language = {en} } @article{HaynPanetDiamentetal.2012, author = {Hayn, Michael and Panet, I. and Diament, M. and Holschneider, Matthias and Mandea, Mioara and Davaille, A.}, title = {Wavelet-based directional analysis of the gravity field: evidence for large-scale undulations}, series = {Geophysical journal international}, volume = {189}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2012.05455.x}, pages = {1430 -- 1456}, year = {2012}, abstract = {In the eighties, the analysis of satellite altimetry data led to the major discovery of gravity lineations in the oceans, with wavelengths between 200 and 1400 km. While the existence of the 200 km scale undulations is widely accepted, undulations at scales larger than 400 km are still a matter of debate. In this paper, we revisit the topic of the large-scale geoid undulations over the oceans in the light of the satellite gravity data provided by the GRACE mission, which are considerably more precise than the altimetry data at wavelengths larger than 400 km. First, we develop a dedicated method of directional Poisson wavelet analysis on the sphere with significance testing, in order to detect and characterize directional structures in geophysical data on the sphere at different spatial scales. This method is particularly well suited for potential field analysis. We validate it on a series of synthetic tests, and then apply it to analyze recent gravity models, as well as a bathymetry data set independent of gravity. Our analysis confirms the existence of gravity undulations at large scale in the oceans, with characteristic scales between 600 and 2000 km. Their direction correlates well with present-day plate motion over the Pacific Ocean, where they are particularly clear, and is associated with a conjugate direction at 1500 km scale. A major finding is that the 2000 km scale geoid undulations dominate and had never before been so clearly observed. This is due to the great precision of GRACE data at those wavelengths. Given the large scale of these undulations, they are most likely related to mantle processes. 
Taking into account observations and models from other geophysical sources, such as seismological tomography, convection and geochemical models, and electrical conductivity in the mantle, we argue that all these inputs indicate a directional fabric of the mantle flows at depth, reflecting how the history of subduction influences the organization of lower mantle upwellings.}, language = {en} } @article{HolschneiderZoellerHainzl2011, author = {Holschneider, Matthias and Z{\"o}ller, Gert and Hainzl, Sebastian}, title = {Estimation of the maximum possible magnitude in the framework of a doubly truncated Gutenberg-Richter model}, series = {Bulletin of the Seismological Society of America}, volume = {101}, journal = {Bulletin of the Seismological Society of America}, number = {4}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120100289}, pages = {1649 -- 1659}, year = {2011}, abstract = {We discuss to what extent a given earthquake catalog and the assumption of a doubly truncated Gutenberg-Richter distribution for the earthquake magnitudes allow for the calculation of confidence intervals for the maximum possible magnitude M. We show that, without further assumptions such as the existence of an upper bound of M, only very limited information may be obtained. In a frequentist formulation, for each confidence level $\alpha$ the confidence interval diverges with finite probability. In a Bayesian formulation, the posterior distribution of the upper magnitude is not normalizable. We conclude that the common approach of deriving confidence intervals from the variance of a point estimator fails. Technically, this problem can be overcome by introducing an upper bound $\tilde{M}$ for the maximum magnitude. Then the Bayesian posterior distribution can be normalized, and its variance decreases with the number of observed events. However, because the posterior depends significantly on the choice of the unknown value of $\tilde{M}$, the resulting confidence intervals are essentially meaningless. The use of an informative prior distribution accounting for prior knowledge of M is also of little use, because the prior is only modified in the case of the occurrence of an extreme event. Our results suggest that the maximum possible magnitude M is better replaced by M(T), the maximum expected magnitude in a given time interval T, for which the calculation of exact confidence intervals becomes straightforward. 
From a physical point of view, numerical models of the earthquake process adjusted to specific fault regions may be a powerful alternative to overcome the shortcomings of purely statistical inference.}, language = {en} } @article{ShinZoellerHolschneideretal.2011, author = {Shin, Seoleun and Z{\"o}ller, Gert and Holschneider, Matthias and Reich, Sebastian}, title = {A multigrid solver for modeling complex interseismic stress fields}, series = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, volume = {37}, journal = {Computers \& geosciences : an international journal devoted to the publication of papers on all aspects of geocomputation and to the distribution of computer programs and test data sets ; an official journal of the International Association for Mathematical Geology}, number = {8}, publisher = {Elsevier}, address = {Oxford}, issn = {0098-3004}, doi = {10.1016/j.cageo.2010.11.011}, pages = {1075 -- 1082}, year = {2011}, abstract = {We develop a multigrid, multiple time stepping scheme to reduce computational efforts for calculating complex stress interactions on a 2D planar strike-slip fault for the simulation of seismicity. The key elements of the multilevel solver are separation of length scale, grid-coarsening, and hierarchy. In this study the complex stress interactions are split into two parts: the part with small contributions is computed on a coarse level, and the strong interactions are computed on a fine level. This partition leads to a significant reduction of the number of computations. The reduction of complexity is further enhanced by combining the multigrid with multiple time stepping. Compared to the original full matrix-vector multiplication, computational efficiency is enhanced by a factor of 10 while retaining reasonable accuracy. The accuracy of the solution and the computational efficiency depend on a given cut-off radius that splits multiplications into the two parts. The multigrid scheme is constructed in such a way that it conserves stress in the entire half-space.}, language = {en} } @article{GaciZaourarBriqueuetal.2011, author = {Gaci, Said and Zaourar, Naima and Briqueu, Louis and Holschneider, Matthias}, title = {Regularity analysis applied to sonic logs data: a case study from KTB borehole site}, series = {Arabian journal of geosciences}, volume = {4}, journal = {Arabian journal of geosciences}, number = {1-2}, publisher = {Springer}, address = {Heidelberg}, issn = {1866-7511}, doi = {10.1007/s12517-010-0129-y}, pages = {221 -- 227}, year = {2011}, abstract = {Borehole logs provide in situ information about the fluctuations of petrophysical properties with depth and thus allow the characterization of crustal heterogeneities. A detailed investigation of these measurements may lead to the extraction of features of the geological medium. In this study, we suggest a regularity analysis based on the continuous wavelet transform to examine sonic log data. The description of the local behavior of the logs at each depth is carried out using the local Hurst exponent estimated by two approaches: the local wavelet approach and the average-local wavelet approach. First, a synthetic log, generated using the random midpoint displacement algorithm, is processed by the regularity analysis. 
The obtained Hurst curves allow the different layers composing the simulated geological model to be distinguished. Next, this analysis is extended to real sonic log data recorded at the Kontinentales Tiefbohrprogramm (KTB) pilot borehole (Continental Deep Drilling Program, Germany). The results show a significant correlation between the estimated Hurst exponents and the lithological discontinuities crossed by the well. Hence, the Hurst exponent can be used as a tool to characterize underground heterogeneities.}, language = {en} } @article{GaciZaourarHamoudietal.2010, author = {Gaci, Said and Zaourar, Naima and Hamoudi, Mehdi and Holschneider, Matthias}, title = {Local regularity analysis of strata heterogeneities from sonic logs}, issn = {1023-5809}, doi = {10.5194/npg-17-455-2010}, year = {2010}, abstract = {Borehole logs provide geological information about the rocks crossed by the wells. Several properties of rocks can be interpreted in terms of lithology, type and quantity of the fluid filling the pores and fractures. Here, the logs are assumed to be nonhomogeneous Brownian motions (nhBms) which are generalized fractional Brownian motions (fBms) indexed by depth-dependent Hurst parameters H(z). Three techniques, the local wavelet approach (LWA), the average-local wavelet approach (ALWA), and the Peltier algorithm (PA), are suggested to estimate the Hurst functions (or the regularity profiles) from the logs. First, two synthetic sonic logs with different parameters, shaped by the successive random additions (SRA) algorithm, are used to demonstrate the potential of the proposed methods. The obtained Hurst functions are close to the theoretical Hurst functions. Besides, the transitions between the modeled layers are marked by discontinuities in the Hurst values. It is also shown that PA yields the best Hurst value estimates. Second, we investigate the multifractional property of sonic log data recorded at two scientific deep boreholes: the pilot hole VB and the ultra-deep main hole HB, drilled for the German Continental Deep Drilling Program (KTB). All the regularity profiles independently obtained for the logs provide a clear correlation with lithology, and from each regularity profile, we derive a similar segmentation in terms of lithological units. The lithological discontinuities (strata boundaries and fault contacts) are located at the local extrema of the Hurst functions. Moreover, the regularity profiles are compared with the KTB estimated porosity logs, showing a significant relation between the local extrema of the Hurst functions and the fluid-filled fractures. The Hurst function may then constitute a tool to characterize underground heterogeneities.}, language = {en} } @article{BaileyBenZionBeckeretal.2010, author = {Bailey, Iain W. and Ben-Zion, Yehuda and Becker, Thorsten W. and Holschneider, Matthias}, title = {Quantifying focal mechanism heterogeneity for fault zones in central and southern California}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2010.04745.x}, year = {2010}, abstract = {We present a statistical analysis of focal mechanism orientations for nine California fault zones with the goal of quantifying variations of fault zone heterogeneity at seismogenic depths. The focal mechanism data are generated from first motion polarities for earthquakes in the time period 1983-2004, magnitude range 0-5, and depth range 0-15 km. Only mechanisms with good-quality solutions are used. 
We define fault zones using 20 km wide rectangles and use summations of normalized potency tensors to describe the distribution of double-couple orientations for each fault zone. Focal mechanism heterogeneity is quantified using two measures computed from the tensors, relating to the scatter in orientations and to the rotational asymmetry, or skewness, of the distribution. We illustrate the use of these quantities by showing relative differences in the focal mechanism heterogeneity characteristics for different fault zones. These differences are shown to relate to properties of the fault zone surface traces such that increased scatter correlates with fault trace complexity and rotational asymmetry correlates with the dominant fault trace azimuth. These correlations indicate a link between the long-term evolution of a fault zone over many earthquake cycles and its seismic behaviour over a 20 yr time period. Analysis of the partitioning of San Jacinto fault zone focal mechanisms into different faulting styles further indicates that heterogeneity is dominantly controlled by structural properties of the fault zone, rather than time- or magnitude-related properties of the seismicity.}, language = {en} } @article{HaynHolschneider2009, author = {Hayn, Michael and Holschneider, Matthias}, title = {Directional spherical multipole wavelets}, issn = {0022-2488}, doi = {10.1063/1.3177198}, year = {2009}, abstract = {We construct a family of admissible analysis-reconstruction pairs of wavelet families on the sphere. The construction is an extension of the isotropic Poisson wavelets. Similar to those, the directional wavelets allow a finite expansion in terms of off-center multipoles. Unlike the isotropic case, the directional wavelets are not a tight frame. However, at small scales, they almost behave like a tight frame. We give an explicit formula for the pseudodifferential operator given by the combined analysis and synthesis with respect to these wavelets. The Euclidean limit is shown to exist and an explicit formula is given. This allows us to quantify the asymptotic angular resolution of the wavelets.}, language = {en} } @article{HolschneiderTeschke2006, author = {Holschneider, Matthias and Teschke, Gerd}, title = {Existence and computation of optimally localized coherent states}, series = {Journal of mathematical physics}, volume = {47}, journal = {Journal of mathematical physics}, number = {3}, publisher = {American Institute of Physics}, address = {Melville}, issn = {0022-2488}, doi = {10.1063/1.2375031}, pages = {211 -- 214}, year = {2006}, abstract = {This paper is concerned with localization properties of coherent states. Instead of classical uncertainty relations we consider "generalized" localization quantities. This is done by introducing measures on the reproducing kernel. In this context we may prove the existence of optimally localized states. Moreover, we provide a numerical scheme for deriving them.}, language = {en} } @article{HolschneiderDialloKuleshetal.2005, author = {Holschneider, Matthias and Diallo, Mamadou Sanou and Kulesh, Michail and Ohrnberger, Matthias and Luck, E. and Scherbaum, Frank}, title = {Characterization of dispersive surface waves using continuous wavelet transforms}, issn = {0956-540X}, year = {2005}, abstract = {In this paper, we propose a method for the characterization of surface waves based on the deformation of the wavelet transform of the analysed signal. 
The phase velocity, the group velocity and the attenuation coefficient are estimated using a model-based approach to determine the propagation operator in the wavelet domain, which depends nonlinearly on a set of unknown parameters. These parameters explicitly define the phase velocity, the group velocity and the attenuation. Under the assumption that the difference between waveforms observed at a pair of stations is solely due to the dispersion characteristics and the intrinsic attenuation of the medium, we then seek the set of unknown parameters of this model. Finding the model parameters thus becomes an optimization problem, which is solved by minimizing an appropriately defined cost function. We show that, unlike time-frequency methods that exploit only the square modulus of the transform, we can achieve a complete characterization of surface waves in a dispersive and attenuating medium. Using both synthetic examples and experimental data, we also show that it is in principle possible to separate different modes in both the time domain and the frequency domain.}, language = {en} }