@article{ZaourarHamoudiMandeaetal.2013, author = {Zaourar, N. and Hamoudi, M. and Mandea, M. and Balasis, G. and Holschneider, Matthias}, title = {Wavelet-based multiscale analysis of geomagnetic disturbance}, series = {EARTH PLANETS AND SPACE}, volume = {65}, journal = {EARTH PLANETS AND SPACE}, number = {12}, publisher = {TERRA SCIENTIFIC PUBL CO}, address = {TOKYO}, issn = {1343-8832}, doi = {10.5047/eps.2013.05.001}, pages = {1525 -- 1540}, year = {2013}, abstract = {The dynamics of external contributions to the geomagnetic field is investigated by applying time-frequency methods to magnetic observatory data. Fractal models and multiscale analysis make it possible to extract maximum quantitative information about the short-term dynamics of geomagnetic field activity. The stochastic properties of the horizontal component of the transient external field are determined by searching for scaling laws in the power spectra. The spectrum fits a power law with a scaling exponent beta, a typical characteristic of self-affine time-series. Local variations in the power-law exponent are investigated by applying wavelet analysis to the same time-series. These analyses highlight the self-affine properties of geomagnetic perturbations and their persistence. Moreover, they show that the main phases of sudden storm disturbances are uniquely characterized by a scaling exponent varying between 1 and 3, possibly related to the energy contained in the external field. These new findings suggest the existence of a long-range dependence, the scaling exponent being an efficient indicator of geomagnetic activity and singularity detection. These results show that by using magnetogram regularity to reflect magnetospheric activity, a theoretical analysis of the external geomagnetic field based on local power-law exponents is possible.}, language = {en} } @article{HaynPanetDiamentetal.2012, author = {Hayn, Michael and Panet, I. and Diament, M. and Holschneider, Matthias and Mandea, Mioara and Davaille, A.}, title = {Wavelet-based directional analysis of the gravity field: evidence for large-scale undulations}, series = {Geophysical journal international}, volume = {189}, journal = {Geophysical journal international}, number = {3}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2012.05455.x}, pages = {1430 -- 1456}, year = {2012}, abstract = {In the 1980s, the analysis of satellite altimetry data led to the major discovery of gravity lineations in the oceans, with wavelengths between 200 and 1400 km. While the existence of the 200 km scale undulations is widely accepted, undulations at scales larger than 400 km are still a matter of debate. In this paper, we revisit the topic of the large-scale geoid undulations over the oceans in the light of the satellite gravity data provided by the GRACE mission, which are considerably more precise than the altimetry data at wavelengths larger than 400 km. First, we develop a dedicated method of directional Poisson wavelet analysis on the sphere with significance testing, in order to detect and characterize directional structures in geophysical data on the sphere at different spatial scales. This method is particularly well suited for potential field analysis. We validate it on a series of synthetic tests, and then apply it to analyze recent gravity models, as well as a bathymetry data set independent of gravity.
Our analysis confirms the existence of large-scale gravity undulations in the oceans, with characteristic scales between 600 and 2000 km. Their direction correlates well with present-day plate motion over the Pacific Ocean, where they are particularly clear, and are associated with a conjugate direction at the 1500 km scale. A major finding is that the 2000 km scale geoid undulations dominate and have never before been observed so clearly. This is due to the high precision of GRACE data at those wavelengths. Given the large scale of these undulations, they are most likely related to mantle processes. Taking into account observations and models from other geophysical sources, such as seismological tomography, convection and geochemical models, and electrical conductivity in the mantle, we suggest that all these inputs indicate a directional fabric of the mantle flows at depth, reflecting how the history of subduction influences the organization of lower-mantle upwellings.}, language = {en} } @article{PanetKuroishiHolschneider2011, author = {Panet, Isabelle and Kuroishi, Yuki and Holschneider, Matthias}, title = {Wavelet modelling of the gravity field by domain decomposition methods: an example over Japan}, series = {Geophysical journal international}, volume = {184}, journal = {Geophysical journal international}, number = {1}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2010.04840.x}, pages = {203 -- 219}, year = {2011}, abstract = {With the advent of satellite gravity, large gravity data sets of unprecedented quality at low and medium resolution have become available. For local, high-resolution field modelling, they need to be combined with surface gravity data. Such models are then used for various applications, from the study of the Earth's interior to the determination of oceanic currents. Here we show how to realize such a combination in a flexible way using spherical wavelets and applying a domain decomposition approach. This iterative method, based on the Schwarz algorithms, allows a large problem to be split into smaller ones and avoids the calculation of the entire normal system, which may be huge if high resolution is sought over wide areas. A subdomain is defined as the harmonic space spanned by a subset of the wavelet family. Based on the localization properties of the wavelets in space and frequency, we define hierarchical subdomains of wavelets at different scales. On each scale, blocks of subdomains are defined by using a tailored spatial splitting of the area. The data weighting and regularization are iteratively adjusted for the subdomains, which makes it possible to handle heterogeneity in the data quality or the gravity variations. Different levels of approximation of the subdomain normals are also introduced, corresponding to building local averages of the data at different resolution levels. We first provide the theoretical background on domain decomposition methods. Then, we validate the method with synthetic data, considering two kinds of noise: white noise and coloured noise. We then apply the method to data over Japan, where we combine a satellite-based geopotential model, EIGEN-GL04S, and a local gravity model from a combination of land and marine gravity data and an altimetry-derived marine gravity model.
A hybrid spherical harmonics/wavelet model of the geoid is obtained at about 15 km resolution and a corrector grid for the surface model is derived.}, language = {en} } @article{ChambodutPanetMandeaetal.2005, author = {Chambodut, Aude and Panet, I. and Mandea, Mioara and Diament, M. and Holschneider, Matthias and Jamet, O.}, title = {Wavelet frames: an alternative to spherical harmonic representation of potential fields}, issn = {0956-540X}, year = {2005}, abstract = {Potential fields are classically represented on the sphere using spherical harmonics. However, this decomposition leads to numerical difficulties when the data to be modelled are irregularly distributed or cover a regional zone. To overcome this drawback, we develop a new representation of the magnetic and the gravity fields based on wavelet frames. In this paper, we first describe how to build wavelet frames on the sphere. The chosen frames are based on the Poisson multipole wavelets, which are of special interest for geophysical modelling, since their scaling parameter is linked to the multipole depth (Holschneider et al.). The implementation of wavelet frames results from a discretization of the continuous wavelet transform in space and scale. We also build different frames using two kinds of spherical meshes and various scale sequences. We then validate the mathematical method through simple fits of scalar functions on the sphere, named 'scalar models'. Moreover, we propose magnetic and gravity models, referred to as 'vectorial models', taking into account geophysical constraints. We then discuss the representation of the Earth's magnetic and gravity fields from data regularly or irregularly distributed. Comparisons of the obtained wavelet models with the initial spherical harmonic models point out the advantages of wavelet modelling when the magnetic or gravity data used are sparsely distributed or cover only a very local zone.}, language = {en} } @article{KuleshDialloHolschneider2005, author = {Kulesh, Michail A. and Diallo, Mamadou Sanou and Holschneider, Matthias}, title = {Wavelet analysis of ellipticity, dispersion, and dissipation properties of Rayleigh waves}, issn = {1063-7710}, year = {2005}, abstract = {This paper is devoted to the digital processing of multicomponent seismograms using wavelet analysis. The goal of this processing is to identify Rayleigh surface elastic waves and determine their properties. A new method for calculating the ellipticity parameters of a wave in the form of a time-frequency spectrum is proposed, which offers wide possibilities for filtering seismic signals in order to suppress or extract the Rayleigh components. A model of dispersion and dissipation of elliptic waves written in terms of wavelet spectra of complex (two-component) signals is also proposed. The model is used to formulate a nonlinear minimization problem that allows for a high-accuracy calculation of the group and phase velocities and the attenuation factor for a propagating elliptic Rayleigh wave. All methods considered in the paper are illustrated with the use of test signals.}, language = {en} } @article{RochaVasseurHaynetal.2011, author = {Rocha, Marcia R. and Vasseur, David A.
and Hayn, Michael and Holschneider, Matthias and Gaedke, Ursula}, title = {Variability patterns differ between standing stock and process rates}, series = {Oikos}, volume = {120}, journal = {Oikos}, number = {1}, publisher = {Wiley-Blackwell}, address = {Malden}, issn = {0030-1299}, doi = {10.1111/j.1600-0706.2010.18786.x}, pages = {17 -- 25}, year = {2011}, abstract = {Standing stocks are typically easier to measure than process rates such as production. Hence, stocks are often used as indicators of ecosystem functions, although the latter are generally more strongly related to rates than to stocks. The regulation of stocks and rates, and thus their variability over time, may differ, as stocks constitute the net result of production and losses. Based on long-term, high-frequency measurements in a large, deep lake, we explore the variability patterns in primary and bacterial production and relate them to those of the corresponding standing stocks, i.e. chlorophyll concentration, phytoplankton and bacterial biomass. We employ different methods (coefficient of variation, spline fitting and spectral analysis) which complement each other in assessing the variability present in the plankton data at different temporal scales. In phytoplankton, we found that the overall variability of primary production is dominated by fluctuations at low frequencies, such as the annual cycle, whereas in stocks, and chlorophyll in particular, higher frequencies contribute substantially to the overall variance. This suggests that using standing stocks instead of rate measures leads to an under- or overestimation of food shortage for consumers during distinct periods of the year. The range of annual variation in bacterial production is 8 times greater than that of biomass, showing that the variability of bacterial activity (e.g. oxygen consumption, remineralisation) would be underestimated if biomass is used. The P/B ratios were variable and, although clear trends are present in both bacteria and phytoplankton, no systematic relationship between stock and rate measures was found for the two groups. Hence, standing stock and process rate measures exhibit different variability patterns, and care is needed when interpreting the mechanisms and implications of the variability encountered.}, language = {en} } @article{ProkhorovFoersterHeetal.2014, author = {Prokhorov, Boris E. and Foerster, M. and He, M. and Namgaladze, Alexander A. and Holschneider, Matthias}, title = {Using MFACE as input in the UAM to specify the MIT dynamics}, series = {Journal of geophysical research: Space physics}, volume = {119}, journal = {Journal of geophysical research: Space physics}, number = {8}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9380}, doi = {10.1002/2014JA019981}, pages = {11}, year = {2014}, abstract = {The magnetosphere-ionosphere-thermosphere (MIT) dynamic system depends significantly on the highly variable solar wind conditions, in particular on changes in the strength and orientation of the interplanetary magnetic field (IMF). The solar wind and IMF interactions with the magnetosphere drive the MIT system via the magnetospheric field-aligned currents (FACs). Global modeling helps us to understand the physical background of this complex system. In the present study, we test the recently developed high-resolution empirical model of field-aligned currents MFACE (a high-resolution Model of Field-Aligned Currents through Empirical orthogonal functions analysis).
These FAC distributions were used as input to the time-dependent, fully self-consistent global Upper Atmosphere Model (UAM) for different seasons and various solar wind and IMF conditions. The modeling results for neutral mass density and thermospheric wind are directly compared with the CHAMP satellite measurements. In addition, we perform comparisons with the global empirical models: the thermospheric wind model (HWM07) and the atmospheric density model (Naval Research Laboratory Mass Spectrometer and Incoherent Scatter Extended 2000). The theoretical model shows good agreement with the satellite observations and improved behavior compared with the empirical models at high latitudes. Using the MFACE model as an input parameter of the UAM, we obtain a realistic distribution of the upper atmosphere parameters for the Northern and Southern Hemispheres during stable IMF orientation as well as during dynamic situations. This variant of the UAM can therefore be used for modeling the MIT system and for space weather prediction.}, language = {en} } @article{ZollerHolschneiderBenZion2005, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Ben-Zion, Yehuda}, title = {The role of heterogeneities as a tuning parameter of earthquake dynamics}, issn = {0033-4553}, year = {2005}, abstract = {We investigate the influence of spatial heterogeneities on various aspects of brittle failure and seismicity in a model of a large strike-slip fault. The model dynamics is governed by realistic boundary conditions consisting of constant velocity motion of regions around the fault, static/kinetic friction laws, creep with depth-dependent coefficients, and 3-D elastic stress transfer. The dynamic rupture is approximated on a continuous time scale using a finite stress propagation velocity (``quasidynamic model''). The model produces a ``brittle-ductile'' transition at a depth of about 12.5 km, realistic hypocenter distributions, and other features of seismicity compatible with observations. Previous work suggested that the range of size scales in the distribution of strength-stress heterogeneities acts as a tuning parameter of the dynamics. Here we test this hypothesis by performing a systematic parameter-space study with different forms of heterogeneities. In particular, we analyze spatial heterogeneities that can be tuned by a single parameter in two distributions: (1) high stress drop barriers in near-vertical directions and (2) spatial heterogeneities with fractal properties and variable fractal dimension. The results indicate that the first form of heterogeneities provides an effective means of tuning the behavior while the second does not. In relatively homogeneous cases, the fault self-organizes into large-scale patches, and big events are associated with inward failure of individual patches and sequential failures of different patches. The frequency-size event statistics in such cases are compatible with the characteristic earthquake distribution, and large events are quasi-periodic in time. In strongly heterogeneous or near-critical cases, the rupture histories are highly discontinuous and consist of complex migration patterns of slip on the fault.
In such cases, the frequency-size and temporal statistics approximately follow power-law relations.}, language = {en} } @article{ZoellerHolschneider2016, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Maximum Possible and the Maximum Expected Earthquake Magnitude for Production-Induced Earthquakes at the Gas Field in Groningen, The Netherlands}, series = {Bulletin of the Seismological Society of America}, volume = {106}, journal = {Bulletin of the Seismological Society of America}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120160220}, pages = {2917 -- 2921}, year = {2016}, abstract = {The Groningen gas field serves as a natural laboratory for production-induced earthquakes, because no earthquakes were observed before the beginning of gas production. Increasing gas production rates resulted in growing earthquake activity and eventually in the occurrence of the 2012 $M_w$ 3.6 Huizinge earthquake. At least since this event, a detailed seismic hazard and risk assessment, including estimation of the maximum earthquake magnitude, is considered necessary for deciding on future gas production. In this short note, we first apply state-of-the-art methods of mathematical statistics to derive confidence intervals for the maximum possible earthquake magnitude $m_{max}$. Second, we calculate the maximum expected magnitude $M_T$ for the period between 2016 and 2024 for three assumed gas-production scenarios. Using broadly accepted physical assumptions and a 90\% confidence level, we suggest a value of $m_{max} = 4.4$, whereas $M_T$ varies between 3.9 and 4.3, depending on the production scenario.}, language = {en} } @article{ZoellerHolschneiderHainzl2013, author = {Z{\"o}ller, Gert and Holschneider, Matthias and Hainzl, Sebastian}, title = {The Maximum Earthquake Magnitude in a Time Horizon: Theory and Case Studies}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {2A}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120120013}, pages = {860 -- 875}, year = {2013}, abstract = {We show how the maximum magnitude within a predefined future time horizon may be estimated from an earthquake catalog within the context of Gutenberg-Richter statistics. The aim is to carry out a rigorous uncertainty assessment and calculate precise confidence intervals based on an imposed level of confidence $\alpha$. In detail, we present a model for the estimation of the maximum magnitude to occur in a time interval $T_f$ in the future, given a complete earthquake catalog for a time period $T$ in the past and, if available, paleoseismic events. For this goal, we assume only that earthquakes follow a stationary Poisson process in time with unknown productivity $\Lambda$ and obey the Gutenberg-Richter law in the magnitude domain with unknown b-value. The random variables $\Lambda$ and b are estimated by means of Bayes' theorem with noninformative prior distributions. Results based on synthetic catalogs and on retrospective calculations for historic catalogs from the highly active area of Japan and the low-seismicity but high-risk Lower Rhine Embayment (LRE) in Germany indicate that the estimated magnitudes are close to the true values. Finally, we discuss whether the techniques can be extended to meet the safety requirements for critical facilities such as nuclear power plants.
For this purpose, the maximum magnitude for all times has to be considered. In agreement with earlier work, we find that this parameter is not a useful quantity from the viewpoint of statistical inference.}, language = {en} }