@article{StreichBeckenRitter2011, author = {Streich, Rita and Becken, Michael and Ritter, Oliver}, title = {2.5D controlled-source EM modeling with general 3D source geometries}, series = {Geophysics}, volume = {76}, journal = {Geophysics}, number = {6}, publisher = {Society of Exploration Geophysicists}, address = {Tulsa}, issn = {0016-8033}, doi = {10.1190/GEO2011-0111.1}, pages = {F387 -- F393}, year = {2011}, abstract = {Most 2.5D controlled-source electromagnetic (CSEM) modeling algorithms presented to date explicitly consider only sources that are point dipoles oriented parallel or perpendicular to the direction of constant conductivity. This makes simulations of complex source geometries expensive, requiring separate evaluations of many point dipole fields, and thus limits the practical applicability of such schemes for simulating and interpreting field data. We present a novel 2.5D CSEM modeling scheme that overcomes this limitation and permits efficient simulations of sources with general shape and orientation by evaluating fields for the entire source at once. We accommodate general sources by using a secondary field approach, in which primary fields are computed for the general source and a 1D background conductivity model. To carry out the required Fourier transforms between space and wavenumber domain using the same fast cosine and sine transform filters as in conventional algorithms, we split the primary and secondary fields into their symmetric and antisymmetric parts. For complex 3D source geometries, this approach is significantly more efficient than previous 2.5D algorithms. Our finite-difference algorithm also includes novel approaches for divergence correction at low frequencies and EM field interpolation across conductivity discontinuities. We describe the modeling scheme and demonstrate its accuracy and efficiency by comparisons of 2.5D-simulated data with 1D and 3D results.}, language = {en} } @article{WiederkehrBousquetZiemannetal.2011, author = {Wiederkehr, Michael and Bousquet, Romain and Ziemann, Martin Andreas and Berger, Alfons and Schmid, Stefan M.}, title = {3-D assessment of peak-metamorphic conditions by Raman spectroscopy of carbonaceous material - an example from the margin of the Lepontine dome (Swiss Central Alps)}, series = {International journal of earth sciences}, volume = {100}, journal = {International journal of earth sciences}, number = {5}, publisher = {Springer}, address = {New York}, issn = {1437-3254}, doi = {10.1007/s00531-010-0622-2}, pages = {1029 -- 1063}, year = {2011}, abstract = {This study monitors regional changes in the crystallinity of carbonaceous matter (CM) by applying Micro-Raman spectroscopy to a total of 214 metasediment samples (largely so-called B{\"u}ndnerschiefer) dominantly metamorphosed under blueschist- to amphibolite-facies conditions. They were collected within the northeastern margin of the Lepontine dome and easterly adjacent areas of the Swiss Central Alps. Three-dimensional mapping of isotemperature contours in map and profile views shows that the isotemperature contours associated with the Miocene Barrow-type Lepontine metamorphic event cut across refolded nappe contacts, both along and across strike within the northeastern margin of the Lepontine dome and adjacent areas.
Further to the northeast, the isotemperature contours reflect temperatures reached during the Late Eocene subduction-related blueschist-facies event and/or during subsequent near-isothermal decompression; these contours appear folded by younger, large-scale post-nappe-stacking folds. A substantial jump in the recorded maximum temperatures across the tectonic contact between the frontal Adula nappe complex and surrounding metasediments indicates that this contact accommodated differential tectonic movement of the Adula nappe with respect to the enveloping B{\"u}ndnerschiefer after maximum temperatures were reached within the northern Adula nappe, i.e. after Late Eocene time.}, language = {en} } @article{LappeKallmeyer2011, author = {Lappe, Michael and Kallmeyer, Jens}, title = {A cell extraction method for oily sediments}, series = {Frontiers in microbiology}, volume = {2}, journal = {Frontiers in microbiology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-302X}, doi = {10.3389/fmicb.2011.00233}, pages = {11}, year = {2011}, abstract = {Hydrocarbons can be found in many different habitats and represent an important carbon source for microbes. As fossil fuels, they are also an important economic resource and through natural seepage or accidental release they can be major pollutants. DNA-specific stains and molecular probes bind to hydrocarbons, causing massive background fluorescence, thereby hampering cell enumeration. The cell extraction procedure of Kallmeyer et al. (2008) separates the cells from the sediment matrix. In principle, this technique can also be used to separate cells from oily sediments, but it was not originally optimized for this application. Here we present a modified extraction method in which the hydrocarbons are removed prior to cell extraction. Due to the reduced background fluorescence the microscopic image becomes clearer, making cell identification and enumeration much easier. Consequently, the resulting cell counts from oily samples treated according to our new protocol are significantly higher than those treated according to Kallmeyer et al. (2008). We tested different amounts of a variety of solvents for their ability to remove hydrocarbons and found that n-hexane and, in samples containing more mature oils, methanol delivered the best results. However, as solvents also tend to lyse cells, it was important to find the optimum solvent to sample ratio, at which hydrocarbon extraction is maximized and cell lysis minimized. A volumetric ratio of 1:2-1:5 between a formalin-fixed sediment slurry and solvent delivered the highest cell counts. Extraction efficiency was around 30-50\% and was checked on both oily samples spiked with known amounts of E. coli cells and oil-free samples amended with fresh and biodegraded oil. The method provided reproducible results on samples containing very different kinds of oils with regard to their degree of biodegradation.
For strongly biodegraded oil MeOH turned out to be the most appropriate solvent, whereas for less biodegraded samples n-hexane delivered the best results.}, language = {en} } @article{MaerkerPelacaniSchroeder2011, author = {Maerker, Michael and Pelacani, Samanta and Schroeder, Boris}, title = {A functional entity approach to predict soil erosion processes in a small Plio-Pleistocene Mediterranean catchment in Northern Chianti, Italy}, series = {Geomorphology : an international journal on pure and applied geomorphology}, volume = {125}, journal = {Geomorphology : an international journal on pure and applied geomorphology}, number = {4}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0169-555X}, doi = {10.1016/j.geomorph.2010.10.022}, pages = {530 -- 540}, year = {2011}, abstract = {In this paper we evaluate different methods to predict soil erosion processes. We derived different layers of predictor variables for the study area in the Northern Chianti, Italy, describing the soil-lithologic complex, land use, and topographic characteristics. For a subcatchment of the Orme River, we mapped erosion processes by interpreting aerial photographs and field observations. These were classified as erosional response units (ERU), i.e. spatial areas of homogeneous erosion processes. The ERU were used as the response variable in the soil erosion modelling process. We applied two models: (i) bootstrap aggregation (Random Forest: RF) and (ii) stochastic gradient boosting (TreeNet: TN) to predict the potential spatial distribution of erosion processes for the entire Orme River catchment. The models are statistically evaluated using training data and a set of performance parameters such as the area under the receiver operating characteristic curve (AUC), Cohen's Kappa, and pseudo R2. Variable importance and response curves provide further insight into controlling factors of erosion. Both models provided good performance in terms of classification and calibration; however, TN outperformed RF. Similar classes such as active and inactive landslides can be discriminated and well interpreted by considering response curves and relative variable importance. The spatial distribution of the predicted erosion susceptibilities generally follows topographic constraints and is similar for both models. Hence, the model-based delineation of ERU on the basis of soil and terrain information is a valuable tool in geomorphology; it provides insights into factors controlling erosion processes and may allow the extrapolation and prediction of erosion processes in unsurveyed areas.}, language = {en} } @phdthesis{Zhang2011, author = {Zhang, Zhuodong}, title = {A regional scale study of wind erosion in the Xilingele grassland based on computational fluid dynamics}, address = {Potsdam}, pages = {143 S.}, year = {2011}, language = {en} } @article{BallatoUbaLandgrafetal.2011, author = {Ballato, Paolo and Uba, Cornelius Eji and Landgraf, Angela and Strecker, Manfred and Sudo, Masafumi and Stockli, Daniel F. and Friedrich, Anke M. and Tabatabaei, Saeid H.}, title = {Arabia-Eurasia continental collision - insights from late Tertiary foreland-basin evolution in the Alborz Mountains, northern Iran}, series = {Geological Society of America bulletin}, volume = {123}, journal = {Geological Society of America bulletin}, number = {1-2}, publisher = {Geological Society of America}, address = {Boulder}, issn = {0016-7606}, doi = {10.1130/B30091.1}, pages = {106 -- 131}, year = {2011}, abstract = {A poorly understood lag time of 15-20 m.y.
exists between the initial Arabia-Eurasia continental collision in late Eocene to early Oligocene time and the acceleration of tectonic and sedimentary processes across the collision zone in the early to late Miocene. The late Eocene to Miocene-Pliocene clastic and shallow-marine sedimentary rocks of the Kond, Eyvanekey, and Semnan Basins in the Alborz Mountains (northern Iran) offer the possibility to track the evolution of this orogen in the framework of collision processes. A transition from volcaniclastic submarine deposits to shallow-marine evaporites and terrestrial sediments occurred shortly after 36 Ma in association with reversals in sediment provenance, strata tilting, and erosional unroofing. These events followed the termination of subduction arc magmatism and marked a changeover from an extensional to a contractional regime in response to initiation of continental collision with the subduction of stretched Arabian lithosphere. This early stage of collision produced topographic relief associated with shallow foreland basins, suggesting that shortening and tectonic loading occurred at low rates. Starting from the early Miocene (17.5 Ma), flexural subsidence in response to foreland basin initiation occurred. Fast sediment accumulation rates and erosional unroofing trends point to acceleration of shortening by the early Miocene. We suggest that the lag time between the initiation of continental collision (36 Ma) and the acceleration of regional deformation (20-17.5 Ma) reflects a two-stage collision process, involving the "soft" collision of stretched lithosphere at first and "hard" collision following the arrival of unstretched Arabian continental lithosphere in the subduction zone.}, language = {en} } @article{BenmehdiMakaravaBenhamidoucheetal.2011, author = {Benmehdi, Sabah and Makarava, Natallia and Benhamidouche, N. and Holschneider, Matthias}, title = {Bayesian estimation of the self-similarity exponent of the Nile River fluctuation}, series = {Nonlinear processes in geophysics}, volume = {18}, journal = {Nonlinear processes in geophysics}, number = {3}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1023-5809}, doi = {10.5194/npg-18-441-2011}, pages = {441 -- 446}, year = {2011}, abstract = {The aim of this paper is to estimate the Hurst parameter of Fractional Gaussian Noise (FGN) using Bayesian inference. We propose an estimation technique that takes into account the full correlation structure of this process. Instead of using the integrated time series and then applying an estimator for its Hurst exponent, we propose to use the noise signal directly. As an application we analyze the time series of the Nile River, where we find a posterior distribution which is compatible with previous findings. In addition, our technique provides natural error bars for the Hurst exponent.}, language = {en} } @article{SchmelzbachScherbaumTronickeetal.2011, author = {Schmelzbach, C.
and Scherbaum, Frank and Tronicke, Jens and Dietrich, P.}, title = {Bayesian frequency-domain blind deconvolution of ground-penetrating radar data}, series = {Journal of applied geophysics}, volume = {75}, journal = {Journal of applied geophysics}, number = {4}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-9851}, doi = {10.1016/j.jappgeo.2011.08.010}, pages = {615 -- 630}, year = {2011}, abstract = {Enhancing the resolution and accuracy of surface ground-penetrating radar (GPR) reflection data by inverse filtering to recover a zero-phased band-limited reflectivity image requires a deconvolution technique that takes the mixed-phase character of the embedded wavelet into account. In contrast, standard stochastic deconvolution techniques assume that the wavelet is minimum phase and, hence, often meet with limited success when applied to GPR data. We present a new general-purpose blind deconvolution algorithm for mixed-phase wavelet estimation and deconvolution that (1) uses the parametrization of a mixed-phase wavelet as the convolution of the wavelet's minimum-phase equivalent with a dispersive all-pass filter, (2) includes prior information about the wavelet to be estimated in a Bayesian framework, and (3) relies on the assumption of a sparse reflectivity. Solving the normal equations using the data autocorrelation function provides an inverse filter that optimally removes the minimum-phase equivalent of the wavelet from the data, which leaves traces with a balanced amplitude spectrum but distorted phase. To compensate for the remaining phase errors, we invert in the frequency domain for an all-pass filter thereby taking advantage of the fact that the action of the all-pass filter is exclusively contained in its phase spectrum. A key element of our algorithm and a novelty in blind deconvolution is the inclusion of prior information that allows resolving ambiguities in polarity and timing that cannot be resolved using the sparseness measure alone. We employ a global inversion approach for non-linear optimization to find the all-pass filter phase values for each signal frequency. We tested the robustness and reliability of our algorithm on synthetic data with different wavelets, 1-D reflectivity models of different complexity, varying levels of added noise, and different types of prior information. When applied to realistic synthetic 2-D data and 2-D field data, we obtain images with increased temporal resolution compared to the results of standard processing.}, language = {en} } @phdthesis{Blaser2011, author = {Blaser, Lilian}, title = {Bayesian networks for tsunami early warning}, address = {Potsdam}, pages = {127 S.}, year = {2011}, language = {en} } @article{HeistermannKneis2011, author = {Heistermann, Maik and Kneis, David}, title = {Benchmarking quantitative precipitation estimation by conceptual rainfall-runoff modeling}, series = {Water resources research}, volume = {47}, journal = {Water resources research}, number = {23}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2010WR009153}, pages = {23}, year = {2011}, abstract = {Hydrologic modelers often need to know which method of quantitative precipitation estimation (QPE) is best suited for a particular catchment. Traditionally, QPE methods are verified and benchmarked against independent rain gauge observations. However, the lack of spatial representativeness limits the value of such a procedure. 
Alternatively, one could drive a hydrological model with different QPE products and choose the one which best reproduces observed runoff. Unfortunately, the calibration of conceptual model parameters might conceal actual differences between the QPEs. To avoid such effects, we abandoned the idea of determining optimum parameter sets for all QPEs being compared. Instead, we carry out a large number of runoff simulations, confronting each QPE with a common set of random parameters. By evaluating the goodness-of-fit of all simulations, we obtain information on whether the quality of competing QPE methods is significantly different. This knowledge is inferred exactly at the scale of interest: the catchment scale. We use synthetic data to investigate the ability of this procedure to distinguish a truly superior QPE from an inferior one. We find that the procedure is prone to failure in the case of linear systems. However, we show evidence that in realistic (nonlinear) settings, the method can provide useful results even in the presence of moderate errors in model structure and streamflow observations. In a real-world case study on a small mountainous catchment, we demonstrate the ability of the verification procedure to reveal additional insights as compared to a conventional cross-validation approach.}, language = {en} }