@article{SchmelzbachScherbaumTronickeetal.2011, author = {Schmelzbach, C. and Scherbaum, Frank and Tronicke, Jens and Dietrich, P.}, title = {Bayesian frequency-domain blind deconvolution of ground-penetrating radar data}, series = {Journal of applied geophysics}, volume = {75}, journal = {Journal of applied geophysics}, number = {4}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0926-9851}, doi = {10.1016/j.jappgeo.2011.08.010}, pages = {615 -- 630}, year = {2011}, abstract = {Enhancing the resolution and accuracy of surface ground-penetrating radar (GPR) reflection data by inverse filtering to recover a zero-phased band-limited reflectivity image requires a deconvolution technique that takes the mixed-phase character of the embedded wavelet into account. In contrast, standard stochastic deconvolution techniques assume that the wavelet is minimum phase and, hence, often meet with limited success when applied to GPR data. We present a new general-purpose blind deconvolution algorithm for mixed-phase wavelet estimation and deconvolution that (1) uses the parametrization of a mixed-phase wavelet as the convolution of the wavelet's minimum-phase equivalent with a dispersive all-pass filter, (2) includes prior information about the wavelet to be estimated in a Bayesian framework, and (3) relies on the assumption of a sparse reflectivity. Solving the normal equations using the data autocorrelation function provides an inverse filter that optimally removes the minimum-phase equivalent of the wavelet from the data, which leaves traces with a balanced amplitude spectrum but distorted phase. To compensate for the remaining phase errors, we invert in the frequency domain for an all-pass filter, thereby taking advantage of the fact that the action of the all-pass filter is exclusively contained in its phase spectrum. A key element of our algorithm and a novelty in blind deconvolution is the inclusion of prior information that allows resolving ambiguities in polarity and timing that cannot be resolved using the sparseness measure alone. We employ a global inversion approach for non-linear optimization to find the all-pass filter phase values for each signal frequency. We tested the robustness and reliability of our algorithm on synthetic data with different wavelets, 1-D reflectivity models of different complexity, varying levels of added noise, and different types of prior information. When applied to realistic synthetic 2-D data and 2-D field data, we obtain images with increased temporal resolution compared to the results of standard processing.}, language = {en} } @article{BlaserOhrnbergerKruegeretal.2012, author = {Blaser, Lilian and Ohrnberger, Matthias and Kr{\"u}ger, Frank and Scherbaum, Frank}, title = {Probabilistic tsunami threat assessment of 10 recent earthquakes offshore Sumatra}, series = {Geophysical journal international}, volume = {188}, journal = {Geophysical journal international}, number = {3}, publisher = {Wiley-Blackwell}, address = {Malden}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2011.05324.x}, pages = {1273 -- 1284}, year = {2012}, abstract = {Tsunami early warning (TEW) is a challenging task, as a decision has to be made within a few minutes on the basis of incomplete and error-prone data. Deterministic warning systems have difficulties in integrating and quantifying the intrinsic uncertainties. In contrast, probabilistic approaches provide a framework that handles uncertainties in a natural way.
Recently, we have proposed a method using Bayesian networks (BNs) that takes into account the uncertainties of seismic source parameter estimates in TEW. In this follow-up study, the method is applied to 10 recent large earthquakes offshore Sumatra and tested for its performance. We have evaluated both the general model performance, given the best knowledge we have today about the source parameters of the 10 events, and the corresponding response to seismic source information evaluated in real time. We find that the resulting site-specific warning-level probabilities represent the available tsunami wave measurements and observations well. Difficulties occur in the real-time tsunami assessment if the moment magnitude is severely over- or underestimated. In general, the probabilistic analysis reveals a considerable range of uncertainties in near-field TEW. By quantifying the uncertainties, the BN analysis provides important additional information to a decision maker in a warning centre to deal with the complexity of TEW and to reason under uncertainty.}, language = {en} }
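Editorial note on the entry above (a generic rendering for orientation only, not the specific network structure used by Blaser et al., 2012): a Bayesian-network assessment of this kind can be sketched as marginalizing over the unknown source parameters $s$ given real-time seismic data $d$ to obtain the probability of warning level $w$ at a site,
$$P(w \mid d) = \sum_{s} P(w \mid s)\, P(s \mid d),$$
where $P(s \mid d)$ carries the (possibly large) uncertainty of the source-parameter estimates.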
@article{HiemerRoesslerScherbaum2012, author = {Hiemer, Stefan and R{\"o}ßler, Dirk and Scherbaum, Frank}, title = {Monitoring the West Bohemian earthquake swarm in 2008/2009 by a temporary small-aperture seismic array}, series = {Journal of seismology}, volume = {16}, journal = {Journal of seismology}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {1383-4649}, doi = {10.1007/s10950-011-9256-5}, pages = {169 -- 182}, year = {2012}, abstract = {The most recent intense earthquake swarm in West Bohemia lasted from 6 October 2008 to January 2009. Starting 12 days after the onset, the University of Potsdam monitored the swarm with a temporary small-aperture seismic array at 10 km epicentral distance. The purpose of the installation was a complete monitoring of the swarm, including micro-earthquakes ($M_L < 0$). We identify earthquakes using a conventional short-term average/long-term average trigger combined with sliding-window frequency-wavenumber and polarisation analyses. The resulting earthquake catalogue consists of 14,530 earthquakes between 19 October 2008 and 18 March 2009 with magnitudes in the range $-1.2 \leq M_L \leq 2.7$. The small-aperture seismic array substantially lowers the detection threshold to about $M_c = -0.4$, compared to the regional networks operating in West Bohemia ($M_c > 0.0$). In the course of this work, the main temporal features (frequency-magnitude distribution, propagation of back azimuth and horizontal slowness, occurrence rate of aftershock sequences and interevent-time distribution) of the 2008/2009 earthquake swarm are presented and discussed. Temporal changes of the coefficient of variation (based on interevent times) suggest that the earthquake activity of the 2008/2009 swarm terminated by 12 January 2009. During the main phase of the studied swarm period after 19 October, the b value of the Gutenberg-Richter relation decreases from 1.2 to 0.8. This trend is also reflected in the power-law behavior of the seismic moment release. The corresponding total seismic moment release of $1.02 \times 10^{17}$ Nm is equivalent to $M_{L,\mathrm{max}} = 5.4$.}, language = {en} }
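Editorial gloss on the b value quoted above (not part of the original abstract): it refers to the Gutenberg-Richter relation
$$\log_{10} N(M) = a - b\,M,$$
where $N(M)$ is the number of events with magnitude at least $M$; a decrease of $b$ from 1.2 to 0.8 therefore indicates a growing proportion of larger events as the swarm progressed.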
@article{DelavaudScherbaumKuehnetal.2012, author = {Delavaud, Elise and Scherbaum, Frank and K{\"u}hn, Nicolas and Allen, Trevor}, title = {Testing the global applicability of ground-motion prediction equations for active shallow crustal regions}, series = {Bulletin of the Seismological Society of America}, volume = {102}, journal = {Bulletin of the Seismological Society of America}, number = {2}, publisher = {Seismological Society of America}, address = {El Cerrito}, issn = {0037-1106}, doi = {10.1785/0120110113}, pages = {707 -- 721}, year = {2012}, abstract = {Large research initiatives such as the Global Earthquake Model (GEM) or the Seismic HAzard haRmonization in Europe (SHARE) projects concentrate a great collaborative effort on defining a global standard for seismic hazard estimations. In this context, there is an increasing need to identify ground-motion prediction equations (GMPEs) that can be applied at both global and regional scales. With the increasing amount of strong-motion records now available worldwide, observational data provide a valuable resource to tackle this question. Using the global dataset of Allen and Wald (2009), we evaluate the ability of 11 GMPEs to predict ground motion in different active shallow crustal regions worldwide. Adopting the approach of Scherbaum et al. (2009), we rank these GMPEs according to their likelihood of having generated the data. In particular, we estimate how strongly the data support or reject the models with respect to the state of noninformativeness defined by a uniform weighting. Such rankings derived from this particular global dataset enable us to explore the potential of GMPEs to predict ground motions in their host region and also in other regions, depending on the magnitude and distance considered. In the ranking process, we particularly focus on the influence of the distribution of the testing dataset compared with the GMPE's native dataset. One of the results of this study is that some nonindigenous models present a high degree of consistency with the data from a target region. Two models in particular demonstrated wide geographic applicability with respect to the testing dataset: the models of Akkar and Bommer (2010) and Chiou et al. (2010).}, language = {en} } @article{DelavaudCottonAkkaretal.2012, author = {Delavaud, Elise and Cotton, Fabrice and Akkar, Sinan and Scherbaum, Frank and Danciu, Laurentiu and Beauval, Celine and Drouet, Stephane and Douglas, John and Basili, Roberto and Sandikkaya, M. Abdullah and Segou, Margaret and Faccioli, Ezio and Theodoulidis, Nikos}, title = {Toward a ground-motion logic tree for probabilistic seismic hazard assessment in Europe}, series = {Journal of seismology}, volume = {16}, journal = {Journal of seismology}, number = {3}, publisher = {Springer}, address = {Dordrecht}, issn = {1383-4649}, doi = {10.1007/s10950-012-9281-z}, pages = {451 -- 473}, year = {2012}, abstract = {The Seismic Hazard Harmonization in Europe (SHARE) project, which began in June 2009, aims at establishing new standards for probabilistic seismic hazard assessment in the Euro-Mediterranean region. In this context, a logic tree for ground-motion prediction in Europe has been constructed. Ground-motion prediction equations (GMPEs) and weights have been determined so that the logic tree captures epistemic uncertainty in ground-motion prediction for six different tectonic regimes in Europe. Here we present the strategy that we adopted to build such a logic tree. This strategy has the particularity of combining two complementary and independent approaches: expert judgment and data testing. A panel of six experts was asked to weight pre-selected GMPEs, while the ability of these GMPEs to predict available data was evaluated with the method of Scherbaum et al. (Bull Seismol Soc Am 99:3234-3247, 2009). Results of both approaches were combined to select the smallest set of GMPEs that captures the uncertainty in ground-motion prediction in Europe. For stable continental regions, two models, both from eastern North America, have been selected for shields, and three GMPEs from active shallow crustal regions have been added for continental crust. For subduction zones, four models, all non-European, have been chosen. Finally, for active shallow crustal regions, we selected four models, each of them from a different host region, but only two of them were kept for long periods. In most cases, agreement was also reached on the weights. In cases of divergence, a sensitivity analysis of the influence of the weights on the seismic hazard was conducted, showing that once the GMPEs have been selected, the associated set of weights has a smaller influence on the hazard.}, language = {en} } @article{HinzenReamerScherbaum2013, author = {Hinzen, Klaus-G. and Reamer, Sharon K. and Scherbaum, Frank}, title = {Slow Fourier transform}, series = {Seismological research letters}, volume = {84}, journal = {Seismological research letters}, number = {2}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0895-0695}, doi = {10.1785/0220120139}, pages = {251 -- 257}, year = {2013}, language = {en} } @article{RungeScherbaumCurtisetal.2013, author = {Runge, Antonia K. and Scherbaum, Frank and Curtis, Andrew and Riggelsen, Carsten}, title = {An interactive tool for the elicitation of subjective probabilities in probabilistic seismic-hazard analysis}, series = {Bulletin of the Seismological Society of America}, volume = {103}, journal = {Bulletin of the Seismological Society of America}, number = {5}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120130026}, pages = {2862 -- 2874}, year = {2013}, abstract = {In probabilistic seismic-hazard analysis, epistemic uncertainties are commonly treated within a logic-tree framework in which the branch weights express the degree of belief of an expert in a set of models. For the calculation of the distribution of hazard curves, these branch weights represent subjective probabilities. A major challenge for experts is to provide logically consistent weight estimates (in the sense of Kolmogorov's axioms), to be aware of the multitude of heuristics, and to minimize the biases that affect human judgment under uncertainty. We introduce a platform-independent, interactive program enabling us to quantify, elicit, and transfer expert knowledge into a set of subjective probabilities by applying experimental design theory, following the approach of Curtis and Wood (2004). Instead of determining the set of probabilities for all models in a single step, the computer-driven elicitation process is performed as a sequence of evaluations of relative weights for small subsets of models.
From these, the probabilities for the whole model set are determined as the solution of an optimization problem. The result of this process is a set of logically consistent probabilities, together with a measure of confidence determined from the amount of conflicting information provided by the expert during the relative weighting process. We experiment with different scenarios simulating likely expert behaviors in the context of knowledge elicitation and show the impact this has on the results. The overall aim is to provide a smart elicitation technique, and our findings serve as a guide for practical applications.}, language = {en} } @article{DahmKuehnOhrnbergeretal.2010, author = {Dahm, Torsten and Kuehn, Daniela and Ohrnberger, Matthias and Kroeger, Jens and Wiederhold, Helga and Reuther, Claus-Dieter and Dehghani, Ali and Scherbaum, Frank}, title = {Combining geophysical data sets to study the dynamics of shallow evaporites in urban environments: application to Hamburg, Germany}, issn = {0956-540X}, doi = {10.1111/j.1365-246X.2010.04521.x}, year = {2010}, abstract = {Shallowly situated evaporites in built-up areas are of relevance for urban and cultural development and hydrological regulation. The hazard of sinkholes, subrosion depressions and gypsum karst is often difficult to evaluate and may quickly change with anthropogenic influence. The geophysical exploration of evaporites in metropolitan areas is often not feasible with active industrial techniques. We collect and combine different passive geophysical data, such as microgravity, ambient vibrations, deformation and hydrological information, to study the roof morphology of shallow evaporites beneath Hamburg, Northern Germany. The application of a novel gravity inversion technique leads to a 3-D depth model of the salt diapir under study. We compare the gravity-based depth model to pseudo-depths from H/V measurements and depth estimates from small-scale seismological array data. While the general range and trend of the diapir roof is consistent, a few anomalous regions are identified where H/V pseudo-depths indicate shallower structures not observed in gravity or array data. These are interpreted as shallow residual caprock floaters and zones of increased porosity. The shallow salt structure clearly correlates with a relative subsidence on the order of 2 mm yr$^{-1}$. The combined interpretation of roof morphology, yearly subsidence rates, chemical analyses of groundwater and of hydraulic head in aquifers indicates that the salt diapir beneath Hamburg is subject to significant ongoing dissolution that may affect subrosion depressions, sinkhole distribution and land usage. The combined analysis of passive geophysical data may serve as an example for studies of shallow evaporites beneath other urban areas.}, language = {en} }
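Editorial note on the "H/V pseudo-depths" mentioned above (a standard approximation, not necessarily the paper's exact conversion): for a soft layer of thickness $h$ and average shear-wave velocity $v_s$ over stiffer bedrock, the fundamental resonance frequency follows the quarter-wavelength relation
$$f_0 \approx \frac{v_s}{4h} \quad\Rightarrow\quad h \approx \frac{v_s}{4 f_0},$$
so a measured $f_0$ can be converted into an interface pseudo-depth once $v_s$ is assumed.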
@article{BommerDouglasScherbaumetal.2010, author = {Bommer, Julian J. and Douglas, John and Scherbaum, Frank and Cotton, Fabrice and Bungum, Hilmar and F{\"a}h, Donat}, title = {On the selection of ground-motion prediction equations for seismic hazard analysis}, issn = {0895-0695}, doi = {10.1785/gssrl.81.5.783}, year = {2010}, language = {en} } @article{BlaserKruegerOhrnbergeretal.2010, author = {Blaser, Lilian and Kr{\"u}ger, Frank and Ohrnberger, Matthias and Scherbaum, Frank}, title = {Scaling relations of earthquake source parameter estimates with special focus on subduction environment}, issn = {0037-1106}, doi = {10.1785/0120100111}, year = {2010}, abstract = {Earthquake rupture length and width estimates are in demand in many seismological applications. Earthquake magnitude estimates are often available, whereas estimates of the geometrical extent of the rupture fault are mostly lacking. Therefore, scaling relations are needed to derive length and width from magnitude. Most frequently used are the relationships of Wells and Coppersmith (1994), derived on the basis of a large dataset including all slip types with the exception of thrust-faulting events in subduction environments. However, there are many applications dealing with earthquakes in subduction zones because of their high seismic and tsunamigenic potential, and there are no well-established scaling relations between moment magnitude and length/width for subduction events. Within this study, we compiled a large database of source parameter estimates of 283 earthquakes. All focal mechanisms are represented, with special focus on (large) subduction zone events. Scaling relations were fitted with linear least-squares as well as orthogonal regression and analyzed regarding the difference between continental and subduction zone/oceanic relationships. Additionally, the effect of technical progress in earthquake parameter estimation on scaling relations was tested, as well as the influence of different fault mechanisms. For a given moment magnitude we found shorter but wider rupture areas of thrust events compared to Wells and Coppersmith (1994). The thrust-event relationships for pure continental and pure subduction zone rupture areas were found to be almost identical. The scaling relations differ significantly between slip types. The exclusion of events prior to 1964, when the World-Wide Standardized Seismograph Network was established, had a remarkable effect on strike-slip scaling relations: the data do not show any saturation of rupture width of strike-slip earthquakes. Generally, rupture area seems to scale with mean slip independently of magnitude. The aspect ratio L/W, however, depends on moment and differs for each slip type.}, language = {en} } @article{AlAtikAbrahamsonBommeretal.2010, author = {Al Atik, Linda and Abrahamson, Norman A. and Bommer, Julian J. and Scherbaum, Frank and Cotton, Fabrice and Kuehn, Nicolas}, title = {The variability of ground-motion prediction models and its components}, issn = {0895-0695}, doi = {10.1785/gssrl.81.5.794}, year = {2010}, language = {en} } @article{ScherbaumDelavaudRiggelsen2009, author = {Scherbaum, Frank and Delavaud, Elise and Riggelsen, Carsten}, title = {Model selection in seismic hazard analysis: an information-theoretic perspective}, issn = {0037-1106}, doi = {10.1785/0120080347}, year = {2009}, abstract = {Although the methodological framework of probabilistic seismic hazard analysis is well established, the selection of models to predict the ground motion at the sites of interest remains a major challenge.
Information theory provides a powerful theoretical framework that can guide this selection process in a consistent way. From an information-theoretic perspective, the appropriateness of models can be expressed in terms of their relative information loss (Kullback-Leibler distance) and hence in physically meaningful units (bits). In contrast to hypothesis testing, information-theoretic model selection does not require ad hoc decisions regarding significance levels, nor does it require the models to be mutually exclusive and collectively exhaustive. The key ingredient, the Kullback-Leibler distance, can be estimated from the statistical expectation of log-likelihoods of observations for the models under consideration. In the present study, data-driven ground-motion model selection based on Kullback-Leibler-distance differences is illustrated for a set of simulated observations of response spectra and macroseismic intensities. Information theory allows for a unified treatment of both quantities. The application of Kullback-Leibler-distance-based model selection to real data, using the model-generating dataset for the Abrahamson and Silva (1997) ground-motion model, demonstrates the superior performance of the information-theoretic perspective in comparison to earlier attempts at data-driven model selection (e.g., Scherbaum et al., 2004).}, language = {en} }
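Editorial paraphrase of the key ingredient above (not a substitute for the paper's derivation): for a candidate model $g$ and observations $x_1, \dots, x_N$, the negative average sample log-likelihood
$$\mathrm{LLH}(g) = -\frac{1}{N} \sum_{i=1}^{N} \log_2 g(x_i)$$
estimates, up to an additive constant given by the entropy of the data-generating distribution, the Kullback-Leibler distance between that distribution and $g$, in bits; smaller values indicate less information loss.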
@article{KuehnScherbaumRiggelsen2009, author = {K{\"u}hn, Nicolas M. and Scherbaum, Frank and Riggelsen, Carsten}, title = {Deriving empirical ground-motion models: balancing data constraints and physical assumptions to optimize prediction capability}, issn = {0037-1106}, doi = {10.1785/0120080136}, year = {2009}, abstract = {Empirical ground-motion models used in seismic hazard analysis are commonly derived by regression of observed ground motions against a chosen set of predictor variables. Commonly, the model-building process is based on residual analysis and/or expert knowledge and/or opinion, while the quality of the model is assessed by the goodness-of-fit to the data. Such an approach, however, bears no immediate relation to the predictive power of the model and, with increasing model complexity, is increasingly susceptible to the danger of overfitting. Here, a different, primarily data-driven method for the development of ground-motion models is proposed that makes use of the notion of generalization error to counteract the problem of overfitting. Generalization error directly estimates the average prediction error on data not used for the model generation and, thus, is a good criterion to assess the predictive capabilities of a model. The approach taken here makes only a few a priori assumptions. At first, peak ground acceleration and response spectrum values are modeled by flexible, nonphysical functions (polynomials) of the predictor variables. The inclusion of a particular predictor and the order of the polynomials are based on minimizing generalization error. The approach is illustrated for the Next Generation Attenuation (NGA) dataset. The resulting model is rather complex, comprising 48 parameters, but has considerably lower generalization error than functional forms commonly used in ground-motion models. The model parameters have no physical meaning, but a visual interpretation is possible and can reveal relevant characteristics of the data, for example, the Moho bounce in the distance scaling. In a second step, the regression model is approximated by an equivalent stochastic model, making it physically interpretable. The resulting resolvable stochastic model parameters are comparable to published models for western North America. In general, for large datasets, generalization-error minimization provides a viable method for the development of empirical ground-motion models.}, language = {en} } @article{KoehlerOhrnbergerScherbaum2009, author = {Koehler, Andreas and Ohrnberger, Matthias and Scherbaum, Frank}, title = {Unsupervised feature selection and general pattern discovery using Self-Organizing Maps for gaining insights into the nature of seismic wavefields}, issn = {0098-3004}, doi = {10.1016/j.cageo.2009.02.004}, year = {2009}, abstract = {This study presents an unsupervised feature selection and learning approach for the discovery and intuitive imaging of significant temporal patterns in seismic single-station or network recordings. For this purpose, the data are parametrized by real-valued feature vectors for short time windows using standard analysis tools for seismic data, such as frequency-wavenumber, polarization, and spectral analysis. We use Self-Organizing Maps (SOMs) for a data-driven feature selection, visualization and clustering procedure, which is particularly suitable for high-dimensional data sets. Our feature selection method is based on significance testing using the Wald-Wolfowitz runs test for individual features and on correlation hunting with SOMs in feature subsets. Using synthetics composed of Rayleigh and Love waves as well as real-world data, we show the robustness and the improved discriminative power of this approach compared to feature subsets manually selected from individual wavefield parametrization methods. Furthermore, the capability of the clustering and visualization techniques to investigate the discrimination of wave phases is shown by means of synthetic waveforms and regional earthquake recordings.}, language = {en} } @article{FaenzaHainzlScherbaum2009, author = {Faenza, Licia and Hainzl, Sebastian and Scherbaum, Frank}, title = {Statistical analysis of the Central-Europe seismicity}, issn = {0040-1951}, doi = {10.1016/j.tecto.2008.04.030}, year = {2009}, abstract = {The aim of this paper is to characterize the spatio-temporal distribution of Central European seismicity. Specifically, by using a non-parametric statistical approach, the proportional hazard model, leading to an empirical estimation of the hazard function, we provide some constraints on the temporal behavior of earthquake generation mechanisms. The results indicate that the most conspicuous characteristic of $M_w \geq 4.0$ earthquakes is a temporal clustering lasting a couple of years. This suggests that the probability of occurrence increases immediately after a previous event. After a few years, the process becomes almost time-independent. Furthermore, we investigate the cluster properties of the seismicity of Central Europe by comparing the obtained results with those of synthetic catalogs generated by the epidemic-type aftershock sequence (ETAS) model, which has previously been applied successfully to short-term clustering. Our results indicate that the ETAS model is not well suited to describe the seismicity as a whole, while it is able to capture the features of the short-term behaviour.
Remarkably, similar results have previously been found for Italy using a higher magnitude threshold.}, language = {en} } @article{DelavaudScherbaumKuehnetal.2009, author = {Delavaud, Elise and Scherbaum, Frank and Kuehn, Nicolas and Riggelsen, Carsten}, title = {Information-theoretic selection of ground-motion prediction equations for seismic hazard analysis: an applicability study using Californian data}, issn = {0037-1106}, doi = {10.1785/0120090055}, year = {2009}, abstract = {Considering the increasing number and complexity of ground-motion prediction equations available for seismic hazard assessment, there is a definite need for an efficient, quantitative, and robust method to select and rank these models for a particular region of interest. In a recent article, Scherbaum et al. (2009) suggested an information-theoretic approach for this purpose that overcomes several shortcomings of earlier attempts at using data-driven ground-motion prediction equation selection procedures. The results of their theoretical study provide evidence that, in addition to observed response spectra, macroseismic intensity data might be useful for model selection and ranking. We present here an applicability study for this approach using response spectra and macroseismic intensities from eight Californian earthquakes. A total of 17 ground-motion prediction equations from different regions for response spectra, combined with the equation of Atkinson and Kaka (2007) for macroseismic intensities, are tested for their relative performance. The resulting data-driven rankings show that the models that best estimate ground motion in California are, as one would expect, Californian and western U.S. models, while some European models also perform fairly well. Moreover, the model performance appears to be strongly dependent on both distance and frequency. The relative information of intensity versus response spectral data is also explored. The strong correlation we obtain between intensity-based rankings and spectral-based ones demonstrates the great potential of macroseismic intensity data for model selection in the context of seismic hazard assessment.}, language = {en} } @article{Scherbaum1997, author = {Scherbaum, Frank}, title = {Zero Phase FIR filters in digital seismic acquisition systems: blessing or curse}, year = {1997}, language = {en} } @article{ScherbaumBouin1997, author = {Scherbaum, Frank and Bouin, M. P.}, title = {FIR filter effects and nucleation phases}, year = {1997}, language = {en} } @article{ScherbaumKruegerWeber1997, author = {Scherbaum, Frank and Kr{\"u}ger, Frank and Weber, Michael H.}, title = {Double beam imaging: mapping lower mantle heterogeneities using combinations of source and receiver arrays}, year = {1997}, language = {en} } @article{RietbrockScherbaum1998, author = {Rietbrock, Andreas and Scherbaum, Frank}, title = {The GIANT analysis system (Graphical Interactive Aftershock Network Toolbox)}, year = {1998}, language = {en} }
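Editorial appendix: for readers who want to experiment with the information-theoretic model ranking that recurs in several entries above (Scherbaum et al., 2009; Delavaud et al., 2009, 2012), the following minimal Python sketch shows the core computation. It is an illustration under simplifying assumptions (normal ground-motion model in log units, synthetic data); all function names and numbers are invented for the example and are not taken from the cited papers.

import numpy as np
from scipy.stats import norm

def llh_score(log_observations, mean_prediction, sigma):
    """Negative average log2-likelihood (LLH) of the observations under a
    normal model in log units; smaller scores indicate better data support."""
    log_pdf = norm.logpdf(log_observations, loc=mean_prediction, scale=sigma)
    return -np.mean(log_pdf) / np.log(2.0)  # convert nats to bits

def llh_weights(scores):
    """Data-driven model weights proportional to 2**(-LLH), normalized to 1."""
    scores = np.asarray(scores, dtype=float)
    w = 2.0 ** (-scores)
    return w / w.sum()

# Toy example: two hypothetical GMPEs scored against synthetic observations.
rng = np.random.default_rng(0)
true_mean, true_sigma = -1.0, 0.6              # e.g., log10 PGA
data = rng.normal(true_mean, true_sigma, 200)

score_a = llh_score(data, mean_prediction=-1.0, sigma=0.6)  # well-matched model
score_b = llh_score(data, mean_prediction=-0.4, sigma=0.6)  # biased model
print("LLH (bits):", score_a, score_b)
print("weights:", llh_weights([score_a, score_b]))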