@phdthesis{Hannemann2016, author = {Hannemann, Katrin}, title = {Seismological investigation of the oceanic crust und upper mantle using an ocean bottom station array in the vicinity of the Gloria fault (easter mid Atlantic)}, school = {Universit{\"a}t Potsdam}, pages = {199}, year = {2016}, language = {en} } @article{HannemannKruegerDahmetal.2016, author = {Hannemann, Katrin and Kr{\"u}ger, Frank and Dahm, Torsten and Lange, Dietrich}, title = {Oceanic lithospheric S-wave velocities from the analysis of P-wave polarization at the ocean floor}, series = {Geophysical journal international}, volume = {207}, journal = {Geophysical journal international}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0956-540X}, doi = {10.1093/gji/ggw342}, pages = {1796 -- 1817}, year = {2016}, abstract = {Our knowledge of the absolute S-wave velocities of the oceanic lithosphere is mainly based on global surface wave tomography, local active seismic or compliance measurements using oceanic infragravity waves. The results of tomography give a rather smooth picture of the actual S-wave velocity structure and local measurements have limitations regarding the range of elastic parameters or the geometry of the measurement. Here, we use the P-wave polarization (apparent P-wave incidence angle) of teleseismic events to investigate the S-wave velocity structure of the oceanic crust and the upper tens of kilometres of the mantle beneath single stations. In this study, we present an up to our knowledge new relation of the apparent P-wave incidence angle at the ocean bottom dependent on the half-space S-wave velocity. We analyse the angle in different period ranges at ocean bottom stations (OBSs) to derive apparent S-wave velocity profiles. These profiles are dependent on the S-wave velocity as well as on the thickness of the layers in the subsurface. Consequently, their interpretation results in a set of equally valid models. 
We analyse the apparent P-wave incidence angles of an OBS data set which was collected in the Eastern Mid Atlantic. We are able to determine reasonable S-wave-velocity-depth models by a three-step quantitative modelling after a manual data quality control, although layer resonance sometimes influences the estimated apparent S-wave velocities. The apparent S-wave velocity profiles are well explained by an oceanic PREM model in which the upper part is replaced by four layers consisting of a water column, a sediment, a crust and a layer representing the uppermost mantle. The obtained sediment has a thickness between 0.3 and 0.9 km with S-wave velocities between 0.7 and 1.4 km s$^{-1}$. The estimated total crustal thickness varies between 4 and 10 km with S-wave velocities between 3.5 and 4.3 km s$^{-1}$. We find a slight increase of the total crustal thickness from $\sim$5 to $\sim$8 km towards the South in the direction of a major plate boundary, the Gloria Fault. The observed crustal thickening can be related with the known dominant compression in the vicinity of the fault. Furthermore, the resulting mantle S-wave velocities decrease from values around 5.5 to 4.5 km s$^{-1}$ towards the fault. This decrease is probably caused by serpentinization and indicates that the oceanic transform fault affects a broad region in the uppermost mantle. Conclusively, the presented method is useful for the estimation of the local S-wave velocity structure beneath ocean bottom seismic stations. 
It is easy to implement and consists of two main steps: (1) measurement of apparent P-wave incidence angles in different period ranges for real and synthetic data, and (2) comparison of the determined apparent S-wave velocities for real and synthetic data to estimate S-wave velocity-depth models.}, language = {en} } @phdthesis{JaraMunoz2016, author = {Jara Mu{\~n}oz, Julius}, title = {Quantifying forearc deformation patterns using coastal geomorphic markers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102652}, school = {Universit{\"a}t Potsdam}, pages = {XXV, 213}, year = {2016}, abstract = {Rapidly uplifting coastlines are frequently associated with convergent tectonic boundaries, like subduction zones, which are repeatedly breached by giant megathrust earthquakes. The coastal relief along tectonically active realms is shaped by the effect of sea-level variations and heterogeneous patterns of permanent tectonic deformation, which are accumulated through several cycles of megathrust earthquakes. However, the correlation between earthquake deformation patterns and the sustained long-term segmentation of forearcs, particularly in Chile, remains poorly understood. Furthermore, the methods used to estimate permanent deformation from geomorphic markers, like marine terraces, have remained qualitative and are based on unrepeatable methods. This contrasts with the increasing resolution of digital elevation models, such as Light Detection and Ranging (LiDAR) and high-resolution bathymetric surveys. Throughout this thesis I study permanent deformation in a holistic manner: from the methods to assess deformation rates, to the processes involved in its accumulation. My research focuses particularly on two aspects: Developing methodologies to assess permanent deformation using marine terraces, and comparing permanent deformation with seismic cycle deformation patterns under different spatial scales along the M8.8 Maule earthquake (2010) rupture zone. 
Two methods are developed to determine deformation rates from wave-built and wave-cut terraces respectively. I selected an archetypal example of a wave-built terrace at Santa Maria Island studying its stratigraphy and recognizing sequences of reoccupation events tied with eleven radiocarbon sample ages ($^{14}$C ages). I developed a method to link patterns of reoccupation with sea-level proxies by iterating relative sea level curves for a range of uplift rates. I find the best fit between relative sea-level and the stratigraphic patterns for an uplift rate of 1.5 $\pm$ 0.3 m/ka. A Graphical User Interface named TerraceM\textregistered{} was developed in Matlab\textregistered{}. This novel software tool determines shoreline angles in wave-cut terraces under different geomorphic scenarios. To validate the methods, I select test sites in areas of available high-resolution LiDAR topography along the Maule earthquake rupture zone and in California, USA. The software allows determining the 3D location of the shoreline angle, which is a proxy for the estimation of permanent deformation rates. The method is based on linear interpolations to define the paleo platform and cliff on swath profiles. The shoreline angle is then located by intersecting these interpolations. The accuracy and precision of TerraceM\textregistered{} was tested by comparing its results with previous assessments, and through an experiment with students in a computer lab setting at the University of Potsdam. I combined the methods developed to analyze wave-built and wave-cut terraces to assess regional patterns of permanent deformation along the (2010) Maule earthquake rupture. Wave-built terraces are tied using 12 Infra Red Stimulated luminescence ages (IRSL ages) and shoreline angles in wave-cut terraces are estimated from 170 aligned swath profiles. The comparison of coseismic slip, interseismic coupling, and permanent deformation, leads to three areas of high permanent uplift, terrace warping, and sharp fault offsets. 
These three areas correlate with regions of high slip and low coupling, as well as with the spatial limit of at least eight historical megathrust ruptures (M8-9.5). I propose that the zones of upwarping at Arauco and Topocalma reflect changes in frictional properties of the megathrust, which result in discrete boundaries for the propagation of mega earthquakes. To explore the application of geomorphic markers and quantitative morphology in offshore areas I performed a local study of patterns of permanent deformation inferred from hitherto unrecognized drowned shorelines at the Arauco Bay, at the southern part of the (2010) Maule earthquake rupture zone. A multidisciplinary approach, including morphometry, sedimentology, paleontology, 3D morphoscopy, and a landscape Evolution Model is used to recognize, map, and assess local rates and patterns of permanent deformation in submarine environments. Permanent deformation patterns are then reproduced using elastic models to assess deformation rates of an active submarine splay fault defined as Santa Maria Fault System. The best fit suggests a reverse structure with a slip rate of 3.7 m/ka for the last 30 ka. The register of land level changes during the earthquake cycle at Santa Maria Island suggests that most of the deformation may be accrued through splay fault reactivation during mega earthquakes, like the (2010) Maule event. Considering a recurrence time of 150 to 200 years, as determined from historical and geological observations, slip between 0.3 and 0.7 m per event would be required to account for the 3.7 m/ka millennial slip rate. However, if the SMFS slips only every $\sim$1000 years, representing a few megathrust earthquakes, then a slip of $\sim$3.5 m per event would be required to account for the long-term rate. Such an event would be equivalent to a magnitude $\sim$6.7 earthquake capable of generating a local tsunami. 
The results of this thesis provide novel and fundamental information regarding the amount of permanent deformation accrued in the crust, and the mechanisms responsible for this accumulation at millennial time-scales along the M8.8 Maule earthquake (2010) rupture zone. Furthermore, the results of this thesis highlight the application of quantitative geomorphology and the use of repeatable methods to determine permanent deformation, improve the accuracy of marine terrace assessments, and estimates of vertical deformation rates in tectonically active coastal areas. This is vital information for adequate coastal-hazard assessments and to anticipate realistic earthquake and tsunami scenarios.}, language = {en} } @article{MerzVietDungNguyenVorogushyn2016, author = {Merz, Bruno and Nguyen, Viet Dung and Vorogushyn, Sergiy}, title = {Temporal clustering of floods in Germany: Do flood-rich and flood-poor periods exist?}, series = {Journal of hydrology}, volume = {541}, journal = {Journal of hydrology}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0022-1694}, doi = {10.1016/j.jhydrol.2016.07.041}, pages = {824 -- 838}, year = {2016}, abstract = {The repeated occurrence of exceptional floods within a few years, such as the Rhine floods in 1993 and 1995 and the Elbe and Danube floods in 2002 and 2013, suggests that floods in Central Europe may be organized in flood-rich and flood-poor periods. This hypothesis is studied by testing the significance of temporal clustering in flood occurrence (peak-over-threshold) time series for 68 catchments across Germany for the period 1932-2005. To assess the robustness of the results, different methods are used: Firstly, the index of dispersion, which quantifies the departure from a homogeneous Poisson process, is investigated. Further, the time-variation of the flood occurrence rate is derived by non-parametric kernel implementation and the significance of clustering is evaluated via parametric and non-parametric tests. 
Although the methods give consistent overall results, the specific results differ considerably. Hence, we recommend applying different methods when investigating flood clustering. For flood estimation and risk management, it is of relevance to understand whether clustering changes with flood severity and time scale. To this end, clustering is assessed for different thresholds and time scales. It is found that the majority of catchments show temporal clustering at the 5\% significance level for low thresholds and time scales of one to a few years. However, clustering decreases substantially with increasing threshold and time scale. We hypothesize that flood clustering in Germany is mainly caused by catchment memory effects along with intra- to inter-annual climate variability, and that decadal climate variability plays a minor role. (C) 2016 Elsevier B.V. All rights reserved.}, language = {en} } @article{MurawskiBuergerVorogushynetal.2016, author = {Murawski, Aline and B{\"u}rger, Gerd and Vorogushyn, Sergiy and Merz, Bruno}, title = {Can local climate variability be explained by weather patterns? A multi-station evaluation for the Rhine basin}, series = {Hydrology and earth system sciences : HESS}, volume = {20}, journal = {Hydrology and earth system sciences : HESS}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1027-5606}, doi = {10.5194/hess-20-4283-2016}, pages = {4283 -- 4306}, year = {2016}, abstract = {To understand past flood changes in the Rhine catchment and in particular the role of anthropogenic climate change in extreme flows, an attribution study relying on a proper GCM (general circulation model) downscaling is needed. A downscaling based on conditioning a stochastic weather generator on weather patterns is a promising approach. This approach assumes a strong link between weather patterns and local climate, and sufficient GCM skill in reproducing weather pattern climatology. 
These presuppositions are unprecedentedly evaluated here using 111 years of daily climate data from 490 stations in the Rhine basin and comprehensively testing the number of classification parameters and GCM weather pattern characteristics. A classification based on a combination of mean sea level pressure, temperature, and humidity from the ERA20C reanalysis of atmospheric fields over central Europe with 40 weather types was found to be the most appropriate for stratifying six local climate variables. The corresponding skill is quite diverse though, ranging from good for radiation to poor for precipitation. Especially for the latter it was apparent that pressure fields alone cannot sufficiently stratify local variability. To test the skill of the latest generation of GCMs from the CMIP5 ensemble in reproducing the frequency, seasonality, and persistence of the derived weather patterns, output from 15 GCMs is evaluated. Most GCMs are able to capture these characteristics well, but some models showed consistent deviations in all three evaluation criteria and should be excluded from further attribution analysis.}, language = {en} } @article{WangBiskabornRamischetal.2016, author = {Wang, Rong and Biskaborn, Boris and Ramisch, Arne and Ren, Jian and Zhang, Yongzhan and Gersonde, Rainer and Diekmann, Bernhard}, title = {Modern modes of provenance and dispersal of terrigenous sediments in the North Pacific and Bering Sea: implications and perspectives for palaeoenvironmental reconstructions}, series = {Geo-marine letters : an international journal of marine geology}, volume = {36}, journal = {Geo-marine letters : an international journal of marine geology}, publisher = {Springer}, address = {New York}, issn = {0276-0460}, doi = {10.1007/s00367-016-0445-7}, pages = {259 -- 270}, year = {2016}, abstract = {During expedition 202 aboard the RV Sonne in 2009, 39 seafloor surface sediment sites were sampled over a wide sector of the North Pacific and adjoining Bering Sea. 
The data served to infer land-ocean linkages of terrigenous sediment supply in terms of major sources and modes of sediment transport within an over-regional context. This is based on an integrated approach dealing with grain-size analysis, bulk mineralogy and clay mineralogy in combination with statistical data evaluation (end-member modelling of grain-size data, fuzzy cluster analysis of mineralogical data). The findings on clay mineralogy served to update those of earlier work extracted from the literature. Today, two processes of terrigenous sediment supply prevail in the study area: far-distance aeolian sediment supply to the pelagic North Pacific, and hemipelagic sediment dispersal from nearby land sources via ocean currents along the continental margins and island arcs. Aeolian particles show the finest grain sizes (clay and fine silt), whereas hemipelagic sediments have high abundances of coarse silt. Exposed sites on seamounts and the continental slope are partly swept by strong currents, leading to residual enrichment of fine sand. Four sediment sources can be distinguished on the basis of distinct index minerals revealed by statistical data analysis: dust plumes from central Asia (quartz, illite), altered materials from the volcanic regions of Kamchatka and the Aleutian Arc (smectite), detritus from the Alaskan Cordillera (chlorite, hornblende), and fluvial detritus from far-eastern Siberia and the Alaska mainland (quartz, feldspar, illite). These findings confirm those of former studies but considerably expand the geographic range of this suite of proxies as far south as 39{\textdegree}N in the open North Pacific. The present integrated methodological approach proved useful in identifying the major modern processes of terrigenous sediment supply to the study region. 
This aspect deserves attention in the selection of sediment core sites for future palaeoenvironmental reconstructions related to aeolian and glacial dynamics, as well as the recognition of palaeo-ocean circulation patterns in general.}, language = {en} } @phdthesis{Falter2016, author = {Falter, Daniela}, title = {A novel approach for large-scale flood risk assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90239}, school = {Universit{\"a}t Potsdam}, pages = {95}, year = {2016}, abstract = {In the past, floods were basically managed by flood control mechanisms. The focus was set on the reduction of flood hazard. The potential consequences were of minor interest. Nowadays river flooding is increasingly seen from the risk perspective, including possible consequences. Moreover, the large-scale picture of flood risk became increasingly important for disaster management planning, national risk developments and the (re-) insurance industry. Therefore, it is widely accepted that risk-orientated flood management approaches at the basin-scale are needed. However, large-scale flood risk assessment methods for areas of several 10,000 km$^2$ are still in early stages. Traditional flood risk assessments are performed reach wise, assuming constant probabilities for the entire reach or basin. This might be helpful on a local basis, but where large-scale patterns are important this approach is of limited use. Assuming a T-year flood (e.g. 100 years) for the entire river network is unrealistic and would lead to an overestimation of flood risk at the large scale. Due to the lack of damage data, additionally, the probability of peak discharge or rainfall is usually used as proxy for damage probability to derive flood risk. With a continuous and long term simulation of the entire flood risk chain, the spatial variability of probabilities could be considered and flood risk could be directly derived from damage data in a consistent way. 
The objective of this study is the development and application of a full flood risk chain, appropriate for the large scale and based on long term and continuous simulation. The novel approach of 'derived flood risk based on continuous simulations' is introduced, where the synthetic discharge time series is used as input into flood impact models and flood risk is directly derived from the resulting synthetic damage time series. The bottleneck at this scale is the hydrodynamic simulation. To find suitable hydrodynamic approaches for the large-scale a benchmark study with simplified 2D hydrodynamic models was performed. A raster-based approach with inertia formulation and a relatively high resolution of 100 m in combination with a fast 1D channel routing model was chosen. To investigate the suitability of the continuous simulation of a full flood risk chain for the large scale, all model parts were integrated into a new framework, the Regional Flood Model (RFM). RFM consists of the hydrological model SWIM, a 1D hydrodynamic river network model, a 2D raster based inundation model and the flood loss model FELMOps+r. Subsequently, the model chain was applied to the Elbe catchment, one of the largest catchments in Germany. For the proof-of-concept, a continuous simulation was performed for the period of 1990-2003. Results were evaluated / validated as far as possible with available observed data in this period. Although each model part introduced its own uncertainties, results and runtime were generally found to be adequate for the purpose of continuous simulation at the large catchment scale. Finally, RFM was applied to a meso-scale catchment in the east of Germany to firstly perform a flood risk assessment with the novel approach of 'derived flood risk assessment based on continuous simulations'. Therefore, RFM was driven by long term synthetic meteorological input data generated by a weather generator. 
Thereby, a virtual time series of climate data of 100 x 100 years was generated and served as input to RFM providing subsequent 100 x 100 years of spatially consistent river discharge series, inundation patterns and damage values. On this basis, flood risk curves and expected annual damage could be derived directly from damage data, providing a large-scale picture of flood risk. In contrast to traditional flood risk analysis, where homogenous return periods are assumed for the entire basin, the presented approach provides a coherent large-scale picture of flood risk. The spatial variability of occurrence probability is respected. Additionally, data and methods are consistent. Catchment and floodplain processes are represented in a holistic way. Antecedent catchment conditions are implicitly taken into account, as well as physical processes like storage effects, flood attenuation or channel-floodplain interactions and related damage influencing effects. Finally, the simulation of a virtual period of 100 x 100 years and consequently large data set on flood loss events enabled the calculation of flood risk directly from damage distributions. Problems associated with the transfer of probabilities in rainfall or peak runoff to probabilities in damage, as often used in traditional approaches, are bypassed. 
RFM and the 'derived flood risk approach based on continuous simulations' has the potential to provide flood risk statements for national planning, re-insurance aspects or other questions where spatially consistent, large-scale assessments are required.}, language = {en} } @article{KreibichBottoMerzetal.2016, author = {Kreibich, Heidi and Botto, Anna and Merz, Bruno and Schr{\"o}ter, Kai}, title = {Probabilistic, Multivariable Flood Loss Modeling on the Mesoscale with BT-FLEMO}, series = {Risk analysis}, volume = {37}, journal = {Risk analysis}, number = {4}, publisher = {Wiley}, address = {Hoboken}, issn = {0272-4332}, doi = {10.1111/risa.12650}, pages = {774 -- 787}, year = {2016}, abstract = {Flood loss modeling is an important component for risk analyses and decision support in flood risk management. Commonly, flood loss models describe complex damaging processes by simple, deterministic approaches like depth-damage functions and are associated with large uncertainty. To improve flood loss estimation and to provide quantitative information about the uncertainty associated with loss modeling, a probabilistic, multivariable Bagging decision Tree Flood Loss Estimation MOdel (BT-FLEMO) for residential buildings was developed. The application of BT-FLEMO provides a probability distribution of estimated losses to residential buildings per municipality. BT-FLEMO was applied and validated at the mesoscale in 19 municipalities that were affected during the 2002 flood by the River Mulde in Saxony, Germany. Validation was undertaken on the one hand via a comparison with six deterministic loss models, including both depth-damage functions and multivariable models. On the other hand, the results were compared with official loss data. BT-FLEMO outperforms deterministic, univariable, and multivariable models with regard to model accuracy, although the prediction uncertainty remains high. An important advantage of BT-FLEMO is the quantification of prediction uncertainty. 
The probability distribution of loss estimates by BT-FLEMO well represents the variation range of loss estimates of the other models in the case study.}, language = {en} } @phdthesis{Nied2016, author = {Nied, Manuela}, title = {The role of soil moisture and weather patterns for flood occurrence and characteristics at the river basin scale}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94612}, school = {Universit{\"a}t Potsdam}, pages = {XVI, 86}, year = {2016}, abstract = {Flood generation at the scale of large river basins is triggered by the interaction of the hydrological pre-conditions and the meteorological event conditions at different spatial and temporal scales. This interaction controls diverse flood generating processes and results in floods varying in magnitude and extent, duration as well as socio-economic consequences. For a process-based understanding of the underlying cause-effect relationships, systematic approaches are required. These approaches have to cover the complete causal flood chain, including the flood triggering meteorological event in combination with the hydrological (pre-)conditions in the catchment, runoff generation, flood routing, possible floodplain inundation and finally flood losses. In this thesis, a comprehensive probabilistic process-based understanding of the causes and effects of floods is advanced. The spatial and temporal dynamics of flood events as well as the geophysical processes involved in the causal flood chain are revealed and the systematic interconnections within the flood chain are deciphered by means of the classification of their associated causes and effects. This is achieved by investigating the role of the hydrological pre-conditions and the meteorological event conditions with respect to flood occurrence, flood processes and flood characteristics as well as their interconnections at the river basin scale. 
Broadening the knowledge about flood triggers, which up to now has been limited to linking large-scale meteorological conditions to flood occurrence, the influence of large-scale pre-event hydrological conditions on flood initiation is investigated. Using the Elbe River basin as an example, a classification of soil moisture, a key variable of pre-event conditions, is developed and a probabilistic link between patterns of soil moisture and flood occurrence is established. The soil moisture classification is applied to continuously simulated soil moisture data which is generated using the semi-distributed conceptual rainfall-runoff model SWIM. Applying successively a principal component analysis and a cluster analysis, days of similar soil moisture patterns are identified in the period November 1951 to October 2003. The investigation of flood triggers is complemented by including meteorological conditions described by a common weather pattern classification that represents the main modes of atmospheric state variability. The newly developed soil moisture classification thereby provides the basis to study the combined impact of hydrological pre-conditions and large-scale meteorological event conditions on flood occurrence at the river basin scale. A process-based understanding of flood generation and its associated probabilities is attained by classifying observed flood events into process-based flood types such as snowmelt floods or long-rain floods. Subsequently, the flood types are linked to the soil moisture and weather patterns. Further understanding of the processes is gained by modeling of the complete causal flood chain, incorporating a rainfall-runoff model, a 1D/2D hydrodynamic model and a flood loss model. A reshuffling approach based on weather patterns and the month of their occurrence is developed to generate synthetic data fields of meteorological conditions, which drive the model chain, in order to increase the flood sample size. 
From the large number of simulated flood events, the impact of hydro-meteorological conditions on various flood characteristics is detected through the analysis of conditional cumulative distribution functions and regression trees. The results show the existence of catchment-scale soil moisture patterns, which comprise of large-scale seasonal wetting and drying components as well as of smaller-scale variations related to spatially heterogeneous catchment processes. Soil moisture patterns frequently occurring before the onset of floods are identified. In winter, floods are initiated by catchment-wide high soil moisture, whereas in summer the flood-initiating soil moisture patterns are diverse and the soil moisture conditions are less stable in time. The combined study of both soil moisture and weather patterns shows that the flood favoring hydro-meteorological patterns as well as their interactions vary seasonally. In the analysis period, 18 \% of the weather patterns only result in a flood in the case of preceding soil saturation. The classification of 82 past events into flood types reveals seasonally varying flood processes that can be linked to hydro-meteorological patterns. For instance, the highest flood potential for long-rain floods is associated with a weather pattern that is often detected in the presence of so-called 'Vb' cyclones. Rain-on-snow and snowmelt floods are associated with westerly and north-westerly wind directions. The flood characteristics vary among the flood types and can be reproduced by the applied model chain. In total, 5970 events are simulated. They reproduce the observed event characteristics between September 1957 and August 2002 and provide information on flood losses. 
A regression tree analysis relates the flood processes of the simulated events to the hydro-meteorological (pre-)event conditions and highlights the fact that flood magnitude is primarily controlled by the meteorological event, whereas flood extent is primarily controlled by the soil moisture conditions. Describing flood occurrence, processes and characteristics as a function of hydro-meteorological patterns, this thesis is part of a paradigm shift towards a process-based understanding of floods. The results highlight that soil moisture patterns as well as weather patterns are not only beneficial to a probabilistic conception of flood initiation but also provide information on the involved flood processes and the resulting flood characteristics.}, language = {en} } @article{GianniotisSchnoerrMolkenthinetal.2016, author = {Gianniotis, Nikolaos and Schnoerr, Christoph and Molkenthin, Christian and Bora, Sanjay Singh}, title = {Approximate variational inference based on a finite sample of Gaussian latent variables}, series = {Pattern Analysis \& Applications}, volume = {19}, journal = {Pattern Analysis \& Applications}, publisher = {Springer}, address = {New York}, issn = {1433-7541}, doi = {10.1007/s10044-015-0496-9}, pages = {475 -- 485}, year = {2016}, abstract = {Variational methods are employed in situations where exact Bayesian inference becomes intractable due to the difficulty in performing certain integrals. Typically, variational methods postulate a tractable posterior and formulate a lower bound on the desired integral to be approximated, e.g. marginal likelihood. The lower bound is then optimised with respect to its free parameters, the so-called variational parameters. However, this is not always possible as for certain integrals it is very challenging (or tedious) to come up with a suitable lower bound. Here, we propose a simple scheme that overcomes some of the awkward cases where the usual variational treatment becomes difficult. 
The scheme relies on a rewriting of the lower bound on the model log-likelihood. We demonstrate the proposed scheme on a number of synthetic and real examples, as well as on a real geophysical model for which the standard variational approaches are inapplicable.}, language = {en} }