@article{WendtMorriss2022, author = {Wendt, Julia and Morriss, Jayne}, title = {An examination of intolerance of uncertainty and contingency instruction on multiple indices during threat acquisition and extinction training}, series = {International journal of psychophysiology : official journal of the International Organization of Psychophysiology}, volume = {177}, journal = {International journal of psychophysiology : official journal of the International Organization of Psychophysiology}, publisher = {Elsevier}, address = {Amsterdam [et al.]}, issn = {0167-8760}, doi = {10.1016/j.ijpsycho.2022.05.005}, pages = {171 -- 178}, year = {2022}, abstract = {Individuals who score high in self-reported Intolerance of Uncertainty (IU) tend to find uncertainty aversive. Prior research has demonstrated that, under uncertainty, individuals with high IU display difficulties in updating learned threat associations to safety associations. Importantly, recent research has shown that providing contingency instructions about threat and safety contingencies (i.e. reducing uncertainty) to individuals with high IU promotes the updating of learned threat associations to safety associations. Here we aimed to conceptually replicate IU and contingency instruction-based effects by conducting a secondary analysis of self-reported IU, ratings, skin conductance, and functional magnetic resonance imaging (fMRI) data recorded during uninstructed/instructed blocks of threat acquisition and threat extinction training (n = 48). Generally, no significant associations were observed between self-reported IU and differential responding to learned threat and safety cues for any measure during uninstructed/instructed blocks of threat acquisition and threat extinction training. There was some tentative evidence that higher IU was associated with greater ratings of unpleasantness and arousal to the safety cue after the experiment and greater skin conductance response to the safety cue during extinction generally. Potential explanations for these null effects and directions for future research are discussed.}, language = {en} } @article{KtenidouRoumeliotiAbrahamsonetal.2018, author = {Ktenidou, Olga-Joan and Roumelioti, Zafeiria and Abrahamson, Norman and Cotton, Fabrice Pierre and Pitilakis, Kyriazis and Hollender, Fabrice}, title = {Understanding single-station ground motion variability and uncertainty (sigma)}, series = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, volume = {16}, journal = {Bulletin of earthquake engineering : official publication of the European Association for Earthquake Engineering}, number = {6}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-761X}, doi = {10.1007/s10518-017-0098-6}, pages = {2311 -- 2336}, year = {2018}, abstract = {Accelerometric data from the well-studied valley EUROSEISTEST are used to investigate ground motion uncertainty and variability. We define a simple local ground motion prediction equation (GMPE) and investigate changes in standard deviation (σ) and its components, the between-event variability (τ) and within-event variability (φ). Improving seismological metadata significantly reduces τ (30-50\%), which in turn reduces the total σ. Improving site information reduces the systematic site-to-site variability, φS2S (20-30\%), in turn reducing φ and, ultimately, σ. Our values of standard deviations are lower than global values from the literature, and closer to path-specific than site-specific values. 
However, our data have insufficient azimuthal coverage for single-path analysis. Certain stations have higher ground-motion variability, possibly due to topography, basin edge or downgoing wave effects. Sensitivity checks show that 3 recordings per event is a sufficient data selection criterion; however, one of the dataset's advantages is the large number of recordings per station (9-90) that yields good site term estimates. We examine uncertainty components, binning our data with magnitude, from 0.01 to 2 s; at smaller magnitudes, τ decreases and φSS increases, possibly due to κ and source-site trade-offs. Finally, we investigate the alternative approach of computing φSS using existing GMPEs instead of creating an ad hoc local GMPE. This is important where data are insufficient to create one, or when site-specific PSHA is performed. We show that global GMPEs may still capture φSS, provided that: (1) the magnitude scaling errors are accommodated by the event terms; (2) there are no distance scaling errors (use of a regionally applicable model). Site terms (φS2S) computed by different global GMPEs (using different site-proxies) vary significantly, especially for hard-rock sites. This indicates that GMPEs may be poorly constrained where they are sometimes most needed, i.e., for hard rock.}, language = {en} } @article{FortesaGarciaComendadorCalsamigliaetal.2019, author = {Fortesa, Josep and Garc{\'i}a-Comendador, Julian and Calsamiglia, A. and L{\'o}pez-Taraz{\'o}n, Jos{\'e} Andr{\'e}s and Latron, J. and Alorda, B. and Estrany, Joan}, title = {Comparison of stage/discharge rating curves derived from different recording systems}, series = {The science of the total environment : an international journal for scientific research into the environment and its relationship with man}, volume = {665}, journal = {The science of the total environment : an international journal for scientific research into the environment and its relationship with man}, publisher = {Elsevier Science}, address = {Amsterdam}, issn = {0048-9697}, doi = {10.1016/j.scitotenv.2019.02.158}, pages = {968 -- 981}, year = {2019}, abstract = {Obtaining representative hydrometric values is essential for characterizing extreme events and hydrological dynamics and for detecting possible changes in the long-term hydrology. Reliability of streamflow data requires temporal continuity and maintenance of the gauging stations, whose data are affected by epistemic and random sources of error. An assessment of the uncertainties of discharge measurements and stage-discharge rating curves was carried out by comparing the accuracy of the measuring instruments of two different hydrometric networks (i.e., one analogical and one digital) established in the same river location on the Mediterranean island of Mallorca. Furthermore, the effects of such uncertainties on the hydrological dynamics were assessed, considering the significant global change impacts besetting this island. The evaluation was carried out at four representative gauging stations of the hydrographic network with analogical (≈40 years) and digital (≈10 years) data series. The study revealed that the largest source of uncertainty in both the analogical (28-274\%) and the digital (17-37\%) networks was the stage-discharge rating curves. Their impact on the water resources was also evaluated at the event and annual scales, resulting in average differences in water yield of 183\% and 142\%, respectively. 
Such an improvement in the understanding of hydrometric network uncertainties will greatly benefit the interpretation of long-term streamflow records by providing better insights for hydrologic and flood hazard planning, management and modelling.}, language = {en} } @article{RumpfTronicke2015, author = {Rumpf, Michael and Tronicke, Jens}, title = {Assessing uncertainty in refraction seismic traveltime inversion using a global inversion strategy}, series = {Geophysical prospecting}, volume = {63}, journal = {Geophysical prospecting}, number = {5}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0016-8025}, doi = {10.1111/1365-2478.12240}, pages = {1188 -- 1197}, year = {2015}, abstract = {To analyse and invert refraction seismic travel time data, different approaches and techniques have been proposed. One common approach is to invert first-break travel times employing local optimization approaches. However, these approaches result in a single velocity model, and it is difficult to assess the quality and to quantify the uncertainties and non-uniqueness of the obtained solution. To address these problems, we propose an inversion strategy relying on a global optimization approach known as particle swarm optimization. With this approach we generate an ensemble of acceptable velocity models, i.e., models explaining our data equally well. We test and evaluate our approach using synthetic seismic travel times and field data collected across a creeping hillslope in the Austrian Alps. Our synthetic study mimics a layered near-surface environment, including a sharp velocity increase with depth and complex refractor topography. Analysing the generated ensemble of acceptable solutions using different statistical measures demonstrates that our inversion strategy is able to reconstruct the input velocity model, including reasonable, quantitative estimates of uncertainty. Our field data set is inverted, employing the same strategy, and we further compare our results with the velocity model obtained by a standard local optimization approach and the information from a nearby borehole. This comparison shows that both inversion strategies result in geologically reasonable models (in agreement with the borehole information). However, analysing the model variability of the ensemble generated using our global approach indicates that the result of the local optimization approach is part of this model ensemble. Our results show the benefit of employing a global inversion strategy to generate near-surface velocity models from refraction seismic data sets, especially in cases where no detailed a priori information regarding subsurface structures and velocity variations is available.}, language = {en} } @article{ZimmermannZimmermann2014, author = {Zimmermann, Alexander and Zimmermann, Beate}, title = {Requirements for throughfall monitoring: The roles of temporal scale and canopy complexity}, series = {Agricultural and forest meteorology}, volume = {189}, journal = {Agricultural and forest meteorology}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0168-1923}, doi = {10.1016/j.agrformet.2014.01.014}, pages = {125 -- 139}, year = {2014}, abstract = {A wide range of basic and applied problems in water resources research requires high-quality estimates of the spatial mean of throughfall. Many throughfall sampling schemes, however, are not optimally adapted to the system under study. 
The application of inappropriate sampling schemes may partly reflect the lack of generally applicable guidelines on throughfall sampling strategies. In this study, we conducted virtual sampling experiments using simulated fields based on empirical throughfall data from three structurally distinct forests (a 12-year-old teak plantation, a 5-year-old young secondary forest, and a 130-year-old secondary forest). In the virtual sampling experiments we assessed the relative error of mean throughfall estimates for 38 different throughfall sampling schemes comprising a variety of funnel- and trough-type collectors and a large range of sample sizes. Moreover, we tested the performance of each scheme for both event-based and accumulated throughfall data. The key findings of our study are threefold. First, as errors of mean throughfall estimates vary as a function of throughfall depth, the decision on which temporal scale (i.e. event-based versus accumulated data) to sample strongly influences the required sampling effort. Second, for a given temporal scale, errors of throughfall estimates can vary considerably as a function of canopy complexity. Accordingly, throughfall sampling in simply structured forests requires a comparatively modest effort, whereas heterogeneous forests can be extreme in terms of sampling requirements, particularly if the focus is on reliable data for small events. Third, the efficiency of trough-type collectors depends on the spatial structure of throughfall. Strong, long-ranging throughfall patterns decrease the efficiency of troughs substantially. Based on the results of our virtual sampling experiments, which we evaluated by applying two contrasting sampling approaches simultaneously, we derive readily applicable guidelines for throughfall monitoring.}, language = {en} } @article{SchwanghartHeckmann2012, author = {Schwanghart, Wolfgang and Heckmann, Tobias}, title = {Fuzzy delineation of drainage basins through probabilistic interpretation of diverging flow algorithms}, series = {Environmental modelling \& software with environment data news}, volume = {33}, journal = {Environmental modelling \& software with environment data news}, publisher = {Elsevier}, address = {Oxford}, issn = {1364-8152}, doi = {10.1016/j.envsoft.2012.01.016}, pages = {106 -- 113}, year = {2012}, abstract = {The assessment of uncertainty is a major challenge in geomorphometry. Methods to quantify uncertainty in digital elevation models (DEMs) are needed to assess and report derivatives such as drainage basins. While Monte Carlo (MC) techniques have been developed and employed to assess the variability of second-order derivatives of DEMs, their application requires explicit error modeling and numerous simulations to reliably calculate error bounds. Here, we develop an analytical model to quantify and visualize uncertainty in drainage basin delineation in DEMs. The model is based on the assumption that multiple flow directions (MFD) represent a discrete probability distribution of non-diverging flow networks. The Shannon Index quantifies, for each cell, the uncertainty of draining into a specific drainage basin outlet. In addition, error bounds for drainage areas can be derived. An application of the model shows that it identifies areas in a DEM where drainage basin delineation is highly uncertain owing to flow dispersion on convex landforms such as alluvial fans. 
The model allows for a quantitative assessment of the magnitudes of expected drainage area variability and delivers constraints on the observed volatile hydrological behavior in a palaeoenvironmental record of lake level change. Since the model cannot account for all uncertainties in drainage basin delineation, we conclude that a joint application with MC techniques is promising for an efficient and comprehensive error assessment in the future.}, language = {en} }