@misc{NicenboimVasishthGatteietal.2015, author = {Nicenboim, Bruno and Vasishth, Shravan and Gattei, Carolina and Sigman, Mariano and Kliegl, Reinhold}, title = {Working memory differences in long-distance dependency resolution}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75694}, pages = {16}, year = {2015}, abstract = {There is a wealth of evidence showing that increasing the distance between an argument and its head leads to more processing effort, namely, locality effects; these are usually associated with constraints in working memory (DLT: Gibson, 2000; activation-based model: Lewis and Vasishth, 2005). In SOV languages, however, the opposite effect has been found: antilocality (see discussion in Levy et al., 2013). Antilocality effects can be explained by the expectation-based approach as proposed by Levy (2008) or by the activation-based model of sentence processing as proposed by Lewis and Vasishth (2005). We report an eye-tracking and a self-paced reading study with sentences in Spanish together with measures of individual differences to examine the distinction between expectation- and memory-based accounts, and within memory-based accounts the further distinction between DLT and the activation-based model. The experiments show that (i) antilocality effects as predicted by the expectation account appear only for high-capacity readers; (ii) increasing dependency length by interposing material that modifies the head of the dependency (the verb) produces stronger facilitation than increasing dependency length with material that does not modify the head; this is in agreement with the activation-based model but not with the expectation account; and (iii) a possible outcome of memory load on low-capacity readers is the increase in regressive saccades (locality effects as predicted by memory-based accounts) or, surprisingly, a speedup in the self-paced reading task; the latter consistent with good-enough parsing (Ferreira et al., 2002). 
In sum, the study suggests that individual differences in working memory capacity play a role in dependency resolution, and that some of the aspects of dependency resolution can be best explained with the activation-based model together with a prediction component.}, language = {en} } @article{NicenboimVasishthGatteietal.2015a, author = {Nicenboim, Bruno and Vasishth, Shravan and Gattei, Carolina and Sigman, Mariano and Kliegl, Reinhold}, title = {Working memory differences in long-distance dependency resolution}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, number = {312}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00312}, pages = {16}, year = {2015}, abstract = {There is a wealth of evidence showing that increasing the distance between an argument and its head leads to more processing effort, namely, locality effects; these are usually associated with constraints in working memory (DLT: Gibson, 2000; activation-based model: Lewis and Vasishth, 2005). In SOV languages, however, the opposite effect has been found: antilocality (see discussion in Levy et al., 2013). Antilocality effects can be explained by the expectation-based approach as proposed by Levy (2008) or by the activation-based model of sentence processing as proposed by Lewis and Vasishth (2005). We report an eye-tracking and a self-paced reading study with sentences in Spanish together with measures of individual differences to examine the distinction between expectation- and memory-based accounts, and within memory-based accounts the further distinction between DLT and the activation-based model. 
The experiments show that (i) antilocality effects as predicted by the expectation account appear only for high-capacity readers; (ii) increasing dependency length by interposing material that modifies the head of the dependency (the verb) produces stronger facilitation than increasing dependency length with material that does not modify the head; this is in agreement with the activation-based model but not with the expectation account; and (iii) a possible outcome of memory load on low-capacity readers is the increase in regressive saccades (locality effects as predicted by memory-based accounts) or, surprisingly, a speedup in the self-paced reading task; the latter consistent with good-enough parsing (Ferreira et al., 2002). In sum, the study suggests that individual differences in working memory capacity play a role in dependency resolution, and that some of the aspects of dependency resolution can be best explained with the activation-based model together with a prediction component.}, language = {en} } @article{NicenboimVasishthGatteietal.2015b, author = {Nicenboim, Bruno and Vasishth, Shravan and Gattei, Carolina and Sigman, Mariano and Kliegl, Reinhold}, title = {Working memory differences in long-distance dependency resolution}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00312}, pages = {16}, year = {2015}, abstract = {There is a wealth of evidence showing that increasing the distance between an argument and its head leads to more processing effort, namely, locality effects; these are usually associated with constraints in working memory (DLT: Gibson, 2000; activation-based model: Lewis and Vasishth, 2005). In SOV languages, however, the opposite effect has been found: antilocality (see discussion in Levy et al., 2013). 
Antilocality effects can be explained by the expectation-based approach as proposed by Levy (2008) or by the activation-based model of sentence processing as proposed by Lewis and Vasishth (2005). We report an eye-tracking and a self-paced reading study with sentences in Spanish together with measures of individual differences to examine the distinction between expectation- and memory-based accounts, and within memory-based accounts the further distinction between DLT and the activation-based model. The experiments show that (i) antilocality effects as predicted by the expectation account appear only for high-capacity readers; (ii) increasing dependency length by interposing material that modifies the head of the dependency (the verb) produces stronger facilitation than increasing dependency length with material that does not modify the head; this is in agreement with the activation-based model but not with the expectation account; and (iii) a possible outcome of memory load on low-capacity readers is the increase in regressive saccades (locality effects as predicted by memory-based accounts) or, surprisingly, a speedup in the self-paced reading task; the latter consistent with good-enough parsing (Ferreira et al., 2002). 
In sum, the study suggests that individual differences in working memory capacity play a role in dependency resolution, and that some of the aspects of dependency resolution can be best explained with the activation-based model together with a prediction component.}, language = {en} } @article{SchadNicenboimBuerkneretal.2022, author = {Schad, Daniel and Nicenboim, Bruno and B{\"u}rkner, Paul-Christian and Betancourt, Michael and Vasishth, Shravan}, title = {Workflow techniques for the robust use of {Bayes} factors}, series = {Psychological methods}, volume = {28}, journal = {Psychological methods}, number = {6}, publisher = {American Psychological Association}, address = {Washington}, issn = {1082-989X}, doi = {10.1037/met0000472}, pages = {1404 -- 1426}, year = {2022}, abstract = {Inferences about hypotheses are ubiquitous in the cognitive sciences. Bayes factors provide one general way to compare different hypotheses by their compatibility with the observed data. Those quantifications can then also be used to choose between hypotheses. While Bayes factors provide an immediate approach to hypothesis testing, they are highly sensitive to details of the data/model assumptions and it is unclear whether the details of the computational implementation (such as bridge sampling) are unbiased for complex analyses. Here, we study how Bayes factors misbehave under different conditions. This includes a study of errors in the estimation of Bayes factors; the first-ever use of simulation-based calibration to test the accuracy and bias of Bayes factor estimates using bridge sampling; a study of the stability of Bayes factors against different MCMC draws and sampling variation in the data; and a look at the variability of decisions based on Bayes factors using a utility function. We outline a Bayes factor workflow that researchers can use to study whether Bayes factors are robust for their individual analysis. Reproducible code is available from https://osf.io/y354c/.
Translational Abstract
In psychology and related areas, scientific hypotheses are commonly tested by asking questions like "is [some] effect present or absent." Such hypothesis testing is most often carried out using frequentist null hypothesis significance testing (NHST). The NHST procedure is very simple: It usually returns a p-value, which is then used to make binary decisions like "the effect is present/absent." For example, it is common to see studies in the media that draw simplistic conclusions like "coffee causes cancer," or "coffee reduces the chances of getting cancer." However, a powerful and more nuanced alternative approach exists: Bayes factors. Bayes factors have many advantages over NHST. However, for the complex statistical models that are commonly used for data analysis today, computing Bayes factors is not at all a simple matter. In this article, we discuss the main complexities associated with computing Bayes factors. This is the first article to provide a detailed workflow for understanding and computing Bayes factors in complex statistical models. The article provides a statistically more nuanced way to think about hypothesis testing than the overly simplistic tendency to declare effects as being "present" or "absent".}, language = {en} } @article{BuerkiAlarioVasishth2022, author = {B{\"u}rki, Audrey and Alario, F-Xavier and Vasishth, Shravan}, title = {When words collide: {Bayesian} meta-analyses of distractor and target properties in the picture-word interference paradigm}, series = {Quarterly Journal of Experimental Psychology}, volume = {76}, journal = {Quarterly Journal of Experimental Psychology}, number = {6}, publisher = {Sage Publications}, address = {London}, issn = {1747-0218}, doi = {10.1177/17470218221114644}, pages = {1410 -- 1430}, year = {2022}, abstract = {In the picture-word interference paradigm, participants name pictures while ignoring a written or spoken distractor word. 
Naming times to the pictures are slowed down by the presence of the distractor word. The present study investigates in detail the impact of distractor and target word properties on picture naming times, building on the seminal study by Miozzo and Caramazza. We report the results of several Bayesian meta-analyses based on 26 datasets. These analyses provide estimates of effect sizes and their precision for several variables and their interactions. They show the reliability of the distractor frequency effect on picture naming latencies (latencies decrease as the frequency of the distractor increases) and demonstrate for the first time the impact of distractor length, with longer naming latencies for trials with longer distractors. Moreover, distractor frequency interacts with target word frequency to predict picture naming latencies. The methodological and theoretical implications of these findings are discussed.}, language = {en} } @misc{NicenboimLogacevGatteietal.2016, author = {Nicenboim, Bruno and Logacev, Pavel and Gattei, Carolina and Vasishth, Shravan}, title = {When High-Capacity Readers Slow Down and Low-Capacity Readers Speed Up}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90663}, pages = {1 -- 24}, year = {2016}, abstract = {We examined the effects of argument-head distance in SVO and SOV languages (Spanish and German), while taking into account readers' working memory capacity and controlling for expectation (Levy, 2008) and other factors. We predicted only locality effects, that is, a slowdown produced by increased dependency distance (Gibson, 2000; Lewis and Vasishth, 2005). Furthermore, we expected stronger locality effects for readers with low working memory capacity. Contrary to our predictions, low-capacity readers showed faster reading with increased distance, while high-capacity readers showed locality effects. 
We suggest that while the locality effects are compatible with memory-based explanations, the speedup of low-capacity readers can be explained by an increased probability of retrieval failure. We present a computational model based on ACT-R built under the previous assumptions, which is able to give a qualitative account for the present data and can be tested in future research. Our results suggest that in some cases, interpreting longer RTs as indexing increased processing difficulty and shorter RTs as facilitation may be too simplistic: The same increase in processing difficulty may lead to slowdowns in high-capacity readers and speedups in low-capacity ones. Ignoring individual level capacity differences when investigating locality effects may lead to misleading conclusions.}, language = {en} } @article{NicenboimLogacevGatteietal.2016a, author = {Nicenboim, Bruno and Logacev, Pavel and Gattei, Carolina and Vasishth, Shravan}, title = {When High-Capacity Readers Slow Down and Low-Capacity Readers Speed Up}, series = {Frontiers in psychology}, volume = {7}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2016.00280}, pages = {1 -- 24}, year = {2016}, abstract = {We examined the effects of argument-head distance in SVO and SOV languages (Spanish and German), while taking into account readers' working memory capacity and controlling for expectation (Levy, 2008) and other factors. We predicted only locality effects, that is, a slowdown produced by increased dependency distance (Gibson, 2000; Lewis and Vasishth, 2005). Furthermore, we expected stronger locality effects for readers with low working memory capacity. Contrary to our predictions, low-capacity readers showed faster reading with increased distance, while high-capacity readers showed locality effects. 
We suggest that while the locality effects are compatible with memory-based explanations, the speedup of low-capacity readers can be explained by an increased probability of retrieval failure. We present a computational model based on ACT-R built under the previous assumptions, which is able to give a qualitative account for the present data and can be tested in future research. Our results suggest that in some cases, interpreting longer RTs as indexing increased processing difficulty and shorter RTs as facilitation may be too simplistic: The same increase in processing difficulty may lead to slowdowns in high-capacity readers and speedups in low-capacity ones. Ignoring individual level capacity differences when investigating locality effects may lead to misleading conclusions.}, language = {en} } @article{vonderMalsburgVasishth2011, author = {von der Malsburg, Titus Raban and Vasishth, Shravan}, title = {What is the scanpath signature of syntactic reanalysis?}, series = {Journal of memory and language}, volume = {65}, journal = {Journal of memory and language}, number = {2}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2011.02.004}, pages = {109 -- 127}, year = {2011}, abstract = {Which repair strategy does the language system deploy when it gets garden-pathed, and what can regressive eye movements in reading tell us about reanalysis strategies? Several influential eye-tracking studies on syntactic reanalysis (Frazier \& Rayner, 1982; Meseguer, Carreiras, \& Clifton, 2002; Mitchell, Shen, Green, \& Hodgson, 2008) have addressed this question by examining scanpaths, i.e., sequential patterns of eye fixations. However, in the absence of a suitable method for analyzing scanpaths, these studies relied on simplified dependent measures that are arguably ambiguous and hard to interpret. We address the theoretical question of repair strategy by developing a new method that quantifies scanpath similarity. 
Our method reveals several distinct fixation strategies associated with reanalysis that went undetected in a previously published data set (Meseguer et al., 2002). One prevalent pattern suggests re-parsing of the sentence, a strategy that has been discussed in the literature (Frazier \& Rayner, 1982); however, readers differed tremendously in how they orchestrated the various fixation strategies. Our results suggest that the human parsing system non-deterministically adopts different strategies when confronted with the disambiguating material in garden-path sentences.}, language = {en} } @misc{VasishthvonderMalsburgEngelmann2013, author = {Vasishth, Shravan and von der Malsburg, Titus Raban and Engelmann, Felix}, title = {What eye movements can tell us about sentence comprehension}, series = {Wiley interdisciplinary reviews : Cognitive Science}, volume = {4}, journal = {Wiley interdisciplinary reviews : Cognitive Science}, number = {2}, publisher = {Wiley}, address = {San Francisco}, issn = {1939-5078}, doi = {10.1002/wcs.1209}, pages = {125 -- 134}, year = {2013}, abstract = {Eye movement data have proven to be very useful for investigating human sentence processing. Eyetracking research has addressed a wide range of questions, such as recovery mechanisms following garden-pathing, the timing of processes driving comprehension, the role of anticipation and expectation in parsing, the role of semantic, pragmatic, and prosodic information, and so on. However, there are some limitations regarding the inferences that can be made on the basis of eye movements. One relates to the nontrivial interaction between parsing and the eye movement control system which complicates the interpretation of eye movement data. Detailed computational models that integrate parsing with eye movement control theories have the potential to unpack the complexity of eye movement data and can therefore aid in the interpretation of eye movements. 
Another limitation is the difficulty of capturing spatiotemporal patterns in eye movements using the traditional word-based eyetracking measures. Recent research has demonstrated the relevance of these patterns and has shown how they can be analyzed. In this review, we focus on reading, and present examples demonstrating how eye movement data reveal what events unfold when the parser runs into difficulty, and how the parsing system interacts with eye movement control. WIREs Cogn Sci 2013, 4:125--134. doi: 10.1002/wcs.1209 For further resources related to this article, please visit the WIREs website.}, language = {en} } @article{BuerkiFoschiniElbuyMadecetal.2020, author = {B{\"u}rki-Foschini, Audrey Damaris and Elbuy, Shereen and Madec, Sylvain and Vasishth, Shravan}, title = {What did we learn from forty years of research on semantic interference?}, series = {Journal of memory and language}, volume = {114}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104125}, pages = {25}, year = {2020}, abstract = {When participants in an experiment have to name pictures while ignoring distractor words superimposed on the picture or presented auditorily (i.e., picture-word interference paradigm), they take more time when the word to be named (or target) and distractor words are from the same semantic category (e.g., cat-dog). This experimental effect is known as the semantic interference effect, and is probably one of the most studied in the language production literature. The functional origin of the effect and the exact conditions in which it occurs are however still debated. Since Lupker (1979) reported the effect in the first response time experiment about 40 years ago, more than 300 similar experiments have been conducted. 
The semantic interference effect was replicated in many experiments, but several studies also reported the absence of an effect in a subset of experimental conditions. The aim of the present study is to provide a comprehensive theoretical review of the existing evidence to date and several Bayesian meta-analyses and meta-regressions to determine the size of the effect and explore the experimental conditions in which the effect surfaces. The results are discussed in the light of current debates about the functional origin of the semantic interference effect and its implications for our understanding of the language production system.}, language = {en} }