@article{Vasishth2020, author = {Vasishth, Shravan}, title = {Using approximate Bayesian computation for estimating parameters in the cue-based retrieval model of sentence processing}, series = {MethodsX}, volume = {7}, journal = {MethodsX}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2215-0161}, doi = {10.1016/j.mex.2020.100850}, pages = {6}, year = {2020}, abstract = {A commonly used approach to parameter estimation in computational models is the so-called grid search procedure: the entire parameter space is searched in small steps to determine the parameter value that provides the best fit to the observed data. This approach has several disadvantages: first, it can be computationally very expensive; second, only one optimal point value of the parameter is reported as the best-fit value, so we cannot quantify our uncertainty about the parameter estimate. In the main journal article that this methods article accompanies (J{\"a}ger et al., 2020, Interference patterns in subject-verb agreement and reflexives revisited: A large-sample study, Journal of Memory and Language), we carried out parameter estimation using Approximate Bayesian Computation (ABC), which is a Bayesian approach that allows us to quantify our uncertainty about the parameter's values given the data. This approach has the further advantage that it allows us to generate both prior and posterior predictive distributions of reading times from the cue-based retrieval model of Lewis and Vasishth (2005).
Instead of the conventional method of using grid search, we use Approximate Bayesian Computation (ABC) for parameter estimation in the Lewis and Vasishth (2005) model.
The ABC method of parameter estimation has the advantage that the uncertainty about the parameter can be quantified.}, language = {en} } @article{PaapeVasishthvonderMalsburg2020, author = {Paape, Dario and Vasishth, Shravan and von der Malsburg, Titus Raban}, title = {Quadruplex negatio invertit?}, series = {Journal of semantics}, volume = {37}, journal = {Journal of semantics}, number = {4}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0167-5133}, doi = {10.1093/jos/ffaa009}, pages = {509 -- 555}, year = {2020}, abstract = {So-called "depth charge" sentences (No head injury is too trivial to be ignored) are interpreted by the vast majority of speakers to mean the opposite of what their compositional semantics would dictate. The semantic inversion that is observed for sentences of this type is the strongest and most persistent linguistic illusion known to the field (Wason \& Reich, 1979). However, it has recently been argued that the preferred interpretation arises not because of a prevailing failure of the processing system, but rather because the non-compositional meaning is grammaticalized in the form of a stored construction (Cook \& Stevenson, 2010; Fortuin, 2014). In a series of five experiments, we investigate whether the depth charge effect is better explained by processing failure due to memory overload (the overloading hypothesis) or by the existence of an underlying grammaticalized construction with two available meanings (the ambiguity hypothesis). To our knowledge, our experiments are the first to explore the on-line processing profile of depth charge sentences. Overall, the data are consistent with specific variants of the ambiguity and overloading hypotheses while providing evidence against other variants. As an extension of the overloading hypothesis, we suggest two heuristic processes that may ultimately yield the incorrect reading when compositional processing is suspended for strategic reasons.}, language = {en} } @article{EngbertRabeSchwetlicketal.2022, author = {Engbert, Ralf and Rabe, Maximilian Michael and Schwetlick, Lisa and Seelig, Stefan A. and Reich, Sebastian and Vasishth, Shravan}, title = {Data assimilation in dynamical cognitive science}, series = {Trends in cognitive sciences}, volume = {26}, journal = {Trends in cognitive sciences}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1364-6613}, doi = {10.1016/j.tics.2021.11.006}, pages = {99 -- 102}, year = {2022}, abstract = {Dynamical models make specific assumptions about cognitive processes that generate human behavior. In data assimilation, these models are tested against time-ordered data.
Recent progress on Bayesian data assimilation demonstrates that this approach combines the strengths of statistical modeling of individual differences with those of dynamical cognitive models.}, language = {en} } @misc{StoneVasishthMalsburg2022, author = {Stone, Kate and Vasishth, Shravan and von der Malsburg, Titus}, title = {Does entropy modulate the prediction of German long-distance verb particles?}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1866-8364}, doi = {10.25932/publishup-56231}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-562312}, pages = {1 -- 25}, year = {2022}, abstract = {In this paper we examine the effect of uncertainty on readers' predictions about meaning. In particular, we were interested in how uncertainty might influence the likelihood of committing to a specific sentence meaning. We conducted two event-related potential (ERP) experiments using particle verbs such as turn down and manipulated uncertainty by constraining the context such that readers could be either highly certain about the identity of a distant verb particle, such as turn the bed […] down, or less certain due to competing particles, such as turn the music […] up/down. The study was conducted in German, where verb particles appear clause-finally and may be separated from the verb by a large amount of material. We hypothesised that this separation would encourage readers to predict the particle, and that high certainty would make prediction of a specific particle more likely than lower certainty. If a specific particle was predicted, this would reflect a strong commitment to sentence meaning that should incur a higher processing cost if the prediction is wrong. If a specific particle was less likely to be predicted, commitment should be weaker and the processing cost of a wrong prediction lower. If true, this could suggest that uncertainty discourages predictions via an unacceptable cost-benefit ratio. However, given the clear predictions made by the literature, it remained surprisingly unclear whether the uncertainty manipulation affected the two ERP components studied, the N400 and the PNP. Bayes factor analyses showed that evidence for our a priori hypothesised effect sizes was inconclusive, although there was decisive evidence against a priori hypothesised effect sizes larger than 1μV for the N400 and larger than 3μV for the PNP. We attribute the inconclusive finding to the properties of verb-particle dependencies that differ from the verb-noun dependencies in which the N400 and PNP are often studied.}, language = {en} } @article{JaegerEngelmannVasishth2017, author = {J{\"a}ger, Lena A. and Engelmann, Felix and Vasishth, Shravan}, title = {Similarity-based interference in sentence comprehension: Literature review and Bayesian meta-analysis}, series = {Journal of memory and language}, volume = {94}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2017.01.004}, pages = {316 -- 339}, year = {2017}, abstract = {We report a comprehensive review of the published reading studies on retrieval interference in reflexive-/reciprocal-antecedent and subject-verb dependencies. We also provide a quantitative random-effects meta-analysis of eyetracking and self-paced reading studies.
We show that the empirical evidence is only partly consistent with cue-based retrieval as implemented in the ACT-R-based model of sentence processing by Lewis and Vasishth (2005) (LV05) and that there are important differences between the reviewed dependency types. In non-agreement subject-verb dependencies, there is evidence for inhibitory interference in configurations where the correct dependent fully matches the retrieval cues. This is consistent with the LV05 cue-based retrieval account. By contrast, in subject-verb agreement as well as in reflexive-/reciprocal-antecedent dependencies, no evidence for inhibitory interference is found in configurations with a fully cue-matching subject/antecedent. In configurations with only a partially cue-matching subject or antecedent, the meta-analysis reveals facilitatory interference in subject-verb agreement and inhibitory interference in reflexives/reciprocals. The former is consistent with the LV05 account, but the latter is not. Moreover, the meta-analysis reveals that (i) interference type (proactive versus retroactive) leads to different effects in the reviewed dependency types and (ii) the prominence of the distractor strongly influences the interference effect. In sum, the meta-analysis suggests that the LV05 model needs important modifications to account for the unexplained interference patterns and the differences between the dependency types. More generally, the meta-analysis provides a quantitative empirical basis for comparing the predictions of competing accounts of retrieval processes in sentence comprehension.}, language = {en} } @article{FrankTrompenaarsVasishth2016, author = {Frank, Stefan L. and Trompenaars, Thijs and Vasishth, Shravan}, title = {Cross-Linguistic Differences in Processing Double-Embedded Relative Clauses: Working-Memory Constraints or Language Statistics?}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {40}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12247}, pages = {554 -- 578}, year = {2016}, abstract = {An English double-embedded relative clause from which the middle verb is omitted can often be processed more easily than its grammatical counterpart, a phenomenon known as the grammaticality illusion. This effect has been found to be reversed in German, suggesting that the illusion is language specific rather than a consequence of universal working memory constraints. We present results from three self-paced reading experiments which show that Dutch native speakers also do not show the grammaticality illusion in Dutch, whereas both German and Dutch native speakers do show the illusion when reading English sentences. These findings provide evidence against working memory constraints as an explanation for the observed effect in English. We propose an alternative account based on the statistical patterns of the languages involved.
In support of this alternative, a single recurrent neural network model that is trained on both Dutch and English sentences is shown to predict the cross-linguistic difference in the grammaticality effect.}, language = {en} } @article{VasishthNicenboim2016, author = {Vasishth, Shravan and Nicenboim, Bruno}, title = {Statistical Methods for Linguistic Research: Foundational Ideas -- Part I}, series = {Language and linguistics compass}, volume = {10}, journal = {Language and linguistics compass}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1749-818X}, doi = {10.1111/lnc3.12201}, pages = {349 -- 369}, year = {2016}, abstract = {We present the fundamental ideas underlying statistical hypothesis testing using the frequentist framework. We start with a simple example that builds up the one-sample t-test from the beginning, explaining important concepts such as the sampling distribution of the sample mean, and the iid assumption. Then, we examine the meaning of the p-value in detail and discuss several important misconceptions about what a p-value does and does not tell us. This leads to a discussion of Type I and Type II error, power, and Type S and M error. An important conclusion from this discussion is that one should aim to carry out appropriately powered studies. Next, we discuss two common issues that we have encountered in psycholinguistics and linguistics: running experiments until significance is reached and the 'garden-of-forking-paths' problem discussed by Gelman and others. The best way to use frequentist methods is to run appropriately powered studies, check model assumptions, clearly separate exploratory data analysis from planned comparisons decided upon before the study was run, and always attempt to replicate results.}, language = {en} } @article{VasishthLewis2006, author = {Vasishth, Shravan and Lewis, Richard L.}, title = {Argument-head distance and processing complexity: Explaining both locality and antilocality effects}, series = {Language : journal of the Linguistic Society of America}, volume = {82}, journal = {Language : journal of the Linguistic Society of America}, number = {4}, publisher = {Linguistic Society of America}, address = {Washington}, issn = {0097-8507}, doi = {10.1353/lan.2006.0236}, pages = {767 -- 794}, year = {2006}, abstract = {Although proximity between arguments and verbs (locality) is a relatively robust determinant of sentence-processing difficulty (Hawkins 1998, 2001, Gibson 2000), increasing argument-verb distance can also facilitate processing (Konieczny 2000). We present two self-paced reading (SPR) experiments involving Hindi that provide further evidence of antilocality, and a third SPR experiment which suggests that similarity-based interference can attenuate this distance-based facilitation. A unified explanation of interference, locality, and antilocality effects is proposed via an independently motivated theory of activation decay and retrieval interference (Anderson et al.
2004).}, language = {en} } @article{SorensenHohensteinVasishth2016, author = {Sorensen, Tanner and Hohenstein, Sven and Vasishth, Shravan}, title = {Bayesian linear mixed models using Stan: A tutorial for psychologists, linguists, and cognitive scientists}, series = {Tutorials in Quantitative Methods for Psychology}, volume = {12}, journal = {Tutorials in Quantitative Methods for Psychology}, publisher = {University of Montreal, Department of Psychology}, address = {Montreal}, issn = {2292-1354}, doi = {10.20982/tqmp.12.3.p175}, pages = {175 -- 200}, year = {2016}, abstract = {With the arrival of the R packages nlme and lme4, linear mixed models (LMMs) have come to be widely used in experimentally driven areas like psychology, linguistics, and cognitive science. This tutorial provides a practical introduction to fitting LMMs in a Bayesian framework using the probabilistic programming language Stan. We choose Stan (rather than WinBUGS or JAGS) because it provides an elegant and scalable framework for fitting models in most of the standard applications of LMMs. We ease the reader into fitting increasingly complex LMMs, using a two-condition repeated measures self-paced reading study.}, language = {en} } @article{VasishthNicenboimEngelmannetal.2019, author = {Vasishth, Shravan and Nicenboim, Bruno and Engelmann, Felix and Burchert, Frank}, title = {Computational Models of Retrieval Processes in Sentence Processing}, series = {Trends in Cognitive Sciences}, volume = {23}, journal = {Trends in Cognitive Sciences}, number = {11}, publisher = {Elsevier}, address = {London}, issn = {1364-6613}, doi = {10.1016/j.tics.2019.09.003}, pages = {968 -- 982}, year = {2019}, abstract = {Sentence comprehension requires that the comprehender work out who did what to whom. This process has been characterized as retrieval from memory. This review summarizes the quantitative predictions and empirical coverage of the two existing computational models of retrieval and shows how the predictive performance of these two competing models can be tested against a benchmark dataset. We also show how computational modeling can help us better understand sources of variability in both unimpaired and impaired sentence comprehension.}, language = {en} }
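Editorial note on the Vasishth2020 entry above: its abstract contrasts grid search with Approximate Bayesian Computation (ABC). The sketch below is a minimal illustration of the simplest ABC variant, rejection sampling, not the paper's actual implementation; the simulator, parameter name, prior range, tolerance, and all numbers are hypothetical stand-ins (in the paper, the simulator is the Lewis and Vasishth (2005) cue-based retrieval model). BibTeX ignores text outside entries, so this note does not affect the file's validity.

import numpy as np

rng = np.random.default_rng(1)

# Hypothetical observed summary statistic: a mean reading time in ms.
observed_mean = 450.0

def simulate_mean(latency_factor, n=100):
    # Stand-in simulator: maps one parameter value to a simulated mean
    # reading time. In the actual method this would be the cue-based
    # retrieval model generating retrieval latencies.
    rts = rng.lognormal(mean=np.log(400.0 * latency_factor), sigma=0.3, size=n)
    return rts.mean()

# ABC rejection sampling: draw parameter values from the prior, simulate
# data, and keep only draws whose simulated summary statistic falls within
# a tolerance epsilon of the observed one. The accepted draws approximate
# the posterior, so uncertainty about the parameter can be quantified,
# unlike grid search, which returns a single best-fit point value.
prior_draws = rng.uniform(0.8, 1.5, size=20_000)
epsilon = 10.0
posterior = [f for f in prior_draws
             if abs(simulate_mean(f) - observed_mean) < epsilon]

print(f"accepted {len(posterior)} of {len(prior_draws)} draws; "
      f"posterior mean = {np.mean(posterior):.3f}")

Shrinking epsilon tightens the approximation to the true posterior at the cost of a lower acceptance rate, which is the basic trade-off in rejection ABC.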