@article{NicenboimVasishthRoesler2020, author = {Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Are words pre-activated probabilistically during sentence comprehension?}, series = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience}, volume = {142}, journal = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience}, publisher = {Elsevier Science}, address = {Oxford}, issn = {0028-3932}, doi = {10.1016/j.neuropsychologia.2020.107427}, pages = {27}, year = {2020}, abstract = {Several studies (e.g., Wicha et al., 2003b; DeLong et al., 2005) have shown that readers use information from the sentential context to predict nouns (or some of their features), and that predictability effects can be inferred from the EEG signal in determiners or adjectives appearing before the predicted noun. While these findings provide evidence for the pre-activation proposal, recent replication attempts together with inconsistencies in the results from the literature cast doubt on the robustness of this phenomenon. Our study presents the first attempt to use the effect of gender on predictability in German to study the pre-activation hypothesis, capitalizing on the fact that all German nouns have a gender and that their preceding determiners can show an unambiguous gender marking when the noun phrase has accusative case. Despite having a relatively large sample size (of 120 subjects), both our preregistered and exploratory analyses failed to yield conclusive evidence for or against an effect of pre-activation. The sign of the effect is, however, in the expected direction: the more unexpected the gender of the determiner, the larger the negativity. The recent, inconclusive replication attempts by Nieuwland et al. (2018) and others also show effects with signs in the expected direction. We conducted a Bayesian random-effects meta-analysis using our data and the publicly available data from these recent replication attempts. Our meta-analysis shows a relatively clear but very small effect that is consistent with the pre-activation account and demonstrates a very important advantage of the Bayesian data analysis methodology: we can incrementally accumulate evidence to obtain increasingly precise estimates of the effect of interest.}, language = {en} } @article{JaegerMertzenVanDykeetal.2020, author = {J{\"a}ger, Lena Ann and Mertzen, Daniela and Van Dyke, Julie A. and Vasishth, Shravan}, title = {Interference patterns in subject-verb agreement and reflexives revisited}, series = {Journal of memory and language}, volume = {111}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2019.104063}, pages = {21}, year = {2020}, abstract = {Cue-based retrieval theories in sentence processing predict two classes of interference effect: (i) Inhibitory interference is predicted when multiple items match a retrieval cue: cue-overloading leads to an overall slowdown in reading time; and (ii) Facilitatory interference arises when a retrieval target as well as a distractor only partially match the retrieval cues; this partial matching leads to an overall speedup in retrieval time. Inhibitory interference effects are widely observed, but facilitatory interference apparently has an exception: reflexives have been claimed to show no facilitatory interference effects.
Because the claim is based on underpowered studies, we conducted a large-sample experiment that investigated both facilitatory and inhibitory interference. In contrast to previous studies, we find facilitatory interference effects in reflexives. We also present a quantitative evaluation of the cue-based retrieval model of Engelmann, J{\"a}ger, and Vasishth (2019).}, language = {en} } @article{AvetisyanLagoVasishth2020, author = {Avetisyan, Serine and Lago, Sol and Vasishth, Shravan}, title = {Does case marking affect agreement attraction in comprehension?}, series = {Journal of memory and language}, volume = {112}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104087}, pages = {18}, year = {2020}, abstract = {Previous studies have suggested that distinctive case marking on noun phrases reduces attraction effects in production, i.e., the tendency to produce a verb that agrees with a nonsubject noun. An important open question is whether attraction effects are modulated by case information in sentence comprehension. To address this question, we conducted three attraction experiments in Armenian, a language with a rich and productive case system. The experiments showed clear attraction effects, and they also revealed an overall role of case marking such that participants showed faster response and reading times when the nouns in the sentence had different case. However, we found little indication that distinctive case marking modulated attraction effects. We present a theoretical proposal of how case and number information may be used differentially during agreement licensing in comprehension. More generally, this work sheds light on the nature of the retrieval cues deployed when completing morphosyntactic dependencies.}, language = {en} } @article{PaapeVasishthvonderMalsburg2020, author = {Paape, Dario and Vasishth, Shravan and von der Malsburg, Titus Raban}, title = {Quadruplex negatio invertit?}, series = {Journal of semantics}, volume = {37}, journal = {Journal of semantics}, number = {4}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0167-5133}, doi = {10.1093/jos/ffaa009}, pages = {509 -- 555}, year = {2020}, abstract = {So-called "depth charge" sentences (No head injury is too trivial to be ignored) are interpreted by the vast majority of speakers to mean the opposite of what their compositional semantics would dictate. The semantic inversion that is observed for sentences of this type is the strongest and most persistent linguistic illusion known to the field (Wason \& Reich, 1979). However, it has recently been argued that the preferred interpretation arises not because of a prevailing failure of the processing system, but rather because the non-compositional meaning is grammaticalized in the form of a stored construction (Cook \& Stevenson, 2010; Fortuin, 2014). In a series of five experiments, we investigate whether the depth charge effect is better explained by processing failure due to memory overload (the overloading hypothesis) or by the existence of an underlying grammaticalized construction with two available meanings (the ambiguity hypothesis). To our knowledge, our experiments are the first to explore the on-line processing profile of depth charge sentences. Overall, the data are consistent with specific variants of the ambiguity and overloading hypotheses while providing evidence against other variants.
As an extension of the overloading hypothesis, we suggest two heuristic processes that may ultimately yield the incorrect reading when compositional processing is suspended for strategic reasons.}, language = {en} } @article{StonevonderMalsburgVasishth2020, author = {Stone, Kate and von der Malsburg, Titus Raban and Vasishth, Shravan}, title = {The effect of decay and lexical uncertainty on processing long-distance dependencies in reading}, series = {PeerJ}, volume = {8}, journal = {PeerJ}, publisher = {PeerJ Inc.}, address = {London}, issn = {2167-8359}, doi = {10.7717/peerj.10438}, pages = {33}, year = {2020}, abstract = {To make sense of a sentence, a reader must keep track of dependent relationships between words, such as between a verb and its particle (e.g. turn the music down). In languages such as German, verb-particle dependencies often span long distances, with the particle only appearing at the end of the clause. This means that it may be necessary to process a large amount of intervening sentence material before the full verb of the sentence is known. To facilitate processing, previous studies have shown that readers can preactivate the lexical information of neighbouring upcoming words, but less is known about whether such preactivation can be sustained over longer distances. We asked the question, do readers preactivate lexical information about long-distance verb particles? In one self-paced reading and one eye tracking experiment, we delayed the appearance of an obligatory verb particle that varied only in the predictability of its lexical identity. We additionally manipulated the length of the delay in order to test two contrasting accounts of dependency processing: that increased distance between dependent elements may sharpen expectation of the distant word and facilitate its processing (an antilocality effect), or that it may slow processing via temporal activation decay (a locality effect). We isolated decay by delaying the particle with a neutral noun modifier containing no information about the identity of the upcoming particle, and no known sources of interference or working memory load. Under the assumption that readers would preactivate the lexical representations of plausible verb particles, we hypothesised that a smaller number of plausible particles would lead to stronger preactivation of each particle, and thus higher predictability of the target. This in turn should have made predictable target particles more resistant to the effects of decay than less predictable target particles. The eye tracking experiment provided evidence that higher predictability did facilitate reading times, but found evidence against any effect of decay or its interaction with predictability. The self-paced reading study provided evidence against any effect of predictability or temporal decay, or their interaction. In sum, we provide evidence from eye movements that readers preactivate long-distance lexical content and that adding neutral sentence information does not induce detectable decay of this activation. 
The findings are consistent with accounts suggesting that delaying dependency resolution may only affect processing if the intervening information either confirms expectations or adds to working memory load, and that temporal activation decay alone may not be a major predictor of processing time.}, language = {en} } @article{Vasishth2020, author = {Vasishth, Shravan}, title = {Using approximate Bayesian computation for estimating parameters in the cue-based retrieval model of sentence processing}, series = {MethodsX}, volume = {7}, journal = {MethodsX}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2215-0161}, doi = {10.1016/j.mex.2020.100850}, pages = {6}, year = {2020}, abstract = {A commonly used approach to parameter estimation in computational models is the so-called grid search procedure: the entire parameter space is searched in small steps to determine the parameter value that provides the best fit to the observed data. This approach has several disadvantages: first, it can be computationally very expensive; second, one optimal point value of the parameter is reported as the best fit value; we cannot quantify our uncertainty about the parameter estimate. In the main journal article that this methods article accompanies (J{\"a}ger et al., 2020, Interference patterns in subject-verb agreement and reflexives revisited: A large-sample study, Journal of Memory and Language), we carried out parameter estimation using Approximate Bayesian Computation (ABC), which is a Bayesian approach that allows us to quantify our uncertainty about the parameter's values given data. This customization has the further advantage that it allows us to generate both prior and posterior predictive distributions of reading times from the cue-based retrieval model of Lewis and Vasishth (2005).
Instead of the conventional method of using grid search, we use Approximate Bayesian Computation (ABC) for parameter estimation in the Lewis and Vasishth (2005) model.
The ABC method of parameter estimation has the advantage that the uncertainty of the parameter can be quantified.}, language = {en} } @article{BuerkiFoschiniElbuyMadecetal.2020, author = {B{\"u}rki-Foschini, Audrey Damaris and Elbuy, Shereen and Madec, Sylvain and Vasishth, Shravan}, title = {What did we learn from forty years of research on semantic interference?}, series = {Journal of memory and language}, volume = {114}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104125}, pages = {25}, year = {2020}, abstract = {When participants in an experiment have to name pictures while ignoring distractor words superimposed on the picture or presented auditorily (i.e., picture-word interference paradigm), they take more time when the word to be named (or target) and distractor words are from the same semantic category (e.g., cat-dog). This experimental effect is known as the semantic interference effect, and is probably one of the most studied in the language production literature. The functional origin of the effect and the exact conditions in which it occurs are however still debated. Since Lupker (1979) reported the effect in the first response time experiment about 40 years ago, more than 300 similar experiments have been conducted. The semantic interference effect was replicated in many experiments, but several studies also reported the absence of an effect in a subset of experimental conditions. The aim of the present study is to provide a comprehensive theoretical review of the existing evidence to date and several Bayesian meta-analyses and meta-regressions to determine the size of the effect and explore the experimental conditions in which the effect surfaces. The results are discussed in the light of current debates about the functional origin of the semantic interference effect and its implications for our understanding of the language production system.}, language = {en} } @article{SmithVasishth2020, author = {Smith, Garrett and Vasishth, Shravan}, title = {A principled approach to feature selection in models of sentence processing}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {44}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, number = {12}, publisher = {Wiley}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12918}, pages = {25}, year = {2020}, abstract = {Among theories of human language comprehension, cue-based memory retrieval has proven to be a useful framework for understanding when and how processing difficulty arises in the resolution of long-distance dependencies. Most previous work in this area has assumed that very general retrieval cues like [+subject] or [+singular] do the work of identifying (and sometimes misidentifying) a retrieval target in order to establish a dependency between words. However, recent work suggests that general, handpicked retrieval cues like these may not be enough to explain illusions of plausibility (Cunnings \& Sturt, 2018), which can arise in sentences like The letter next to the porcelain plate shattered. 
Capturing such retrieval interference effects requires lexically specific features and retrieval cues, but handpicking the features is hard to do in a principled way and greatly increases modeler degrees of freedom. To remedy this, we use well-established word embedding methods for creating distributed lexical feature representations that encode information relevant for retrieval using distributed retrieval cue vectors. We show that the similarity between the feature and cue vectors (a measure of plausibility) predicts total reading times in Cunnings and Sturt's eye-tracking data. The features can easily be plugged into existing parsing models (including cue-based retrieval and self-organized parsing), putting very different models on more equal footing and facilitating future quantitative comparisons.}, language = {en} } @article{SchadVasishthHohensteinetal.2020, author = {Schad, Daniel and Vasishth, Shravan and Hohenstein, Sven and Kliegl, Reinhold}, title = {How to capitalize on a priori contrasts in linear (mixed) models}, series = {Journal of memory and language}, volume = {110}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2019.104038}, pages = {40}, year = {2020}, abstract = {Factorial experiments in research on memory, language, and in other areas are often analyzed using analysis of variance (ANOVA). However, for effects with more than one numerator degree of freedom, e.g., for experimental factors with more than two levels, the ANOVA omnibus F-test is not informative about the source of a main effect or interaction. Because researchers typically have specific hypotheses about which condition means differ from each other, a priori contrasts (i.e., comparisons planned before the sample means are known) between specific conditions or combinations of conditions are the appropriate way to represent such hypotheses in the statistical model. Many researchers have pointed out that contrasts should be "tested instead of, rather than as a supplement to, the ordinary 'omnibus' F test" (Hays, 1973, p. 601). In this tutorial, we explain the mathematics underlying different kinds of contrasts (i.e., treatment, sum, repeated, polynomial, custom, nested, interaction contrasts), discuss their properties, and demonstrate how they are applied in the R System for Statistical Computing (R Core Team, 2018). In this context, we explain the generalized inverse, which is needed to compute the coefficients for contrasts that test hypotheses that are not covered by the default set of contrasts. A detailed understanding of contrast coding is crucial for successful and correct specification in linear models (including linear mixed models). Contrasts defined a priori yield far more useful confirmatory tests of experimental hypotheses than standard omnibus F-tests. Reproducible code is available from https://osf.io/7ukf6/.}, language = {en} }