@article{StoneVerissimoSchadetal.2021, author = {Stone, Kate and Verissimo, Joao and Schad, Daniel J. and Oltrogge, Elise and Vasishth, Shravan and Lago, Sol}, title = {The interaction of grammatically distinct agreement dependencies in predictive processing}, series = {Language, cognition and neuroscience}, volume = {36}, journal = {Language, cognition and neuroscience}, number = {9}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {2327-3798}, doi = {10.1080/23273798.2021.1921816}, pages = {1159 -- 1179}, year = {2021}, abstract = {Previous research has found that comprehenders sometimes predict information that is grammatically unlicensed by sentence constraints. An open question is why such grammatically unlicensed predictions occur. We examined the possibility that unlicensed predictions arise in situations of information conflict, for instance when comprehenders try to predict upcoming words while simultaneously building dependencies with previously encountered elements in memory. German possessive pronouns are a good testing ground for this hypothesis because they encode two grammatically distinct agreement dependencies: a retrospective one between the possessive and its previously mentioned referent, and a prospective one between the possessive and its following nominal head. In two visual world eye-tracking experiments, we estimated the onset of predictive effects in participants' fixations. The results showed that the retrospective dependency affected resolution of the prospective dependency by shifting the onset of predictive effects. We attribute this effect to an interaction between predictive and memory retrieval processes.}, language = {en} } @article{BuerkiAlarioVasishth2022, author = {B{\"u}rki, Audrey and Alario, F-Xavier and Vasishth, Shravan}, title = {When words collide: Bayesian meta-analyses of distractor and target properties in the picture-word interference paradigm}, series = {Quarterly Journal of Experimental Psychology}, volume = {76}, journal = {Quarterly Journal of Experimental Psychology}, number = {6}, publisher = {Sage Publications}, address = {London}, issn = {1747-0218}, doi = {10.1177/17470218221114644}, pages = {1410 -- 1430}, year = {2022}, abstract = {In the picture-word interference paradigm, participants name pictures while ignoring a written or spoken distractor word. Naming times to the pictures are slowed down by the presence of the distractor word. The present study investigates in detail the impact of distractor and target word properties on picture naming times, building on the seminal study by Miozzo and Caramazza. We report the results of several Bayesian meta-analyses based on 26 datasets. These analyses provide estimates of effect sizes and their precision for several variables and their interactions. They show the reliability of the distractor frequency effect on picture naming latencies (latencies decrease as the frequency of the distractor increases) and demonstrate for the first time the impact of distractor length, with longer naming latencies for trials with longer distractors. Moreover, distractor frequency interacts with target word frequency to predict picture naming latencies. 
The methodological and theoretical implications of these findings are discussed.}, language = {en} } @article{SchadNicenboimBuerkneretal.2022, author = {Schad, Daniel and Nicenboim, Bruno and B{\"u}rkner, Paul-Christian and Betancourt, Michael and Vasishth, Shravan}, title = {Workflow techniques for the robust use of Bayes factors}, series = {Psychological methods}, volume = {28}, journal = {Psychological methods}, number = {6}, publisher = {American Psychological Association}, address = {Washington}, issn = {1082-989X}, doi = {10.1037/met0000472}, pages = {1404 -- 1426}, year = {2022}, abstract = {Inferences about hypotheses are ubiquitous in the cognitive sciences. Bayes factors provide one general way to compare different hypotheses by their compatibility with the observed data. Those quantifications can then also be used to choose between hypotheses. While Bayes factors provide an immediate approach to hypothesis testing, they are highly sensitive to details of the data/model assumptions and it is unclear whether the details of the computational implementation (such as bridge sampling) are unbiased for complex analyses. Here, we study how Bayes factors misbehave under different conditions. This includes a study of errors in the estimation of Bayes factors; the first-ever use of simulation-based calibration to test the accuracy and bias of Bayes factor estimates using bridge sampling; a study of the stability of Bayes factors against different MCMC draws and sampling variation in the data; and a look at the variability of decisions based on Bayes factors using a utility function. We outline a Bayes factor workflow that researchers can use to study whether Bayes factors are robust for their individual analysis. Reproducible code is available from https://osf.io/y354c/.
Translational Abstract:
In psychology and related areas, scientific hypotheses are commonly tested by asking questions like "is [some] effect present or absent?" Such hypothesis testing is most often carried out using frequentist null hypothesis significance testing (NHST). The NHST procedure is very simple: It usually returns a p-value, which is then used to make binary decisions like "the effect is present/absent." For example, it is common to see studies in the media that draw simplistic conclusions like "coffee causes cancer," or "coffee reduces the chances of getting cancer." However, a powerful and more nuanced alternative approach exists: Bayes factors. Bayes factors have many advantages over NHST. However, for the complex statistical models that are commonly used for data analysis today, computing Bayes factors is not at all a simple matter. In this article, we discuss the main complexities associated with computing Bayes factors. This is the first article to provide a detailed workflow for understanding and computing Bayes factors in complex statistical models. The article provides a statistically more nuanced way to think about hypothesis testing than the overly simplistic tendency to declare effects as being "present" or "absent".}, language = {en} } @article{NicenboimVasishthRoesler2020, author = {Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Are words pre-activated probabilistically during sentence comprehension?}, series = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience}, volume = {142}, journal = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience}, publisher = {Elsevier Science}, address = {Oxford}, issn = {0028-3932}, doi = {10.1016/j.neuropsychologia.2020.107427}, pages = {27}, year = {2020}, abstract = {Several studies (e.g., Wicha et al., 2003b; DeLong et al., 2005) have shown that readers use information from the sentential context to predict nouns (or some of their features), and that predictability effects can be inferred from the EEG signal in determiners or adjectives appearing before the predicted noun. While these findings provide evidence for the pre-activation proposal, recent replication attempts together with inconsistencies in the results from the literature cast doubt on the robustness of this phenomenon. Our study presents the first attempt to use the effect of gender on predictability in German to study the pre-activation hypothesis, capitalizing on the fact that all German nouns have a gender and that their preceding determiners can show an unambiguous gender marking when the noun phrase has accusative case. Despite having a relatively large sample size (of 120 subjects), both our preregistered and exploratory analyses failed to yield conclusive evidence for or against an effect of pre-activation. The sign of the effect is, however, in the expected direction: the more unexpected the gender of the determiner, the larger the negativity. The recent, inconclusive replication attempts by Nieuwland et al. (2018) and others also show effects with signs in the expected direction. We conducted a Bayesian random-effects meta-analysis using our data and the publicly available data from these recent replication attempts.
Our meta-analysis shows a relatively clear but very small effect that is consistent with the pre-activation account and demonstrates a very important advantage of the Bayesian data analysis methodology: we can incrementally accumulate evidence to obtain increasingly precise estimates of the effect of interest.}, language = {en} } @article{StoneVasishthvonderMalsburg2022, author = {Stone, Kate and Vasishth, Shravan and von der Malsburg, Titus Raban}, title = {Does entropy modulate the prediction of German long-distance verb particles?}, series = {PLOS ONE}, volume = {17}, journal = {PLOS ONE}, number = {8}, publisher = {PLOS}, address = {San Francisco, California, US}, issn = {1932-6203}, doi = {10.1371/journal.pone.0267813}, pages = {25}, year = {2022}, abstract = {In this paper we examine the effect of uncertainty on readers' predictions about meaning. In particular, we were interested in how uncertainty might influence the likelihood of committing to a specific sentence meaning. We conducted two event-related potential (ERP) experiments using particle verbs such as turn down and manipulated uncertainty by constraining the context such that readers could be either highly certain about the identity of a distant verb particle, such as turn the bed [...] down, or less certain due to competing particles, such as turn the music [...] up/down. The study was conducted in German, where verb particles appear clause-finally and may be separated from the verb by a large amount of material. We hypothesised that this separation would encourage readers to predict the particle, and that high certainty would make prediction of a specific particle more likely than lower certainty. If a specific particle was predicted, this would reflect a strong commitment to sentence meaning that should incur a higher processing cost if the prediction is wrong. If a specific particle was less likely to be predicted, commitment should be weaker and the processing cost of a wrong prediction lower. If true, this could suggest that uncertainty discourages predictions via an unacceptable cost-benefit ratio. However, given the clear predictions made by the literature, it was surprisingly unclear whether the uncertainty manipulation affected the two ERP components studied, the N400 and the PNP. Bayes factor analyses showed that evidence for our a priori hypothesised effect sizes was inconclusive, although there was decisive evidence against a priori hypothesised effect sizes larger than 1 $\mu$V for the N400 and larger than 3 $\mu$V for the PNP. We attribute the inconclusive finding to the properties of verb-particle dependencies that differ from the verb-noun dependencies in which the N400 and PNP are often studied.}, language = {en} } @article{SchadBetancourtVasishth2021, author = {Schad, Daniel and Betancourt, Michael and Vasishth, Shravan}, title = {Toward a principled Bayesian workflow in cognitive science}, series = {Psychological methods}, volume = {26}, journal = {Psychological methods}, number = {1}, publisher = {American Psychological Association}, address = {Washington}, issn = {1082-989X}, doi = {10.1037/met0000275}, pages = {103 -- 126}, year = {2021}, abstract = {Experiments in research on memory, language, and in other areas of cognitive science are increasingly being analyzed using Bayesian methods. This has been facilitated by the development of probabilistic programming languages such as Stan, and easily accessible front-end packages such as brms.
The utility of Bayesian methods, however, ultimately depends on the relevance of the Bayesian model, in particular whether or not it accurately captures the structure of the data and the data analyst's domain expertise. Even with powerful software, the analyst is responsible for verifying the utility of their model. To demonstrate this point, we introduce a principled Bayesian workflow (Betancourt, 2018) to cognitive science. Using a concrete working example, we describe basic questions one should ask about the model: prior predictive checks, computational faithfulness, model sensitivity, and posterior predictive checks. The running example for demonstrating the workflow is data on reading times with a linguistic manipulation of object versus subject relative clause sentences. This principled Bayesian workflow also demonstrates how to use domain knowledge to inform prior distributions. It provides guidelines and checks for valid data analysis, avoiding overfitting complex models to noise, and capturing relevant data structure in a probabilistic model. Given the increasing use of Bayesian methods, we aim to discuss how these methods can be properly employed to obtain robust answers to scientific questions.}, language = {en} } @article{PaapeVasishth2022, author = {Paape, Dario and Vasishth, Shravan}, title = {Estimating the true cost of garden pathing:}, series = {Cognitive science}, volume = {46}, journal = {Cognitive science}, number = {8}, publisher = {Wiley-Blackwell}, address = {Malden, Mass.}, issn = {0364-0213}, doi = {10.1111/cogs.13186}, pages = {23}, year = {2022}, abstract = {What is the processing cost of being garden-pathed by a temporary syntactic ambiguity? We argue that comparing average reading times in garden-path versus non-garden-path sentences is not enough to answer this question. Trial-level contaminants such as inattention, the fact that garden pathing may occur non-deterministically in the ambiguous condition, and "triage" (rejecting the sentence without reanalysis; Fodor \& Inoue, 2000) lead to systematic underestimates of the true cost of garden pathing. Furthermore, the "pure" garden-path effect due to encountering an unexpected word needs to be separated from the additional cost of syntactic reanalysis. To get more realistic estimates for the individual processing costs of garden pathing and syntactic reanalysis, we implement a novel computational model that includes trial-level contaminants as probabilistically occurring latent cognitive processes. The model shows a good predictive fit to existing reading time and judgment data. Furthermore, the latent-process approach captures differences between noun phrase/zero complement (NP/Z) garden-path sentences and semantically biased reduced relative clause (RRC) garden-path sentences: The NP/Z garden path occurs nearly deterministically but can be mostly eliminated by adding a comma. 
By contrast, the RRC garden path occurs with a lower probability, but disambiguation via semantic plausibility is not always effective.}, language = {en} } @article{SchadVasishth2022, author = {Schad, Daniel and Vasishth, Shravan}, title = {The posterior probability of a null hypothesis given a statistically significant result}, series = {The quantitative methods for psychology}, volume = {18}, journal = {The quantitative methods for psychology}, number = {2}, publisher = {University of Montreal, Department of Psychology}, address = {Montreal}, issn = {1913-4126}, doi = {10.20982/tqmp.18.2.p011}, pages = {130 -- 141}, year = {2022}, abstract = {When researchers carry out a null hypothesis significance test, it is tempting to assume that a statistically significant result lowers Prob(H0), the probability of the null hypothesis being true. Technically, such a statement is meaningless for various reasons: e.g., the null hypothesis does not have a probability associated with it. However, it is possible to relax certain assumptions to compute the posterior probability Prob(H0) under repeated sampling. We show in a step-by-step guide that the intuitively appealing belief, that Prob(H0) is low when significant results have been obtained under repeated sampling, is in general incorrect and depends greatly on: (a) the prior probability of the null being true, (b) the type-I error rate, (c) the type-II error rate, and (d) replication of a result. Through step-by-step simulations using open-source code in the R System for Statistical Computing, we show that uncertainty about the null hypothesis being true often remains high despite a significant result. To help the reader develop intuitions about this common misconception, we provide a Shiny app (https://danielschad.shinyapps.io/probnull/). We expect that this tutorial will help researchers better understand and judge results from null hypothesis significance tests.}, language = {en} } @article{VasishthGelman2021, author = {Vasishth, Shravan and Gelman, Andrew}, title = {How to embrace variation and accept uncertainty in linguistic and psycholinguistic data analysis}, series = {Linguistics : an interdisciplinary journal of the language sciences}, volume = {59}, journal = {Linguistics : an interdisciplinary journal of the language sciences}, number = {5}, publisher = {De Gruyter Mouton}, address = {Berlin}, issn = {0024-3949}, doi = {10.1515/ling-2019-0051}, pages = {1311 -- 1342}, year = {2021}, abstract = {The use of statistical inference in linguistics and related areas like psychology typically involves a binary decision: either reject or accept some null hypothesis using statistical significance testing. When statistical power is low, this frequentist data-analytic approach breaks down: null results are uninformative, and effect size estimates associated with significant results are overestimated. Using an example from psycholinguistics, several alternative approaches are demonstrated for reporting inconsistencies between the data and a theoretical prediction. The key here is to focus on committing to a falsifiable prediction, on quantifying uncertainty statistically, and on learning to accept the fact that - in almost all practical data analysis situations - we can only draw uncertain conclusions from data, regardless of whether we manage to obtain statistical significance or not.
A focus on uncertainty quantification is likely to lead to fewer excessively bold claims that, on closer investigation, may turn out not to be supported by the data.}, language = {en} } @article{PaapeAvetisyanLagoetal.2021, author = {Paape, Dario and Avetisyan, Serine and Lago, Sol and Vasishth, Shravan}, title = {Modeling misretrieval and feature substitution in agreement attraction}, series = {Cognitive science}, volume = {45}, journal = {Cognitive science}, number = {8}, publisher = {Wiley-Blackwell}, address = {Malden, Mass.}, issn = {0364-0213}, doi = {10.1111/cogs.13019}, pages = {30}, year = {2021}, abstract = {We present computational modeling results based on a self-paced reading study investigating number attraction effects in Eastern Armenian. We implement three novel computational models of agreement attraction in a Bayesian framework and compare their predictive fit to the data using k-fold cross-validation. We find that our data are better accounted for by an encoding-based model of agreement attraction, compared to a retrieval-based model. A novel methodological contribution of our study is the use of comprehension questions with open-ended responses, so that both misinterpretation of the number feature of the subject phrase and misassignment of the thematic subject role of the verb can be investigated at the same time. We find evidence for both types of misinterpretation in our study, sometimes in the same trial. However, the specific error patterns in our data are not fully consistent with any previously proposed model.}, language = {en} } @article{MertzenLagoVasishth2021, author = {Mertzen, Daniela and Lago, Sol and Vasishth, Shravan}, title = {The benefits of preregistration for hypothesis-driven bilingualism research}, series = {Bilingualism : language and cognition}, volume = {24}, journal = {Bilingualism : language and cognition}, number = {5}, publisher = {Cambridge Univ. Press}, address = {Cambridge}, issn = {1366-7289}, doi = {10.1017/S1366728921000031}, pages = {807 -- 812}, year = {2021}, abstract = {Preregistration is an open science practice that requires the specification of research hypotheses and analysis plans before the data are inspected. Here, we discuss the benefits of preregistration for hypothesis-driven, confirmatory bilingualism research. Using examples from psycholinguistics and bilingualism, we illustrate how non-peer-reviewed preregistrations can serve to implement a clean distinction between hypothesis testing and data exploration. This distinction helps researchers avoid casting post-hoc hypotheses and analyses as confirmatory ones. We argue that, in keeping with current best practices in the experimental sciences, preregistration, along with sharing data and code, should be an integral part of hypothesis-driven bilingualism research.}, language = {en} } @article{JaegerMertzenVanDykeetal.2020, author = {J{\"a}ger, Lena Ann and Mertzen, Daniela and Van Dyke, Julie A.
and Vasishth, Shravan}, title = {Interference patterns in subject-verb agreement and reflexives revisited}, series = {Journal of memory and language}, volume = {111}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2019.104063}, pages = {21}, year = {2020}, abstract = {Cue-based retrieval theories in sentence processing predict two classes of interference effect: (i) Inhibitory interference is predicted when multiple items match a retrieval cue: cue-overloading leads to an overall slowdown in reading time; and (ii) Facilitatory interference arises when a retrieval target as well as a distractor only partially match the retrieval cues; this partial matching leads to an overall speedup in retrieval time. Inhibitory interference effects are widely observed, but facilitatory interference apparently has an exception: reflexives have been claimed to show no facilitatory interference effects. Because the claim is based on underpowered studies, we conducted a large-sample experiment that investigated both facilitatory and inhibitory interference. In contrast to previous studies, we find facilitatory interference effects in reflexives. We also present a quantitative evaluation of the cue-based retrieval model of Engelmann, Jager, and Vasishth (2019).}, language = {en} } @article{AvetisyanLagoVasishth2020, author = {Avetisyan, Serine and Lago, Sol and Vasishth, Shravan}, title = {Does case marking affect agreement attraction in comprehension?}, series = {Journal of memory and language}, volume = {112}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104087}, pages = {18}, year = {2020}, abstract = {Previous studies have suggested that distinctive case marking on noun phrases reduces attraction effects in production, i.e., the tendency to produce a verb that agrees with a nonsubject noun. An important open question is whether attraction effects are modulated by case information in sentence comprehension. To address this question, we conducted three attraction experiments in Armenian, a language with a rich and productive case system. The experiments showed clear attraction effects, and they also revealed an overall role of case marking such that participants showed faster response and reading times when the nouns in the sentence had different case. However, we found little indication that distinctive case marking modulated attraction effects. We present a theoretical proposal of how case and number information may be used differentially during agreement licensing in comprehension. More generally, this work sheds light on the nature of the retrieval cues deployed when completing morphosyntactic dependencies.}, language = {en} } @misc{StoneNicenboimVasishthetal.2022, author = {Stone, Kate and Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Understanding the effects of constraint and predictability in ERP}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {829}, issn = {1866-8364}, doi = {10.25932/publishup-58759}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-587594}, pages = {71}, year = {2022}, abstract = {Intuitively, strongly constraining contexts should lead to stronger probabilistic representations of sentences in memory. 
Encountering unexpected words could therefore be expected to trigger costlier shifts in these representations than expected words. However, psycholinguistic measures commonly used to study probabilistic processing, such as the N400 event-related potential (ERP) component, are sensitive to word predictability but not to contextual constraint. Some research suggests that constraint-related processing cost may be measurable via an ERP positivity following the N400, known as the anterior post-N400 positivity (PNP). The PNP is argued to reflect update of a sentence representation and to be distinct from the posterior P600, which reflects conflict detection and reanalysis. However, constraint-related PNP findings are inconsistent. We sought to conceptually replicate Federmeier et al. (2007) and Kuperberg et al. (2020), who observed that the PNP, but not the N400 or the P600, was affected by constraint at unexpected but plausible words. Using a pre-registered design and statistical approach maximising power, we demonstrated a dissociated effect of predictability and constraint: strong evidence for predictability but not constraint in the N400 window, and strong evidence for constraint but not predictability in the later window. However, the constraint effect was consistent with a P600 and not a PNP, suggesting increased conflict between a strong representation and unexpected input rather than greater update of the representation. We conclude that either a simple strong/weak constraint design is not always sufficient to elicit the PNP, or that previous PNP constraint findings could be an artifact of smaller sample size.}, language = {en} } @article{StoneNicenboimVasishthetal.2023, author = {Stone, Kate and Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Understanding the effects of constraint and predictability in ERP}, series = {Neurobiology of language}, volume = {4}, journal = {Neurobiology of language}, number = {2}, publisher = {MIT Press}, address = {Cambridge, MA, USA}, issn = {2641-4368}, doi = {10.1162/nol_a_00094}, pages = {221 -- 256}, year = {2023}, abstract = {Intuitively, strongly constraining contexts should lead to stronger probabilistic representations of sentences in memory. Encountering unexpected words could therefore be expected to trigger costlier shifts in these representations than expected words. However, psycholinguistic measures commonly used to study probabilistic processing, such as the N400 event-related potential (ERP) component, are sensitive to word predictability but not to contextual constraint. Some research suggests that constraint-related processing cost may be measurable via an ERP positivity following the N400, known as the anterior post-N400 positivity (PNP). The PNP is argued to reflect update of a sentence representation and to be distinct from the posterior P600, which reflects conflict detection and reanalysis. However, constraint-related PNP findings are inconsistent. We sought to conceptually replicate Federmeier et al. (2007) and Kuperberg et al. (2020), who observed that the PNP, but not the N400 or the P600, was affected by constraint at unexpected but plausible words. Using a pre-registered design and statistical approach maximising power, we demonstrated a dissociated effect of predictability and constraint: strong evidence for predictability but not constraint in the N400 window, and strong evidence for constraint but not predictability in the later window. 
However, the constraint effect was consistent with a P600 and not a PNP, suggesting increased conflict between a strong representation and unexpected input rather than greater update of the representation. We conclude that either a simple strong/weak constraint design is not always sufficient to elicit the PNP, or that previous PNP constraint findings could be an artifact of smaller sample size.}, language = {en} } @article{PaapeVasishthvonderMalsburg2020, author = {Paape, Dario and Vasishth, Shravan and von der Malsburg, Titus Raban}, title = {Quadruplex negatio invertit?}, series = {Journal of semantics}, volume = {37}, journal = {Journal of semantics}, number = {4}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0167-5133}, doi = {10.1093/jos/ffaa009}, pages = {509 -- 555}, year = {2020}, abstract = {So-called "depth charge" sentences (No head injury is too trivial to be ignored) are interpreted by the vast majority of speakers to mean the opposite of what their compositional semantics would dictate. The semantic inversion that is observed for sentences of this type is the strongest and most persistent linguistic illusion known to the field (Wason \& Reich, 1979). However, it has recently been argued that the preferred interpretation arises not because of a prevailing failure of the processing system, but rather because the non-compositional meaning is grammaticalized in the form of a stored construction (Cook \& Stevenson, 2010; Fortuin, 2014). In a series of five experiments, we investigate whether the depth charge effect is better explained by processing failure due to memory overload (the overloading hypothesis) or by the existence of an underlying grammaticalized construction with two available meanings (the ambiguity hypothesis). To our knowledge, our experiments are the first to explore the on-line processing profile of depth charge sentences. Overall, the data are consistent with specific variants of the ambiguity and overloading hypotheses while providing evidence against other variants. As an extension of the overloading hypothesis, we suggest two heuristic processes that may ultimately yield the incorrect reading when compositional processing is suspended for strategic reasons.}, language = {en} } @article{StonevonderMalsburgVasishth2020, author = {Stone, Kate and von der Malsburg, Titus Raban and Vasishth, Shravan}, title = {The effect of decay and lexical uncertainty on processing long-distance dependencies in reading}, series = {PeerJ}, volume = {8}, journal = {PeerJ}, publisher = {PeerJ Inc.}, address = {London}, issn = {2167-8359}, doi = {10.7717/peerj.10438}, pages = {33}, year = {2020}, abstract = {To make sense of a sentence, a reader must keep track of dependent relationships between words, such as between a verb and its particle (e.g. turn the music down). In languages such as German, verb-particle dependencies often span long distances, with the particle only appearing at the end of the clause. This means that it may be necessary to process a large amount of intervening sentence material before the full verb of the sentence is known. To facilitate processing, previous studies have shown that readers can preactivate the lexical information of neighbouring upcoming words, but less is known about whether such preactivation can be sustained over longer distances. We asked the question, do readers preactivate lexical information about long-distance verb particles? 
In one self-paced reading and one eye tracking experiment, we delayed the appearance of an obligatory verb particle that varied only in the predictability of its lexical identity. We additionally manipulated the length of the delay in order to test two contrasting accounts of dependency processing: that increased distance between dependent elements may sharpen expectation of the distant word and facilitate its processing (an antilocality effect), or that it may slow processing via temporal activation decay (a locality effect). We isolated decay by delaying the particle with a neutral noun modifier containing no information about the identity of the upcoming particle, and no known sources of interference or working memory load. Under the assumption that readers would preactivate the lexical representations of plausible verb particles, we hypothesised that a smaller number of plausible particles would lead to stronger preactivation of each particle, and thus higher predictability of the target. This in turn should have made predictable target particles more resistant to the effects of decay than less predictable target particles. The eye tracking experiment provided evidence that higher predictability did facilitate reading times, but found evidence against any effect of decay or its interaction with predictability. The self-paced reading study provided evidence against any effect of predictability or temporal decay, or their interaction. In sum, we provide evidence from eye movements that readers preactivate long-distance lexical content and that adding neutral sentence information does not induce detectable decay of this activation. The findings are consistent with accounts suggesting that delaying dependency resolution may only affect processing if the intervening information either confirms expectations or adds to working memory load, and that temporal activation decay alone may not be a major predictor of processing time.}, language = {en} } @article{Vasishth2020, author = {Vasishth, Shravan}, title = {Using approximate Bayesian computation for estimating parameters in the cue-based retrieval model of sentence processing}, series = {MethodsX}, volume = {7}, journal = {MethodsX}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2215-0161}, doi = {10.1016/j.mex.2020.100850}, pages = {6}, year = {2020}, abstract = {A commonly used approach to parameter estimation in computational models is the so-called grid search procedure: the entire parameter space is searched in small steps to determine the parameter value that provides the best fit to the observed data. This approach has several disadvantages: first, it can be computationally very expensive; second, one optimal point value of the parameter is reported as the best fit value; we cannot quantify our uncertainty about the parameter estimate. In the main journal article that this methods article accompanies (Jager et al., 2020, Interference patterns in subject-verb agreement and reflexives revisited: A large-sample study, Journal of Memory and Language), we carried out parameter estimation using Approximate Bayesian Computation (ABC), which is a Bayesian approach that allows us to quantify our uncertainty about the parameter's values given data. This customization has the further advantage that it allows us to generate both prior and posterior predictive distributions of reading times from the cue-based retrieval model of Lewis and Vasishth, 2005.
Instead of the conventional method of using grid search, we use Approximate Bayesian Computation (ABC) for parameter estimation in the Lewis and Vasishth (2005) model.
The ABC method of parameter estimation has the advantage that the uncertainty about the parameter can be quantified.}, language = {en} } @article{EngbertRabeSchwetlicketal.2022, author = {Engbert, Ralf and Rabe, Maximilian Michael and Schwetlick, Lisa and Seelig, Stefan A. and Reich, Sebastian and Vasishth, Shravan}, title = {Data assimilation in dynamical cognitive science}, series = {Trends in cognitive sciences}, volume = {26}, journal = {Trends in cognitive sciences}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1364-6613}, doi = {10.1016/j.tics.2021.11.006}, pages = {99 -- 102}, year = {2022}, abstract = {Dynamical models make specific assumptions about cognitive processes that generate human behavior. In data assimilation, these models are tested against time-ordered data. Recent progress on Bayesian data assimilation demonstrates that this approach combines the strengths of statistical modeling of individual differences with those of dynamical cognitive models.}, language = {en} } @article{BuerkiFoschiniElbuyMadecetal.2020, author = {B{\"u}rki-Foschini, Audrey Damaris and Elbuy, Shereen and Madec, Sylvain and Vasishth, Shravan}, title = {What did we learn from forty years of research on semantic interference?}, series = {Journal of memory and language}, volume = {114}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104125}, pages = {25}, year = {2020}, abstract = {When participants in an experiment have to name pictures while ignoring distractor words superimposed on the picture or presented auditorily (i.e., picture-word interference paradigm), they take more time when the word to be named (or target) and the distractor word are from the same semantic category (e.g., cat-dog). This experimental effect is known as the semantic interference effect, and is probably one of the most studied in the language production literature. The functional origin of the effect and the exact conditions in which it occurs are, however, still debated. Since Lupker (1979) reported the effect in the first response time experiment about 40 years ago, more than 300 similar experiments have been conducted. The semantic interference effect was replicated in many experiments, but several studies also reported the absence of an effect in a subset of experimental conditions. The aim of the present study is to provide a comprehensive theoretical review of the existing evidence to date and several Bayesian meta-analyses and meta-regressions to determine the size of the effect and explore the experimental conditions in which the effect surfaces. The results are discussed in the light of current debates about the functional origin of the semantic interference effect and its implications for our understanding of the language production system.}, language = {en} }