@comment{Cleaned repository export: @misc corrected to @article where journal
metadata is present; redundant series/journal duplicates removed; page ranges
normalized to en-dashes without spaces; acronyms and roman numerals brace-
protected in titles; OCR/export garbles in abstracts repaired. Citation keys
are unchanged so existing \cite commands keep working.}

@article{VasishthNicenboim2016,
  author   = {Vasishth, Shravan and Nicenboim, Bruno},
  title    = {Statistical Methods for Linguistic Research: Foundational Ideas - Part {I}},
  journal  = {Language and linguistics compass},
  volume   = {10},
  publisher = {Wiley-Blackwell},
  address  = {Hoboken},
  issn     = {1749-818X},
  doi      = {10.1111/lnc3.12201},
  pages    = {349--369},
  year     = {2016},
  abstract = {We present the fundamental ideas underlying statistical hypothesis testing using the frequentist framework. We start with a simple example that builds up the one-sample t-test from the beginning, explaining important concepts such as the sampling distribution of the sample mean, and the iid assumption. Then, we examine the meaning of the p-value in detail and discuss several important misconceptions about what a p-value does and does not tell us. This leads to a discussion of Type I, II error and power, and Type S and M error. An important conclusion from this discussion is that one should aim to carry out appropriately powered studies. Next, we discuss two common issues that we have encountered in psycholinguistics and linguistics: running experiments until significance is reached and the 'garden-of-forking-paths' problem discussed by Gelman and others. The best way to use frequentist methods is to run appropriately powered studies, check model assumptions, clearly separate exploratory data analysis from planned comparisons decided upon before the study was run, and always attempt to replicate results.},
  language = {en},
}

@article{VasishthNicenboimEngelmannetal.2019,
  author   = {Vasishth, Shravan and Nicenboim, Bruno and Engelmann, Felix and Burchert, Frank},
  title    = {Computational Models of Retrieval Processes in Sentence Processing},
  journal  = {Trends in Cognitive Sciences},
  volume   = {23},
  number   = {11},
  publisher = {Elsevier},
  address  = {London},
  issn     = {1364-6613},
  doi      = {10.1016/j.tics.2019.09.003},
  pages    = {968--982},
  year     = {2019},
  abstract = {Sentence comprehension requires that the comprehender work out who did what to whom. This process has been characterized as retrieval from memory. This review summarizes the quantitative predictions and empirical coverage of the two existing computational models of retrieval and shows how the predictive performance of these two competing models can be tested against a benchmark data-set. We also show how computational modeling can help us better understand sources of variability in both unimpaired and impaired sentence comprehension.},
  language = {en},
}

@article{VasishthNicenboimBeckmanetal.2018,
  author   = {Vasishth, Shravan and Nicenboim, Bruno and Beckman, Mary E. and Li, Fangfang and Kong, Eun Jong},
  title    = {{Bayesian} data analysis in the phonetic sciences},
  journal  = {Journal of phonetics},
  volume   = {71},
  publisher = {Elsevier},
  address  = {London},
  issn     = {0095-4470},
  doi      = {10.1016/j.wocn.2018.07.008},
  pages    = {147--161},
  year     = {2018},
  abstract = {This tutorial analyzes voice onset time (VOT) data from Dongbei (Northeastern) Mandarin Chinese and North American English to demonstrate how Bayesian linear mixed models can be fit using the programming language Stan via the R package brms. Through this case study, we demonstrate some of the advantages of the Bayesian framework: researchers can (i) flexibly define the underlying process that they believe to have generated the data; (ii) obtain direct information regarding the uncertainty about the parameter that relates the data to the theoretical question being studied; and (iii) incorporate prior knowledge into the analysis. Getting started with Bayesian modeling can be challenging, especially when one is trying to model one's own (often unique) data. It is difficult to see how one can apply general principles described in textbooks to one's own specific research problem. We address this barrier to using Bayesian methods by providing three detailed examples, with source code to allow easy reproducibility. The examples presented are intended to give the reader a flavor of the process of model-fitting; suggestions for further study are also provided. All data and code are available from: https://osf.io/g4zpv.},
  language = {en},
}

@article{NicenboimRoettgerVasishth2018,
  author   = {Nicenboim, Bruno and Roettger, Timo B. and Vasishth, Shravan},
  title    = {Using meta-analysis for evidence synthesis},
  journal  = {Journal of phonetics},
  volume   = {70},
  publisher = {Elsevier},
  address  = {London},
  issn     = {0095-4470},
  doi      = {10.1016/j.wocn.2018.06.001},
  pages    = {39--55},
  year     = {2018},
  abstract = {Within quantitative phonetics, it is common practice to draw conclusions based on statistical significance alone. Using incomplete neutralization of final devoicing in German as a case study, we illustrate the problems with this approach. If researchers find a significant acoustic difference between voiceless and devoiced obstruents, they conclude that neutralization is incomplete, and if they find no significant difference, they conclude that neutralization is complete. However, such strong claims regarding the existence or absence of an effect based on significant results alone can be misleading. Instead, the totality of available evidence should be brought to bear on the question. Towards this end, we synthesize the evidence from 14 studies on incomplete neutralization in German using a Bayesian random-effects meta-analysis. Our meta-analysis provides evidence in favor of incomplete neutralization. We conclude with some suggestions for improving the quality of future research on phonetic phenomena: ensure that sample sizes allow for high-precision estimates of the effect; avoid the temptation to deploy researcher degrees of freedom when analyzing data; focus on estimates of the parameter of interest and the uncertainty about that parameter; attempt to replicate effects found; and, whenever possible, make both the data and analysis available publicly. (c) 2018 Elsevier Ltd. All rights reserved.},
  language = {en},
}

@article{NicenboimVasishth2018,
  author   = {Nicenboim, Bruno and Vasishth, Shravan},
  title    = {Models of retrieval in sentence comprehension},
  journal  = {Journal of memory and language},
  volume   = {99},
  publisher = {Elsevier},
  address  = {San Diego},
  issn     = {0749-596X},
  doi      = {10.1016/j.jml.2017.08.004},
  pages    = {1--34},
  year     = {2018},
  abstract = {Research on similarity-based interference has provided extensive evidence that the formation of dependencies between non-adjacent words relies on a cue-based retrieval mechanism. There are two different models that can account for one of the main predictions of interference, i.e., a slowdown at a retrieval site, when several items share a feature associated with a retrieval cue: Lewis and Vasishth's (2005) activation-based model and McElree's (2000) direct-access model. Even though these two models have been used almost interchangeably, they are based on different assumptions and predict differences in the relationship between reading times and response accuracy. The activation-based model follows the assumptions of the ACT-R framework, and its retrieval process behaves as a lognormal race between accumulators of evidence with a single variance. Under this model, accuracy of the retrieval is determined by the winner of the race and retrieval time by its rate of accumulation. In contrast, the direct-access model assumes a model of memory where only the probability of retrieval can be affected, while the retrieval time is drawn from the same distribution; in this model, differences in latencies are a by-product of the possibility of backtracking and repairing incorrect retrievals. We implemented both models in a Bayesian hierarchical framework in order to evaluate them and compare them. The data show that correct retrievals take longer than incorrect ones, and this pattern is better fit under the direct-access model than under the activation-based model. This finding does not rule out the possibility that retrieval may be behaving as a race model with assumptions that follow less closely the ones from the ACT-R framework. By introducing a modification of the activation model, i.e., by assuming that the accumulation of evidence for retrieval of incorrect items is not only slower but noisier (i.e., different variances for the correct and incorrect items), the model can provide a fit as good as the one of the direct-access model. This first ever computational evaluation of alternative accounts of retrieval processes in sentence processing opens the way for a broader investigation of theories of dependency completion.},
  language = {en},
}

@article{NicenboimVasishthEngelmannetal.2018,
  author   = {Nicenboim, Bruno and Vasishth, Shravan and Engelmann, Felix and Suckow, Katja},
  title    = {Exploratory and confirmatory analyses in sentence processing},
  journal  = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society},
  volume   = {42},
  publisher = {Wiley},
  address  = {Hoboken},
  issn     = {0364-0213},
  doi      = {10.1111/cogs.12589},
  pages    = {1075--1100},
  year     = {2018},
  abstract = {Given the replication crisis in cognitive science, it is important to consider what researchers need to do in order to report results that are reliable. We consider three changes in current practice that have the potential to deliver more realistic and robust claims. First, the planned experiment should be divided into two stages, an exploratory stage and a confirmatory stage. This clear separation allows the researcher to check whether any results found in the exploratory stage are robust. The second change is to carry out adequately powered studies. We show that this is imperative if we want to obtain realistic estimates of effects in psycholinguistics. The third change is to use Bayesian data-analytic methods rather than frequentist ones; the Bayesian framework allows us to focus on the best estimates we can obtain of the effect, rather than rejecting a strawman null. As a case study, we investigate number interference effects in German. Number feature interference is predicted by cue-based retrieval models of sentence processing (Van Dyke \& Lewis, 2003; Vasishth \& Lewis, 2006), but it has shown inconsistent results. We show that by implementing the three changes mentioned, suggestive evidence emerges that is consistent with the predicted number interference effects.},
  language = {en},
}

@misc{StoneNicenboimVasishthetal.2022,
  author   = {Stone, Kate and Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank},
  title    = {Understanding the effects of constraint and predictability in {ERP}},
  series   = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  number   = {829},
  issn     = {1866-8364},
  doi      = {10.25932/publishup-58759},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-587594},
  pages    = {71},
  year     = {2022},
  abstract = {Intuitively, strongly constraining contexts should lead to stronger probabilistic representations of sentences in memory. Encountering unexpected words could therefore be expected to trigger costlier shifts in these representations than expected words. However, psycholinguistic measures commonly used to study probabilistic processing, such as the N400 event-related potential (ERP) component, are sensitive to word predictability but not to contextual constraint. Some research suggests that constraint-related processing cost may be measurable via an ERP positivity following the N400, known as the anterior post-N400 positivity (PNP). The PNP is argued to reflect update of a sentence representation and to be distinct from the posterior P600, which reflects conflict detection and reanalysis. However, constraint-related PNP findings are inconsistent. We sought to conceptually replicate Federmeier et al. (2007) and Kuperberg et al. (2020), who observed that the PNP, but not the N400 or the P600, was affected by constraint at unexpected but plausible words. Using a pre-registered design and statistical approach maximising power, we demonstrated a dissociated effect of predictability and constraint: strong evidence for predictability but not constraint in the N400 window, and strong evidence for constraint but not predictability in the later window. However, the constraint effect was consistent with a P600 and not a PNP, suggesting increased conflict between a strong representation and unexpected input rather than greater update of the representation. We conclude that either a simple strong/weak constraint design is not always sufficient to elicit the PNP, or that previous PNP constraint findings could be an artifact of smaller sample size.},
  language = {en},
}

@article{NicenboimVasishthRoesler2020,
  author   = {Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank},
  title    = {Are words pre-activated probabilistically during sentence comprehension?},
  journal  = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience},
  volume   = {142},
  publisher = {Elsevier Science},
  address  = {Oxford},
  issn     = {0028-3932},
  doi      = {10.1016/j.neuropsychologia.2020.107427},
  pages    = {27},
  year     = {2020},
  abstract = {Several studies (e.g., Wicha et al., 2003b; DeLong et al., 2005) have shown that readers use information from the sentential context to predict nouns (or some of their features), and that predictability effects can be inferred from the EEG signal in determiners or adjectives appearing before the predicted noun. While these findings provide evidence for the pre-activation proposal, recent replication attempts together with inconsistencies in the results from the literature cast doubt on the robustness of this phenomenon. Our study presents the first attempt to use the effect of gender on predictability in German to study the pre-activation hypothesis, capitalizing on the fact that all German nouns have a gender and that their preceding determiners can show an unambiguous gender marking when the noun phrase has accusative case. Despite having a relatively large sample size (of 120 subjects), both our preregistered and exploratory analyses failed to yield conclusive evidence for or against an effect of pre-activation. The sign of the effect is, however, in the expected direction: the more unexpected the gender of the determiner, the larger the negativity. The recent, inconclusive replication attempts by Nieuwland et al. (2018) and others also show effects with signs in the expected direction. We conducted a Bayesian random-effects meta-analysis using our data and the publicly available data from these recent replication attempts. Our meta-analysis shows a relatively clear but very small effect that is consistent with the pre-activation account and demonstrates a very important advantage of the Bayesian data analysis methodology: we can incrementally accumulate evidence to obtain increasingly precise estimates of the effect of interest.},
  language = {en},
}

@misc{PaapeNicenboimVasishth2017,
  author   = {Paape, Dario L. J. F. and Nicenboim, Bruno and Vasishth, Shravan},
  title    = {Does antecedent complexity affect ellipsis processing?},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403373},
  pages    = {29},
  year     = {2017},
  abstract = {In two self-paced reading experiments, we investigated the effect of changes in antecedent complexity on processing times for ellipsis. Pointer- or ``sharing''-based approaches to ellipsis processing (Frazier \& Clifton 2001, 2005; Martin \& McElree 2008) predict no effect of antecedent complexity on reading times at the ellipsis site while other accounts predict increased antecedent complexity to either slow down processing (Murphy 1985) or to speed it up (Hofmeister 2011). Experiment 1 manipulated antecedent complexity and elision, yielding evidence against a speedup at the ellipsis site and in favor of a null effect. In order to investigate possible superficial processing on part of participants, Experiment 2 manipulated the amount of attention required to correctly respond to end-of-sentence comprehension probes, yielding evidence against a complexity-induced slowdown at the ellipsis site. Overall, our results are compatible with pointer-based approaches while casting doubt on the notion that changes in antecedent complexity lead to measurable differences in ellipsis processing speed.},
  language = {en},
}

@phdthesis{Nicenboim2016,
  author   = {Nicenboim, Bruno},
  title    = {Dependency resolution as a retrieval process},
  school   = {Universit{\"a}t Potsdam},
  pages    = {xiv, 209},
  year     = {2016},
  abstract = {My thesis focused on the predictions of the activation-based model of Lewis and Vasishth (2005) to investigate the evidence for the use of the memory system in the formation of non-local dependencies in sentence comprehension. The activation-based model, which follows the Adaptive Control of Thought-Rational framework (ACT-R; Anderson et al., 2004), has been used to explain locality effects and similarity-based interference by assuming that dependencies are resolved by a cue-based retrieval mechanism, and that the retrieval mechanism is affected by decay and interference. Both locality effects and (inhibitory) similarity-based interference cause increased difficulty (e.g., longer reading times) at the site of the dependency completion where a retrieval is assumed: (I) Locality effects are attributed to the increased difficulty in the retrieval of a dependent when the distance from its retrieval site is increased. (II) Similarity-based interference is attributed to the retrieval being affected by the presence of items which have similar features as the dependent that needs to be retrieved. In this dissertation, I investigated some findings problematic to the activation-based model, namely, facilitation where locality effects are expected (e.g., Levy, 2008), and the lack of similarity-based interference from the number feature in grammatical sentences (e.g., Wagers et al., 2009). In addition, I used individual differences in working memory capacity and reading fluency as a way to validate the theories investigated (Underwood, 1975), and computational modeling to achieve a more precise account of the phenomena. Regarding locality effects, by using self-paced reading and eye-tracking-while reading methods with Spanish and German data, this dissertation yielded two main findings: (I) Locality effects seem to be modulated by working memory capacity, with high-capacity participants showing expectation-driven facilitation. (II) Once expectations and other potential confounds are controlled using baselines, with increased distance, high-capacity readers can show a slow-down (i.e., locality effects) and low-capacity readers can show a speedup. While the locality effects are compatible with the activation-based model, simulations show that the speedup of low-capacity readers can only be accounted for by changing some of the assumptions of the activation-based model. Regarding similarity-based interference, two relatively high-powered self-paced reading experiments in German using grammatical sentences yielded a slowdown at the verb as predicted by the activation-based model. This provides evidence in favor of dependency creation via cue-based retrieval, and in contrast with the view that cue-based retrieval is a reanalysis mechanism (Wagers et al., 2009). Finally, the same experimental results that showed inhibitory interference from the number feature are used for a finer grain evaluation of the retrieval process. Besides Lewis and Vasishth's (2005) activation-based model, also McElree's (2000) direct-access model can account for inhibitory interference. These two models assume a cue-based retrieval mechanism to build dependencies, but they are based on different assumptions. I present a computational evaluation of the predictions of these two theories of retrieval. The models were compared by implementing them in a Bayesian hierarchical framework. The evaluation of the models reveals that some aspects of the data fit better under the direct access model than under the activation-based model. However, a simple extension of the activation-based model provides a comparable fit to the direct access model. This serves as a proof of concept showing potential ways to improve the original activation-based model. In conclusion, this thesis adds to the body of evidence that argues for the use of the general memory system in dependency resolution, and in particular for a cue-based retrieval mechanism. However, it also shows that some of the default assumptions inherited from ACT-R in the activation-based model need to be revised.},
  language = {en},
}