@article{SchadBetancourtVasishth2021,
  author    = {Schad, Daniel and Betancourt, Michael and Vasishth, Shravan},
  title     = {Toward a principled Bayesian workflow in cognitive science},
  series    = {Psychological methods},
  volume    = {26},
  journal   = {Psychological methods},
  number    = {1},
  publisher = {American Psychological Association},
  address   = {Washington},
  issn      = {1082-989X},
  doi       = {10.1037/met0000275},
  pages     = {103--126},
  year      = {2021},
  abstract  = {Experiments in research on memory, language, and other areas of cognitive science are increasingly being analyzed using Bayesian methods. This has been facilitated by the development of probabilistic programming languages such as Stan, and easily accessible front-end packages such as brms. The utility of Bayesian methods, however, ultimately depends on the relevance of the Bayesian model, in particular whether or not it accurately captures the structure of the data and the data analyst's domain expertise. Even with powerful software, the analyst is responsible for verifying the utility of their model. To demonstrate this point, we introduce a principled Bayesian workflow (Betancourt, 2018) to cognitive science. Using a concrete working example, we describe basic questions one should ask about the model: prior predictive checks, computational faithfulness, model sensitivity, and posterior predictive checks. The running example for demonstrating the workflow is data on reading times with a linguistic manipulation of object versus subject relative clause sentences. This principled Bayesian workflow also demonstrates how to use domain knowledge to inform prior distributions. It provides guidelines and checks for valid data analysis, avoiding overfitting complex models to noise, and capturing relevant data structure in a probabilistic model. Given the increasing use of Bayesian methods, we aim to discuss how these methods can be properly employed to obtain robust answers to scientific questions.},
  language  = {en}
}

@article{JaegerMertzenVanDykeetal.2020,
  author    = {J{\"a}ger, Lena Ann and Mertzen, Daniela and Van Dyke, Julie A. and Vasishth, Shravan},
  title     = {Interference patterns in subject-verb agreement and reflexives revisited},
  series    = {Journal of memory and language},
  volume    = {111},
  journal   = {Journal of memory and language},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2019.104063},
  pages     = {21},
  year      = {2020},
  abstract  = {Cue-based retrieval theories in sentence processing predict two classes of interference effect: (i) inhibitory interference is predicted when multiple items match a retrieval cue: cue overloading leads to an overall slowdown in reading time; and (ii) facilitatory interference arises when a retrieval target as well as a distractor only partially match the retrieval cues; this partial matching leads to an overall speedup in retrieval time. Inhibitory interference effects are widely observed, but facilitatory interference apparently has an exception: reflexives have been claimed to show no facilitatory interference effects. Because the claim is based on underpowered studies, we conducted a large-sample experiment that investigated both facilitatory and inhibitory interference. In contrast to previous studies, we find facilitatory interference effects in reflexives.
We also present a quantitative evaluation of the cue-based retrieval model of Engelmann, J{\"a}ger, and Vasishth (2019).},
  language  = {en}
}

@article{NicenboimRoettgerVasishth2018,
  author    = {Nicenboim, Bruno and Roettger, Timo B. and Vasishth, Shravan},
  title     = {Using meta-analysis for evidence synthesis},
  series    = {Journal of phonetics},
  volume    = {70},
  journal   = {Journal of phonetics},
  publisher = {Elsevier},
  address   = {London},
  issn      = {0095-4470},
  doi       = {10.1016/j.wocn.2018.06.001},
  pages     = {39--55},
  year      = {2018},
  abstract  = {Within quantitative phonetics, it is common practice to draw conclusions based on statistical significance alone. Using incomplete neutralization of final devoicing in German as a case study, we illustrate the problems with this approach. If researchers find a significant acoustic difference between voiceless and devoiced obstruents, they conclude that neutralization is incomplete, and if they find no significant difference, they conclude that neutralization is complete. However, such strong claims regarding the existence or absence of an effect based on significant results alone can be misleading. Instead, the totality of available evidence should be brought to bear on the question. Towards this end, we synthesize the evidence from 14 studies on incomplete neutralization in German using a Bayesian random-effects meta-analysis. Our meta-analysis provides evidence in favor of incomplete neutralization. We conclude with some suggestions for improving the quality of future research on phonetic phenomena: ensure that sample sizes allow for high-precision estimates of the effect; avoid the temptation to deploy researcher degrees of freedom when analyzing data; focus on estimates of the parameter of interest and the uncertainty about that parameter; attempt to replicate the effects found; and, whenever possible, make both the data and analysis publicly available.},
  language  = {en}
}

@article{VasishthNicenboimBeckmanetal.2018,
  author    = {Vasishth, Shravan and Nicenboim, Bruno and Beckman, Mary E. and Li, Fangfang and Kong, Eun Jong},
  title     = {Bayesian data analysis in the phonetic sciences},
  series    = {Journal of phonetics},
  volume    = {71},
  journal   = {Journal of phonetics},
  publisher = {Elsevier},
  address   = {London},
  issn      = {0095-4470},
  doi       = {10.1016/j.wocn.2018.07.008},
  pages     = {147--161},
  year      = {2018},
  abstract  = {This tutorial analyzes voice onset time (VOT) data from Dongbei (Northeastern) Mandarin Chinese and North American English to demonstrate how Bayesian linear mixed models can be fit using the programming language Stan via the R package brms. Through this case study, we demonstrate some of the advantages of the Bayesian framework: researchers can (i) flexibly define the underlying process that they believe to have generated the data; (ii) obtain direct information regarding the uncertainty about the parameter that relates the data to the theoretical question being studied; and (iii) incorporate prior knowledge into the analysis. Getting started with Bayesian modeling can be challenging, especially when one is trying to model one's own (often unique) data. It is difficult to see how one can apply general principles described in textbooks to one's own specific research problem. We address this barrier to using Bayesian methods by providing three detailed examples, with source code to allow easy reproducibility.
The examples presented are intended to give the reader a flavor of the process of model-fitting; suggestions for further study are also provided. All data and code are available from https://osf.io/g4zpv.},
  language  = {en}
}

@article{VasishthMertzenJaegeretal.2018,
  author    = {Vasishth, Shravan and Mertzen, Daniela and J{\"a}ger, Lena A. and Gelman, Andrew},
  title     = {The statistical significance filter leads to overoptimistic expectations of replicability},
  series    = {Journal of memory and language},
  volume    = {103},
  journal   = {Journal of memory and language},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2018.07.004},
  pages     = {151--175},
  year      = {2018},
  abstract  = {It is well-known in statistics (e.g., Gelman \& Carlin, 2014) that treating a result as publishable just because the p-value is less than 0.05 leads to overoptimistic expectations of replicability. When underpowered studies yield significant results, the effect size estimates are necessarily exaggerated and noisy; these inflated effects get published, leading to an overconfident belief in replicability. We demonstrate the adverse consequences of this statistical significance filter by conducting seven direct replication attempts (268 participants in total) of a recent paper (Levy \& Keller, 2013). We show that the published claims are so noisy that even non-significant results are fully compatible with them. We also demonstrate the contrast between such small-sample studies and a larger-sample study; the latter generally yields a less noisy estimate but also a smaller effect magnitude, which looks less compelling but is more realistic. We reiterate several suggestions from the methodology literature for improving current practices.},
  language  = {en}
}

@article{SorensenHohensteinVasishth2016,
  author    = {Sorensen, Tanner and Hohenstein, Sven and Vasishth, Shravan},
  title     = {Bayesian linear mixed models using Stan: A tutorial for psychologists, linguists, and cognitive scientists},
  series    = {Tutorials in Quantitative Methods for Psychology},
  volume    = {12},
  journal   = {Tutorials in Quantitative Methods for Psychology},
  publisher = {University of Montreal, Department of Psychology},
  address   = {Montreal},
  issn      = {2292-1354},
  doi       = {10.20982/tqmp.12.3.p175},
  pages     = {175--200},
  year      = {2016},
  abstract  = {With the arrival of the R packages nlme and lme4, linear mixed models (LMMs) have come to be widely used in experimentally-driven areas like psychology, linguistics, and cognitive science. This tutorial provides a practical introduction to fitting LMMs in a Bayesian framework using the probabilistic programming language Stan. We choose Stan (rather than WinBUGS or JAGS) because it provides an elegant and scalable framework for fitting models in most of the standard applications of LMMs. We ease the reader into fitting increasingly complex LMMs, using a two-condition repeated measures self-paced reading study.},
  language  = {en}
}