@article{NixonvanRijMoketal.2016,
  author    = {Nixon, Jessie S. and van Rij, Jacolien and Mok, Peggy and Baayen, Harald R. and Chen, Yiya},
  title     = {The temporal dynamics of perceptual uncertainty: eye movement evidence from {Cantonese} segment and tone perception},
  journal   = {Journal of memory and language},
  volume    = {90},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2016.03.005},
  pages     = {103--125},
  year      = {2016},
  abstract  = {Two visual world eyetracking experiments investigated how acoustic cue value and statistical variance affect perceptual uncertainty during Cantonese consonant (Experiment 1) and tone perception (Experiment 2). Participants heard low- or high-variance acoustic stimuli. Euclidean distance of fixations from target and competitor pictures over time was analysed using Generalised Additive Mixed Modelling. Distance of fixations from target and competitor pictures varied as a function of acoustic cue, providing evidence for gradient, nonlinear sensitivity to cue values. Moreover, cue value effects significantly interacted with statistical variance, indicating that the cue distribution directly affects perceptual uncertainty. Interestingly, the time course of effects differed between target distance and competitor distance models. The pattern of effects over time suggests a global strategy in response to the level of uncertainty: as uncertainty increases, verification looks increase accordingly. Low variance generally creates less uncertainty, but can lead to greater uncertainty in the face of unexpected speech tokens. (C) 2016 Elsevier Inc. All rights reserved.},
  language  = {en},
}

@article{BaayenVasishthKliegletal.2017,
  author    = {Baayen, Harald R. and Vasishth, Shravan and Kliegl, Reinhold and Bates, Douglas},
  title     = {The cave of shadows: Addressing the human factor with generalized additive mixed models},
  journal   = {Journal of memory and language},
  volume    = {94},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2016.11.006},
  pages     = {206--234},
  year      = {2017},
  language  = {en},
}

@article{Baayen2012,
  author    = {Baayen, Harald R.},
  title     = {Resource requirements for neo-generative modeling in (psycho)linguistics},
  journal   = {Potsdam cognitive science series},
  number    = {3},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address   = {Potsdam},
  issn      = {2190-4545},
  url       = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62318},
  pages     = {5--8},
  year      = {2012},
  language  = {en},
}

@article{MatuschekKlieglVasishthetal.2017,
  author    = {Matuschek, Hannes and Kliegl, Reinhold and Vasishth, Shravan and Baayen, Harald R. and Bates, Douglas},
  title     = {Balancing {Type I} error and power in linear mixed models},
  journal   = {Journal of memory and language},
  volume    = {94},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2017.01.001},
  pages     = {305--315},
  year      = {2017},
  abstract  = {Linear mixed-effects models have increasingly replaced mixed-model analyses of variance for statistical inference in factorial psycholinguistic experiments. Although LMMs have many advantages over ANOVA, like ANOVAs, setting them up for data analysis also requires some care. One simple option, when numerically possible, is to fit the full variance covariance structure of random effects (the maximal model; Barr, Levy, Scheepers \& Tily, 2013), presumably to keep Type I error down to the nominal {$\alpha$} in the presence of random effects. Although it is true that fitting a model with only random intercepts may lead to higher Type I error, fitting a maximal model also has a cost: it can lead to a significant loss of power. We demonstrate this with simulations and suggest that for typical psychological and psycholinguistic data, higher power is achieved without inflating Type I error rate if a model selection criterion is used to select a random effect structure that is supported by the data. (C) 2017 The Authors. Published by Elsevier Inc.},
  language  = {en},
}