@article{MatuschekKlieglVasishthetal.2017,
  author    = {Matuschek, Hannes and Kliegl, Reinhold and Vasishth, Shravan and Baayen, Harald R. and Bates, Douglas},
  title     = {Balancing {Type I} error and power in linear mixed models},
  journal   = {Journal of Memory and Language},
  volume    = {94},
  pages     = {305--315},
  year      = {2017},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2017.01.001},
  abstract  = {Linear mixed-effects models have increasingly replaced mixed-model analyses of variance for statistical inference in factorial psycholinguistic experiments. Although LMMs have many advantages over ANOVA, like ANOVAs, setting them up for data analysis also requires some care. One simple option, when numerically possible, is to fit the full variance-covariance structure of random effects (the maximal model; Barr, Levy, Scheepers \& Tily, 2013), presumably to keep Type I error down to the nominal $\alpha$ in the presence of random effects. Although it is true that fitting a model with only random intercepts may lead to higher Type I error, fitting a maximal model also has a cost: it can lead to a significant loss of power. We demonstrate this with simulations and suggest that for typical psychological and psycholinguistic data, higher power is achieved without inflating Type I error rate if a model selection criterion is used to select a random effect structure that is supported by the data.},
  language  = {en}
}

@article{BaayenVasishthKliegletal.2017,
  author    = {Baayen, Harald R. and Vasishth, Shravan and Kliegl, Reinhold and Bates, Douglas},
  title     = {The cave of shadows: Addressing the human factor with generalized additive mixed models},
  journal   = {Journal of Memory and Language},
  volume    = {94},
  pages     = {206--234},
  year      = {2017},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2016.11.006},
  language  = {en}
}