@phdthesis{Laurinavichyute2021,
  author   = {Laurinavichyute, Anna},
  title    = {Similarity-based interference and faulty encoding accounts of sentence processing},
  school   = {Universit{\"a}t Potsdam},
  year     = {2021},
  pages    = {237},
  doi      = {10.25932/publishup-50966},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-509669},
  language = {en},
  abstract = {The goal of this dissertation is to empirically evaluate the predictions of two classes of models applied to language processing: the similarity-based interference models (Lewis \& Vasishth, 2005; McElree, 2000) and a group of smaller-scale accounts that we will refer to as faulty encoding accounts (Eberhard, Cutting, \& Bock, 2005; Bock \& Eberhard, 1993). Both types of accounts make predictions about the processing of the same class of structures: sentences containing a non-subject (interfering) noun in addition to a subject noun and a verb. Both accounts make the same prediction for the processing of ungrammatical sentences with a number-mismatching interfering noun, and this prediction finds consistent support in the data. However, the similarity-based interference accounts predict similar effects not only at the morphosyntactic but also at the semantic level of language organization. We tested this prediction in three single-trial online experiments and found consistent support for the similarity-based interference account. In addition, we report computational simulations that further support the similarity-based interference accounts. The combined evidence suggests that the faulty encoding accounts are not required to explain the comprehension of ill-formed sentences. For the processing of grammatical sentences, the accounts make conflicting predictions, and neither the slowdown predicted by the similarity-based interference account nor the complementary slowdown predicted by the faulty encoding accounts was systematically observed. The majority of studies found no difference between the compared configurations. We tested one possible explanation for the lack of the predicted difference, namely, that both slowdowns are present simultaneously and thus conceal each other. We decreased the amount of similarity-based interference: if the effects were concealing each other, decreasing one of them should allow the other to surface. Surprisingly, across three larger-sample single-trial online experiments, we consistently found the slowdown predicted by the faulty encoding accounts, but no effects consistent with the presence of inhibitory interference. The overall pattern of results observed across all the experiments reported in this dissertation is consistent with previous findings: the predictions of the interference accounts for the processing of ungrammatical sentences receive consistent support, but the predictions for the processing of grammatical sentences are not always met. Recent proposals by Nicenboim et al. (2016) and Mertzen et al. (2020) suggest that interference might arise only in people with high working memory capacity or under a deep processing mode. Following these proposals, we tested whether interference effects depend on the depth of processing: we manipulated the complexity of the training materials preceding the grammatical experimental sentences while making no changes to the experimental materials themselves. We found that the slowdown predicted by the faulty encoding accounts disappears under the deep processing mode, but effects consistent with the predictions of the similarity-based interference account do not arise. Independently of whether similarity-based interference arises under a deep processing mode, our results suggest that the faulty encoding accounts cannot be dismissed, since they make unique predictions about the processing of grammatical sentences that are supported by the data. At the same time, the support is not unequivocal: the slowdowns are present only under the superficial processing mode, which the faulty encoding accounts do not predict. Our results might therefore favor a much simpler system that superficially tracks number features and is distracted by every plural feature.}
}