@phdthesis{LissonHernandez2022,
  author   = {Liss{\'o}n Hern{\'a}ndez, Paula J.},
  title    = {Computational Models of Sentence Comprehension in Aphasia},
  school   = {Universit{\"a}t Potsdam},
  year     = {2022},
  pages    = {vi, 159},
  doi      = {10.25932/publishup-55548},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-555487},
  language = {en},
  abstract = {It is well-known that individuals with aphasia (IWA) have difficulties understanding sentences that involve non-adjacent dependencies, such as object relative clauses or passives (Caplan, Baker, \& Dehaut, 1985; Caramazza \& Zurif, 1976). A large body of research supports the view that IWA's grammatical system is intact, and that comprehension difficulties in aphasia are caused by a processing deficit, such as a delay in lexical access and/or in syntactic structure building (e.g., Burkhardt, Pi{\~n}ango, \& Wong, 2003; Caplan, Michaud, \& Hufford, 2015; Caplan, Waters, DeDe, Michaud, \& Reddy, 2007; Ferrill, Love, Walenski, \& Shapiro, 2012; Hanne, Burchert, De Bleser, \& Vasishth, 2015; Love, Swinney, Walenski, \& Zurif, 2008). The main goal of this dissertation is to computationally investigate the processing sources of comprehension impairments in sentence processing in aphasia. In this work, prominent theories of processing deficits coming from the aphasia literature are implemented within two cognitive models of sentence processing---the activation-based model (Lewis \& Vasishth, 2005) and the direct-access model (McElree, 2000). These models are two different expressions of the cue-based retrieval theory (Lewis, Vasishth, \& Van Dyke, 2006), which posits that sentence processing is the result of a series of iterative retrievals from memory. These two models have been widely used to account for sentence processing in unimpaired populations in multiple languages and linguistic constructions, sometimes interchangeably (Parker, Shvartsman, \& Van Dyke, 2017). However, Nicenboim and Vasishth (2018) showed that when both models are implemented in the same framework and fitted to the same data, the models yield different results, because the models assume different data-generating processes. Specifically, the models hold different assumptions regarding the retrieval latencies. The second goal of this dissertation is to compare these two models of cue-based retrieval, using data from individuals with aphasia and control participants. We seek to answer the following question: Which retrieval mechanism is more likely to mediate sentence comprehension? We model 4 subsets of existing data: Relative clauses in English and German; and control structures and pronoun resolution in German. The online data come from either self-paced listening experiments, or visual-world eye-tracking experiments. The offline data come from a complementary sentence-picture matching task performed at the end of the trial in both types of experiments. The two competing models of retrieval are implemented in the Bayesian framework, following Nicenboim and Vasishth (2018). In addition, we present a modified version of the direct-access model that---we argue---is more suitable for individuals with aphasia. This dissertation presents a systematic approach to implement and test verbally-stated theories of comprehension deficits in aphasia within cognitive models of sentence processing. The conclusions drawn from this work are that (a) the original direct-access model (as implemented here) cannot account for the full pattern of data from individuals with aphasia because it cannot account for slow misinterpretations; and (b) an activation-based model of retrieval can account for sentence comprehension deficits in individuals with aphasia by assuming a delay in syntactic structure building, and noise in the processing system. The overall pattern of results support an activation-based mechanism of memory retrieval, in which a combination of processing deficits, namely slow syntax and intermittent deficiencies, cause comprehension difficulties in individuals with aphasia.},
}

@phdthesis{Laurinavichyute2021,
  author   = {Laurinavichyute, Anna},
  title    = {Similarity-Based Interference and Faulty Encoding Accounts of Sentence Processing},
  school   = {Universit{\"a}t Potsdam},
  year     = {2021},
  pages    = {237},
  doi      = {10.25932/publishup-50966},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-509669},
  language = {en},
  abstract = {The goal of this dissertation is to empirically evaluate the predictions of two classes of models applied to language processing: the similarity-based interference models (Lewis \& Vasishth, 2005; McElree, 2000) and the group of smaller-scale accounts that we will refer to as faulty encoding accounts (Eberhard, Cutting, \& Bock, 2005; Bock \& Eberhard, 1993). Both types of accounts make predictions with regard to processing the same class of structures: sentences containing a non-subject (interfering) noun in addition to a subject noun and a verb. Both accounts make the same predictions for processing ungrammatical sentences with a number-mismatching interfering noun, and this prediction finds consistent support in the data. However, the similarity-based interference accounts predict similar effects not only for morphosyntactic, but also for the semantic level of language organization. We verified this prediction in three single-trial online experiments, where we found consistent support for the predictions of the similarity-based interference account. In addition, we report computational simulations further supporting the similarity-based interference accounts. The combined evidence suggests that the faulty encoding accounts are not required to explain comprehension of ill-formed sentences. For the processing of grammatical sentences, the accounts make conflicting predictions, and neither the slowdown predicted by the similarity-based interference account, nor the complementary slowdown predicted by the faulty encoding accounts were systematically observed. The majority of studies found no difference between the compared configurations. We tested one possible explanation for the lack of predicted difference, namely, that both slowdowns are present simultaneously and thus conceal each other. We decreased the amount of similarity-based interference: if the effects were concealing each other, decreasing one of them should allow the other to surface. Surprisingly, throughout three larger-sample single-trial online experiments, we consistently found the slowdown predicted by the faulty encoding accounts, but no effects consistent with the presence of inhibitory interference. The overall pattern of the results observed across all the experiments reported in this dissertation is consistent with previous findings: predictions of the interference accounts for the processing of ungrammatical sentences receive consistent support, but the predictions for the processing of grammatical sentences are not always met. Recent proposals by Nicenboim et al. (2016) and Mertzen et al. (2020) suggest that interference might arise only in people with high working memory capacity or under deep processing mode. Following these proposals, we tested whether interference effects might depend on the depth of processing: we manipulated the complexity of the training materials preceding the grammatical experimental sentences while making no changes to the experimental materials themselves. We found that the slowdown predicted by the faulty encoding accounts disappears in the deep processing mode, but the effects consistent with the predictions of the similarity-based interference account do not arise. Independently of whether similarity-based interference arises under deep processing mode or not, our results suggest that the faulty encoding accounts cannot be dismissed since they make unique predictions with regard to processing grammatical sentences, which are supported by data. At the same time, the support is not unequivocal: the slowdowns are present only in the superficial processing mode, which is not predicted by the faulty encoding accounts. Our results might therefore favor a much simpler system that superficially tracks number features and is distracted by every plural feature.},
}