@comment{Doctoral dissertations, Universitaet Potsdam. Abstracts taken from the
publisher record; obvious typos and a leaked citation key fixed in place.}

@phdthesis{Jaeger2015,
  author   = {J{\"a}ger, Lena Ann},
  title    = {Working memory and prediction in human sentence parsing},
  school   = {Universit{\"a}t Potsdam},
  year     = {2015},
  pages    = {xi, 144},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82517},
  language = {en},
  abstract = {This dissertation investigates the working memory mechanism subserving human sentence processing and its relative contribution to processing difficulty as compared to syntactic prediction. Within the last decades, evidence for a content-addressable memory system underlying human cognition in general has accumulated (e.g., Anderson et al., 2004). In sentence processing research, it has been proposed that this general content-addressable architecture is also used for language processing (e.g., McElree, 2000). Although there is a growing body of evidence from various kinds of linguistic dependencies that is consistent with a general content-addressable memory subserving sentence processing (e.g., McElree et al., 2003; Van Dyke, 2006), the case of reflexive-antecedent dependencies has challenged this view. It has been proposed that in the processing of reflexive-antecedent dependencies, a syntactic-structure based memory access is used rather than cue-based retrieval within a content-addressable framework (e.g., Sturt, 2003). Two eye-tracking experiments on Chinese reflexives were designed to tease apart accounts assuming a syntactic-structure based memory access mechanism from cue-based retrieval (implemented in ACT-R as proposed by Lewis and Vasishth (2005)). In both experiments, interference effects were observed from noun phrases which syntactically do not qualify as the reflexive's antecedent but match the animacy requirement the reflexive imposes on its antecedent. These results are interpreted as evidence against a purely syntactic-structure based memory access. However, the exact pattern of effects observed in the data is only partially compatible with the Lewis and Vasishth cue-based parsing model. Therefore, an extension of the Lewis and Vasishth model is proposed. Two principles are added to the original model, namely 'cue confusion' and 'distractor prominence'. Although interference effects are generally interpreted in favor of a content-addressable memory architecture, an alternative explanation for interference effects in reflexive processing has been proposed which, crucially, might reconcile interference effects with a structure-based account. It has been argued that interference effects do not necessarily reflect cue-based retrieval interference in a content-addressable memory but might equally well be accounted for by interference effects which have already occurred at the moment of encoding the antecedent in memory (Dillon, 2011). Three experiments (eye-tracking and self-paced reading) on German reflexives and Swedish possessives were designed to tease apart cue-based retrieval interference from encoding interference. The results of all three experiments suggest that there is no evidence that encoding interference affects the retrieval of a reflexive's antecedent. Taken together, these findings suggest that the processing of reflexives can be explained with the same cue-based retrieval mechanism that has been invoked to explain syntactic dependency resolution in a range of other structures. This supports the view that the language processing system is located within a general cognitive architecture, with a general-purpose content-addressable working memory system operating on linguistic expressions. Finally, two experiments (self-paced reading and eye-tracking) using Chinese relative clauses were conducted to determine the relative contribution to sentence processing difficulty of working-memory processes as compared to syntactic prediction during incremental parsing. Chinese has the cross-linguistically rare property of being a language with subject-verb-object word order and pre-nominal relative clauses. This property leads to opposing predictions of expectation-based accounts and memory-based accounts with respect to the relative processing difficulty of subject vs. object relatives. Previous studies showed contradictory results, which has been attributed to different kinds of local ambiguities confounding the materials (Lin and Bever, 2011). The two experiments presented are the first to compare Chinese relative clauses in syntactically unambiguous contexts. The results of both experiments were consistent with the predictions of the expectation-based account of sentence processing but not with the memory-based account. From these findings, I conclude that any theory of human sentence processing needs to take into account the power of predictive processes unfolding in the human mind.},
}

@phdthesis{Gerth2015,
  author   = {Gerth, Sabrina},
  title    = {Memory limitations in sentence comprehension},
  school   = {Universit{\"a}t Potsdam},
  year     = {2015},
  pages    = {xviii, 157},
  isbn     = {978-3-86956-321-3},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71554},
  language = {en},
  abstract = {This dissertation addresses the question of how linguistic structures can be represented in working memory. We propose a memory-based computational model that derives offline and online complexity profiles in terms of a top-down parser for minimalist grammars (Stabler, 2011). The complexity metric reflects the amount of time an item is stored in memory. The presented architecture links grammatical representations stored in memory directly to the cognitive behavior by deriving predictions about sentence processing difficulty. Results from five different sentence comprehension experiments were used to evaluate the model's assumptions about memory limitations. The predictions of the complexity metric were compared to the locality (integration and storage) cost metric of Dependency Locality Theory (Gibson, 2000). Both metrics make comparable offline and online predictions for four of the five phenomena. The key difference between the two metrics is that the proposed complexity metric accounts for the structural complexity of intervening material. In contrast, DLT's integration cost metric considers the number of discourse referents, not the syntactic structural complexity. We conclude that the syntactic analysis plays a significant role in memory requirements of parsing. An incremental top-down parser based on a grammar formalism easily computes offline and online complexity profiles, which can be used to derive predictions about sentence processing difficulty.},
}