@article{AvetisyanLagoVasishth2020, author = {Avetisyan, Serine and Lago, Sol and Vasishth, Shravan}, title = {Does case marking affect agreement attraction in comprehension?}, series = {Journal of memory and language}, volume = {112}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104087}, pages = {18}, year = {2020}, abstract = {Previous studies have suggested that distinctive case marking on noun phrases reduces attraction effects in production, i.e., the tendency to produce a verb that agrees with a nonsubject noun. An important open question is whether attraction effects are modulated by case information in sentence comprehension. To address this question, we conducted three attraction experiments in Armenian, a language with a rich and productive case system. The experiments showed clear attraction effects, and they also revealed an overall role of case marking such that participants showed faster response and reading times when the nouns in the sentence had different case. However, we found little indication that distinctive case marking modulated attraction effects. We present a theoretical proposal of how case and number information may be used differentially during agreement licensing in comprehension. More generally, this work sheds light on the nature of the retrieval cues deployed when completing morphosyntactic dependencies.}, language = {en} } @article{BaayenVasishthKliegletal.2017, author = {Baayen, Harald R. and Vasishth, Shravan and Kliegl, Reinhold and Bates, Douglas}, title = {The cave of shadows: Addressing the human factor with generalized additive mixed models}, series = {Journal of memory and language}, volume = {94}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2016.11.006}, pages = {206 -- 234}, year = {2017}, language = {en} } @article{BartekLewisVasishthetal.2011, author = {Bartek, Brian and Lewis, Richard L. and Vasishth, Shravan and Smith, Mason R.}, title = {In Search of on-line locality effects in sentence comprehension}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {37}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {5}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0024194}, pages = {1178 -- 1198}, year = {2011}, abstract = {Many comprehension theories assert that increasing the distance between elements participating in a linguistic relation (e.g., a verb and a noun phrase argument) increases the difficulty of establishing that relation during on-line comprehension. Such locality effects are expected to increase reading times and are thought to reveal properties and limitations of the short-term memory system that supports comprehension. Despite their theoretical importance and putative ubiquity, however, evidence for on-line locality effects is quite narrow linguistically and methodologically: It is restricted almost exclusively to self-paced reading of complex structures involving a particular class of syntactic relation. We present 4 experiments (2 self-paced reading and 2 eyetracking experiments) that demonstrate locality effects in the course of establishing subject-verb dependencies; locality effects are seen even in materials that can be read quickly and easily. 
These locality effects are observable in the earliest possible eye-movement measures and are of much shorter duration than previously reported effects. To account for the observed empirical patterns, we outline a processing model of the adaptive control of button pressing and eye movements. This model makes progress toward the goal of eliminating linking assumptions between memory constructs and empirical measures in favor of explicit theories of the coordinated control of motor responses and parsing.}, language = {en} } @article{BeckVasishth2009, author = {Beck, Sigrid and Vasishth, Shravan}, title = {Multiple focus}, issn = {0167-5133}, doi = {10.1093/Jos/Ffp001}, year = {2009}, abstract = {This paper presents the results of an experimental study on multiple focus configurations, that is, structures containing two nested focus-sensitive operators plus two foci supposed to associate with those operators. There has been controversial discussion in the semantic literature regarding whether or not an interpretation is acceptable that corresponds to this association. While the data are unclear, the issue is of considerable theoretical significance, as it distinguishes between the available theories of focus interpretation. Some theories (e. g. Rooth's 1992) predict such a pattern of association with focus to be impossible, while others (such as Wold's 1996) predict it to be acceptable. The results of our study show the data to be unacceptable rather than acceptable, favouring important aspects of the theory of focus interpretation developed by Rooth.}, language = {en} } @article{BostonHaleVasishthetal.2011, author = {Boston, Marisa Ferrara and Hale, John T. and Vasishth, Shravan and Kliegl, Reinhold}, title = {Parallel processing and sentence comprehension difficulty}, series = {Language and cognitive processes}, volume = {26}, journal = {Language and cognitive processes}, number = {3}, publisher = {Wiley}, address = {Hove}, issn = {0169-0965}, doi = {10.1080/01690965.2010.492228}, pages = {301 -- 349}, year = {2011}, abstract = {Eye fixation durations during normal reading correlate with processing difficulty, but the specific cognitive mechanisms reflected in these measures are not well understood. This study finds support in German readers' eye fixations for two distinct difficulty metrics: surprisal, which reflects the change in probabilities across syntactic analyses as new words are integrated; and retrieval, which quantifies comprehension difficulty in terms of working memory constraints. We examine the predictions of both metrics using a family of dependency parsers indexed by an upper limit on the number of candidate syntactic analyses they retain at successive words. Surprisal models all fixation measures and regression probability. By contrast, retrieval does not model any measure in serial processing. As more candidate analyses are considered in parallel at each word, retrieval can account for the same measures as surprisal. 
This pattern suggests an important role for ranked parallelism in theories of sentence comprehension.}, language = {en} } @article{BurchertHanneVasishth2013, author = {Burchert, Frank and Hanne, Sandra and Vasishth, Shravan}, title = {Sentence comprehension disorders in aphasia: the concept of chance performance revisited}, series = {Aphasiology : an international, interdisciplinary journal}, volume = {27}, journal = {Aphasiology : an international, interdisciplinary journal}, number = {1}, publisher = {Wiley}, address = {Hove}, issn = {0268-7038}, doi = {10.1080/02687038.2012.730603}, pages = {112 -- 125}, year = {2013}, abstract = {Background: In behavioural tests of sentence comprehension in aphasia, correct and incorrect responses are often randomly distributed. Such a pattern of chance performance is a typical trait of Broca's aphasia, but can be found in other aphasic syndromes as well. Many researchers have argued that chance behaviour is the result of a guessing strategy, which is adopted in the face of a syntactic breakdown in sentence processing. Aims: Capitalising on new evidence from recent studies investigating online sentence comprehension in aphasia using the visual world paradigm, the aim of this paper is to review the concept of chance performance as a reflection of a syntactic impairment in sentence processing and to re-examine the conventional interpretation of chance performance as a guessing behaviour. Main Contribution: Based on a review of recent evidence from visual world paradigm studies, we argue that the assumption of chance performance equalling guessing is not necessarily compatible with actual real-time parsing procedures in people with aphasia. We propose a reinterpretation of the concept of chance performance by assuming that there are two distinct processing mechanisms underlying sentence comprehension in aphasia. Correct responses are always the result of normal-like parsing mechanisms, even in those cases where the overall performance pattern is at chance. Incorrect responses, on the other hand, are the result of intermittent deficiencies of the parser. Hence the random guessing behaviour that persons with aphasia often display does not necessarily reflect a syntactic breakdown in sentence comprehension and a random selection between alternatives. Instead it should be regarded as a result of temporal deficient parsing procedures in otherwise normal-like comprehension routines. Conclusion: Our conclusion is that the consideration of behavioural offline data alone may not be sufficient to interpret a performance in language tests and subsequently draw theoretical conclusions about language impairments.
Rather it is important to call on additional data from online studies that look at language processing in real time in order to gain a comprehensive picture about syntactic comprehension abilities of people with aphasia and possible underlying deficits.}, language = {en} } @article{BuerkiAlarioVasishth2022, author = {B{\"u}rki, Audrey and Alario, F-Xavier and Vasishth, Shravan}, title = {When words collide: Bayesian meta-analyses of distractor and target properties in the picture-word interference paradigm}, series = {Quarterly Journal of Experimental Psychology}, volume = {76}, journal = {Quarterly Journal of Experimental Psychology}, number = {6}, publisher = {Sage Publications}, address = {London}, issn = {1747-0218}, doi = {10.1177/17470218221114644}, pages = {1410 -- 1430}, year = {2022}, abstract = {In the picture-word interference paradigm, participants name pictures while ignoring a written or spoken distractor word. Naming times to the pictures are slowed down by the presence of the distractor word. The present study investigates in detail the impact of distractor and target word properties on picture naming times, building on the seminal study by Miozzo and Caramazza. We report the results of several Bayesian meta-analyses based on 26 datasets. These analyses provide estimates of effect sizes and their precision for several variables and their interactions. They show the reliability of the distractor frequency effect on picture naming latencies (latencies decrease as the frequency of the distractor increases) and demonstrate for the first time the impact of distractor length, with longer naming latencies for trials with longer distractors. Moreover, distractor frequency interacts with target word frequency to predict picture naming latencies. The methodological and theoretical implications of these findings are discussed.}, language = {en} } @article{BuerkiFoschiniElbuyMadecetal.2020, author = {B{\"u}rki-Foschini, Audrey Damaris and Elbuy, Shereen and Madec, Sylvain and Vasishth, Shravan}, title = {What did we learn from forty years of research on semantic interference?}, series = {Journal of memory and language}, volume = {114}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2020.104125}, pages = {25}, year = {2020}, abstract = {When participants in an experiment have to name pictures while ignoring distractor words superimposed on the picture or presented auditorily (i.e., picture-word interference paradigm), they take more time when the word to be named (or target) and distractor words are from the same semantic category (e.g., cat-dog). This experimental effect is known as the semantic interference effect, and is probably one of the most studied in the language production literature. The functional origin of the effect and the exact conditions in which it occurs are however still debated. Since Lupker (1979) reported the effect in the first response time experiment about 40 years ago, more than 300 similar experiments have been conducted. The semantic interference effect was replicated in many experiments, but several studies also reported the absence of an effect in a subset of experimental conditions. The aim of the present study is to provide a comprehensive theoretical review of the existing evidence to date and several Bayesian meta-analyses and meta-regressions to determine the size of the effect and explore the experimental conditions in which the effect surfaces. 
The results are discussed in the light of current debates about the functional origin of the semantic interference effect and its implications for our understanding of the language production system.}, language = {en} } @article{DrenhausZimmermannVasishth2011, author = {Drenhaus, Heiner and Zimmermann, Malte and Vasishth, Shravan}, title = {Exhaustiveness effects in clefts are not truth-functional}, series = {Journal of neurolinguistics : an international journal for the study of brain function in language behavior and experience}, volume = {24}, journal = {Journal of neurolinguistics : an international journal for the study of brain function in language behavior and experience}, number = {3}, publisher = {Elsevier}, address = {Oxford}, issn = {0911-6044}, doi = {10.1016/j.jneuroling.2010.10.004}, pages = {320 -- 337}, year = {2011}, abstract = {While it is widely acknowledged in the formal semantic literature that both the truth-functional focus particle only and it-clefts convey exhaustiveness, the nature and source of exhaustiveness effects with it-clefts remain contested. We describe a questionnaire study (n = 80) and an event-related brain potentials (ERP) study (n = 16) that investigated the violation of exhaustiveness in German only-foci versus it-clefts. The offline study showed that a violation of exhaustivity with only is less acceptable than the violation with it-clefts, suggesting a difference in the nature of exhaustivity interpretation in the two environments. The ERP-results confirm that this difference can be seen in online processing as well: a violation of exhaustiveness in only-foci elicited a centro-posterior positivity (600-800ms), whereas a violation in it-clefts induced a globally distributed N400 pattern (400-600ms). The positivity can be interpreted as a reanalysis process and more generally as a process of context updating. The N400 effect in it-clefts is interpreted as indexing a cancelation process that is functionally distinct from the only case. The ERP study is, to our knowledge, the first evidence from an online experimental paradigm which shows that the violation of exhaustiveness involves different underlying processes in the two structural environments.}, language = {en} } @article{EngbertRabeSchwetlicketal.2022, author = {Engbert, Ralf and Rabe, Maximilian Michael and Schwetlick, Lisa and Seelig, Stefan A. and Reich, Sebastian and Vasishth, Shravan}, title = {Data assimilation in dynamical cognitive science}, series = {Trends in cognitive sciences}, volume = {26}, journal = {Trends in cognitive sciences}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1364-6613}, doi = {10.1016/j.tics.2021.11.006}, pages = {99 -- 102}, year = {2022}, abstract = {Dynamical models make specific assumptions about cognitive processes that generate human behavior. In data assimilation, these models are tested against time-ordered data.
Recent progress on Bayesian data assimilation demonstrates that this approach combines the strengths of statistical modeling of individual differences with those of dynamical cognitive models.}, language = {en} } @article{HanneBurchertDeBleseretal.2015, author = {Hanne, Sandra and Burchert, Frank and De Bleser, Ria and Vasishth, Shravan}, title = {Sentence comprehension and morphological cues in aphasia: What eye-tracking reveals about integration and prediction}, series = {Journal of neurolinguistics : an international journal for the study of brain function in language behavior and experience}, volume = {34}, journal = {Journal of neurolinguistics : an international journal for the study of brain function in language behavior and experience}, publisher = {Elsevier}, address = {Oxford}, issn = {0911-6044}, doi = {10.1016/j.jneuroling.2014.12.003}, pages = {83 -- 111}, year = {2015}, abstract = {Comprehension of non-canonical sentences can be difficult for individuals with aphasia (IWA). It is still unclear to which extent morphological cues like case marking or verb inflection may influence IWA's performance or even help to override deficits in sentence comprehension. Until now, studies have mainly used offline methods to draw inferences about syntactic deficits and, so far, only a few studies have looked at online syntactic processing in aphasia. We investigated sentence processing in German-speaking IWA by combining an offline (sentence-picture matching) and an online (eye-tracking in the visual-world paradigm) method. Our goal was to determine whether IWA are capable of using inflectional morphology (number-agreement markers on verbs and case markers in noun phrases) as a cue to sentence interpretation. We report results of two visual-world experiments using German reversible SVO and OVS sentences. In each study, there were eight IWA and 20 age-matched controls. Experiment 1 targeted the role of unambiguous case morphology, while Experiment 2 looked at processing of number-agreement cues at the verb in case-ambiguous sentences. IWA showed deficits in using both types of morphological markers as a cue to non-canonical sentence interpretation and the results indicate that in aphasia, processing of case-marking cues is more vulnerable as compared to verb-agreement morphology. We ascribe this finding to the higher cue reliability of agreement cues, which renders them more resistant against impairments in aphasia. However, the online data revealed that IWA are in principle capable of successfully computing morphological cues, but the integration of morphological information is delayed as compared to age-matched controls. Furthermore, we found striking differences between controls and IWA regarding subject-before-object parsing predictions. While in case-unambiguous sentences IWA showed evidence for early subject-before-object parsing commitments, they exhibited no straightforward subject-first prediction in case-ambiguous sentences, although controls did so for ambiguous structures. IWA delayed their parsing decisions in case-ambiguous sentences until unambiguous morphological information, such as a subject-verb-number-agreement cue, was available. We attribute the results for IWA to deficits in predictive processes based on morphosyntactic cues during sentence comprehension. The results indicate that IWA adopt a wait-and-see strategy and initiate prediction of upcoming syntactic structure only when unambiguous case or agreement cues are available. (C) 2015 Elsevier Ltd.
All rights reserved.}, language = {en} } @article{HanneSekerinaVasishthetal.2011, author = {Hanne, Sandra and Sekerina, Irina A. and Vasishth, Shravan and Burchert, Frank and De Bleser, Ria}, title = {Chance in agrammatic sentence comprehension: what does it really mean? Evidence from eye movements of German agrammatic aphasic patients}, series = {Aphasiology : an international, interdisciplinary journal}, volume = {25}, journal = {Aphasiology : an international, interdisciplinary journal}, number = {2}, publisher = {Wiley}, address = {Hove}, issn = {0268-7038}, doi = {10.1080/02687038.2010.489256}, pages = {221 -- 244}, year = {2011}, abstract = {Background: In addition to the canonical subject-verb-object (SVO) word order, German also allows for non-canonical order (OVS), and the case-marking system supports thematic role interpretation. Previous eye-tracking studies (Kamide et al., 2003; Knoeferle, 2007) have shown that unambiguous case information in non-canonical sentences is processed incrementally. For individuals with agrammatic aphasia, comprehension of non-canonical sentences is at chance level (Burchert et al., 2003). The trace deletion hypothesis (Grodzinsky 1995, 2000) claims that this is due to structural impairments in syntactic representations, which force the individual with aphasia (IWA) to apply a guessing strategy. However, recent studies investigating online sentence processing in aphasia (Caplan et al., 2007; Dickey et al., 2007) found that divergences exist in IWAs' sentence-processing routines depending on whether they comprehended non-canonical sentences correctly or not, pointing rather to a processing deficit explanation. Aims: The aim of the current study was to investigate agrammatic IWAs' online and offline sentence comprehension simultaneously in order to reveal what online sentence-processing strategies they rely on and how these differ from controls' processing routines. We further asked whether IWAs' offline chance performance for non-canonical sentences does indeed result from guessing. Methods \& Procedures: We used the visual-world paradigm and measured eye movements (as an index of online sentence processing) of controls (N = 8) and individuals with aphasia (N = 7) during a sentence-picture matching task. Additional offline measures were accuracy and reaction times. Outcomes \& Results: While the offline accuracy results corresponded to the pattern predicted by the TDH, IWAs' eye movements revealed systematic differences depending on the response accuracy. Conclusions: These findings constitute evidence against attributing IWAs' chance performance for non-canonical structures to mere guessing.
Instead, our results support processing deficit explanations and characterise the agrammatic parser as deterministic and inefficient: it is slowed down, affected by intermittent deficiencies in performing syntactic operations, and fails to compute reanalysis even when one is detected.}, language = {en} } @article{HusainVasishthSrinivasan2014, author = {Husain, Samar and Vasishth, Shravan and Srinivasan, Narayanan}, title = {Strong expectations cancel locality effects: Evidence from Hindi}, series = {PLoS one}, volume = {9}, journal = {PLoS one}, number = {7}, publisher = {PLoS}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0100986}, pages = {14}, year = {2014}, abstract = {Expectation-driven facilitation (Hale, 2001; Levy, 2008) and locality-driven retrieval difficulty (Gibson, 1998, 2000; Lewis \& Vasishth, 2005) are widely recognized to be two critical factors in incremental sentence processing; there is accumulating evidence that both can influence processing difficulty. However, it is unclear whether and how expectations and memory interact. We first confirm a key prediction of the expectation account: a Hindi self-paced reading study shows that when an expectation for an upcoming part of speech is dashed, building a rarer structure consumes more processing time than building a less rare structure. This is a strong validation of the expectation-based account. In a second study, we show that when expectation is strong, i.e., when a particular verb is predicted, strong facilitation effects are seen when the appearance of the verb is delayed; however, when expectation is weak, i.e., when only the part of speech "verb" is predicted but a particular verb is not predicted, the facilitation disappears and a tendency towards a locality effect is seen. The interaction seen between expectation strength and distance shows that strong expectations cancel locality effects, and that weak expectations allow locality effects to emerge.}, language = {en} } @misc{JaegerEngelmannVasishth2017, author = {Jaeger, Lena A. and Engelmann, Felix and Vasishth, Shravan}, title = {Similarity-based interference in sentence comprehension: Literature review and Bayesian meta-analysis}, series = {Journal of memory and language}, volume = {94}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2017.01.004}, pages = {316 -- 339}, year = {2017}, abstract = {We report a comprehensive review of the published reading studies on retrieval interference in reflexive-/reciprocal-antecedent and subject-verb dependencies. We also provide a quantitative random-effects meta-analysis of eyetracking and self-paced reading studies. We show that the empirical evidence is only partly consistent with cue-based retrieval as implemented in the ACT-R-based model of sentence processing by Lewis and Vasishth (2005) (LV05) and that there are important differences between the reviewed dependency types. In non-agreement subject-verb dependencies, there is evidence for inhibitory interference in configurations where the correct dependent fully matches the retrieval cues. This is consistent with the LV05 cue-based retrieval account. By contrast, in subject-verb agreement as well as in reflexive-/reciprocal-antecedent dependencies, no evidence for inhibitory interference is found in configurations with a fully cue-matching subject/antecedent.
In configurations with only a partially cue-matching subject or antecedent, the meta-analysis reveals facilitatory interference in subject-verb agreement and inhibitory interference in reflexives/reciprocals. The former is consistent with the LV05 account, but the latter is not. Moreover, the meta-analysis reveals that (i) interference type (proactive versus retroactive) leads to different effects in the reviewed dependency types and (ii) the prominence of the distractor strongly influences the interference effect. In sum, the meta-analysis suggests that the LV05 needs important modifications to account for the unexplained interference patterns and the differences between the dependency types. More generally, the meta-analysis provides a quantitative empirical basis for comparing the predictions of competing accounts of retrieval processes in sentence comprehension. (C) 2017 Elsevier Inc. All rights reserved.}, language = {en} } @article{JaegerBenzRoeseretal.2015, author = {J{\"a}ger, Lena Ann and Benz, Lena and Roeser, Jens and Dillon, Brian W. and Vasishth, Shravan}, title = {Teasing apart retrieval and encoding interference in the processing of anaphors}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00506}, pages = {18}, year = {2015}, abstract = {Two classes of account have been proposed to explain the memory processes subserving the processing of reflexive-antecedent dependencies. Structure-based accounts assume that the retrieval of the antecedent is guided by syntactic tree-configurational information without considering other kinds of information such as gender marking in the case of English reflexives. By contrast, unconstrained cue-based retrieval assumes that all available information is used for retrieving the antecedent. Similarity-based interference effects from structurally illicit distractors which match a non-structural retrieval cue have been interpreted as evidence favoring the unconstrained cue-based retrieval account since cue-based retrieval interference from structurally illicit distractors is incompatible with the structure-based account. However, it has been argued that the observed effects do not necessarily reflect interference occurring at the moment of retrieval but might equally well be accounted for by interference occurring already at the stage of encoding or maintaining the antecedent in memory, in which case they cannot be taken as evidence against the structure-based account. We present three experiments (self-paced reading and eye-tracking) on German reflexives and Swedish reflexive and pronominal possessives in which we pit the predictions of encoding interference and cue-based retrieval interference against each other. We could not find any indication that encoding interference affects the processing ease of the reflexive-antecedent dependency formation. Thus, there is no evidence that encoding interference might be the explanation for the interference effects observed in previous work. We therefore conclude that invoking encoding interference may not be a plausible way to reconcile interference effects with a structure-based account of reflexive processing.}, language = {en} } @misc{JaegerBenzRoeseretal.2015, author = {J{\"a}ger, Lena Ann and Benz, Lena and Roeser, Jens and Dillon, Brian W. 
and Vasishth, Shravan}, title = {Teasing apart Retrieval and Encoding Interference in the Processing of Anaphors}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78714}, year = {2015}, abstract = {Two classes of account have been proposed to explain the memory processes subserving the processing of reflexive-antecedent dependencies. Structure-based accounts assume that the retrieval of the antecedent is guided by syntactic tree-configurational information without considering other kinds of information such as gender marking in the case of English reflexives. By contrast, unconstrained cue-based retrieval assumes that all available information is used for retrieving the antecedent. Similarity-based interference effects from structurally illicit distractors which match a non-structural retrieval cue have been interpreted as evidence favoring the unconstrained cue-based retrieval account since cue-based retrieval interference from structurally illicit distractors is incompatible with the structure-based account. However, it has been argued that the observed effects do not necessarily reflect interference occurring at the moment of retrieval but might equally well be accounted for by interference occurring already at the stage of encoding or maintaining the antecedent in memory, in which case they cannot be taken as evidence against the structure-based account. We present three experiments (self-paced reading and eye-tracking) on German reflexives and Swedish reflexive and pronominal possessives in which we pit the predictions of encoding interference and cue-based retrieval interference against each other. We could not find any indication that encoding interference affects the processing ease of the reflexive-antecedent dependency formation. Thus, there is no evidence that encoding interference might be the explanation for the interference effects observed in previous work. We therefore conclude that invoking encoding interference may not be a plausible way to reconcile interference effects with a structure-based account of reflexive processing.}, language = {en} } @article{JaegerChenLietal.2015, author = {J{\"a}ger, Lena Ann and Chen, Zhong and Li, Qiang and Lin, Chien-Jer Charles and Vasishth, Shravan}, title = {The subject-relative advantage in Chinese: Evidence for expectation-based processing}, series = {Journal of memory and language}, volume = {79}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2014.10.005}, pages = {97 -- 120}, year = {2015}, abstract = {Chinese relative clauses are an important test case for pitting the predictions of expectation-based accounts against those of memory-based theories. The memory-based accounts predict that object relatives are easier to process than subject relatives because, in object relatives, the distance between the relative clause verb and the head noun is shorter. By contrast, expectation-based accounts such as surprisal predict that the less frequent object relative should be harder to process. In previous studies on Chinese relative clause comprehension, local ambiguities may have rendered a comparison between relative clause types uninterpretable. We designed experimental materials in which no local ambiguities confound the comparison. We ran two experiments (self-paced reading and eye-tracking) to compare reading difficulty in subject and object relatives which were placed either in subject or object modifying position. The evidence from our studies is consistent with the predictions of expectation-based accounts but not with those of memory-based theories. (C) 2014 Elsevier Inc. All rights reserved.}, language = {en} } @article{JaegerEngelmannVasishth2015, author = {J{\"a}ger, Lena Ann and Engelmann, Felix and Vasishth, Shravan}, title = {Retrieval interference in reflexive processing: experimental evidence from Mandarin, and computational modeling}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00617}, pages = {24}, year = {2015}, abstract = {We conducted two eye-tracking experiments investigating the processing of the Mandarin reflexive ziji in order to tease apart structurally constrained accounts from standard cue-based accounts of memory retrieval.
In both experiments, we tested whether structurally inaccessible distractors that fulfill the animacy requirement of ziji influence processing times at the reflexive. In Experiment 1, we manipulated animacy of the antecedent and a structurally inaccessible distractor intervening between the antecedent and the reflexive. In conditions where the accessible antecedent mismatched the animacy cue, we found inhibitory interference whereas in antecedent-match conditions, no effect of the distractor was observed. In Experiment 2, we tested only antecedent-match configurations and manipulated locality of the reflexive-antecedent binding (Mandarin allows non-local binding). Participants were asked to hold three distractors (animate vs. inanimate nouns) in memory while reading the target sentence. We found slower reading times when animate distractors were held in memory (inhibitory interference). Moreover, we replicated the locality effect reported in previous studies. These results are incompatible with structure-based accounts. However, the cue-based ACT-R model of Lewis and Vasishth (2005) cannot explain the observed pattern either. We therefore extend the original ACT-R model and show how this model not only explains the data presented in this article, but is also able to account for previously unexplained patterns in the literature on reflexive processing.}, language = {en} } @misc{JaegerEngelmannVasishth2015, author = {J{\"a}ger, Lena Ann and Engelmann, Felix and Vasishth, Shravan}, title = {Retrieval interference in reflexive processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78738}, year = {2015}, abstract = {We conducted two eye-tracking experiments investigating the processing of the Mandarin reflexive ziji in order to tease apart structurally constrained accounts from standard cue-based accounts of memory retrieval. In both experiments, we tested whether structurally inaccessible distractors that fulfill the animacy requirement of ziji influence processing times at the reflexive. In Experiment 1, we manipulated animacy of the antecedent and a structurally inaccessible distractor intervening between the antecedent and the reflexive. In conditions where the accessible antecedent mismatched the animacy cue, we found inhibitory interference whereas in antecedent-match conditions, no effect of the distractor was observed. In Experiment 2, we tested only antecedent-match configurations and manipulated locality of the reflexive-antecedent binding (Mandarin allows non-local binding). Participants were asked to hold three distractors (animate vs. inanimate nouns) in memory while reading the target sentence. We found slower reading times when animate distractors were held in memory (inhibitory interference). Moreover, we replicated the locality effect reported in previous studies. These results are incompatible with structure-based accounts. However, the cue-based ACT-R model of Lewis and Vasishth (2005) cannot explain the observed pattern either. 
We therefore extend the original ACT-R model and show how this model not only explains the data presented in this article, but is also able to account for previously unexplained patterns in the literature on reflexive processing.}, language = {en} } @article{JaegerMertzenVanDykeetal.2020, author = {J{\"a}ger, Lena Ann and Mertzen, Daniela and Van Dyke, Julie A. and Vasishth, Shravan}, title = {Interference patterns in subject-verb agreement and reflexives revisited}, series = {Journal of memory and language}, volume = {111}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2019.104063}, pages = {21}, year = {2020}, abstract = {Cue-based retrieval theories in sentence processing predict two classes of interference effect: (i) Inhibitory interference is predicted when multiple items match a retrieval cue: cue-overloading leads to an overall slowdown in reading time; and (ii) Facilitatory interference arises when a retrieval target as well as a distractor only partially match the retrieval cues; this partial matching leads to an overall speedup in retrieval time. Inhibitory interference effects are widely observed, but facilitatory interference apparently has an exception: reflexives have been claimed to show no facilitatory interference effects. Because the claim is based on underpowered studies, we conducted a large-sample experiment that investigated both facilitatory and inhibitory interference.
In contrast to previous studies, we find facilitatory interference effects in reflexives. We also present a quantitative evaluation of the cue-based retrieval model of Engelmann, Jager, and Vasishth (2019).}, language = {en} } @article{LaurinavichyuteYadavVasishth2022, author = {Laurinavichyute, Anna and Yadav, Himanshu and Vasishth, Shravan}, title = {Share the code, not just the data}, series = {Journal of memory and language}, volume = {125}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2022.104332}, pages = {12}, year = {2022}, abstract = {In 2019 the Journal of Memory and Language instituted an open data and code policy; this policy requires that, as a rule, code and data be released at the latest upon publication. How effective is this policy? We compared 59 papers published before, and 59 papers published after, the policy took effect. After the policy was in place, the rate of data sharing increased by more than 50\%. We further looked at whether papers published under the open data policy were reproducible, in the sense that the published results should be possible to regenerate given the data, and given the code, when code was provided. For 8 out of the 59 papers, data sets were inaccessible. The reproducibility rate ranged from 34\% to 56\%, depending on the reproducibility criteria. The strongest predictor of whether an attempt to reproduce would be successful is the presence of the analysis code: it increases the probability of reproducing reported results by almost 40\%. We propose two simple steps that can increase the reproducibility of published papers: share the analysis code, and attempt to reproduce one's own analysis using only the shared materials.}, language = {en} } @article{LogacevVasishth2016, author = {Logacev, Pavel and Vasishth, Shravan}, title = {A Multiple-Channel Model of Task-Dependent Ambiguity Resolution in Sentence Comprehension}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {40}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12228}, pages = {266 -- 298}, year = {2016}, abstract = {Traxler, Pickering, and Clifton (1998) found that ambiguous sentences are read faster than their unambiguous counterparts. This so-called ambiguity advantage has presented a major challenge to classical theories of human sentence comprehension (parsing) because its most prominent explanation, in the form of the unrestricted race model (URM), assumes that parsing is non-deterministic. Recently, Swets, Desmet, Clifton, and Ferreira (2008) have challenged the URM. They argue that readers strategically underspecify the representation of ambiguous sentences to save time, unless disambiguation is required by task demands. When disambiguation is required, however, readers assign sentences full structure—and Swets et al. provide experimental evidence to this end. On the basis of their findings, they argue against the URM and in favor of a model of task-dependent sentence comprehension. We show through simulations that the Swets et al. 
data do not constitute evidence for task-dependent parsing because they can be explained by the URM. However, we provide decisive evidence from a German self-paced reading study consistent with Swets et al.'s general claim about task-dependent parsing. Specifically, we show that under certain conditions, ambiguous sentences can be read more slowly than their unambiguous counterparts, suggesting that the parser may create several parses, when required. Finally, we present the first quantitative model of task-driven disambiguation that subsumes the URM, and we show that it can explain both Swets et al.'s results and our findings.}, language = {en} } @misc{LogačevVasishth2016, author = {Logačev, Pavel and Vasishth, Shravan}, title = {Understanding underspecification}, doi = {10.1080/17470218.2015.1134602}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93441}, pages = {996 -- 1012}, year = {2016}, abstract = {Swets et al. (2008. Underspecification of syntactic ambiguities: Evidence from self-paced reading. Memory and Cognition, 36(1), 201-216) presented evidence that the so-called ambiguity advantage [Traxler et al. (1998). Adjunct attachment is not a form of lexical ambiguity resolution. Journal of Memory and Language, 39(4), 558-592], which has been explained in terms of the Unrestricted Race Model, can equally well be explained by assuming underspecification in ambiguous conditions driven by task-demands. Specifically, if comprehension questions require that ambiguities be resolved, the parser tends to make an attachment: when questions are about superficial aspects of the target sentence, readers tend to pursue an underspecification strategy. It is reasonable to assume that individual differences in strategy will play a significant role in the application of such strategies, so that studying average behaviour may not be informative. In order to study the predictions of the good-enough processing theory, we implemented two versions of underspecification: the partial specification model (PSM), which is an implementation of the Swets et al. proposal, and a more parsimonious version, the non-specification model (NSM). We evaluate the relative fit of these two kinds of underspecification to Swets et al.'s data; as a baseline, we also fitted three models that assume no underspecification. We find that a model without underspecification provides a somewhat better fit than both underspecification models, while the NSM model provides a better fit than the PSM. We interpret the results as lack of unambiguous evidence in favour of underspecification; however, given that there is considerable existing evidence for good-enough processing in the literature, it is reasonable to assume that some underspecification might occur. Under this assumption, the results can be interpreted as tentative evidence for NSM over PSM. More generally, our work provides a method for choosing between models of real-time processes in sentence comprehension that make qualitative predictions about the relationship between several dependent variables. We believe that sentence processing research will greatly benefit from a wider use of such methods.}, language = {en} } @article{MatuschekKlieglVasishthetal.2017, author = {Matuschek, Hannes and Kliegl, Reinhold and Vasishth, Shravan and Baayen, Harald R. 
and Bates, Douglas}, title = {Balancing Type I error and power in linear mixed models}, series = {Journal of memory and language}, volume = {94}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2017.01.001}, pages = {305 -- 315}, year = {2017}, abstract = {Linear mixed-effects models have increasingly replaced mixed-model analyses of variance for statistical inference in factorial psycholinguistic experiments. Although LMMs have many advantages over ANOVA, like ANOVAs, setting them up for data analysis also requires some care. One simple option, when numerically possible, is to fit the full variance-covariance structure of random effects (the maximal model; Barr, Levy, Scheepers \& Tily, 2013), presumably to keep Type I error down to the nominal α in the presence of random effects. Although it is true that fitting a model with only random intercepts may lead to higher Type I error, fitting a maximal model also has a cost: it can lead to a significant loss of power. We demonstrate this with simulations and suggest that for typical psychological and psycholinguistic data, higher power is achieved without inflating Type I error rate if a model selection criterion is used to select a random effect structure that is supported by the data. (C) 2017 The Authors. Published by Elsevier Inc.}, language = {en} } @article{McCurdyKentnerVasishth2013, author = {McCurdy, Kate and Kentner, Gerrit and Vasishth, Shravan}, title = {Implicit prosody and contextual bias in silent reading}, series = {Journal of Eye Movement Research}, volume = {6}, journal = {Journal of Eye Movement Research}, number = {2}, publisher = {International Group for Eye Movement Research}, address = {Bern}, issn = {1995-8692}, pages = {17}, year = {2013}, abstract = {Eye-movement research on implicit prosody has found effects of lexical stress on syntactic ambiguity resolution, suggesting that metrical well-formedness constraints interact with syntactic category assignment. Building on these findings, the present eyetracking study investigates whether contextual bias can modulate the effects of metrical structure on syntactic ambiguity resolution in silent reading. Contextual bias and potential stress-clash in the ambiguous region were crossed in a 2 x 2 design. Participants read biased context sentences followed by temporarily ambiguous test sentences. In the three-word ambiguous region, main effects of lexical stress were dominant, while early effects of context were absent. Potential stress clash yielded a significant increase in first-pass regressions and re-reading probability across the three words. In the disambiguating region, the disambiguating word itself showed increased processing difficulty (lower skipping and increased re-reading probability) when the disambiguation engendered a stress clash configuration, while the word immediately following showed main effects of context in those same measures. Taken together, effects of lexical stress upon eye movements were swift and pervasive across first-pass and second-pass measures, while effects of context were relatively delayed. These results indicate a strong role for implicit meter in guiding parsing, one that appears insensitive to higher-level constraints.
Our findings are problematic for two classes of models, the two-stage garden-path model and the constraint-based competition-integration model, but can be explained by a variation on the two-stage model, the unrestricted race model.}, language = {en} } @article{MertzenLagoVasishth2021, author = {Mertzen, Daniela and Lago, Sol and Vasishth, Shravan}, title = {The benefits of preregistration for hypothesis-driven bilingualism research}, series = {Bilingualism : language and cognition}, volume = {24}, journal = {Bilingualism : language and cognition}, number = {5}, publisher = {Cambridge Univ. Press}, address = {Cambridge}, issn = {1366-7289}, doi = {10.1017/S1366728921000031}, pages = {807 -- 812}, year = {2021}, abstract = {Preregistration is an open science practice that requires the specification of research hypotheses and analysis plans before the data are inspected. Here, we discuss the benefits of preregistration for hypothesis-driven, confirmatory bilingualism research. Using examples from psycholinguistics and bilingualism, we illustrate how non-peer reviewed preregistrations can serve to implement a clean distinction between hypothesis testing and data exploration. This distinction helps researchers avoid casting post-hoc hypotheses and analyses as confirmatory ones. We argue that, in keeping with current best practices in the experimental sciences, preregistration, along with sharing data and code, should be an integral part of hypothesis-driven bilingualism research.}, language = {en} } @article{MetznervonderMalsburgVasishthetal.2017, author = {Metzner, Paul and von der Malsburg, Titus Raban and Vasishth, Shravan and Roesler, Frank}, title = {The Importance of Reading Naturally: Evidence From Combined Recordings of Eye Movements and Electric Brain Potentials}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {41}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, publisher = {Wiley}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12384}, pages = {1232 -- 1263}, year = {2017}, language = {en} } @article{MaetzigVasishthEngelmannetal.2018, author = {M{\"a}tzig, Paul and Vasishth, Shravan and Engelmann, Felix and Caplan, David and Burchert, Frank}, title = {A computational investigation of sources of variability in sentence comprehension difficulty in aphasia}, series = {Topics in cognitive science}, volume = {10}, journal = {Topics in cognitive science}, number = {1}, publisher = {Wiley}, address = {Hoboken}, issn = {1756-8757}, doi = {10.1111/tops.12323}, pages = {161 -- 174}, year = {2018}, abstract = {We present a computational evaluation of three hypotheses about sources of deficit in sentence comprehension in aphasia: slowed processing, intermittent deficiency, and resource reduction. The ACT-R based Lewis and Vasishth (2005) model is used to implement these three proposals. Slowed processing is implemented as slowed execution time of parse steps; intermittent deficiency as increased random noise in activation of elements in memory; and resource reduction as reduced spreading activation. As data, we considered subject vs. 
object relative sentences, presented in a self-paced listening modality to 56 individuals with aphasia (IWA) and 46 matched controls. The participants heard the sentences and carried out a picture verification task to decide on an interpretation of the sentence. These response accuracies are used to identify the best parameters (for each participant) that correspond to the three hypotheses mentioned above. We show that controls have more tightly clustered (less variable) parameter values than IWA; specifically, compared to controls, among IWA there are more individuals with slow parsing times, high noise, and low spreading activation. We find that (a) individual IWA show differential amounts of deficit along the three dimensions of slowed processing, intermittent deficiency, and resource reduction, (b) overall, there is evidence for all three sources of deficit playing a role, and (c) IWA have a more variable range of parameter values than controls. An important implication is that it may be meaningless to talk about sources of deficit with respect to an abstract average IWA; the focus should be on the individual's differential degrees of deficit along different dimensions, and on understanding the causes of variability in deficit between participants.}, language = {en} } @article{NicenboimRoettgerVasishth2018, author = {Nicenboim, Bruno and Roettger, Timo B. and Vasishth, Shravan}, title = {Using meta-analysis for evidence synthesis}, series = {Journal of phonetics}, volume = {70}, journal = {Journal of phonetics}, publisher = {Elsevier}, address = {London}, issn = {0095-4470}, doi = {10.1016/j.wocn.2018.06.001}, pages = {39 -- 55}, year = {2018}, abstract = {Within quantitative phonetics, it is common practice to draw conclusions based on statistical significance alone. Using incomplete neutralization of final devoicing in German as a case study, we illustrate the problems with this approach. If researchers find a significant acoustic difference between voiceless and devoiced obstruents, they conclude that neutralization is incomplete, and if they find no significant difference, they conclude that neutralization is complete. However, such strong claims regarding the existence or absence of an effect based on significant results alone can be misleading. Instead, the totality of available evidence should be brought to bear on the question. Towards this end, we synthesize the evidence from 14 studies on incomplete neutralization in German using a Bayesian random-effects meta-analysis. Our meta-analysis provides evidence in favor of incomplete neutralization. We conclude with some suggestions for improving the quality of future research on phonetic phenomena: ensure that sample sizes allow for high-precision estimates of the effect; avoid the temptation to deploy researcher degrees of freedom when analyzing data; focus on estimates of the parameter of interest and the uncertainty about that parameter; attempt to replicate effects found; and, whenever possible, make both the data and analysis available publicly. (c) 2018 Elsevier Ltd.
All rights reserved.}, language = {en} } @article{NicenboimVasishth2018, author = {Nicenboim, Bruno and Vasishth, Shravan}, title = {Models of retrieval in sentence comprehension}, series = {Journal of memory and language}, volume = {99}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2017.08.004}, pages = {1 -- 34}, year = {2018}, abstract = {Research on similarity-based interference has provided extensive evidence that the formation of dependencies between non-adjacent words relies on a cue-based retrieval mechanism. There are two different models that can account for one of the main predictions of interference, i.e., a slowdown at a retrieval site, when several items share a feature associated with a retrieval cue: Lewis and Vasishth's (2005) activation-based model and McElree's (2000) direct-access model. Even though these two models have been used almost interchangeably, they are based on different assumptions and predict differences in the relationship between reading times and response accuracy. The activation-based model follows the assumptions of the ACT-R framework, and its retrieval process behaves as a lognormal race between accumulators of evidence with a single variance. Under this model, accuracy of the retrieval is determined by the winner of the race and retrieval time by its rate of accumulation. In contrast, the direct-access model assumes a model of memory where only the probability of retrieval can be affected, while the retrieval time is drawn from the same distribution; in this model, differences in latencies are a by-product of the possibility of backtracking and repairing incorrect retrievals. We implemented both models in a Bayesian hierarchical framework in order to evaluate them and compare them. The data show that correct retrievals take longer than incorrect ones, and this pattern is better fit under the direct-access model than under the activation-based model. This finding does not rule out the possibility that retrieval may be behaving as a race model with assumptions that follow less closely the ones from the ACT-R framework. By introducing a modification of the activation model, i.e., by assuming that the accumulation of evidence for retrieval of incorrect items is not only slower but noisier (i.e., different variances for the correct and incorrect items), the model can provide a fit as good as the one of the direct-access model. 
This first ever computational evaluation of alternative accounts of retrieval processes in sentence processing opens the way for a broader investigation of theories of dependency completion.}, language = {en} } @article{NicenboimVasishthEngelmannetal.2018, author = {Nicenboim, Bruno and Vasishth, Shravan and Engelmann, Felix and Suckow, Katja}, title = {Exploratory and confirmatory analyses in sentence processing}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {42}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, publisher = {Wiley}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12589}, pages = {1075 -- 1100}, year = {2018}, abstract = {Given the replication crisis in cognitive science, it is important to consider what researchers need to do in order to report results that are reliable. We consider three changes in current practice that have the potential to deliver more realistic and robust claims. First, the planned experiment should be divided into two stages, an exploratory stage and a confirmatory stage. This clear separation allows the researcher to check whether any results found in the exploratory stage are robust. The second change is to carry out adequately powered studies. We show that this is imperative if we want to obtain realistic estimates of effects in psycholinguistics. The third change is to use Bayesian data-analytic methods rather than frequentist ones; the Bayesian framework allows us to focus on the best estimates we can obtain of the effect, rather than rejecting a strawman null. As a case study, we investigate number interference effects in German. Number feature interference is predicted by cue-based retrieval models of sentence processing (Van Dyke \& Lewis, 2003; Vasishth \& Lewis, 2006), but it has shown inconsistent results. We show that by implementing the three changes mentioned, suggestive evidence emerges that is consistent with the predicted number interference effects.}, language = {en} } @misc{NicenboimVasishthGatteietal.2015, author = {Nicenboim, Bruno and Vasishth, Shravan and Gattei, Carolina and Sigman, Mariano and Kliegl, Reinhold}, title = {Working memory differences in long-distance dependency resolution}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75694}, pages = {16}, year = {2015}, abstract = {There is a wealth of evidence showing that increasing the distance between an argument and its head leads to more processing effort, namely, locality effects; these are usually associated with constraints in working memory (DLT: Gibson, 2000; activation-based model: Lewis and Vasishth, 2005). In SOV languages, however, the opposite effect has been found: antilocality (see discussion in Levy et al., 2013). Antilocality effects can be explained by the expectation-based approach as proposed by Levy (2008) or by the activation-based model of sentence processing as proposed by Lewis and Vasishth (2005). We report an eye-tracking and a self-paced reading study with sentences in Spanish together with measures of individual differences to examine the distinction between expectation- and memory-based accounts, and within memory-based accounts the further distinction between DLT and the activation-based model. 
The experiments show that (i) antilocality effects as predicted by the expectation account appear only for high-capacity readers; (ii) increasing dependency length by interposing material that modifies the head of the dependency (the verb) produces stronger facilitation than increasing dependency length with material that does not modify the head; this is in agreement with the activation-based model but not with the expectation account; and (iii) a possible outcome of memory load on low-capacity readers is the increase in regressive saccades (locality effects as predicted by memory-based accounts) or, surprisingly, a speedup in the self-paced reading task; the latter consistent with good-enough parsing (Ferreira et al., 2002). In sum, the study suggests that individual differences in working memory capacity play a role in dependency resolution, and that some of the aspects of dependency resolution can be best explained with the activation-based model together with a prediction component.}, language = {en} } @article{NicenboimVasishthGatteietal.2015, author = {Nicenboim, Bruno and Vasishth, Shravan and Gattei, Carolina and Sigman, Mariano and Kliegl, Reinhold}, title = {Working memory differences in long-distance dependency resolution}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, number = {312}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00312}, pages = {16}, year = {2015}, abstract = {There is a wealth of evidence showing that increasing the distance between an argument and its head leads to more processing effort, namely, locality effects; these are usually associated with constraints in working memory (DLT: Gibson, 2000; activation-based model: Lewis and Vasishth, 2005). In SOV languages, however, the opposite effect has been found: antilocality (see discussion in Levy et al., 2013). Antilocality effects can be explained by the expectation-based approach as proposed by Levy (2008) or by the activation-based model of sentence processing as proposed by Lewis and Vasishth (2005). We report an eye-tracking and a self-paced reading study with sentences in Spanish together with measures of individual differences to examine the distinction between expectation- and memory-based accounts, and within memory-based accounts the further distinction between DLT and the activation-based model. The experiments show that (i) antilocality effects as predicted by the expectation account appear only for high-capacity readers; (ii) increasing dependency length by interposing material that modifies the head of the dependency (the verb) produces stronger facilitation than increasing dependency length with material that does not modify the head; this is in agreement with the activation-based model but not with the expectation account; and (iii) a possible outcome of memory load on low-capacity readers is the increase in regressive saccades (locality effects as predicted by memory-based accounts) or, surprisingly, a speedup in the self-paced reading task; the latter consistent with good-enough parsing (Ferreira et al., 2002). 
In sum, the study suggests that individual differences in working memory capacity play a role in dependency resolution, and that some of the aspects of dependency resolution can be best explained with the activation-based model together with a prediction component.}, language = {en} } @article{NicenboimVasishthRoesler2020, author = {Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Are words pre-activated probabilistically during sentence comprehension?}, series = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience}, volume = {142}, journal = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience}, publisher = {Elsevier Science}, address = {Oxford}, issn = {0028-3932}, doi = {10.1016/j.neuropsychologia.2020.107427}, pages = {27}, year = {2020}, abstract = {Several studies (e.g., Wicha et al., 2003b; DeLong et al., 2005) have shown that readers use information from the sentential context to predict nouns (or some of their features), and that predictability effects can be inferred from the EEG signal in determiners or adjectives appearing before the predicted noun. While these findings provide evidence for the pre-activation proposal, recent replication attempts together with inconsistencies in the results from the literature cast doubt on the robustness of this phenomenon. Our study presents the first attempt to use the effect of gender on predictability in German to study the pre-activation hypothesis, capitalizing on the fact that all German nouns have a gender and that their preceding determiners can show an unambiguous gender marking when the noun phrase has accusative case. Despite having a relatively large sample size (of 120 subjects), both our preregistered and exploratory analyses failed to yield conclusive evidence for or against an effect of pre-activation. The sign of the effect is, however, in the expected direction: the more unexpected the gender of the determiner, the larger the negativity. The recent, inconclusive replication attempts by Nieuwland et al. (2018) and others also show effects with signs in the expected direction. We conducted a Bayesian random-effects meta-analysis using our data and the publicly available data from these recent replication attempts. Our meta-analysis shows a relatively clear but very small effect that is consistent with the pre-activation account and demonstrates a very important advantage of the Bayesian data analysis methodology: we can incrementally accumulate evidence to obtain increasingly precise estimates of the effect of interest.}, language = {en} } @article{PaapeAvetisyanLagoetal.2021, author = {Paape, Dario and Avetisyan, Serine and Lago, Sol and Vasishth, Shravan}, title = {Modeling misretrieval and feature substitution in agreement attraction}, series = {Cognitive science}, volume = {45}, journal = {Cognitive science}, number = {8}, publisher = {Wiley-Blackwell}, address = {Malden, Mass.}, issn = {0364-0213}, doi = {10.1111/cogs.13019}, pages = {30}, year = {2021}, abstract = {We present computational modeling results based on a self-paced reading study investigating number attraction effects in Eastern Armenian. We implement three novel computational models of agreement attraction in a Bayesian framework and compare their predictive fit to the data using k-fold cross-validation. We find that our data are better accounted for by an encoding-based model of agreement attraction, compared to a retrieval-based model.
A novel methodological contribution of our study is the use of comprehension questions with open-ended responses, so that both misinterpretation of the number feature of the subject phrase and misassignment of the thematic subject role of the verb can be investigated at the same time. We find evidence for both types of misinterpretation in our study, sometimes in the same trial. However, the specific error patterns in our data are not fully consistent with any previously proposed model.}, language = {en} } @misc{PaapeNicenboimVasishth2017, author = {Paape, Dario L. J. F. and Nicenboim, Bruno and Vasishth, Shravan}, title = {Does antecedent complexity affect ellipsis processing?}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403373}, pages = {29}, year = {2017}, abstract = {In two self-paced reading experiments, we investigated the effect of changes in antecedent complexity on processing times for ellipsis. Pointer- or "sharing"-based approaches to ellipsis processing (Frazier \& Clifton 2001, 2005; Martin \& McElree 2008) predict no effect of antecedent complexity on reading times at the ellipsis site while other accounts predict increased antecedent complexity to either slow down processing (Murphy 1985) or to speed it up (Hofmeister 2011). Experiment 1 manipulated antecedent complexity and elision, yielding evidence against a speedup at the ellipsis site and in favor of a null effect. In order to investigate possible superficial processing on the part of participants, Experiment 2 manipulated the amount of attention required to correctly respond to end-of-sentence comprehension probes, yielding evidence against a complexity-induced slowdown at the ellipsis site. Overall, our results are compatible with pointer-based approaches while casting doubt on the notion that changes in antecedent complexity lead to measurable differences in ellipsis processing speed.}, language = {en} } @article{PaapeNicenboimVasishth2017, author = {Paape, Dario L. J. F. and Nicenboim, Bruno and Vasishth, Shravan}, title = {Does antecedent complexity affect ellipsis processing?}, series = {Glossa : a journal of general linguistics}, volume = {2}, journal = {Glossa : a journal of general linguistics}, number = {1}, publisher = {Ubiquity Press}, address = {London}, issn = {2397-1835}, doi = {10.5334/gjgl.290}, pages = {1 -- 29}, year = {2017}, abstract = {In two self-paced reading experiments, we investigated the effect of changes in antecedent complexity on processing times for ellipsis. Pointer- or "sharing"-based approaches to ellipsis processing (Frazier \& Clifton 2001, 2005; Martin \& McElree 2008) predict no effect of antecedent complexity on reading times at the ellipsis site while other accounts predict increased antecedent complexity to either slow down processing (Murphy 1985) or to speed it up (Hofmeister 2011). Experiment 1 manipulated antecedent complexity and elision, yielding evidence against a speedup at the ellipsis site and in favor of a null effect. In order to investigate possible superficial processing on the part of participants, Experiment 2 manipulated the amount of attention required to correctly respond to end-of-sentence comprehension probes, yielding evidence against a complexity-induced slowdown at the ellipsis site.
Overall, our results are compatible with pointer-based approaches while casting doubt on the notion that changes in antecedent complexity lead to measurable differences in ellipsis processing speed.}, language = {en} } @article{PaapeVasishth2022, author = {Paape, Dario and Vasishth, Shravan}, title = {Estimating the true cost of garden pathing:}, series = {Cognitive science}, volume = {46}, journal = {Cognitive science}, number = {8}, publisher = {Wiley-Blackwell}, address = {Malden, Mass.}, issn = {0364-0213}, doi = {10.1111/cogs.13186}, pages = {23}, year = {2022}, abstract = {What is the processing cost of being garden-pathed by a temporary syntactic ambiguity? We argue that comparing average reading times in garden-path versus non-garden-path sentences is not enough to answer this question. Trial-level contaminants such as inattention, the fact that garden pathing may occur non-deterministically in the ambiguous condition, and "triage" (rejecting the sentence without reanalysis; Fodor \& Inoue, 2000) lead to systematic underestimates of the true cost of garden pathing. Furthermore, the "pure" garden-path effect due to encountering an unexpected word needs to be separated from the additional cost of syntactic reanalysis. To get more realistic estimates for the individual processing costs of garden pathing and syntactic reanalysis, we implement a novel computational model that includes trial-level contaminants as probabilistically occurring latent cognitive processes. The model shows a good predictive fit to existing reading time and judgment data. Furthermore, the latent-process approach captures differences between noun phrase/zero complement (NP/Z) garden-path sentences and semantically biased reduced relative clause (RRC) garden-path sentences: The NP/Z garden path occurs nearly deterministically but can be mostly eliminated by adding a comma. By contrast, the RRC garden path occurs with a lower probability, but disambiguation via semantic plausibility is not always effective.}, language = {en} } @article{PaapeVasishthvonderMalsburg2020, author = {Paape, Dario and Vasishth, Shravan and von der Malsburg, Titus Raban}, title = {Quadruplex negatio invertit?}, series = {Journal of semantics}, volume = {37}, journal = {Journal of semantics}, number = {4}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0167-5133}, doi = {10.1093/jos/ffaa009}, pages = {509 -- 555}, year = {2020}, abstract = {So-called "depth charge" sentences (No head injury is too trivial to be ignored) are interpreted by the vast majority of speakers to mean the opposite of what their compositional semantics would dictate. The semantic inversion that is observed for sentences of this type is the strongest and most persistent linguistic illusion known to the field (Wason \& Reich, 1979). However, it has recently been argued that the preferred interpretation arises not because of a prevailing failure of the processing system, but rather because the non-compositional meaning is grammaticalized in the form of a stored construction (Cook \& Stevenson, 2010; Fortuin, 2014). In a series of five experiments, we investigate whether the depth charge effect is better explained by processing failure due to memory overload (the overloading hypothesis) or by the existence of an underlying grammaticalized construction with two available meanings (the ambiguity hypothesis). To our knowledge, our experiments are the first to explore the on-line processing profile of depth charge sentences.
Overall, the data are consistent with specific variants of the ambiguity and overloading hypotheses while providing evidence against other variants. As an extension of the overloading hypothesis, we suggest two heuristic processes that may ultimately yield the incorrect reading when compositional processing is suspended for strategic reasons.}, language = {en} } @misc{PatilKentnerGollradetal.2008, author = {Patil, Umesh and Kentner, Gerrit and Gollrad, Anja and K{\"u}gler, Frank and F{\´e}ry, Caroline and Vasishth, Shravan}, title = {Focus, word order and intonation in Hindi}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-46118}, year = {2008}, abstract = {A production study is presented that investigates the effects of word order and information structural context on the prosodic realization of declarative sentences in Hindi. Previous work on Hindi intonation has shown that: (i) non-final content words bear rising pitch accents (Moore 1965, Dyrud 2001, Nair 1999); (ii) focused constituents show greater pitch excursion and longer duration and that post-focal material undergoes pitch range reduction (Moore 1965, Harnsberger 1994, Harnsberger and Judge 1996); and (iii) focused constituents may be followed by a phrase break (Moore 1965). By means of a controlled experiment, we investigated the effect of focus in relation to word order variation using 1200 utterances produced by 20 speakers. Fundamental frequency (F0) and duration of constituents were measured in Subject-Object-Verb (SOV) and Object-Subject-Verb (OSV) sentences in different information structural conditions (wide focus, subject focus and object focus). The analyses indicate that (i) regardless of word order and focus, the constituents are in a strict downstep relationship; (ii) focus is mainly characterized by post-focal pitch range reduction rather than pitch raising of the element in focus; (iii) given expressions that occur pre-focally appear to undergo no reduction; (iv) pitch excursion and duration of the constituents is higher in OSV compared to SOV sentences. A phonological analysis suggests that focus affects pitch scaling and that word order influences prosodic phrasing of the constituents.}, language = {en} } @article{PatilVasishthLewis2016, author = {Patil, Umesh and Vasishth, Shravan and Lewis, Richard L.}, title = {Retrieval Interference in Syntactic Processing: The Case of Reflexive Binding in English}, series = {Frontiers in psychology}, volume = {7}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2016.00329}, pages = {18}, year = {2016}, abstract = {It has been proposed that in online sentence comprehension the dependency between a reflexive pronoun such as himself/herself and its antecedent is resolved using exclusively syntactic constraints. Under this strictly syntactic search account, Principle A of the binding theory—which requires that the antecedent c-command the reflexive within the same clause that the reflexive occurs in—constrains the parser's search for an antecedent. The parser thus ignores candidate antecedents that might match agreement features of the reflexive (e.g., gender) but are ineligible as potential antecedents because they are in structurally illicit positions. An alternative possibility accords no special status to structural constraints: in addition to using Principle A, the parser also uses non-structural cues such as gender to access the antecedent. 
According to cue-based retrieval theories of memory (e.g., Lewis and Vasishth, 2005), the use of non-structural cues should result in increased retrieval times and occasional errors when candidates partially match the cues, even if the candidates are in structurally illicit positions. In this paper, we first show how the retrieval processes that underlie the reflexive binding are naturally realized in the Lewis and Vasishth (2005) model. We present the predictions of the model under the assumption that both structural and non-structural cues are used during retrieval, and provide a critical analysis of previous empirical studies that failed to find evidence for the use of non-structural cues, suggesting that these failures may be Type II errors. We use this analysis and the results of further modeling to motivate a new empirical design that we use in an eye tracking study. The results of this study confirm the key predictions of the model concerning the use of non-structural cues, and are inconsistent with the strictly syntactic search account. These results present a challenge for theories advocating the infallibility of the human parser in the case of reflexive resolution, and provide support for the inclusion of agreement features such as gender in the set of retrieval cues.}, language = {en} } @article{PreglaLissonHernandezVasishthetal.2021, author = {Pregla, Dorothea and Liss{\´o}n Hern{\´a}ndez, Paula J. and Vasishth, Shravan and Burchert, Frank and Stadie, Nicole}, title = {Variability in sentence comprehension in aphasia in German}, series = {Brain \& language : a journal of the neurobiology of language}, volume = {222}, journal = {Brain \& language : a journal of the neurobiology of language}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0093-934X}, doi = {10.1016/j.bl.2021.105008}, pages = {20}, year = {2021}, abstract = {An important aspect of aphasia is the observation of behavioral variability between and within individual participants. Our study addresses variability in sentence comprehension in German, by testing 21 individuals with aphasia and a control group and involving (a) several constructions (declarative sentences, relative clauses and control structures with an overt pronoun or PRO), (b) three response tasks (object manipulation, sentence-picture matching with/without self-paced listening), and (c) two test phases (to investigate test-retest performance). With this systematic, large-scale study we gained insights into variability in sentence comprehension. We found that the size of syntactic effects varied both in aphasia and in control participants. Whereas variability in control participants led to systematic changes, variability in individuals with aphasia was unsystematic across test phases or response tasks. The persistent occurrence of canonicity and interference effects across response tasks and test phases, however, shows that the performance is systematically influenced by syntactic complexity.}, language = {en} } @article{RabeChandraKruegeletal.2021, author = {Rabe, Maximilian Michael and Chandra, Johan and Kr{\"u}gel, Andr{\´e} and Seelig, Stefan A. 
and Vasishth, Shravan and Engbert, Ralf}, title = {A bayesian approach to dynamical modeling of eye-movement control in reading of normal, mirrored, and scrambled texts}, series = {Psychological Review}, volume = {128}, journal = {Psychological Review}, number = {5}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000268}, pages = {803 -- 823}, year = {2021}, abstract = {In eye-movement control during reading, advanced process-oriented models have been developed to reproduce behavioral data. So far, model complexity and large numbers of model parameters prevented rigorous statistical inference and modeling of interindividual differences. Here we propose a Bayesian approach to both problems for one representative computational model of sentence reading (SWIFT; Engbert et al., Psychological Review, 112, 2005, pp. 777-813). We used experimental data from 36 subjects who read the text in a normal and one of four manipulated text layouts (e.g., mirrored and scrambled letters). The SWIFT model was fitted to subjects and experimental conditions individually to investigate between-subject variability. Based on posterior distributions of model parameters, fixation probabilities and durations are reliably recovered from simulated data and reproduced for withheld empirical data, at both the experimental condition and subject levels. A subsequent statistical analysis of model parameters across reading conditions generates model-driven explanations for observable effects between conditions.}, language = {en} } @article{SchadNicenboimBuerkneretal.2022, author = {Schad, Daniel and Nicenboim, Bruno and B{\"u}rkner, Paul-Christian and Betancourt, Michael and Vasishth, Shravan}, title = {Workflow techniques for the robust use of bayes factors}, series = {Psychological methods}, volume = {28}, journal = {Psychological methods}, number = {6}, publisher = {American Psychological Association}, address = {Washington}, issn = {1082-989X}, doi = {10.1037/met0000472}, pages = {1404 -- 1426}, year = {2022}, abstract = {Inferences about hypotheses are ubiquitous in the cognitive sciences. Bayes factors provide one general way to compare different hypotheses by their compatibility with the observed data. Those quantifications can then also be used to choose between hypotheses. While Bayes factors provide an immediate approach to hypothesis testing, they are highly sensitive to details of the data/model assumptions and it's unclear whether the details of the computational implementation (such as bridge sampling) are unbiased for complex analyses. Here, we study how Bayes factors misbehave under different conditions. This includes a study of errors in the estimation of Bayes factors; the first-ever use of simulation-based calibration to test the accuracy and bias of Bayes factor estimates using bridge sampling; a study of the stability of Bayes factors against different MCMC draws and sampling variation in the data; and a look at the variability of decisions based on Bayes factors using a utility function. We outline a Bayes factor workflow that researchers can use to study whether Bayes factors are robust for their individual analysis. Reproducible code is available from https://osf.io/y354c/.
Translational Abstract
In psychology and related areas, scientific hypotheses are commonly tested by asking questions like "is [some] effect present or absent." Such hypothesis testing is most often carried out using frequentist null hypothesis significance testing (NHST). The NHST procedure is very simple: It usually returns a p-value, which is then used to make binary decisions like "the effect is present/absent." For example, it is common to see studies in the media that draw simplistic conclusions like "coffee causes cancer," or "coffee reduces the chances of getting cancer." However, a powerful and more nuanced alternative approach exists: Bayes factors. Bayes factors have many advantages over NHST. However, for the complex statistical models that are commonly used for data analysis today, computing Bayes factors is not at all a simple matter. In this article, we discuss the main complexities associated with computing Bayes factors. This is the first article to provide a detailed workflow for understanding and computing Bayes factors in complex statistical models. The article provides a statistically more nuanced way to think about hypothesis testing than the overly simplistic tendency to declare effects as being "present" or "absent".}, language = {en} } @article{SchadVasishth2022, author = {Schad, Daniel and Vasishth, Shravan}, title = {The posterior probability of a null hypothesis given a statistically significant result}, series = {The quantitative methods for psychology}, volume = {18}, journal = {The quantitative methods for psychology}, number = {2}, publisher = {University of Montreal, Department of Psychology}, address = {Montreal}, issn = {1913-4126}, doi = {10.20982/tqmp.18.2.p011}, pages = {130 -- 141}, year = {2022}, abstract = {When researchers carry out a null hypothesis significance test, it is tempting to assume that a statistically significant result lowers Prob(H0), the probability of the null hypothesis being true. Technically, such a statement is meaningless for various reasons: e.g., the null hypothesis does not have a probability associated with it. However, it is possible to relax certain assumptions to compute the posterior probability Prob(H0) under repeated sampling. We show in a step-by-step guide that the intuitively appealing belief, that Prob(H0) is low when significant results have been obtained under repeated sampling, is in general incorrect and depends greatly on: (a) the prior probability of the null being true; (b) type-I error rate, (c) type-II error rate, and (d) replication of a result. Through step-by-step simulations using open-source code in the R System of Statistical Computing, we show that uncertainty about the null hypothesis being true often remains high despite a significant result. To help the reader develop intuitions about this common misconception, we provide a Shiny app (https://danielschad.shinyapps.io/probnull/).
We expect that this tutorial will help researchers better understand and judge results from null hypothesis significance tests.}, language = {en} } @article{SchadVasishthHohensteinetal.2020, author = {Schad, Daniel and Vasishth, Shravan and Hohenstein, Sven and Kliegl, Reinhold}, title = {How to capitalize on a priori contrasts in linear (mixed) models}, series = {Journal of memory and language}, volume = {110}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2019.104038}, pages = {40}, year = {2020}, abstract = {Factorial experiments in research on memory, language, and in other areas are often analyzed using analysis of variance (ANOVA). However, for effects with more than one numerator degrees of freedom, e.g., for experimental factors with more than two levels, the ANOVA omnibus F-test is not informative about the source of a main effect or interaction. Because researchers typically have specific hypotheses about which condition means differ from each other, a priori contrasts (i.e., comparisons planned before the sample means are known) between specific conditions or combinations of conditions are the appropriate way to represent such hypotheses in the statistical model. Many researchers have pointed out that contrasts should be "tested instead of, rather than as a supplement to, the ordinary 'omnibus' F test" (Hays, 1973, p. 601). In this tutorial, we explain the mathematics underlying different kinds of contrasts (i.e., treatment, sum, repeated, polynomial, custom, nested, interaction contrasts), discuss their properties, and demonstrate how they are applied in the R System for Statistical Computing (R Core Team, 2018). In this context, we explain the generalized inverse which is needed to compute the coefficients for contrasts that test hypotheses that are not covered by the default set of contrasts. A detailed understanding of contrast coding is crucial for successful and correct specification in linear models (including linear mixed models). Contrasts defined a priori yield far more useful confirmatory tests of experimental hypotheses than standard omnibus F-tests. Reproducible code is available from https://osf.io/7ukf6/.}, language = {en} } @article{SmithVasishth2020, author = {Smith, Garrett and Vasishth, Shravan}, title = {A principled approach to feature selection in models of sentence processing}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {44}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, number = {12}, publisher = {Wiley}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12918}, pages = {25}, year = {2020}, abstract = {Among theories of human language comprehension, cue-based memory retrieval has proven to be a useful framework for understanding when and how processing difficulty arises in the resolution of long-distance dependencies. Most previous work in this area has assumed that very general retrieval cues like [+subject] or [+singular] do the work of identifying (and sometimes misidentifying) a retrieval target in order to establish a dependency between words. 
However, recent work suggests that general, handpicked retrieval cues like these may not be enough to explain illusions of plausibility (Cunnings \& Sturt, 2018), which can arise in sentences like The letter next to the porcelain plate shattered. Capturing such retrieval interference effects requires lexically specific features and retrieval cues, but handpicking the features is hard to do in a principled way and greatly increases modeler degrees of freedom. To remedy this, we use well-established word embedding methods for creating distributed lexical feature representations that encode information relevant for retrieval using distributed retrieval cue vectors. We show that the similarity between the feature and cue vectors (a measure of plausibility) predicts total reading times in Cunnings and Sturt's eye-tracking data. The features can easily be plugged into existing parsing models (including cue-based retrieval and self-organized parsing), putting very different models on more equal footing and facilitating future quantitative comparisons.}, language = {en} } @misc{StoneNicenboimVasishthetal.2022, author = {Stone, Kate and Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Understanding the effects of constraint and predictability in ERP}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {829}, issn = {1866-8364}, doi = {10.25932/publishup-58759}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-587594}, pages = {71}, year = {2022}, abstract = {Intuitively, strongly constraining contexts should lead to stronger probabilistic representations of sentences in memory. Encountering unexpected words could therefore be expected to trigger costlier shifts in these representations than expected words. However, psycholinguistic measures commonly used to study probabilistic processing, such as the N400 event-related potential (ERP) component, are sensitive to word predictability but not to contextual constraint. Some research suggests that constraint-related processing cost may be measurable via an ERP positivity following the N400, known as the anterior post-N400 positivity (PNP). The PNP is argued to reflect update of a sentence representation and to be distinct from the posterior P600, which reflects conflict detection and reanalysis. However, constraint-related PNP findings are inconsistent. We sought to conceptually replicate Federmeier et al. (2007) and Kuperberg et al. (2020), who observed that the PNP, but not the N400 or the P600, was affected by constraint at unexpected but plausible words. Using a pre-registered design and statistical approach maximising power, we demonstrated a dissociated effect of predictability and constraint: strong evidence for predictability but not constraint in the N400 window, and strong evidence for constraint but not predictability in the later window. However, the constraint effect was consistent with a P600 and not a PNP, suggesting increased conflict between a strong representation and unexpected input rather than greater update of the representation. 
We conclude that either a simple strong/weak constraint design is not always sufficient to elicit the PNP, or that previous PNP constraint findings could be an artifact of smaller sample size.}, language = {en} } @article{StoneNicenboimVasishthetal.2023, author = {Stone, Kate and Nicenboim, Bruno and Vasishth, Shravan and R{\"o}sler, Frank}, title = {Understanding the effects of constraint and predictability in ERP}, series = {Neurobiology of language}, volume = {4}, journal = {Neurobiology of language}, number = {2}, publisher = {MIT Press}, address = {Cambridge, MA, USA}, issn = {2641-4368}, doi = {10.1162/nol_a_00094}, pages = {221 -- 256}, year = {2023}, abstract = {Intuitively, strongly constraining contexts should lead to stronger probabilistic representations of sentences in memory. Encountering unexpected words could therefore be expected to trigger costlier shifts in these representations than expected words. However, psycholinguistic measures commonly used to study probabilistic processing, such as the N400 event-related potential (ERP) component, are sensitive to word predictability but not to contextual constraint. Some research suggests that constraint-related processing cost may be measurable via an ERP positivity following the N400, known as the anterior post-N400 positivity (PNP). The PNP is argued to reflect update of a sentence representation and to be distinct from the posterior P600, which reflects conflict detection and reanalysis. However, constraint-related PNP findings are inconsistent. We sought to conceptually replicate Federmeier et al. (2007) and Kuperberg et al. (2020), who observed that the PNP, but not the N400 or the P600, was affected by constraint at unexpected but plausible words. Using a pre-registered design and statistical approach maximising power, we demonstrated a dissociated effect of predictability and constraint: strong evidence for predictability but not constraint in the N400 window, and strong evidence for constraint but not predictability in the later window. However, the constraint effect was consistent with a P600 and not a PNP, suggesting increased conflict between a strong representation and unexpected input rather than greater update of the representation. We conclude that either a simple strong/weak constraint design is not always sufficient to elicit the PNP, or that previous PNP constraint findings could be an artifact of smaller sample size.}, language = {en} } @article{StoneVasishthMalsburg2022, author = {Stone, Kate and Vasishth, Shravan and Malsburg, Titus von der}, title = {Does entropy modulate the prediction of German long-distance verb particles?}, series = {PLOS ONE}, journal = {PLOS ONE}, publisher = {PLOS ONE}, address = {San Francisco, California, US}, issn = {1932-6203}, doi = {10.1371/journal.pone.0267813}, pages = {1 -- 25}, year = {2022}, abstract = {In this paper we examine the effect of uncertainty on readers' predictions about meaning. In particular, we were interested in how uncertainty might influence the likelihood of committing to a specific sentence meaning. We conducted two event-related potential (ERP) experiments using particle verbs such as turn down and manipulated uncertainty by constraining the context such that readers could be either highly certain about the identity of a distant verb particle, such as turn the bed […] down, or less certain due to competing particles, such as turn the music […] up/down. 
The study was conducted in German, where verb particles appear clause-finally and may be separated from the verb by a large amount of material. We hypothesised that this separation would encourage readers to predict the particle, and that high certainty would make prediction of a specific particle more likely than lower certainty. If a specific particle was predicted, this would reflect a strong commitment to sentence meaning that should incur a higher processing cost if the prediction is wrong. If a specific particle was less likely to be predicted, commitment should be weaker and the processing cost of a wrong prediction lower. If true, this could suggest that uncertainty discourages predictions via an unacceptable cost-benefit ratio. However, given the clear predictions made by the literature, it was surprisingly unclear whether the uncertainty manipulation affected the two ERP components studied, the N400 and the PNP. Bayes factor analyses showed that evidence for our a priori hypothesised effect sizes was inconclusive, although there was decisive evidence against a priori hypothesised effect sizes larger than 1μV for the N400 and larger than 3μV for the PNP. We attribute the inconclusive finding to the properties of verb-particle dependencies that differ from the verb-noun dependencies in which the N400 and PNP are often studied.}, language = {en} } @article{StoneVasishthvonderMalsburg2022, author = {Stone, Kate and Vasishth, Shravan and von der Malsburg, Titus Raban}, title = {Does entropy modulate the prediction of German long-distance verb particles?}, series = {PLOS ONE}, volume = {17}, journal = {PLOS ONE}, number = {8}, publisher = {PLOS}, address = {San Francisco, California, US}, issn = {1932-6203}, doi = {10.1371/journal.pone.0267813}, pages = {25}, year = {2022}, abstract = {In this paper we examine the effect of uncertainty on readers' predictions about meaning. In particular, we were interested in how uncertainty might influence the likelihood of committing to a specific sentence meaning. We conducted two event-related potential (ERP) experiments using particle verbs such as turn down and manipulated uncertainty by constraining the context such that readers could be either highly certain about the identity of a distant verb particle, such as turn the bed [...] down, or less certain due to competing particles, such as turn the music [...] up/down. The study was conducted in German, where verb particles appear clause-finally and may be separated from the verb by a large amount of material. We hypothesised that this separation would encourage readers to predict the particle, and that high certainty would make prediction of a specific particle more likely than lower certainty. If a specific particle was predicted, this would reflect a strong commitment to sentence meaning that should incur a higher processing cost if the prediction is wrong. If a specific particle was less likely to be predicted, commitment should be weaker and the processing cost of a wrong prediction lower. If true, this could suggest that uncertainty discourages predictions via an unacceptable cost-benefit ratio. However, given the clear predictions made by the literature, it was surprisingly unclear whether the uncertainty manipulation affected the two ERP components studied, the N400 and the PNP. 
Bayes factor analyses showed that evidence for our a priori hypothesised effect sizes was inconclusive, although there was decisive evidence against a priori hypothesised effect sizes larger than 1 μV for the N400 and larger than 3 μV for the PNP. We attribute the inconclusive finding to the properties of verb-particle dependencies that differ from the verb-noun dependencies in which the N400 and PNP are often studied.}, language = {en} } @article{StoneVerissimoSchadetal.2021, author = {Stone, Kate and Verissimo, Joao and Schad, Daniel J. and Oltrogge, Elise and Vasishth, Shravan and Lago, Sol}, title = {The interaction of grammatically distinct agreement dependencies in predictive processing}, series = {Language, cognition and neuroscience}, volume = {36}, journal = {Language, cognition and neuroscience}, number = {9}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {2327-3798}, doi = {10.1080/23273798.2021.1921816}, pages = {1159 -- 1179}, year = {2021}, abstract = {Previous research has found that comprehenders sometimes predict information that is grammatically unlicensed by sentence constraints. An open question is why such grammatically unlicensed predictions occur. We examined the possibility that unlicensed predictions arise in situations of information conflict, for instance when comprehenders try to predict upcoming words while simultaneously building dependencies with previously encountered elements in memory. German possessive pronouns are a good testing ground for this hypothesis because they encode two grammatically distinct agreement dependencies: a retrospective one between the possessive and its previously mentioned referent, and a prospective one between the possessive and its following nominal head. In two visual world eye-tracking experiments, we estimated the onset of predictive effects in participants' fixations. The results showed that the retrospective dependency affected resolution of the prospective dependency by shifting the onset of predictive effects. We attribute this effect to an interaction between predictive and memory retrieval processes.}, language = {en} } @article{StonevonderMalsburgVasishth2020, author = {Stone, Kate and von der Malsburg, Titus Raban and Vasishth, Shravan}, title = {The effect of decay and lexical uncertainty on processing long-distance dependencies in reading}, series = {PeerJ}, volume = {8}, journal = {PeerJ}, publisher = {PeerJ Inc.}, address = {London}, issn = {2167-8359}, doi = {10.7717/peerj.10438}, pages = {33}, year = {2020}, abstract = {To make sense of a sentence, a reader must keep track of dependent relationships between words, such as between a verb and its particle (e.g. turn the music down). In languages such as German, verb-particle dependencies often span long distances, with the particle only appearing at the end of the clause. This means that it may be necessary to process a large amount of intervening sentence material before the full verb of the sentence is known. To facilitate processing, previous studies have shown that readers can preactivate the lexical information of neighbouring upcoming words, but less is known about whether such preactivation can be sustained over longer distances. We asked the question, do readers preactivate lexical information about long-distance verb particles? In one self-paced reading and one eye tracking experiment, we delayed the appearance of an obligatory verb particle that varied only in the predictability of its lexical identity.
We additionally manipulated the length of the delay in order to test two contrasting accounts of dependency processing: that increased distance between dependent elements may sharpen expectation of the distant word and facilitate its processing (an antilocality effect), or that it may slow processing via temporal activation decay (a locality effect). We isolated decay by delaying the particle with a neutral noun modifier containing no information about the identity of the upcoming particle, and no known sources of interference or working memory load. Under the assumption that readers would preactivate the lexical representations of plausible verb particles, we hypothesised that a smaller number of plausible particles would lead to stronger preactivation of each particle, and thus higher predictability of the target. This in turn should have made predictable target particles more resistant to the effects of decay than less predictable target particles. The eye tracking experiment provided evidence that higher predictability did facilitate reading times, but found evidence against any effect of decay or its interaction with predictability. The self-paced reading study provided evidence against any effect of predictability or temporal decay, or their interaction. In sum, we provide evidence from eye movements that readers preactivate long-distance lexical content and that adding neutral sentence information does not induce detectable decay of this activation. The findings are consistent with accounts suggesting that delaying dependency resolution may only affect processing if the intervening information either confirms expectations or adds to working memory load, and that temporal activation decay alone may not be a major predictor of processing time.}, language = {en} } @article{Vasishth2020, author = {Vasishth, Shravan}, title = {Using approximate Bayesian computation for estimating parameters in the cue-based retrieval model of sentence processing}, series = {MethodsX}, volume = {7}, journal = {MethodsX}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2215-0161}, doi = {10.1016/j.mex.2020.100850}, pages = {6}, year = {2020}, abstract = {A commonly used approach to parameter estimation in computational models is the so-called grid search procedure: the entire parameter space is searched in small steps to determine the parameter value that provides the best fit to the observed data. This approach has several disadvantages: first, it can be computationally very expensive; second, one optimal point value of the parameter is reported as the best fit value; we cannot quantify our uncertainty about the parameter estimate. In the main journal article that this methods article accompanies (Jager et al., 2020, Interference patterns in subject-verb agreement and reflexives revisited: A large-sample study, Journal of Memory and Language), we carried out parameter estimation using Approximate Bayesian Computation (ABC), which is a Bayesian approach that allows us to quantify our uncertainty about the parameter's values given data. This customization has the further advantage that it allows us to generate both prior and posterior predictive distributions of reading times from the cue-based retrieval model of Lewis and Vasishth, 2005.
Instead of the conventional method of using grid search, we use Approximate Bayesian Computation (ABC) for parameter estimation in the [4] model.
The ABC method of parameter estimation has the advantage that the uncertainty of the parameter can be quantified.}, language = {en} } @book{VasishthBroe2011, author = {Vasishth, Shravan and Broe, Michael}, title = {The foundations of statistics: a simulation-based approach}, publisher = {Springer-Verlag Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-642-16312-8}, doi = {10.1007/978-3-642-16313-5}, pages = {178 S.}, year = {2011}, language = {en} } @article{VasishthChenLietal.2013, author = {Vasishth, Shravan and Chen, Zhong and Li, Qiang and Guo, Gueilan}, title = {Processing chinese relative clauses - evidence for the subject-relative advantage}, series = {PLoS one}, volume = {8}, journal = {PLoS one}, number = {10}, publisher = {PLoS}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0077006}, pages = {15}, year = {2013}, abstract = {A general fact about language is that subject relative clauses are easier to process than object relative clauses. Recently, several self-paced reading studies have presented surprising evidence that object relatives in Chinese are easier to process than subject relatives. We carried out three self-paced reading experiments that attempted to replicate these results. Two of our three studies found a subject-relative preference, and the third study found an object-relative advantage. Using a random effects bayesian meta-analysis of fifteen studies (including our own), we show that the overall current evidence for the subject-relative advantage is quite strong (approximate posterior probability of a subject-relative advantage given the data: 78-80\%). We argue that retrieval/integration based accounts would have difficulty explaining all three experimental results. These findings are important because they narrow the theoretical space by limiting the role of an important class of explanation (retrieval/integration cost), at least for relative clause processing in Chinese.}, language = {en} } @article{VasishthGelman2021, author = {Vasishth, Shravan and Gelman, Andrew}, title = {How to embrace variation and accept uncertainty in linguistic and psycholinguistic data analysis}, series = {Linguistics : an interdisciplinary journal of the language sciences}, volume = {59}, journal = {Linguistics : an interdisciplinary journal of the language sciences}, number = {5}, publisher = {De Gruyter Mouton}, address = {Berlin}, issn = {0024-3949}, doi = {10.1515/ling-2019-0051}, pages = {1311 -- 1342}, year = {2021}, abstract = {The use of statistical inference in linguistics and related areas like psychology typically involves a binary decision: either reject or accept some null hypothesis using statistical significance testing. When statistical power is low, this frequentist data-analytic approach breaks down: null results are uninformative, and effect size estimates associated with significant results are overestimated. Using an example from psycholinguistics, several alternative approaches are demonstrated for reporting inconsistencies between the data and a theoretical prediction. The key here is to focus on committing to a falsifiable prediction, on quantifying uncertainty statistically, and learning to accept the fact that - in almost all practical data analysis situations - we can only draw uncertain conclusions from data, regardless of whether we manage to obtain statistical significance or not.
A focus on uncertainty quantification is likely to lead to fewer excessively bold claims that, on closer investigation, may turn out not to be supported by the data.}, language = {en} } @article{VasishthLewis2006, author = {Vasishth, Shravan and Lewis, Richard L.}, title = {Argument-head distance and processing complexity: Explaining both locality and antilocality effects}, series = {Language : journal of the Linguistic Society of America}, volume = {82}, journal = {Language : journal of the Linguistic Society of America}, number = {4}, publisher = {Linguistic Society of America}, address = {Washington}, issn = {0097-8507}, doi = {10.1353/lan.2006.0236}, pages = {767 -- 794}, year = {2006}, abstract = {Although proximity between arguments and verbs (locality) is a relatively robust determinant of sentence-processing difficulty (Hawkins 1998, 2001, Gibson 2000), increasing argument-verb distance can also facilitate processing (Konieczny 2000). We present two self-paced reading (SPR) experiments involving Hindi that provide further evidence of antilocality, and a third SPR experiment which suggests that similarity-based interference can attenuate this distance-based facilitation. A unified explanation of interference, locality, and antilocality effects is proposed via an independently motivated theory of activation decay and retrieval interference (Anderson et al. 2004).}, language = {en} } @article{VasishthMertzenJaegeretal.2018, author = {Vasishth, Shravan and Mertzen, Daniela and Jaeger, Lena A. and Gelman, Andrew}, title = {The statistical significance filter leads to overoptimistic expectations of replicability}, series = {Journal of memory and language}, volume = {103}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2018.07.004}, pages = {151 -- 175}, year = {2018}, abstract = {It is well-known in statistics (e.g., Gelman \& Carlin, 2014) that treating a result as publishable just because the p-value is less than 0.05 leads to overoptimistic expectations of replicability. These overestimated effects get published, leading to an overconfident belief in replicability. We demonstrate the adverse consequences of this statistical significance filter by conducting seven direct replication attempts (268 participants in total) of a recent paper (Levy \& Keller, 2013). We show that the published claims are so noisy that even non-significant results are fully compatible with them. We also demonstrate the contrast between such small-sample studies and a larger-sample study; the latter generally yields a less noisy estimate but also a smaller effect magnitude, which looks less compelling but is more realistic. We reiterate several suggestions from the methodology literature for improving current practices.}, language = {en} } @misc{VasishthMertzenJaegeretal.2018, author = {Vasishth, Shravan and Mertzen, Daniela and J{\"a}ger, Lena Ann and Gelman, Andrew}, title = {Corrigendum to: Shravan Vasishth, Daniela Mertzen, Lena A. J{\"a}ger, Andrew Gelman; The statistical significance filter leads to overoptimistic expectations of replicability. - Journal of Memory and Language. - 103 (2018), pg.
151 - 175}, series = {Journal of memory and language}, volume = {104}, journal = {Journal of memory and language}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2018.09.004}, pages = {128 -- 128}, year = {2018}, language = {en} } @article{VasishthNicenboimBeckmanetal.2018, author = {Vasishth, Shravan and Nicenboim, Bruno and Beckman, Mary E. and Li, Fangfang and Kong, Eun Jong}, title = {Bayesian data analysis in the phonetic sciences}, series = {Journal of phonetics}, volume = {71}, journal = {Journal of phonetics}, publisher = {Elsevier}, address = {London}, issn = {0095-4470}, doi = {10.1016/j.wocn.2018.07.008}, pages = {147 -- 161}, year = {2018}, abstract = {This tutorial analyzes voice onset time (VOT) data from Dongbei (Northeastern) Mandarin Chinese and North American English to demonstrate how Bayesian linear mixed models can be fit using the programming language Stan via the R package brms. Through this case study, we demonstrate some of the advantages of the Bayesian framework: researchers can (i) flexibly define the underlying process that they believe to have generated the data; (ii) obtain direct information regarding the uncertainty about the parameter that relates the data to the theoretical question being studied; and (iii) incorporate prior knowledge into the analysis. Getting started with Bayesian modeling can be challenging, especially when one is trying to model one's own (often unique) data. It is difficult to see how one can apply general principles described in textbooks to one's own specific research problem. We address this barrier to using Bayesian methods by providing three detailed examples, with source code to allow easy reproducibility. The examples presented are intended to give the reader a flavor of the process of model-fitting; suggestions for further study are also provided. All data and code are available from: https://osf.io/g4zpv.}, language = {en} } @misc{VasishthNicenboimEngelmannetal.2019, author = {Vasishth, Shravan and Nicenboim, Bruno and Engelmann, Felix and Burchert, Frank}, title = {Computational Models of Retrieval Processes in Sentence Processing}, series = {Trends in Cognitive Sciences}, volume = {23}, journal = {Trends in Cognitive Sciences}, number = {11}, publisher = {Elsevier}, address = {London}, issn = {1364-6613}, doi = {10.1016/j.tics.2019.09.003}, pages = {968 -- 982}, year = {2019}, abstract = {Sentence comprehension requires that the comprehender work out who did what to whom. This process has been characterized as retrieval from memory. This review summarizes the quantitative predictions and empirical coverage of the two existing computational models of retrieval and shows how the predictive performance of these two competing models can be tested against a benchmark data-set. We also show how computational modeling can help us better understand sources of variability in both unimpaired and impaired sentence comprehension.}, language = {en} } @article{VasishthSuckowLewisetal.2010, author = {Vasishth, Shravan and Suckow, Katja and Lewis, Richard L. and Kern, Sabine}, title = {Short-term forgetting in sentence comprehension : crosslinguistic evidence from verb-final structures}, issn = {0169-0965}, doi = {10.1080/01690960903310587}, year = {2010}, abstract = {Seven experiments using self-paced reading and eyetracking suggest that omitting the middle verb in a double centre embedding leads to easier processing in English but leads to greater difficulty in German. 
One commonly accepted explanation for the English pattern, based on data from offline acceptability ratings and due to Gibson and Thomas (1999), is that working-memory overload leads the comprehender to forget the prediction of the upcoming verb phrase (VP), which reduces working-memory load. We show that this VP-forgetting hypothesis does an excellent job of explaining the English data, but cannot account for the German results. We argue that the English and German results can be explained by the parser's adaptation to the grammatical properties of the languages; in contrast to English, German subordinate clauses always have the verb in clause-final position, and this property of German may lead the German parser to maintain predictions of upcoming VPs more robustly than in English. The evidence thus argues against language-independent forgetting effects in online sentence processing; working-memory constraints can be conditioned by countervailing influences deriving from grammatical properties of the language under study.}, language = {en} } @misc{VasishthvonderMalsburgEngelmann2013, author = {Vasishth, Shravan and von der Malsburg, Titus Raban and Engelmann, Felix}, title = {What eye movements can tell us about sentence comprehension}, series = {Wiley interdisciplinary reviews : Cognitive Science}, volume = {4}, journal = {Wiley interdisciplinary reviews : Cognitive Science}, number = {2}, publisher = {Wiley}, address = {San Francisco}, issn = {1939-5078}, doi = {10.1002/wcs.1209}, pages = {125 -- 134}, year = {2013}, abstract = {Eye movement data have proven to be very useful for investigating human sentence processing. Eyetracking research has addressed a wide range of questions, such as recovery mechanisms following garden-pathing, the timing of processes driving comprehension, the role of anticipation and expectation in parsing, the role of semantic, pragmatic, and prosodic information, and so on. However, there are some limitations regarding the inferences that can be made on the basis of eye movements. One relates to the nontrivial interaction between parsing and the eye movement control system, which complicates the interpretation of eye movement data. Detailed computational models that integrate parsing with eye movement control theories have the potential to unpack the complexity of eye movement data and can therefore aid in the interpretation of eye movements. Another limitation is the difficulty of capturing spatiotemporal patterns in eye movements using the traditional word-based eyetracking measures. Recent research has demonstrated the relevance of these patterns and has shown how they can be analyzed. In this review, we focus on reading, and present examples demonstrating how eye movement data reveal what events unfold when the parser runs into difficulty, and how the parsing system interacts with eye movement control.
}, language = {en} } @article{vonderMalsburgKlieglVasishth2015, author = {von der Malsburg, Titus Raban and Kliegl, Reinhold and Vasishth, Shravan}, title = {Determinants of Scanpath Regularity in Reading}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {39}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, number = {7}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12208}, pages = {1675 -- 1703}, year = {2015}, abstract = {Scanpaths have played an important role in classic research on reading behavior. Nevertheless, they have largely been neglected in later research, perhaps due to a lack of suitable analytical tools. Recently, von der Malsburg and Vasishth (2011) proposed a new measure for quantifying differences between scanpaths and demonstrated that this measure can recover effects that were missed with the traditional eyetracking measures. However, the sentences used in that study were difficult to process, and the scanpath effects were accordingly strong. The purpose of the present study was to test the validity, sensitivity, and scope of applicability of the scanpath measure, using simple sentences that are typically read from left to right. We derived predictions for the regularity of scanpaths from the literature on oculomotor control, sentence processing, and cognitive aging and tested these predictions using the scanpath measure and a large database of eye movements. All predictions were confirmed: Sentences with short words and syntactically more difficult sentences elicited more irregular scanpaths. Also, older readers produced more irregular scanpaths than younger readers. In addition, we found an effect that was not reported earlier: Syntax had a smaller influence on the eye movements of older readers than on those of young readers. We discuss this interaction of syntactic parsing cost with age in terms of shifts in processing strategies and a decline of executive control as readers age. Overall, our results demonstrate the validity and sensitivity of the scanpath measure and thus establish it as a productive and versatile tool for reading research.}, language = {en} } @article{vonderMalsburgVasishth2013, author = {von der Malsburg, Titus Raban and Vasishth, Shravan}, title = {Scanpaths reveal syntactic underspecification and reanalysis strategies}, series = {Language and cognitive processes}, volume = {28}, journal = {Language and cognitive processes}, number = {10}, publisher = {Wiley}, address = {Hove}, issn = {0169-0965}, doi = {10.1080/01690965.2012.728232}, pages = {1545 -- 1578}, year = {2013}, abstract = {What theories best characterise the parsing processes triggered upon encountering ambiguity, and what effects do these processes have on eye movement patterns in reading? The present eye-tracking study, which investigated processing of attachment ambiguities of an adjunct in Spanish, suggests that readers sometimes underspecify attachment to save memory resources, consistent with the good-enough account of parsing.
Our results confirm a surprising prediction of the good-enough account: high-capacity readers commit to an attachment decision more often than low-capacity participants, leading to more errors and a greater need to reanalyse in garden-path sentences. These results emerged only when we separated functionally different types of regressive eye movements using a scanpath analysis; conventional eye-tracking measures alone would have led to different conclusions. The scanpath analysis also showed that rereading was the dominant strategy for recovering from garden-pathing. Our results may also have broader implications for models of reading processes: reanalysis effects in eye movements occurred late, which suggests that the coupling of oculo-motor control and the parser may not be as tight as assumed in current computational models of eye movement control in reading.}, language = {en} } @article{vonderMalsburgVasishth2011, author = {von der Malsburg, Titus Raban and Vasishth, Shravan}, title = {What is the scanpath signature of syntactic reanalysis?}, series = {Journal of memory and language}, volume = {65}, journal = {Journal of memory and language}, number = {2}, publisher = {Elsevier}, address = {San Diego}, issn = {0749-596X}, doi = {10.1016/j.jml.2011.02.004}, pages = {109 -- 127}, year = {2011}, abstract = {Which repair strategy does the language system deploy when it gets garden-pathed, and what can regressive eye movements in reading tell us about reanalysis strategies? Several influential eye-tracking studies on syntactic reanalysis (Frazier \& Rayner, 1982; Meseguer, Carreiras, \& Clifton, 2002; Mitchell, Shen, Green, \& Hodgson, 2008) have addressed this question by examining scanpaths, i.e., sequential patterns of eye fixations. However, in the absence of a suitable method for analyzing scanpaths, these studies relied on simplified dependent measures that are arguably ambiguous and hard to interpret. We address the theoretical question of repair strategy by developing a new method that quantifies scanpath similarity. Our method reveals several distinct fixation strategies associated with reanalysis that went undetected in a previously published data set (Meseguer et al., 2002). One prevalent pattern suggests re-parsing of the sentence, a strategy that has been discussed in the literature (Frazier \& Rayner, 1982); however, readers differed tremendously in how they orchestrated the various fixation strategies. 
Our results suggest that the human parsing system non-deterministically adopts different strategies when confronted with the disambiguating material in garden-path sentences.}, language = {en} } @article{WuKaiserVasishth2017, author = {Wu, Fuyun and Kaiser, Elsi and Vasishth, Shravan}, title = {Effects of early cues on the processing of Chinese relative clauses}, series = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, volume = {42}, journal = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society}, publisher = {Wiley}, address = {Hoboken}, issn = {0364-0213}, doi = {10.1111/cogs.12551}, pages = {1101 -- 1133}, year = {2017}, abstract = {We used Chinese prenominal relative clauses (RCs) to test the predictions of two competing accounts of sentence comprehension difficulty: the experience-based account of Levy and the Dependency Locality Theory (DLT; Gibson). Given that in Chinese RCs, a classifier and/or a passive marker BEI can be added to the sentence-initial position, we manipulated the presence/absence of classifiers and the presence/absence of BEI, such that BEI sentences were passivized subject-extracted RCs, and no-BEI sentences were standard object-extracted RCs. We conducted two self-paced reading experiments, using the same critical stimuli but somewhat different filler items. Reading time patterns from both experiments showed facilitative effects of BEI within and beyond RC regions, and delayed facilitative effects of classifiers, suggesting that cues that occur before a clear signal of an upcoming RC can help Chinese comprehenders to anticipate RC structures. The data patterns are not predicted by the DLT, but they are consistent with the predictions of experience-based theories.}, language = {en} }