@article{LogacevVasishth2016a,
  author    = {Logacev, Pavel and Vasishth, Shravan},
  title     = {A Multiple-Channel Model of Task-Dependent Ambiguity Resolution in Sentence Comprehension},
  journal   = {Cognitive Science},
  volume    = {40},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken},
  issn      = {0364-0213},
  doi       = {10.1111/cogs.12228},
  pages     = {266--298},
  year      = {2016},
  abstract  = {Traxler, Pickering, and Clifton (1998) found that ambiguous sentences are read faster than their unambiguous counterparts. This so-called ambiguity advantage has presented a major challenge to classical theories of human sentence comprehension (parsing) because its most prominent explanation, in the form of the unrestricted race model (URM), assumes that parsing is non-deterministic. Recently, Swets, Desmet, Clifton, and Ferreira (2008) have challenged the URM. They argue that readers strategically underspecify the representation of ambiguous sentences to save time, unless disambiguation is required by task demands. When disambiguation is required, however, readers assign sentences full structure, and Swets et al. provide experimental evidence to this effect. On the basis of their findings, they argue against the URM and in favor of a model of task-dependent sentence comprehension. We show through simulations that the Swets et al. data do not constitute evidence for task-dependent parsing because they can be explained by the URM. However, we provide decisive evidence from a German self-paced reading study consistent with Swets et al.'s general claim about task-dependent parsing. Specifically, we show that under certain conditions, ambiguous sentences can be read more slowly than their unambiguous counterparts, suggesting that the parser may create several parses when required. Finally, we present the first quantitative model of task-driven disambiguation that subsumes the URM, and we show that it can explain both Swets et al.'s results and our findings.},
  language  = {en}
}

@article{LogacevVasishth2016b,
  author    = {Logacev, Pavel and Vasishth, Shravan},
  title     = {Understanding underspecification: A comparison of two computational implementations},
  journal   = {The Quarterly Journal of Experimental Psychology},
  volume    = {69},
  publisher = {Taylor \& Francis},
  address   = {Abingdon},
  issn      = {1747-0218},
  doi       = {10.1080/17470218.2015.1134602},
  pages     = {996--1012},
  year      = {2016},
  abstract  = {Swets et al. (2008, Underspecification of syntactic ambiguities: Evidence from self-paced reading, Memory and Cognition, 36(1), 201-216) presented evidence that the so-called ambiguity advantage [Traxler, Pickering, \& Clifton (1998), Adjunct attachment is not a form of lexical ambiguity resolution, Journal of Memory and Language, 39(4), 558-592, doi: 10.1006/jmla.1998.2600], which has been explained in terms of the unrestricted race model, can equally well be explained by assuming underspecification in ambiguous conditions driven by task demands. Specifically, if comprehension questions require that ambiguities be resolved, the parser tends to make an attachment; when questions are about superficial aspects of the target sentence, readers tend to pursue an underspecification strategy. It is reasonable to assume that individual differences will play a significant role in the application of such strategies, so that studying average behaviour alone may not be informative. In order to study the predictions of the good-enough processing theory, we implemented two versions of underspecification: the partial specification model (PSM), which is an implementation of the Swets et al. proposal, and a more parsimonious version, the non-specification model (NSM). We evaluate the relative fit of these two kinds of underspecification to Swets et al.'s data; as a baseline, we also fitted three models that assume no underspecification. We find that a model without underspecification provides a somewhat better fit than either underspecification model, while the NSM provides a better fit than the PSM. We interpret the results as a lack of unambiguous evidence in favour of underspecification; however, given the considerable existing evidence for good-enough processing in the literature, it is reasonable to assume that some underspecification might occur. Under this assumption, the results can be interpreted as tentative evidence for the NSM over the PSM. More generally, our work provides a method for choosing between models of real-time processes in sentence comprehension that make qualitative predictions about the relationship between several dependent variables. We believe that sentence processing research will greatly benefit from a wider use of such methods.},
  language  = {en}
}

@misc{NicenboimLogacevGatteietal.2016a,
  author   = {Nicenboim, Bruno and Logacev, Pavel and Gattei, Carolina and Vasishth, Shravan},
  title    = {When High-Capacity Readers Slow Down and Low-Capacity Readers Speed Up},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90663},
  pages    = {1--24},
  year     = {2016},
  abstract = {We examined the effects of argument-head distance in SVO and SOV languages (Spanish and German), while taking into account readers' working memory capacity and controlling for expectation (Levy, 2008) and other factors. We predicted only locality effects, that is, a slowdown produced by increased dependency distance (Gibson, 2000; Lewis and Vasishth, 2005). Furthermore, we expected stronger locality effects for readers with low working memory capacity. Contrary to our predictions, low-capacity readers showed faster reading with increased distance, while high-capacity readers showed locality effects. We suggest that while the locality effects are compatible with memory-based explanations, the speedup of low-capacity readers can be explained by an increased probability of retrieval failure. We present a computational model based on ACT-R built on these assumptions, which gives a qualitative account of the present data and can be tested in future research.
Our results suggest that in some cases, interpreting longer RTs as indexing increased processing difficulty and shorter RTs as facilitation may be too simplistic: the same increase in processing difficulty may lead to slowdowns in high-capacity readers and speedups in low-capacity ones. Ignoring individual-level capacity differences when investigating locality effects may lead to misleading conclusions.},
  language = {en}
}

@article{NicenboimLogacevGatteietal.2016b,
  author    = {Nicenboim, Bruno and Logacev, Pavel and Gattei, Carolina and Vasishth, Shravan},
  title     = {When High-Capacity Readers Slow Down and Low-Capacity Readers Speed Up},
  journal   = {Frontiers in Psychology},
  volume    = {7},
  publisher = {Frontiers Research Foundation},
  address   = {Lausanne},
  issn      = {1664-1078},
  doi       = {10.3389/fpsyg.2016.00280},
  pages     = {1--24},
  year      = {2016},
  abstract  = {We examined the effects of argument-head distance in SVO and SOV languages (Spanish and German), while taking into account readers' working memory capacity and controlling for expectation (Levy, 2008) and other factors. We predicted only locality effects, that is, a slowdown produced by increased dependency distance (Gibson, 2000; Lewis and Vasishth, 2005). Furthermore, we expected stronger locality effects for readers with low working memory capacity. Contrary to our predictions, low-capacity readers showed faster reading with increased distance, while high-capacity readers showed locality effects. We suggest that while the locality effects are compatible with memory-based explanations, the speedup of low-capacity readers can be explained by an increased probability of retrieval failure. We present a computational model based on ACT-R built on these assumptions, which gives a qualitative account of the present data and can be tested in future research. Our results suggest that in some cases, interpreting longer RTs as indexing increased processing difficulty and shorter RTs as facilitation may be too simplistic: the same increase in processing difficulty may lead to slowdowns in high-capacity readers and speedups in low-capacity ones. Ignoring individual-level capacity differences when investigating locality effects may lead to misleading conclusions.},
  language  = {en}
}