@article{LogacevVasishth2016,
  author    = {Logacev, Pavel and Vasishth, Shravan},
  title     = {A Multiple-Channel Model of Task-Dependent Ambiguity Resolution in Sentence Comprehension},
  journal   = {Cognitive Science},
  volume    = {40},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken},
  issn      = {0364-0213},
  doi       = {10.1111/cogs.12228},
  pages     = {266--298},
  year      = {2016},
  abstract  = {Traxler, Pickering, and Clifton (1998) found that ambiguous sentences are read faster than their unambiguous counterparts. This so-called ambiguity advantage has presented a major challenge to classical theories of human sentence comprehension (parsing) because its most prominent explanation, in the form of the unrestricted race model (URM), assumes that parsing is non-deterministic. Recently, Swets, Desmet, Clifton, and Ferreira (2008) have challenged the URM. They argue that readers strategically underspecify the representation of ambiguous sentences to save time, unless disambiguation is required by task demands. When disambiguation is required, however, readers assign sentences full structure, and Swets et al. provide experimental evidence to this end. On the basis of their findings, they argue against the URM and in favor of a model of task-dependent sentence comprehension. We show through simulations that the Swets et al. data do not constitute evidence for task-dependent parsing because they can be explained by the URM. However, we provide decisive evidence from a German self-paced reading study consistent with Swets et al.'s general claim about task-dependent parsing. Specifically, we show that under certain conditions, ambiguous sentences can be read more slowly than their unambiguous counterparts, suggesting that the parser may create several parses when required. Finally, we present the first quantitative model of task-driven disambiguation that subsumes the URM, and we show that it can explain both Swets et al.'s results and our findings.},
  language  = {en}
}

@article{GoetheOberauerKliegl2016,
  author    = {G{\"o}the, Katrin and Oberauer, Klaus and Kliegl, Reinhold},
  title     = {Eliminating dual-task costs by minimizing crosstalk between tasks: The role of modality and feature pairings},
  journal   = {Cognition},
  volume    = {150},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0010-0277},
  doi       = {10.1016/j.cognition.2016.02.003},
  pages     = {92--108},
  year      = {2016},
  abstract  = {We tested the independent influences of two content-based factors on dual-task costs and on the ability to process two tasks in parallel: the pairing of S-R modalities and the pairing of relevant features between stimuli and responses of two tasks. The two pairing factors were realized across four dual-task groups. Within each group, the two tasks comprised two different stimulus modalities (visual and auditory), two different relevant stimulus features (spatial and verbal), and two response modalities (manual and vocal).
Pairings of S-R modalities (standard: visual-manual and auditory-vocal; non-standard: visual-vocal and auditory-manual) and feature pairings (standard: spatial-manual and verbal-vocal; non-standard: spatial-vocal and verbal-manual) varied across groups. All participants practiced their respective dual-task combination in a paradigm with simultaneous stimulus onset before being transferred to a psychological refractory period paradigm varying stimulus-onset asynchrony. A comparison at the end of practice revealed similar dual-task costs and similar pairing effects in both paradigms. Dual-task costs depended on modality and feature pairings. Groups training with non-standard feature pairings (i.e., verbal stimulus features mapped to spatially separated response keys, or spatial stimulus features mapped to verbal responses) and non-standard modality pairings (i.e., auditory stimuli mapped to manual responses, or visual stimuli mapped to vocal responses) had higher dual-task costs than the respective standard pairings. In contrast, irrespective of modality pairing, dual-task costs virtually disappeared with standard feature pairings after practice in both paradigms. The results can be explained by crosstalk between feature-binding processes for the two tasks: crosstalk was present for non-standard but absent for standard feature pairings. Therefore, standard feature pairings enabled parallel processing at the end of practice.},
  language  = {en}
}