@article{KruegelVituEngbert2012,
  author    = {Kr{\"u}gel, Andr{\'e} and Vitu, Fran{\c{c}}oise and Engbert, Ralf},
  title     = {Fixation positions after skipping saccades: a single space makes a large difference},
  series    = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.},
  volume    = {74},
  journal   = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.},
  number    = {8},
  publisher = {Springer},
  address   = {New York},
  issn      = {1943-3921},
  doi       = {10.3758/s13414-012-0365-1},
  pages     = {1556 -- 1561},
  year      = {2012},
  abstract  = {During reading, saccadic eye movements are generated to shift words into the center of the visual field for lexical processing. Recently, Kr{\"u}gel and Engbert (Vision Research 50:1532-1539, 2010) demonstrated that within-word fixation positions are largely shifted to the left after skipped words. However, explanations of the origin of this effect cannot be drawn from normal reading data alone. Here we show that the large effect of skipped words on the distribution of within-word fixation positions is primarily based on rather subtle differences in the low-level visual information acquired before saccades. Using arrangements of "x" letter strings, we reproduced the effect of skipped character strings in a highly controlled single-saccade task. Our results demonstrate that the effect of skipped words in reading is the signature of a general visuomotor phenomenon. Moreover, our findings extend beyond the scope of the widely accepted range-error model, which posits that within-word fixation positions in reading depend solely on the distances of target words. We expect that our results will provide critical boundary conditions for the development of visuomotor models of saccade planning during reading.},
  language  = {en}
}

@article{TsaiKlieglYan2012,
  author    = {Tsai, Jie-Li and Kliegl, Reinhold and Yan, Ming},
  title     = {Parafoveal semantic information extraction in traditional Chinese reading},
  series    = {Acta psychologica : international journal of psychonomics},
  volume    = {141},
  journal   = {Acta psychologica : international journal of psychonomics},
  number    = {1},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0001-6918},
  doi       = {10.1016/j.actpsy.2012.06.004},
  pages     = {17 -- 23},
  year      = {2012},
  abstract  = {Semantic information extraction from the parafovea has been reported only in simplified Chinese for a special subset of characters, and its generalizability has been questioned. This study uses traditional Chinese, which differs from simplified Chinese in visual complexity and in mapping semantic forms, to demonstrate access to parafoveal semantic information during reading of this script. Preview duration modulates various types (identical, phonological, and unrelated) of parafoveal information extraction. Parafoveal semantic extraction is more elusive in English; therefore, we conclude that such effects in Chinese are presumably caused by substantial cross-language differences from alphabetic scripts. The property of Chinese characters carrying rich lexical information in a small region provides the possibility of semantic extraction in the parafovea.},
  language  = {en}
}

@phdthesis{Trukenbrod2012,
  author    = {Trukenbrod, Hans Arne},
  title     = {Temporal and spatial aspects of eye-movement control: from reading to scanning},
  url       = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70206},
  school    = {Universit{\"a}t Potsdam},
  year      = {2012},
  abstract  = {Eye movements are a powerful tool to examine cognitive processes.
However, in most paradigms little is known about the dynamics present in sequences of saccades and fixations. In particular, the control of fixation durations has been widely neglected in most tasks. As a notable exception, both spatial and temporal aspects of eye-movement control have been thoroughly investigated during reading. There, the scientific discourse was dominated by three controversies: (i) the role of oculomotor vs. cognitive processing in eye-movement control, (ii) the serial vs. parallel processing of words, and (iii) the control of fixation durations. The main purpose of this thesis was to investigate eye movements in tasks that require sequences of fixations and saccades. While reading phenomena served as a starting point, we examined eye guidance in non-reading tasks with the aim of identifying general principles of eye-movement control. In addition, the investigation of eye movements in non-reading tasks helped refine our knowledge about eye-movement control during reading. Our approach included the investigation of eye movements in non-reading experiments as well as the evaluation and development of computational models. I present three main results: First, oculomotor phenomena during reading can also be observed in non-reading tasks (Chapters 2 \& 4). Oculomotor processes determine the fixation position within an object. The fixation position, in turn, modulates both the next saccade target and the current fixation duration. Second, predictions of eye-movement models based on sequential attention shifts were falsified (Chapter 3). In fact, our results suggest that distributed processing of multiple objects forms the basis of eye-movement control. Third, fixation durations are under asymmetric control (Chapter 4). While increasing processing demands immediately prolong fixation durations, decreasing processing demands reduce fixation durations only with a temporal delay. We propose a computational model, ICAT, to account for this asymmetric control. In this model, an autonomous timer initiates saccades after random time intervals, independent of ongoing processing. However, processing demands that are higher than expected inhibit the execution of the next saccade and thereby prolong the current fixation. On the other hand, lower processing demands will not affect the duration before the next saccade is executed. Since the autonomous timer adjusts to expected processing demands from fixation to fixation, a decrease in processing demands may lead to a temporally delayed reduction of fixation durations. In an extended version of ICAT, we evaluated its performance while simulating both temporal and spatial aspects of eye-movement control. The eye-movement phenomena investigated in this thesis have now been observed in a number of different tasks, which suggests that they represent general principles of eye guidance. I propose that distributed processing of the visual input forms the basis of eye-movement control, while fixation durations are controlled by the principles outlined in ICAT. In addition, oculomotor control contributes considerably to the variability observed in eye movements.
Interpretations of the relation between eye movements and cognition strongly benefit from a precise understanding of this interplay.},
  language  = {en}
}

@article{SchadEngbert2012,
  author    = {Schad, Daniel and Engbert, Ralf},
  title     = {The zoom lens of attention: Simulating shuffled versus normal text reading using the SWIFT model},
  series    = {Visual cognition},
  volume    = {20},
  journal   = {Visual cognition},
  number    = {4-5},
  publisher = {Wiley},
  address   = {Hove},
  issn      = {1350-6285},
  doi       = {10.1080/13506285.2012.670143},
  pages     = {391 -- 421},
  year      = {2012},
  abstract  = {Assumptions on the allocation of attention during reading are crucial for theoretical models of eye guidance. The zoom lens model of attention postulates that attentional deployment can vary from a sharp focus to a broad window. The model is closely related to the foveal load hypothesis, i.e., the assumption that the perceptual span is modulated by the difficulty of the fixated word. However, these important theoretical concepts for cognitive research have not been tested quantitatively in eye-movement models. Here we show that the zoom lens model, implemented in the SWIFT model of saccade generation, captures many important patterns of eye movements. We compared the model's performance to experimental data from normal and shuffled text reading. Our results demonstrate that the zoom lens of attention might be an important concept for eye-movement control in reading.},
  language  = {en}
}

@article{SchadNuthmannEngbert2012,
  author    = {Schad, Daniel and Nuthmann, Antje and Engbert, Ralf},
  title     = {Your mind wanders weakly, your mind wanders deeply: Objective measures reveal mindless reading at different levels},
  series    = {Cognition : international journal of cognitive science},
  volume    = {125},
  journal   = {Cognition : international journal of cognitive science},
  number    = {2},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0010-0277},
  doi       = {10.1016/j.cognition.2012.07.004},
  pages     = {179 -- 194},
  year      = {2012},
  abstract  = {When the mind wanders, attention turns away from the external environment and cognitive processing is decoupled from perceptual information. Mind wandering is usually treated as a dichotomy (dichotomy-hypothesis), and is often measured using self-reports. Here, we propose the levels of inattention hypothesis, which postulates attentional decoupling to graded degrees at different hierarchical levels of cognitive processing. To measure graded levels of attentional decoupling during reading, we introduce the sustained attention to stimulus task (SAST), which is based on the psychophysics of error detection. Under experimental conditions likely to induce mind wandering, we found that subjects were less likely to notice errors that required high-level processing for their detection, as opposed to errors that only required low-level processing. Eye tracking revealed that, before errors were overlooked, influences of high- and low-level linguistic variables on eye fixations were reduced in a graded fashion, indicating episodes of mindless reading at weak and deep levels. Individual fixation durations predicted overlooking of lexical errors 5 s before they occurred. Our findings support the levels of inattention hypothesis and suggest that different levels of mindless reading can be measured behaviorally in the SAST. Using eye tracking to detect mind wandering online represents a promising approach for the development of new techniques to study mind wandering and to ameliorate its negative consequences.},
  language  = {en}
}