@article{LaubrockCajarEngbert2013,
  author    = {Laubrock, Jochen and Cajar, Anke and Engbert, Ralf},
  title     = {Control of fixation duration during scene viewing by interaction of foveal and peripheral processing},
  journal   = {Journal of Vision},
  volume    = {13},
  number    = {12},
  publisher = {Association for Research in Vision and Ophthalmology},
  address   = {Rockville},
  issn      = {1534-7362},
  doi       = {10.1167/13.12.11},
  pages     = {20},
  year      = {2013},
  abstract  = {Processing in our visual system is functionally segregated, with the fovea specialized in processing fine detail (high spatial frequencies) for object identification, and the periphery in processing coarse information (low frequencies) for spatial orienting and saccade target selection. Here we investigate the consequences of this functional segregation for the control of fixation durations during scene viewing. Using gaze-contingent displays, we applied high-pass or low-pass filters to either the central or the peripheral visual field and compared eye-movement patterns with an unfiltered control condition. In contrast with predictions from functional segregation, fixation durations were unaffected when the critical information for vision was strongly attenuated (foveal low-pass and peripheral high-pass filtering); fixation durations increased, however, when useful information was left mostly intact by the filter (foveal high-pass and peripheral low-pass filtering). These patterns of results are difficult to explain under the assumption that fixation durations are controlled by foveal processing difficulty. As an alternative explanation, we developed the hypothesis that the interaction of foveal and peripheral processing controls fixation duration. To investigate the viability of this explanation, we implemented a computational model with two compartments, approximating spatial aspects of processing by foveal and peripheral activations that change according to a small set of dynamical rules. The model reproduced distributions of fixation durations from all experimental conditions by variation of a few parameters that were affected by specific filtering conditions.},
  language  = {en}
}

@article{LangeEngbert2013,
  author    = {Lange, Elke B. and Engbert, Ralf},
  title     = {Differentiating between verbal and spatial encoding using eye-movement recordings},
  journal   = {The Quarterly Journal of Experimental Psychology},
  volume    = {66},
  number    = {9},
  publisher = {Taylor \& Francis},
  address   = {Hove},
  issn      = {1747-0218},
  doi       = {10.1080/17470218.2013.772214},
  pages     = {1840--1857},
  year      = {2013},
  abstract  = {Visual information processing is guided by an active mechanism generating saccadic eye movements to salient stimuli. Here we investigate the specific contribution of saccades to memory encoding of verbal and spatial properties in a serial recall task. In the first experiment, participants moved their eyes freely without specific instruction. We demonstrate the existence of qualitative differences in eye-movement strategies during verbal and spatial memory encoding. While verbal memory encoding was characterized by shifting the gaze to the to-be-encoded stimuli, saccadic activity was suppressed during spatial encoding. In the second experiment, participants were required to suppress saccades by fixating centrally during encoding or to make precise saccades onto the memory items.
Active suppression of saccades had no effect on memory performance, but tracking the upcoming stimuli decreased memory performance dramatically in both tasks, indicating a resource bottleneck between display-controlled saccade execution and memory encoding. We conclude that optimized encoding strategies for verbal and spatial features underlie memory performance in serial recall, but such strategies work only on an involuntary level and do not support memory encoding when they are explicitly required by the task.},
  language  = {en}
}

@article{EngelmannVasishthEngbertetal.2013,
  author    = {Engelmann, Felix and Vasishth, Shravan and Engbert, Ralf and Kliegl, Reinhold},
  title     = {A framework for modeling the interaction of syntactic processing and eye movement control},
  journal   = {Topics in Cognitive Science},
  volume    = {5},
  number    = {3},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken},
  issn      = {1756-8757},
  doi       = {10.1111/tops.12026},
  pages     = {452--474},
  year      = {2013},
  abstract  = {We explore the interaction between oculomotor control and language comprehension at the sentence level using two well-tested computational accounts of parsing difficulty. Previous work (Boston, Hale, Vasishth, \& Kliegl, 2011) has shown that surprisal (Hale, 2001; Levy, 2008) and cue-based memory retrieval (Lewis \& Vasishth, 2005) are significant and complementary predictors of reading time in an eyetracking corpus. It remains an open question how the sentence processor interacts with oculomotor control. Using a simple linking hypothesis proposed in Reichle, Warren, and McConnell (2009), we integrated both measures with the eye movement model EMMA (Salvucci, 2001) inside the cognitive architecture ACT-R (Anderson et al., 2004). We built a reading model that could initiate short Time Out regressions (Mitchell, Shen, Green, \& Hodgson, 2008) that compensate for slow postlexical processing. This simple interaction enabled the model to predict the re-reading of words based on parsing difficulty. The model was evaluated in different configurations on the prediction of frequency effects on the Potsdam Sentence Corpus. The extension of EMMA with postlexical processing improved its predictions and reproduced re-reading rates and durations with a reasonable fit to the data. This demonstration, based on simple and independently motivated assumptions, serves as a foundational step toward a precise investigation of the interaction between high-level language processing and eye movement control.},
  language  = {en}
}

@article{BarthelmeTrukenbrodEngbertetal.2013,
  author    = {Barthelme, Simon and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.},
  title     = {Modeling fixation locations using spatial point processes},
  journal   = {Journal of Vision},
  volume    = {13},
  number    = {12},
  publisher = {Association for Research in Vision and Ophthalmology},
  address   = {Rockville},
  issn      = {1534-7362},
  doi       = {10.1167/13.12.1},
  pages     = {34},
  year      = {2013},
  abstract  = {Whenever eye movements are measured, a central part of the analysis has to do with where subjects fixate and why they fixated where they did. To a first approximation, a set of fixations can be viewed as a set of points in space; this implies that fixations are spatial data and that the analysis of fixation locations can be beneficially thought of as a spatial statistics problem.
We argue that thinking of fixation locations as arising from point processes is a very fruitful framework for eye-movement data, helping turn qualitative questions into quantitative ones. We provide a tutorial introduction to some of the main ideas of the field of spatial statistics, focusing especially on spatial Poisson processes. We show how point processes help relate image properties to fixation locations. In particular, we show how point processes naturally express the idea that image features' predictability for fixations may vary from one image to another. We review other methods of analysis used in the literature, show how they relate to point process theory, and argue that thinking in terms of point processes substantially extends the range of analyses that can be performed and clarifies their interpretation.},
  language  = {en}
}