@article{EngbertMergenthaler2005, author = {Engbert, Ralf and Mergenthaler, Konstantin}, title = {Statistics of fixational eye movements and oculomotor control}, issn = {0301-0066}, year = {2005}, language = {en} } @article{KruegelEngbert2010, author = {Kr{\"u}gel, Andr{\'e} and Engbert, Ralf}, title = {On the launch-site effect for skipped words during reading}, issn = {0042-6989}, doi = {10.1016/j.visres.2010.05.009}, year = {2010}, abstract = {The launch-site effect, a systematic variation of within-word landing position as a function of launch-site distance, is among the most important oculomotor phenomena in reading. Here we show that the launch-site effect is strongly modulated in word skipping, a finding which is inconsistent with the view that the launch-site effect is caused by a saccadic-range error. We observe that distributions of landing positions in skipping saccades show an increased leftward shift compared to non-skipping saccades at equal launch-site distances. Using an improved algorithm for the estimation of mislocated fixations, we demonstrate the reliability of our results.}, language = {en} } @article{KleinKruegelRisseetal.2015, author = {Klein, Angela Ines and Kruegel, Andre and Risse, Sarah and Esser, G{\"u}nter and Engbert, Ralf and Pereira, Vera Wannmacher}, title = {The processing of pronominal anaphora by children that have attention deficit hyperactivity disorder or dyslexia: a study through the analysis of eye movements}, series = {Letras de hoje}, volume = {50}, journal = {Letras de hoje}, number = {1}, publisher = {PUCRS}, address = {Porto Alegre}, issn = {0101-3335}, pages = {40 -- 48}, year = {2015}, abstract = {The aim of this work was to verify the processing of pronominal anaphora by children that have attention deficit hyperactivity disorder or dyslexia. The sample studied consisted of 75 children that speak German, who read two texts of 80 words containing pronominal anaphora. The eye movements of all participants were recorded and, to make sure they were reading with attention, two activities that tested reading comprehension were proposed. Through the analysis of eye movements, specifically the fixations, the data indicate that children with disorders have difficulty processing the pronominal anaphora, especially dyslexic children.}, language = {en} } @misc{CajarEngbertLaubrock2022, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {Potsdam Eye-Movement Corpus for Scene Memorization and Search With Color and Spatial-Frequency Filtering}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1866-8364}, doi = {10.25932/publishup-56318}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563184}, pages = {1 -- 7}, year = {2022}, language = {en} } @book{Engbert1997, author = {Engbert, Ralf}, title = {Tempo-induced transitions in polyrhythmic hand movements}, series = {Preprint NLD}, volume = {41}, journal = {Preprint NLD}, publisher = {Univ. Potsdam}, address = {Potsdam}, issn = {1432-2935}, pages = {12}, year = {1997}, language = {en} } @misc{WarschburgerCalvanoRichteretal.2015, author = {Warschburger, Petra and Calvano, Claudia and Richter, Eike M.
and Engbert, Ralf}, title = {Analysis of Attentional Bias towards Attractive and Unattractive Body Regions among Overweight Males and Females}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-86570}, year = {2015}, abstract = {Background Body image distortion is highly prevalent among overweight individuals. Whilst there is evidence that body-dissatisfied women and those suffering from disordered eating show a negative attentional bias towards their own unattractive body parts and others' attractive body parts, little is known about visual attention patterns in the area of obesity and with respect to males. Since eating disorders and obesity share common features in terms of distorted body image and body dissatisfaction, the aim of this study was to examine whether overweight men and women show a similar attentional bias. Methods/Design We analyzed eye movements in 30 overweight individuals (18 females) and 28 normalweight individuals (16 females) with respect to the participants' own pictures as well as gender- and BMI-matched control pictures (front and back view). Additionally, we assessed body image and disordered eating using validated questionnaires. Discussion The overweight sample rated their own body as less attractive and showed a more disturbed body image. Contrary to our assumptions, they focused significantly longer on attractive compared to unattractive regions of both their own and the control body. For one's own body, this was more pronounced for women. A higher weight status and more frequent body checking predicted attentional bias towards attractive body parts. We found that overweight adults exhibit an unexpected and stable pattern of selective attention, with a distinctive focus on their own attractive body regions despite higher levels of body dissatisfaction. This positive attentional bias may either be an indicator of a more pronounced pattern of attentional avoidance or a self-enhancing strategy. Further research is warranted to clarify these results.}, language = {en} } @article{WarschburgerCalvanoRichteretal.2015, author = {Warschburger, Petra and Calvano, Claudia and Richter, Eike M. and Engbert, Ralf}, title = {Analysis of Attentional Bias towards Attractive and Unattractive Body Regions among Overweight Males and Females}, series = {PLoS one}, journal = {PLoS one}, number = {10}, publisher = {Public Library of Science}, address = {Lawrence, Kan.}, issn = {1932-6203}, doi = {10.1371/journal.pone.0140813}, pages = {17}, year = {2015}, abstract = {Background Body image distortion is highly prevalent among overweight individuals. Whilst there is evidence that body-dissatisfied women and those suffering from disordered eating show a negative attentional bias towards their own unattractive body parts and others' attractive body parts, little is known about visual attention patterns in the area of obesity and with respect to males. Since eating disorders and obesity share common features in terms of distorted body image and body dissatisfaction, the aim of this study was to examine whether overweight men and women show a similar attentional bias. Methods/Design We analyzed eye movements in 30 overweight individuals (18 females) and 28 normalweight individuals (16 females) with respect to the participants' own pictures as well as gender- and BMI-matched control pictures (front and back view). Additionally, we assessed body image and disordered eating using validated questionnaires. 
Discussion The overweight sample rated their own body as less attractive and showed a more disturbed body image. Contrary to our assumptions, they focused significantly longer on attractive compared to unattractive regions of both their own and the control body. For one's own body, this was more pronounced for women. A higher weight status and more frequent body checking predicted attentional bias towards attractive body parts. We found that overweight adults exhibit an unexpected and stable pattern of selective attention, with a distinctive focus on their own attractive body regions despite higher levels of body dissatisfaction. This positive attentional bias may either be an indicator of a more pronounced pattern of attentional avoidance or a self-enhancing strategy. Further research is warranted to clarify these results.}, language = {en} } @article{EngbertMergenthaler2006, author = {Engbert, Ralf and Mergenthaler, Konstantin}, title = {Microsaccades are triggered by low retinal image slip}, issn = {0027-8424}, doi = {10.1073/pnas.0509557103}, year = {2006}, abstract = {Even during visual fixation of a stationary target, our eyes perform rather erratic miniature movements, which represent a random walk. These "fixational" eye movements counteract perceptual fading, a consequence of fast adaptation of the retinal receptor systems to constant input. The most important contribution to fixational eye movements is produced by microsaccades; however, a specific function of microsaccades only recently has been found. Here we show that the occurrence of microsaccades is correlated with low retinal image slip approximate to 200 ms before microsaccade onset. This result suggests that microsaccades are triggered dynamically, in contrast to the current view that microsaccades are randomly distributed in time characterized by their rate-of-occurrence of 1 to 2 per second. As a result of the dynamic triggering mechanism, individual microsaccade rate can be predicted by the fractal dimension of trajectories. Finally, we propose a minimal computational model for the dynamic triggering of microsaccades}, language = {en} } @article{Engbert2006, author = {Engbert, Ralf}, title = {Flick-induced flips in perception}, issn = {0896-6273}, doi = {10.1016/j.neuron.2006.01.005}, year = {2006}, abstract = {Microsaccades are miniature eye movements produced involuntarily during visual fixation of stationary objects. Since their first description more than 40 years ago, the role of microsaccades in vision has been controversial. In this issue, Martinez-Conde and colleagues present a solution to the long-standing research problem connecting this basic oculomotor function to visual perception, by showing that microsaccades may control peripheral vision during visual fixation by inducing flips in bistable peripheral percepts in head-unrestrained viewing. Their study provides new insight into the functional connectivity between oculomotor function and visual perception}, language = {en} } @article{TrukenbrodEngbert2007, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Oculomotor control in a sequential search task}, doi = {10.1016/j.visres.2007.05.010}, year = {2007}, abstract = {Using a serial search paradigm, we observed several effects of within-object fixation position on spatial and temporal control of eye movements: the preferred viewing location, launch site effect, the optimal viewing position, and the inverted optimal viewing position of fixation duration. 
While these effects were first identified by eye-movement studies in reading, our approach permits an analysis of the functional relationships between the effects in a different paradigm. Our results demonstrate that the fixation position is an important predictor of the subsequent saccade by influencing both fixation duration and the selection of the next saccade target.}, language = {en} } @article{SchwetlickRothkegelTrukenbrodetal.2020, author = {Schwetlick, Lisa and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Modeling the effects of perisaccadic attention on gaze statistics during scene viewing}, series = {Communications biology}, volume = {3}, journal = {Communications biology}, number = {1}, publisher = {Springer Nature}, address = {London}, issn = {2399-3642}, doi = {10.1038/s42003-020-01429-8}, pages = {11}, year = {2020}, abstract = {Lisa Schwetlick et al. present a computational model linking visual scan path generation in scene viewing to physiological and experimental work on perisaccadic covert attention, the act of attending to an object visually without obviously moving the eyes toward it. They find that integrating covert attention into predictive models of visual scan paths greatly improves the model's agreement with experimental data.
How we perceive a visual scene depends critically on the selection of gaze positions. For this selection process, visual attention is known to play a key role in two ways. First, image features attract visual attention, a fact that is captured well by time-independent fixation models. Second, millisecond-level attentional dynamics around the time of saccades drive our gaze from one position to the next. These two related research areas on attention are typically perceived as separate, both theoretically and experimentally. Here we link the two research areas by demonstrating that perisaccadic attentional dynamics improve predictions on scan path statistics. In a mathematical model, we integrated perisaccadic covert attention with dynamic scan path generation. Our model reproduces saccade amplitude distributions, angular statistics, intersaccadic turning angles, and their impact on fixation durations as well as inter-individual differences using Bayesian inference. Therefore, our results lend support to the relevance of perisaccadic attention to gaze statistics.}, language = {en} } @misc{SinnEngbert2011, author = {Sinn, Petra and Engbert, Ralf}, title = {Saccadic facilitation by modulation of microsaccades in natural backgrounds}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {595}, issn = {1866-8364}, doi = {10.25932/publishup-43181}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431817}, pages = {7}, year = {2011}, abstract = {Saccades move objects of interest into the center of the visual field for high-acuity visual analysis. White, Stritzke, and Gegenfurtner (Current Biology, 18, 124-128, 2008) have shown that saccadic latencies in the context of a structured background are much shorter than those with an unstructured background at equal levels of visibility. This effect has been explained by possible preactivation of the saccadic circuitry whenever a structured background acts as a mask for potential saccade targets. Here, we show that background textures modulate rates of microsaccades during visual fixation. First, after a display change, structured backgrounds induce a stronger decrease of microsaccade rates than do uniform backgrounds. Second, we demonstrate that the occurrence of a microsaccade in a critical time window can delay a subsequent saccadic response. Taken together, our findings suggest that microsaccades contribute to the saccadic facilitation effect, due to a modulation of microsaccade rates by properties of the background.}, language = {en} } @misc{LaubrockEngbertCajar2017, author = {Laubrock, Jochen and Engbert, Ralf and Cajar, Anke}, title = {Gaze-contingent manipulation of the FVF demonstrates the importance of fixation duration for explaining search behavior}, series = {Behavioral and brain sciences : an international journal of current research and theory with open peer commentary}, volume = {40}, journal = {Behavioral and brain sciences : an international journal of current research and theory with open peer commentary}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {0140-525X}, doi = {10.1017/S0140525X16000145}, pages = {31 -- 32}, year = {2017}, abstract = {Hulleman \& Olivers' (H\&O's) model introduces variation of the functional visual field (FVF) for explaining visual search behavior.
Our research shows how the FVF can be studied using gaze-contingent displays and how FVF variation can be implemented in models of gaze control. Contrary to H\&O, we believe that fixation duration is an important factor when modeling visual search behavior.}, language = {en} } @misc{RolfsKlieglEngbert2004, author = {Rolfs, Martin and Kliegl, Reinhold and Engbert, Ralf}, title = {Microsaccade orientation supports attentional enhancement opposite to a peripheral cue: Commentary on Tse, Sheinberg, and Logothetis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57081}, year = {2004}, language = {en} } @article{RichterEngbert2005, author = {Richter, Eike M. and Engbert, Ralf}, title = {Impossible gap paradigm : Experimental evidence for autonomous saccade timing}, issn = {0301-0066}, year = {2005}, language = {en} } @article{RichterEngbert2004, author = {Richter, Eike M. and Engbert, Ralf}, title = {Parafoveal processing during reading}, issn = {0301-0066}, year = {2004}, language = {en} } @article{NuthmannSmithEngbertetal.2010, author = {Nuthmann, Antje and Smith, Tim J. and Engbert, Ralf and Henderson, John M.}, title = {CRISP: a computational model of fixation duration in scene viewing}, year = {2010}, abstract = {Eye-movement control during scene viewing can be represented as a series of individual decisions about where and when to move the eyes. While substantial behavioral and computational research has been devoted to investigating the placement of fixations in scenes, relatively little is known about the mechanisms that control fixation durations. Here, we propose a computational model (CRISP) that accounts for saccade timing and programming and thus for variations in fixation durations in scene viewing. First, timing signals are modeled as continuous-time random walks. Second, difficulties at the level of visual and cognitive processing can inhibit and thus modulate saccade timing. Inhibition generates moment-by-moment changes in the random walk's transition rate and processing-related saccade cancellation. Third, saccade programming is completed in 2 stages: an initial, labile stage that is subject to cancellation and a subsequent, nonlabile stage. Several simulation studies tested the model's adequacy and generality. An initial simulation study explored the role of cognitive factors in scene viewing by examining how fixation durations differed under different viewing task instructions. Additional simulations investigated the degree to which fixation durations were under direct moment-to-moment control of the current visual scene. The present work further supports the conclusion that fixation durations, to a certain degree, reflect perceptual and cognitive activity in scene viewing. Computational model simulations contribute to an understanding of the underlying processes of gaze control.}, language = {en} } @article{NuthmannEngbert2009, author = {Nuthmann, Antje and Engbert, Ralf}, title = {Mindless reading revisited : an analysis based on the SWIFT model of eye-movement control}, issn = {0042-6989}, doi = {10.1016/j.visres.2008.10.022}, year = {2009}, abstract = {In this article, we revisit the mindless reading paradigm from the perspective of computational modeling. In the standard version of the paradigm, participants read sentences in both their normal version as well as the transformed (or mindless) version where each letter is replaced with a z. 
z-String scanning shares the oculomotor requirements with reading but none of the higher-level lexical and semantic processes. Here we use the z-string scanning task to validate the SWIFT model of saccade generation [Engbert, R., Nuthmann, A., Richter, E., \& Kliegl, R. (2005). SWIFT: A dynamical model of saccade generation during reading. Psychological Review, 112(4), 777-813] as an example for an advanced theory of eye-movement control in reading. We test the central assumption of spatially distributed processing across an attentional gradient proposed by the SWIFT model. Key experimental results like prolonged average fixation durations in z-string scanning compared to normal reading and the existence of a string-length effect on fixation durations and probabilities were reproduced by the model, which lends support to the model's assumptions on visual processing. Moreover, simulation results for patterns of regressive saccades in z-string scanning confirm SWIFT's concept of activation field dynamics for the selection of saccade targets.}, language = {en} } @article{RothkegelTrukenbrodSchuettetal.2017, author = {Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Sch{\"u}tt, Heiko Herbert and Wichmann, Felix A. and Engbert, Ralf}, title = {Temporal evolution of the central fixation bias in scene viewing}, series = {Journal of vision}, volume = {17}, journal = {Journal of vision}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/17.13.3}, pages = {1626 -- 1638}, year = {2017}, abstract = {When watching the image of a natural scene on a computer screen, observers initially move their eyes toward the center of the image—a reliable experimental finding termed central fixation bias. This systematic tendency in eye guidance likely masks attentional selection driven by image properties and top-down cognitive processes. Here, we show that the central fixation bias can be reduced by delaying the initial saccade relative to image onset. In four scene-viewing experiments we manipulated observers' initial gaze position and delayed their first saccade by a specific time interval relative to the onset of an image. We analyzed the distance to image center over time and show that the central fixation bias of initial fixations was significantly reduced after delayed saccade onsets. We additionally show that selection of the initial saccade target strongly depended on the first saccade latency. A previously published model of saccade generation was extended with a central activation map on the initial fixation whose influence declined with increasing saccade latency. This extension was sufficient to replicate the central fixation bias from our experiments. Our results suggest that the central fixation bias is generated by default activation as a response to the sudden image onset and that this default activation pattern decreases over time. Thus, it may often be preferable to use a modified version of the scene viewing paradigm that decouples image onset from the start signal for scene exploration to explicitly reduce the central fixation bias.}, language = {en} } @article{RothkegelTrukenbrodSchuettetal.2016, author = {Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Sch{\"u}tt, Heiko Herbert and Wichmann, Felix A. 
and Engbert, Ralf}, title = {Influence of initial fixation position in scene viewing}, series = {Vision research : an international journal for functional aspects of vision.}, volume = {129}, journal = {Vision research : an international journal for functional aspects of vision.}, publisher = {Elsevier}, address = {Oxford}, issn = {0042-6989}, doi = {10.1016/j.visres.2016.09.012}, pages = {33 -- 49}, year = {2016}, language = {en} } @article{CajarEngbertLaubrock2022, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {Potsdam Eye-Movement Corpus for Scene Memorization and Search With Color and Spatial-Frequency Filtering}, series = {Frontiers in psychology / Frontiers Research Foundation}, volume = {13}, journal = {Frontiers in psychology / Frontiers Research Foundation}, publisher = {Frontiers Research Foundation}, address = {Lausanne, Schweiz}, issn = {1664-1078}, doi = {10.3389/fpsyg.2022.850482}, pages = {1 -- 7}, year = {2022}, language = {en} } @misc{TrukenbrodEngbert2014, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {ICAT: a computational model for the adaptive control of fixation durations}, series = {Psychonomic bulletin \& review : a journal of the Psychonomic Society}, volume = {21}, journal = {Psychonomic bulletin \& review : a journal of the Psychonomic Society}, number = {4}, publisher = {Springer}, address = {New York}, issn = {1069-9384}, doi = {10.3758/s13423-013-0575-0}, pages = {907 -- 934}, year = {2014}, abstract = {Eye movements depend on cognitive processes related to visual information processing. Much has been learned about the spatial selection of fixation locations, while the principles governing the temporal control (fixation durations) are less clear. Here, we review current theories for the control of fixation durations in tasks like visual search, scanning, scene perception, and reading and propose a new model for the control of fixation durations. We distinguish two local principles from one global principle of control. First, an autonomous saccade timer initiates saccades after random time intervals (local-I). Second, foveal inhibition permits immediate prolongation of fixation durations by ongoing processing (local-II). Third, saccade timing is adaptive, so that the mean timer value depends on task requirements and fixation history (Global). We demonstrate by numerical simulations that our model qualitatively reproduces patterns of mean fixation durations and fixation duration distributions observed in typical experiments. When combined with assumptions of saccade target selection and oculomotor control, the model accounts for both temporal and spatial aspects of eye movement control in two versions of a visual search task. We conclude that the model provides a promising framework for the control of fixation durations in saccadic tasks.}, language = {en} } @article{LangeEngbert2013, author = {Lange, Elke B. and Engbert, Ralf}, title = {Differentiating between verbal and spatial encoding using eye-movement recordings}, series = {The quarterly journal of experimental psychology}, volume = {66}, journal = {The quarterly journal of experimental psychology}, number = {9}, publisher = {Wiley}, address = {Hove}, issn = {1747-0218}, doi = {10.1080/17470218.2013.772214}, pages = {1840 -- 1857}, year = {2013}, abstract = {Visual information processing is guided by an active mechanism generating saccadic eye movements to salient stimuli. 
Here we investigate the specific contribution of saccades to memory encoding of verbal and spatial properties in a serial recall task. In the first experiment, participants moved their eyes freely without specific instruction. We demonstrate the existence of qualitative differences in eye-movement strategies during verbal and spatial memory encoding. While verbal memory encoding was characterized by shifting the gaze to the to-be-encoded stimuli, saccadic activity was suppressed during spatial encoding. In the second experiment, participants were required to suppress saccades by fixating centrally during encoding or to make precise saccades onto the memory items. Active suppression of saccades had no effect on memory performance, but tracking the upcoming stimuli decreased memory performance dramatically in both tasks, indicating a resource bottleneck between display-controlled saccadic control and memory encoding. We conclude that optimized encoding strategies for verbal and spatial features are underlying memory performance in serial recall, but such strategies work on an involuntary level only and do not support memory encoding when they are explicitly required by the task.}, language = {en} } @article{BettenbuehlRusconiEngbertetal.2012, author = {Bettenb{\"u}hl, Mario and Rusconi, Marco and Engbert, Ralf and Holschneider, Matthias}, title = {Bayesian selection of Markov Models for symbol sequences application to microsaccadic eye movements}, series = {PLoS one}, volume = {7}, journal = {PLoS one}, number = {9}, publisher = {PLoS}, address = {San Fransisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0043388}, pages = {10}, year = {2012}, abstract = {Complex biological dynamics often generate sequences of discrete events which can be described as a Markov process. The order of the underlying Markovian stochastic process is fundamental for characterizing statistical dependencies within sequences. As an example for this class of biological systems, we investigate the Markov order of sequences of microsaccadic eye movements from human observers. We calculate the integrated likelihood of a given sequence for various orders of the Markov process and use this in a Bayesian framework for statistical inference on the Markov order. Our analysis shows that data from most participants are best explained by a first-order Markov process. This is compatible with recent findings of a statistical coupling of subsequent microsaccade orientations. Our method might prove to be useful for a broad class of biological systems.}, language = {en} } @article{MakaravaBettenbuehlEngbertetal.2012, author = {Makarava, Natallia and Bettenb{\"u}hl, Mario and Engbert, Ralf and Holschneider, Matthias}, title = {Bayesian estimation of the scaling parameter of fixational eye movements}, series = {epl : a letters journal exploring the frontiers of physics}, volume = {100}, journal = {epl : a letters journal exploring the frontiers of physics}, number = {4}, publisher = {EDP Sciences}, address = {Mulhouse}, issn = {0295-5075}, doi = {10.1209/0295-5075/100/40003}, pages = {6}, year = {2012}, abstract = {In this study we re-evaluate the estimation of the self-similarity exponent of fixational eye movements using Bayesian theory. Our analysis is based on a subsampling decomposition, which permits an analysis of the signal up to some scale factor. 
We demonstrate that our approach can be applied to simulated data from mathematical models of fixational eye movements to distinguish the models' properties reliably.}, language = {en} } @inproceedings{KruegelEngbert2012, author = {Kr{\"u}gel, Andre and Engbert, Ralf}, title = {Saccade targeting of spatially extended objects a Bayesian model}, series = {Perception}, volume = {41}, booktitle = {Perception}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {167 -- 167}, year = {2012}, language = {en} } @article{WarschburgerCalvanoRichteretal.2015, author = {Warschburger, Petra and Calvano, Claudia and Richter, Eike M. and Engbert, Ralf}, title = {Analysis of Attentional Bias towards Attractive and Unattractive Body Regions among Overweight Males and Females: An Eye-Movement Study}, series = {PLoS one}, volume = {10}, journal = {PLoS one}, number = {10}, publisher = {PLoS}, address = {San Fransisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0140813}, pages = {17}, year = {2015}, abstract = {Background Body image distortion is highly prevalent among overweight individuals. Whilst there is evidence that body-dissatisfied women and those suffering from disordered eating show a negative attentional bias towards their own unattractive body parts and others' attractive body parts, little is known about visual attention patterns in the area of obesity and with respect to males. Since eating disorders and obesity share common features in terms of distorted body image and body dissatisfaction, the aim of this study was to examine whether overweight men and women show a similar attentional bias. Methods/Design We analyzed eye movements in 30 overweight individuals (18 females) and 28 normal-weight individuals (16 females) with respect to the participants' own pictures as well as gender- and BMI-matched control pictures (front and back view). Additionally, we assessed body image and disordered eating using validated questionnaires. Discussion The overweight sample rated their own body as less attractive and showed a more disturbed body image. Contrary to our assumptions, they focused significantly longer on attractive compared to unattractive regions of both their own and the control body. For one's own body, this was more pronounced for women. A higher weight status and more frequent body checking predicted attentional bias towards attractive body parts. We found that overweight adults exhibit an unexpected and stable pattern of selective attention, with a distinctive focus on their own attractive body regions despite higher levels of body dissatisfaction. This positive attentional bias may either be an indicator of a more pronounced pattern of attentional avoidance or a self-enhancing strategy. 
Further research is warranted to clarify these results.}, language = {en} } @article{KruegelVituEngbert2012, author = {Kr{\"u}gel, Andre and Vitu, Francoise and Engbert, Ralf}, title = {Fixation positions after skipping saccades - a single space makes a large difference}, series = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, volume = {74}, journal = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, number = {8}, publisher = {Springer}, address = {New York}, issn = {1943-3921}, doi = {10.3758/s13414-012-0365-1}, pages = {1556 -- 1561}, year = {2012}, abstract = {During reading, saccadic eye movements are generated to shift words into the center of the visual field for lexical processing. Recently, Krugel and Engbert (Vision Research 50:1532-1539, 2010) demonstrated that within-word fixation positions are largely shifted to the left after skipped words. However, explanations of the origin of this effect cannot be drawn from normal reading data alone. Here we show that the large effect of skipped words on the distribution of within-word fixation positions is primarily based on rather subtle differences in the low-level visual information acquired before saccades. Using arrangements of "x" letter strings, we reproduced the effect of skipped character strings in a highly controlled single-saccade task. Our results demonstrate that the effect of skipped words in reading is the signature of a general visuomotor phenomenon. Moreover, our findings extend beyond the scope of the widely accepted range-error model, which posits that within-word fixation positions in reading depend solely on the distances of target words. We expect that our results will provide critical boundary conditions for the development of visuomotor models of saccade planning during reading.}, language = {en} } @article{LaubrockCajarEngbert2013, author = {Laubrock, Jochen and Cajar, Anke and Engbert, Ralf}, title = {Control of fixation duration during scene viewing by interaction of foveal and peripheral processing}, series = {Journal of vision}, volume = {13}, journal = {Journal of vision}, number = {12}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/13.12.11}, pages = {20}, year = {2013}, abstract = {Processing in our visual system is functionally segregated, with the fovea specialized in processing fine detail (high spatial frequencies) for object identification, and the periphery in processing coarse information (low frequencies) for spatial orienting and saccade target selection. Here we investigate the consequences of this functional segregation for the control of fixation durations during scene viewing. Using gaze-contingent displays, we applied high-pass or low-pass filters to either the central or the peripheral visual field and compared eye-movement patterns with an unfiltered control condition. In contrast with predictions from functional segregation, fixation durations were unaffected when the critical information for vision was strongly attenuated (foveal low-pass and peripheral high-pass filtering); fixation durations increased, however, when useful information was left mostly intact by the filter (foveal high-pass and peripheral low-pass filtering). These patterns of results are difficult to explain under the assumption that fixation durations are controlled by foveal processing difficulty. 
As an alternative explanation, we developed the hypothesis that the interaction of foveal and peripheral processing controls fixation duration. To investigate the viability of this explanation, we implemented a computational model with two compartments, approximating spatial aspects of processing by foveal and peripheral activations that change according to a small set of dynamical rules. The model reproduced distributions of fixation durations from all experimental conditions by variation of few parameters that were affected by specific filtering conditions.}, language = {en} } @article{EngbertTrukenbrodBarthelmeetal.2015, author = {Engbert, Ralf and Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A.}, title = {Spatial statistics and attentional dynamics in scene viewing}, series = {Journal of vision}, volume = {15}, journal = {Journal of vision}, number = {1}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/15.1.14}, pages = {17}, year = {2015}, abstract = {In humans and in foveated animals visual acuity is highly concentrated at the center of gaze, so that choosing where to look next is an important example of online, rapid decision-making. Computational neuroscientists have developed biologically-inspired models of visual attention, termed saliency maps, which successfully predict where people fixate on average. Using point process theory for spatial statistics, we show that scanpaths contain, however, important statistical structure, such as spatial clustering on top of distributions of gaze positions. Here, we develop a dynamical model of saccadic selection that accurately predicts the distribution of gaze positions as well as spatial clustering along individual scanpaths. Our model relies on, first, activation dynamics via spatially-limited (foveated) access to saliency information, and, second, a leaky memory process controlling the re-inspection of target regions. This theoretical framework models a form of context-dependent decision-making, linking neural dynamics of attention to behavioral gaze data.}, language = {en} } @article{ChandraKruegelEngbert2020, author = {Chandra, Johan and Kr{\"u}gel, Andr{\'e} and Engbert, Ralf}, title = {Modulation of oculomotor control during reading of mirrored and inverted texts}, series = {Scientific Reports}, volume = {10}, journal = {Scientific Reports}, publisher = {Macmillan Publishers Limited, part of Springer Nature}, address = {London}, issn = {2045-2322}, doi = {10.1038/s41598-020-60833-6}, pages = {15}, year = {2020}, abstract = {The interplay between cognitive and oculomotor processes during reading can be explored when the spatial layout of text deviates from the typical display. In this study, we investigate various eye-movement measures during reading of text with experimentally manipulated layout (word-wise and letter-wise mirrored-reversed text as well as inverted and scrambled text). While typical findings (e.g., longer mean fixation times, shorter mean saccade lengths) in reading manipulated texts compared to normal texts were reported in earlier work, little is known about changes of oculomotor targeting observed in within-word landing positions under the above text layouts. Here we carry out precise analyses of landing positions and find substantial changes in the so-called launch-site effect in addition to the expected overall slow-down of reading performance.
Specifically, during reading of our manipulated text conditions with reversed letter order (against overall reading direction), we find a reduced launch-site effect, while in all other manipulated text conditions, we observe an increased launch-site effect. Our results clearly indicate that the oculomotor system is highly adaptive when confronted with unusual reading conditions.}, language = {en} } @article{MeybergSinnEngbertetal.2017, author = {Meyberg, Susann and Sinn, Petra and Engbert, Ralf and Sommer, Werner}, title = {Revising the link between microsaccades and the spatial cueing of voluntary attention}, series = {Vision research : an international journal for functional aspects of vision.}, volume = {133}, journal = {Vision research : an international journal for functional aspects of vision.}, publisher = {Elsevier}, address = {Oxford}, issn = {0042-6989}, doi = {10.1016/j.visres.2017.01.001}, pages = {47 -- 60}, year = {2017}, abstract = {Microsaccades - i.e., small fixational saccades generated in the superior colliculus (SC) - have been linked to spatial attention. While maintaining fixation, voluntary shifts of covert attention toward peripheral targets result in a sequence of attention-aligned and attention-opposing microsaccades. In most previous studies the direction of the voluntary shift is signaled by a spatial cue (e.g., a leftwards pointing arrow) that presents the most informative part of the cue (e.g., the arrowhead) in the to-be attended visual field. Here we directly investigated the influence of cue position and tested the hypothesis that microsaccades align with cue position rather than with the attention shift. In a spatial cueing task, we presented the task-relevant part of a symmetric cue either in the to-be attended visual field or in the opposite field. As a result, microsaccades were still weakly related to the covert attention shift; however, they were strongly related to the position of the cue even if that required a movement opposite to the cued attention shift. Moreover, if microsaccades aligned with cue position, we observed stronger cueing effects on manual response times. Our interpretation of the data is supported by numerical simulations of a computational model of microsaccade generation that is based on SC properties, where we explain our findings by separate attentional mechanisms for cue localization and the cued attention shift. We conclude that during cueing of voluntary attention, microsaccades are related to both - the overt attentional selection of the task-relevant part of the cue stimulus and the subsequent covert attention shift.(C) 2017 Elsevier Ltd. All rights reserved.}, language = {en} } @misc{SeeligRisseEngbert2021, author = {Seelig, Stefan and Risse, Sarah and Engbert, Ralf}, title = {Predictive modeling of parafoveal information processing during reading}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, issn = {1866-8364}, doi = {10.25932/publishup-52666}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-526665}, pages = {11}, year = {2021}, abstract = {Skilled reading requires information processing of the fixated and the not-yet-fixated words to generate precise control of gaze. 
Over the last 30 years, experimental research provided evidence that word processing is distributed across the perceptual span, which permits recognition of the fixated (foveal) word as well as preview of parafoveal words to the right of fixation. However, theoretical models have been unable to differentiate the specific influences of foveal and parafoveal information on saccade control. Here we show how parafoveal word difficulty modulates spatial and temporal control of gaze in a computational model to reproduce experimental results. In a fully Bayesian framework, we estimated model parameters for different models of parafoveal processing and carried out large-scale predictive simulations and model comparisons for a gaze-contingent reading experiment. We conclude that mathematical modeling of data from gaze-contingent experiments permits the precise identification of pathways from parafoveal information processing to gaze control, uncovering potential mechanisms underlying the parafoveal contribution to eye-movement control.}, language = {en} } @article{KruegelRothkegelEngbert2020, author = {Kr{\"u}gel, Andr{\'e} and Rothkegel, Lars and Engbert, Ralf}, title = {No exception from Bayes' rule}, series = {Journal of vision}, volume = {20}, journal = {Journal of vision}, number = {7}, publisher = {ARVO}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/jov.20.7.15}, pages = {14}, year = {2020}, abstract = {In an influential theoretical model, human sensorimotor control is achieved by a Bayesian decision process, which combines noisy sensory information and learned prior knowledge. A ubiquitous signature of prior knowledge and Bayesian integration in human perception and motor behavior is the frequently observed bias toward an average stimulus magnitude (i.e., a central-tendency bias, range effect, regression-to-the-mean effect). However, in the domain of eye movements, there is a recent controversy about the fundamental existence of a range effect in the saccadic system. Here we argue that the problem of the existence of a range effect is linked to the availability of prior knowledge for saccade control. We present results from two prosaccade experiments that both employ an informative prior structure (i.e., a nonuniform Gaussian distribution of saccade target distances). Our results demonstrate the validity of Bayesian integration in saccade control, which generates a range effect in saccades. According to Bayesian integration principles, the saccadic range effect depends on the availability of prior knowledge and varies in size as a function of the reliability of the prior and the sensory likelihood.}, language = {en} } @misc{ChandraKruegelEngbert2020, author = {Chandra, Johan and Kr{\"u}gel, Andr{\'e} and Engbert, Ralf}, title = {Modulation of oculomotor control during reading of mirrored and inverted texts}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {659}, issn = {1866-8364}, doi = {10.25932/publishup-49487}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-494879}, pages = {17}, year = {2020}, abstract = {The interplay between cognitive and oculomotor processes during reading can be explored when the spatial layout of text deviates from the typical display.
In this study, we investigate various eye-movement measures during reading of text with experimentally manipulated layout (word-wise and letter-wise mirrored-reversed text as well as inverted and scrambled text). While typical findings (e.g., longer mean fixation times, shorter mean saccade lengths) in reading manipulated texts compared to normal texts were reported in earlier work, little is known about changes of oculomotor targeting observed in within-word landing positions under the above text layouts. Here we carry out precise analyses of landing positions and find substantial changes in the so-called launch-site effect in addition to the expected overall slow-down of reading performance. Specifically, during reading of our manipulated text conditions with reversed letter order (against overall reading direction), we find a reduced launch-site effect, while in all other manipulated text conditions, we observe an increased launch-site effect. Our results clearly indicate that the oculomotor system is highly adaptive when confronted with unusual reading conditions.}, language = {en} } @misc{KruegelVituEngbert2012, author = {Kr{\"u}gel, Andr{\'e} and Vitu, Fran{\c{c}}oise and Engbert, Ralf}, title = {Fixation positions after skipping saccades}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {856}, issn = {1866-8372}, doi = {10.25932/publishup-43288}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432887}, pages = {1556 -- 1561}, year = {2012}, abstract = {During reading, saccadic eye movements are generated to shift words into the center of the visual field for lexical processing. Recently, Krugel and Engbert (Vision Research 50:1532-1539, 2010) demonstrated that within-word fixation positions are largely shifted to the left after skipped words. However, explanations of the origin of this effect cannot be drawn from normal reading data alone. Here we show that the large effect of skipped words on the distribution of within-word fixation positions is primarily based on rather subtle differences in the low-level visual information acquired before saccades. Using arrangements of "x" letter strings, we reproduced the effect of skipped character strings in a highly controlled single-saccade task. Our results demonstrate that the effect of skipped words in reading is the signature of a general visuomotor phenomenon. Moreover, our findings extend beyond the scope of the widely accepted range-error model, which posits that within-word fixation positions in reading depend solely on the distances of target words. We expect that our results will provide critical boundary conditions for the development of visuomotor models of saccade planning during reading.}, language = {en} } @article{CajarEngbertLaubrock2016, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {Spatial frequency processing in the central and peripheral visual field during scene viewing}, series = {Vision research : an international journal for functional aspects of vision.}, volume = {127}, journal = {Vision research : an international journal for functional aspects of vision.}, publisher = {Elsevier}, address = {Oxford}, issn = {0042-6989}, doi = {10.1016/j.visres.2016.05.008}, pages = {186 -- 197}, year = {2016}, abstract = {Visuospatial attention and gaze control depend on the interaction of foveal and peripheral processing.
The foveal and peripheral regions of the visual field are differentially sensitive to parts of the spatial frequency spectrum. In two experiments, we investigated how the selective attenuation of spatial frequencies in the central or the peripheral visual field affects eye-movement behavior during real-world scene viewing. Gaze-contingent low-pass or high-pass filters with varying filter levels (i.e., cutoff frequencies; Experiment 1) or filter sizes (Experiment 2) were applied. Compared to unfiltered control conditions, mean fixation durations increased most with central high-pass and peripheral low-pass filtering. Increasing filter size prolonged fixation durations with peripheral filtering, but not with central filtering. Increasing filter level prolonged fixation durations with low-pass filtering, but not with high-pass filtering. These effects indicate that fixation durations are not always longer under conditions of increased processing difficulty. Saccade amplitudes largely adapted to processing difficulty: amplitudes increased with central filtering and decreased with peripheral filtering; the effects strengthened with increasing filter size and filter level. In addition, we observed a trade-off between saccade timing and saccadic selection, since saccade amplitudes were modulated when fixation durations were unaffected by the experimental manipulations. We conclude that interactions of perception and gaze control are highly sensitive to experimental manipulations of input images as long as the residual information can still be accessed for gaze control. (C) 2016 Elsevier Ltd. All rights reserved.}, language = {en} } @misc{CajarEngbertLaubrock2016, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {Eye movements during gaze-contingent spatial-frequency filtering of real-world scenes: Effects of filter location, cutoff, and size}, series = {Perception}, volume = {45}, journal = {Perception}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {126 -- 126}, year = {2016}, language = {en} } @misc{KruegelEngbert2016, author = {Kruegel, Andre and Engbert, Ralf}, title = {Statistics of microsaccades indicate early frequency effects during visual word recognition}, series = {Perception}, volume = {45}, journal = {Perception}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {127 -- 127}, year = {2016}, language = {en} } @article{SinnEngbert2016, author = {Sinn, Petra and Engbert, Ralf}, title = {Small saccades versus microsaccades: Experimental distinction and model-based unification}, series = {Vision research : an international journal for functional aspects of vision.}, volume = {118}, journal = {Vision research : an international journal for functional aspects of vision.}, publisher = {Elsevier}, address = {Oxford}, issn = {0042-6989}, doi = {10.1016/j.visres.2015.05.012}, pages = {132 -- 143}, year = {2016}, abstract = {Natural vision is characterized by alternating sequences of rapid gaze shifts (saccades) and fixations. During fixations, microsaccades and slower drift movements occur spontaneously, so that the eye is never motionless. Theoretical models of fixational eye movements predict that microsaccades are dynamically coupled to slower drift movements generated immediately before microsaccades, which might be used as a criterion to distinguish microsaccades from small voluntary saccades. 
Here we investigate a sequential scanning task, where participants generate goal-directed saccades and microsaccades with overlapping amplitude distributions. We show that properties of microsaccades are correlated with precursory drift motion, while amplitudes of goal-directed saccades do not depend on previous drift epochs. We develop and test a mathematical model that integrates goal-directed and fixational eye movements, including microsaccades. Using model simulations, we reproduce the experimental finding of correlations within fixational eye movement components (i.e., between physiological drift and microsaccades) but not between goal-directed saccades and fixational drift motion. These results lend support to a functional difference between microsaccades and goal-directed saccades, while, at the same time, both types of behavior may be part of an oculomotor continuum that is quantitatively described by our mathematical model. (C) 2015 Elsevier Ltd. All rights reserved.}, language = {en} } @article{CajarSchneeweissEngbertetal.2016, author = {Cajar, Anke and Schneeweiss, Paul and Engbert, Ralf and Laubrock, Jochen}, title = {Coupling of attention and saccades when viewing scenes with central and peripheral degradation}, series = {Journal of vision}, volume = {16}, journal = {Journal of vision}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/16.2.8}, pages = {19}, year = {2016}, abstract = {Degrading real-world scenes in the central or the peripheral visual field yields a characteristic pattern: Mean saccade amplitudes increase with central and decrease with peripheral degradation. Does this pattern reflect corresponding modulations of selective attention? If so, the observed saccade amplitude pattern should reflect more focused attention in the central region with peripheral degradation and an attentional bias toward the periphery with central degradation. To investigate this hypothesis, we measured the detectability of peripheral (Experiment 1) or central targets (Experiment 2) during scene viewing when low or high spatial frequencies were gaze-contingently filtered in the central or the peripheral visual field. Relative to an unfiltered control condition, peripheral filtering induced a decrease of the detection probability for peripheral but not for central targets (tunnel vision). Central filtering decreased the detectability of central but not of peripheral targets. Additional post hoc analyses are compatible with the interpretation that saccade amplitudes and direction are computed in partial independence. Our experimental results indicate that task-induced modulations of saccade amplitudes reflect attentional modulations.}, language = {en} } @article{HerrmannMetzlerEngbert2017, author = {Herrmann, Carl J. J. and Metzler, Ralf and Engbert, Ralf}, title = {A self-avoiding walk with neural delays as a model of fixational eye movements}, series = {Scientific reports}, volume = {7}, journal = {Scientific reports}, publisher = {Nature Publ. Group}, address = {London}, issn = {2045-2322}, doi = {10.1038/s41598-017-13489-8}, pages = {17}, year = {2017}, abstract = {Fixational eye movements show scaling behaviour of the positional mean-squared displacement with a characteristic transition from persistence to antipersistence for increasing time-lag. These statistical patterns were found to be mainly shaped by microsaccades (fast, small-amplitude movements).
However, our re-analysis of fixational eye-movement data provides evidence that the slow component (physiological drift) of the eyes exhibits scaling behaviour of the mean-squared displacement that varies across human participants. These results suggest that drift is a correlated movement that interacts with microsaccades. Moreover, on the long time scale, the mean-squared displacement of the drift shows oscillations, which are also present in the displacement auto-correlation function. This finding lends support to the presence of time-delayed feedback in the control of drift movements. Based on an earlier non-linear delayed feedback model of fixational eye movements, we propose and discuss different versions of a new model that combines a self-avoiding walk with time delay. As a result, we identify a model that reproduces oscillatory correlation functions, the transition from persistence to antipersistence, and microsaccades.}, language = {en} } @article{TrukenbrodBarthelmeWichmannetal.2019, author = {Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A. and Engbert, Ralf}, title = {Spatial statistics for gaze patterns in scene viewing}, series = {Journal of vision}, volume = {19}, journal = {Journal of vision}, number = {5}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.6.5}, pages = {1 -- 19}, year = {2019}, abstract = {Scene viewing is used to study attentional selection in complex but still controlled environments. One of the main observations on eye movements during scene viewing is the inhomogeneous distribution of fixation locations: While some parts of an image are fixated by almost all observers and are inspected repeatedly by the same observer, other image parts remain unfixated by observers even after long exploration intervals. Here, we apply spatial point process methods to investigate the relationship between pairs of fixations. More precisely, we use the pair correlation function, a powerful statistical tool, to evaluate dependencies between fixation locations along individual scanpaths. We demonstrate that aggregation of fixation locations within 4 degrees is stronger than expected from chance. Furthermore, the pair correlation function reveals stronger aggregation of fixations when the same image is presented a second time. We use simulations of a dynamical model to show that a narrower spatial attentional span may explain differences in pair correlations between the first and the second inspection of the same image.}, language = {en} } @article{ChandraKruegelEngbert2020, author = {Chandra, Johan and Kr{\"u}gel, Andr{\'e} and Engbert, Ralf}, title = {Experimental test of Bayesian saccade targeting under reversed reading direction}, series = {Attention, Perception, \& Psychophysics}, volume = {82}, journal = {Attention, Perception, \& Psychophysics}, publisher = {Springer}, address = {New York, NY}, issn = {1943-393X}, doi = {10.3758/s13414-019-01814-4}, pages = {1230 -- 1240}, year = {2020}, abstract = {During reading, rapid eye movements (saccades) shift the reader's line of sight from one word to another for high-acuity visual information processing. While experimental data and theoretical models show that readers aim at word centers, the eye-movement (oculomotor) accuracy is low compared to other tasks.
As a consequence, distributions of saccadic landing positions indicate large (i) random errors and (ii) systematic over- and undershoot of word centers, which additionally depend on saccade lengths (McConkie et al., Vision Research, 28(10), 1107-1118, 1988). Here we show that both error components can be simultaneously reduced by reading texts from right to left in German (N = 32). We used our experimental data to test a Bayesian model of saccade planning. First, experimental data are consistent with the model. Second, the model makes specific predictions of the effects of the precision of prior and (sensory) likelihood. Our results suggest that it is a more precise sensory likelihood that can explain the reduction of both random and systematic error components.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2019, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Disentangling bottom-up versus top-down and low-level versus high-level influences on eye movements over time}, series = {Journal of vision}, volume = {19}, journal = {Journal of vision}, number = {3}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.3.1}, pages = {23}, year = {2019}, abstract = {Bottom-up and top-down as well as low-level and high-level factors influence where we fixate when viewing natural scenes. However, the importance of each of these factors and how they interact remains a matter of debate. Here, we disentangle these factors by analyzing their influence over time. For this purpose, we develop a saliency model that is based on the internal representation of a recent early spatial vision model to measure the low-level, bottom-up factor. To measure the influence of high-level, bottom-up features, we use a recent deep neural network-based saliency model. To account for top-down influences, we evaluate the models on two large data sets with different tasks: first, a memorization task and, second, a search task. Our results lend support to a separation of visual scene exploration into three phases: the first saccade, an initial guided exploration characterized by a gradual broadening of the fixation density, and a steady state that is reached after roughly 10 fixations. Saccade-target selection during the initial exploration and in the steady state is related to similar areas of interest, which are better predicted when including high-level features. In the search data set, fixation locations are determined predominantly by top-down processes. In contrast, the first fixation follows a different fixation density and contains a strong central fixation bias. Nonetheless, first fixations are guided strongly by image properties, and as early as 200 ms after image onset, fixations are better predicted by high-level information. We conclude that any low-level, bottom-up factors are mainly limited to the generation of the first saccade. All saccades are better explained when high-level features are considered, and later, this high-level, bottom-up control can be overruled by top-down influences.}, language = {en} } @article{RothkegelSchuettTrukenbrodetal.2019, author = {Rothkegel, Lars Oliver Martin and Sch{\"u}tt, Heiko Herbert and Trukenbrod, Hans Arne and Wichmann, Felix A.
and Engbert, Ralf}, title = {Searchers adjust their eye-movement dynamics to target characteristics in natural scenes}, series = {Scientific reports}, volume = {9}, journal = {Scientific reports}, publisher = {Nature Publ. Group}, address = {London}, issn = {2045-2322}, doi = {10.1038/s41598-018-37548-w}, pages = {12}, year = {2019}, abstract = {When searching a target in a natural scene, it has been shown that both the target's visual properties and similarity to the background influence whether and how fast humans are able to find it. So far, it was unclear whether searchers adjust the dynamics of their eye movements (e.g., fixation durations, saccade amplitudes) to the target they search for. In our experiment, participants searched natural scenes for six artificial targets with different spatial frequency content throughout eight consecutive sessions. High-spatial frequency targets led to smaller saccade amplitudes and shorter fixation durations than low-spatial frequency targets if target identity was known. If a saccade was programmed in the same direction as the previous saccade, fixation durations and successive saccade amplitudes were not influenced by target type. Visual saliency and empirical fixation density at the endpoints of saccades which maintain direction were comparatively low, indicating that these saccades were less selective. Our results suggest that searchers adjust their eye movement dynamics to the search target efficiently, since previous research has shown that low-spatial frequencies are visible farther into the periphery than high-spatial frequencies. We interpret the saccade direction specificity of our effects as an underlying separation into a default scanning mechanism and a selective, target-dependent mechanism.}, language = {en} } @misc{SchuettRothkegelTrukenbrodetal.2019, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Predicting fixation densities over time from early visual processing}, series = {Perception}, volume = {48}, journal = {Perception}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {64 -- 65}, year = {2019}, abstract = {Bottom-up saliency is often cited as a factor driving the choice of fixation locations of human observers, based on the (partial) success of saliency models to predict fixation densities in free viewing. However, these observations are only weak evidence for a causal role of bottom-up saliency in natural viewing behaviour. To test bottom-up saliency more directly, we analyse the performance of a number of saliency models---including our own saliency model based on our recently published model of early visual processing (Sch{\"u}tt \& Wichmann, 2017, JoV)---as well as the theoretical limits for predictions over time. On free viewing data our model performs better than classical bottom-up saliency models, but worse than the current deep learning based saliency models incorporating higher-level information like knowledge about objects. However, on search data all saliency models perform worse than the optimal image independent prediction. We observe that the fixation density in free viewing is not stationary over time, but changes over the course of a trial. It starts with a pronounced central fixation bias on the first chosen fixation, which is nonetheless influenced by image content. 
Starting with the 2nd to 3rd fixation, the fixation density is already well predicted by later densities, but more concentrated. From there the fixation distribution broadens until it reaches a stationary distribution around the 10th fixation. Taken together these observations argue against bottom-up saliency as a mechanistic explanation for eye movement control after the initial orienting reaction in the first one to two saccades, although we confirm the predictive value of early visual representations for fixation locations. The fixation distribution is, first, not well described by any stationary density, second, is predicted better when including object information and, third, is badly predicted by any saliency model in a search task.}, language = {en} } @misc{SchwetlickTrukenbrodEngbert2019, author = {Schwetlick, Lisa and Trukenbrod, Hans Arne and Engbert, Ralf}, title = {The Influence of Visual Long Term Memory on Eye Movements During Scene Viewing}, series = {Perception}, volume = {48}, journal = {Perception}, number = {S1}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {138 -- 138}, year = {2019}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2017, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Reich, Sebastian and Wichmann, Felix A. and Engbert, Ralf}, title = {Likelihood-based parameter estimation and comparison of dynamical cognitive models}, series = {Psychological Review}, volume = {124}, journal = {Psychological Review}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000068}, pages = {505 -- 524}, year = {2017}, abstract = {Dynamical models of cognition play an increasingly important role in driving theoretical and experimental research in psychology. Therefore, parameter estimation, model analysis and comparison of dynamical models are of essential importance. In this article, we propose a maximum likelihood approach for model analysis in a fully dynamical framework that includes time-ordered experimental data. Our methods can be applied to dynamical models for the prediction of discrete behavior (e.g., movement onsets); in particular, we use a dynamical model of saccade generation in scene viewing as a case study for our approach. For this model, the likelihood function can be computed directly by numerical simulation, which enables more efficient parameter estimation including Bayesian inference to obtain reliable estimates and corresponding credible intervals. Using hierarchical models inference is even possible for individual observers. Furthermore, our likelihood approach can be used to compare different models. In our example, the dynamical framework is shown to outperform nondynamical statistical models. Additionally, the likelihood based evaluation differentiates model variants, which produced indistinguishable predictions on hitherto used statistics. 
Our results indicate that the likelihood approach is a promising framework for dynamical cognitive models.}, language = {en} } @article{SeeligRisseEngbert2021, author = {Seelig, Stefan and Risse, Sarah and Engbert, Ralf}, title = {Predictive modeling of parafoveal information processing during reading}, series = {Scientific reports}, volume = {11}, journal = {Scientific reports}, number = {1}, publisher = {Nature Portfolio}, address = {Berlin}, issn = {2045-2322}, doi = {10.1038/s41598-021-92140-z}, pages = {9}, year = {2021}, abstract = {Skilled reading requires information processing of the fixated and the not-yet-fixated words to generate precise control of gaze. Over the last 30 years, experimental research provided evidence that word processing is distributed across the perceptual span, which permits recognition of the fixated (foveal) word as well as preview of parafoveal words to the right of fixation. However, theoretical models have been unable to differentiate the specific influences of foveal and parafoveal information on saccade control. Here we show how parafoveal word difficulty modulates spatial and temporal control of gaze in a computational model to reproduce experimental results. In a fully Bayesian framework, we estimated model parameters for different models of parafoveal processing and carried out large-scale predictive simulations and model comparisons for a gaze-contingent reading experiment. We conclude that mathematical modeling of data from gaze-contingent experiments permits the precise identification of pathways from parafoveal information processing to gaze control, uncovering potential mechanisms underlying the parafoveal contribution to eye-movement control.}, language = {en} } @article{CajarEngbertLaubrock2020, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {How spatial frequencies and color drive object search in real-world scenes}, series = {Journal of vision}, volume = {20}, journal = {Journal of vision}, number = {7}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/jov.20.7.8}, pages = {16}, year = {2020}, abstract = {When studying how people search for objects in scenes, the inhomogeneity of the visual field is often ignored. Due to physiological limitations, peripheral vision is blurred and mainly uses coarse-grained information (i.e., low spatial frequencies) for selecting saccade targets, whereas high-acuity central vision uses fine-grained information (i.e., high spatial frequencies) for analysis of details. Here we investigated how spatial frequencies and color affect object search in real-world scenes. Using gaze-contingent filters, we attenuated high or low frequencies in central or peripheral vision while viewers searched color or grayscale scenes. Results showed that peripheral filters and central high-pass filters hardly affected search accuracy, whereas accuracy dropped drastically with central low-pass filters. Peripheral filtering increased the time to localize the target by decreasing saccade amplitudes and increasing number and duration of fixations. The use of coarse-grained information in the periphery was limited to color scenes. Central filtering increased the time to verify target identity instead, especially with low-pass filters. We conclude that peripheral vision is critical for object localization and central vision is critical for object identification. 
Visual guidance during peripheral object localization is dominated by low-frequency color information, whereas high-frequency information, relatively independent of color, is most important for object identification in central vision.}, language = {en} } @article{EngbertRabeSchwetlicketal.2022, author = {Engbert, Ralf and Rabe, Maximilian Michael and Schwetlick, Lisa and Seelig, Stefan A. and Reich, Sebastian and Vasishth, Shravan}, title = {Data assimilation in dynamical cognitive science}, series = {Trends in cognitive sciences}, volume = {26}, journal = {Trends in cognitive sciences}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1364-6613}, doi = {10.1016/j.tics.2021.11.006}, pages = {99 -- 102}, year = {2022}, abstract = {Dynamical models make specific assumptions about cognitive processes that generate human behavior. In data assimilation, these models are tested against time-ordered data. Recent progress on Bayesian data assimilation demonstrates that this approach combines the strengths of statistical modeling of individual differences with those of dynamical cognitive models.}, language = {en} } @article{BarthelmeTrukenbrodEngbertetal.2013, author = {Barthelme, Simon and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Modeling fixation locations using spatial point processes}, series = {Journal of vision}, volume = {13}, journal = {Journal of vision}, number = {12}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/13.12.1}, pages = {34}, year = {2013}, abstract = {Whenever eye movements are measured, a central part of the analysis has to do with where subjects fixate and why they fixated where they fixated. To a first approximation, a set of fixations can be viewed as a set of points in space; this implies that fixations are spatial data and that the analysis of fixation locations can be beneficially thought of as a spatial statistics problem. We argue that thinking of fixation locations as arising from point processes is a very fruitful framework for eye-movement data, helping turn qualitative questions into quantitative ones. We provide a tutorial introduction to some of the main ideas of the field of spatial statistics, focusing especially on spatial Poisson processes. We show how point processes help relate image properties to fixation locations. In particular we show how point processes naturally express the idea that image features' predictability for fixations may vary from one image to another. We review other methods of analysis used in the literature, show how they relate to point process theory, and argue that thinking in terms of point processes substantially extends the range of analyses that can be performed and clarifies their interpretation.}, language = {en} } @article{SchiekDrepperEngbertetal.1998, author = {Schiek, Michael and Drepper, Friedhelm R. and Engbert, Ralf and Suder, Katrin and Abel, Hans-Henning}, title = {Cardiorespiratory Synchronization}, isbn = {3-540-63481-9}, year = {1998}, abstract = {The complex behaviour of cardiorespiratory dynamics is shown to be related to the interaction between several physiological oscillators. This study is based on electrocardiogram and respiratory flow data obtained from 3 different subjects during paced breathing at 10 different pacing cycle lengths ranging from 5 s to 12 s. Two different methods ideally suited for the analysis of synchronization pattern of coupled oscillators are applied: 1.
Symbolic dynamics based on symbol coding adapted for the detection of respiratory modulation of cardiac parasympathetic activity discloses two regimes of different synchronization behaviour within the frequency area corresponding to the Arnold tongue of 1:1 frequency-locking between respiratory flow and respiratory heartbeat variation (respiratory sinus arrhythmia). 2. The analysis of the phase shift between respiratory flow and respiratory sinus arrhythmia indicates that synchronization is not a static but a dynamic phenomenon. The observed dependence of the phase shift on respiratory cycle length shows large inter-individual variation. These findings turn out to be further hints for the existence of an additional central oscillator in the frequency range of respiration interacting with the central respiratory oscillator driving mechanical respiration.}, language = {en} } @article{MergenthalerEngbert2010, author = {Mergenthaler, Konstantin and Engbert, Ralf}, title = {Microsaccades are different from saccades in scene perception}, year = {2010}, abstract = {Eye-fixation durations are among the best and most widely used measures of ongoing cognition in visual tasks, e.g., reading, visual search or scene perception. However, fixations are characterized by ongoing motor activity (or fixational eye movements) with microsaccades as their most pronounced components. Recent work demonstrated the similarities of microsaccades and inspection saccades. Here, we show that distinct properties of microsaccades and inspection saccades can be found in a scene perception task, based on descriptive measures (e.g., a bimodal amplitude distribution) as well as functional characteristics (e.g., inter saccadic-event intervals and generating processes). Besides these specific differences, microsaccade rates produced by individual participants in a fixation paradigm are correlated with microsaccade rates extracted from fixations in scene perception, indicating a common neurophysiological basis. Finally, we observed that slow fixational eye movements, called drift, are significantly reduced during long fixations in scene viewing, which informs about the control of eye movements in scene viewing.}, language = {en} } @article{EngbertKruegel2010, author = {Engbert, Ralf and Kruegel, Andr{\´e}}, title = {Readers use Bayesian estimation for eye movement control}, issn = {0956-7976}, doi = {10.1177/0956797610362060}, year = {2010}, abstract = {During reading, saccadic landing positions within words show a pronounced peak close to the word center, with an additional systematic error that is modulated by the distance from the launch site and the length of the target word. Here we show that the systematic variation of fixation positions within words, the saccadic range error, can be derived from Bayesian decision theory. We present the first mathematical model for the saccadic range error; this model makes explicit assumptions regarding underlying visual and oculomotor processes. Analyzing a corpus of eye movement recordings, we obtained results that are consistent with the view that readers use Bayesian estimation for saccade planning. 
Furthermore, we show that alternative models fail to reproduce the experimental data.}, language = {en} } @article{StarzynskiEngbert2009, author = {Starzynski, Christian and Engbert, Ralf}, title = {Noise-enhanced target discrimination under the influence of fixational eye movements and external noise}, issn = {1054-1500}, doi = {10.1063/1.3098950}, year = {2009}, abstract = {Active motor processes are present in many sensory systems to enhance perception. In the human visual system, miniature eye movements are produced involuntarily and unconsciously when we fixate a stationary target. These fixational eye movements represent self-generated noise which serves important perceptual functions. Here we investigate fixational eye movements under the influence of external noise. In a two-choice discrimination task, the target stimulus performed a random walk with varying noise intensity. We observe noise-enhanced discrimination of the target stimulus characterized by a U-shaped curve of manual response times as a function of the diffusion constant of the stimulus. Based on the experiments, we develop a stochastic information-accumulator model for stimulus discrimination in a noisy environment. Our results provide a new explanation for the constructive role of fixational eye movements in visual perception.}, language = {en} } @article{SinnEngbert2011, author = {Sinn, Petra and Engbert, Ralf}, title = {Saccadic facilitation by modulation of microsaccades in natural backgrounds}, series = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, volume = {73}, journal = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, number = {4}, publisher = {Springer}, address = {New York}, issn = {1943-3921}, doi = {10.3758/s13414-011-0107-9}, pages = {1029 -- 1033}, year = {2011}, abstract = {Saccades move objects of interest into the center of the visual field for high-acuity visual analysis. White, Stritzke, and Gegenfurtner (Current Biology, 18, 124-128, 2008) have shown that saccadic latencies in the context of a structured background are much shorter than those with an unstructured background at equal levels of visibility. This effect has been explained by possible preactivation of the saccadic circuitry whenever a structured background acts as a mask for potential saccade targets. Here, we show that background textures modulate rates of microsaccades during visual fixation. First, after a display change, structured backgrounds induce a stronger decrease of microsaccade rates than do uniform backgrounds. Second, we demonstrate that the occurrence of a microsaccade in a critical time window can delay a subsequent saccadic response. Taken together, our findings suggest that microsaccades contribute to the saccadic facilitation effect, due to a modulation of microsaccade rates by properties of the background.}, language = {en} } @article{LangeStarzynskiEngbert2012, author = {Lange, Elke B. 
and Starzynski, Christian and Engbert, Ralf}, title = {Capture of the gaze does not capture the mind}, series = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, volume = {74}, journal = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, number = {6}, publisher = {Springer}, address = {New York}, issn = {1943-3921}, doi = {10.3758/s13414-012-0318-8}, pages = {1168 -- 1182}, year = {2012}, abstract = {Sudden visual changes attract our gaze, and related eye movement control requires attentional resources. Attention is a limited resource that is also involved in working memory, for instance, memory encoding. As a consequence, theory suggests that gaze capture could impair the buildup of memory representations due to an attentional resource bottleneck. Here we developed an experimental design combining a serial memory task (verbal or spatial) and concurrent gaze capture by a distractor (of high or low similarity to the relevant item). The results cannot be explained by a general resource bottleneck. Specifically, we observed that capture by the low-similar distractor resulted in delayed and reduced saccade rates to relevant items in both memory tasks. However, while spatial memory performance decreased, verbal memory remained unaffected. In contrast, the high-similar distractor led to capture and memory loss for both tasks. Our results lend support to the view that gaze capture leads to activation of irrelevant representations in working memory that compete for selection at recall. Activation of irrelevant spatial representations distracts spatial recall, whereas activation of irrelevant verbal features impairs verbal memory performance.}, language = {en} } @article{KruegelEngbert2014, author = {Kruegel, Andre and Engbert, Ralf}, title = {A model of saccadic landing positions in reading under the influence of sensory noise}, series = {Visual cognition}, volume = {22}, journal = {Visual cognition}, number = {3-4}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1350-6285}, doi = {10.1080/13506285.2014.894166}, pages = {334 -- 353}, year = {2014}, language = {en} } @article{Engbert2012, author = {Engbert, Ralf}, title = {Computational modeling of collicular integration of perceptual responses and attention in microsaccades}, series = {The journal of neuroscience}, volume = {32}, journal = {The journal of neuroscience}, number = {23}, publisher = {Society for Neuroscience}, address = {Washington}, issn = {0270-6474}, doi = {10.1523/JNEUROSCI.0808-12.2012}, pages = {8035 -- 8039}, year = {2012}, abstract = {During visual fixation on a target object, our eyes are not motionless but generate slow fixational eye movements and microsaccades. Effects of visual attention have been observed in both microsaccade rates and spatial directions. Experimental results, however, range from early (<200 ms) to late (>600 ms) effects combined with cue-congruent as well as cue-incongruent microsaccade directions. On the basis of well characterized neural circuitry in superior colliculus, we construct a dynamical model of neural activation that is modulated by perceptual input and visual attention. Our results show that additive integration of low-level perceptual responses and visual attention can explain microsaccade rate and direction effects across a range of visual cueing tasks.
These findings suggest that the patterns of microsaccade direction observed in experiments are compatible with a single dynamical mechanism. The basic principles of the model are highly relevant to the general problem of integration of low-level perception and top-down selective attention.}, language = {en} } @article{TrukenbrodEngbert2012, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Eye movements in a sequential scanning task - evidence for distributed processing}, series = {Journal of vision}, volume = {12}, journal = {Journal of vision}, number = {1}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/12.1.5}, pages = {12}, year = {2012}, abstract = {Current models of eye movement control are derived from theories assuming serial processing of single items or from theories based on parallel processing of multiple items at a time. This issue has persisted because most investigated paradigms generated data compatible with both serial and parallel models. Here, we study eye movements in a sequential scanning task, where stimulus n indicates the position of the next stimulus n + 1. We investigate whether eye movements are controlled by sequential attention shifts when the task requires serial order of processing. Our measures of distributed processing in the form of parafoveal-on-foveal effects, long-range modulations of target selection, and skipping saccades provide evidence against models strictly based on serial attention shifts. We conclude that our results lend support to parallel processing as a strategy for eye movement control.}, language = {en} } @inproceedings{Engbert2012, author = {Engbert, Ralf}, title = {A computational model of microsaccadic responses to shifts of covert attention}, series = {Perception}, volume = {41}, booktitle = {Perception}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {55 -- 55}, year = {2012}, language = {en} } @inproceedings{TrukenbrodBarthelmeWichmannetal.2012, author = {Trukenbrod, Hans Arne and Barthelme, S. and Wichmann, Felix A. and Engbert, Ralf}, title = {Color does not guide eye movements: Evidence from a gaze-contingent experiment}, series = {PERCEPTION}, volume = {41}, booktitle = {PERCEPTION}, publisher = {PION LTD}, address = {LONDON}, issn = {0301-0066}, pages = {88 -- 88}, year = {2012}, language = {en} } @inproceedings{LaubrockCajarEngbert2012, author = {Laubrock, Jochen and Cajar, Anke and Engbert, Ralf}, title = {Peripheral spatial frequency processing affects timing and metrics of saccades}, series = {Perception}, volume = {41}, booktitle = {Perception}, publisher = {Sage Publ.}, address = {London}, issn = {0301-0066}, pages = {170 -- 170}, year = {2012}, language = {en} } @article{WidmannEngbertSchroeger2014, author = {Widmann, Andreas and Engbert, Ralf and Schroeger, Erich}, title = {Microsaccadic responses indicate fast categorization of sounds: A novel approach to study auditory cognition}, series = {The journal of neuroscience}, volume = {34}, journal = {The journal of neuroscience}, number = {33}, publisher = {Society for Neuroscience}, address = {Washington}, issn = {0270-6474}, doi = {10.1523/JNEUROSCI.1568-14.2014}, pages = {11152 -- 11158}, year = {2014}, abstract = {The mental chronometry of the human brain's processing of sounds to be categorized as targets has intensively been studied in cognitive neuroscience. 
According to current theories, a series of successive stages consisting of the registration, identification, and categorization of the sound has to be completed before participants are able to report the sound as a target by button press after approximately 300-500 ms. Here we use miniature eye movements as a tool to study the categorization of a sound as a target or nontarget, indicating that an initial categorization is present already after 80-100 ms. During visual fixation, the rate of microsaccades, the fastest components of miniature eye movements, is transiently modulated after auditory stimulation. In two experiments, we measured microsaccade rates in human participants in an auditory three-tone oddball paradigm (including rare nontarget sounds) and observed a difference in the microsaccade rates between targets and nontargets as early as 142 ms after sound onset. This finding was replicated in a third experiment with directed saccades measured in a paradigm in which tones had to be matched to score-like visual symbols. Considering the delays introduced by (motor) signal transmission and data analysis constraints, the brain must have differentiated target from nontarget sounds as fast as 80-100 ms after sound onset in both paradigms. We suggest that predictive information processing for expected input makes higher cognitive attributes, such as a sound's identity and category, available already during early sensory processing. The measurement of eye movements is thus a promising approach to investigate hearing.}, language = {en} } @article{CajarSchneeweissEngbertetal.2016, author = {Cajar, Anke and Schneeweiß, Paul and Engbert, Ralf and Laubrock, Jochen}, title = {Coupling of attention and saccades when viewing scenes with central and peripheral degradation}, series = {Journal of Vision}, volume = {16}, journal = {Journal of Vision}, number = {2}, publisher = {ARVO}, address = {Rockville, Md.}, issn = {1534-7362}, doi = {10.1167/16.2.8}, pages = {1 -- 19}, year = {2016}, abstract = {Degrading real-world scenes in the central or the peripheral visual field yields a characteristic pattern: Mean saccade amplitudes increase with central and decrease with peripheral degradation. Does this pattern reflect corresponding modulations of selective attention? If so, the observed saccade amplitude pattern should reflect more focused attention in the central region with peripheral degradation and an attentional bias toward the periphery with central degradation. To investigate this hypothesis, we measured the detectability of peripheral (Experiment 1) or central targets (Experiment 2) during scene viewing when low or high spatial frequencies were gaze-contingently filtered in the central or the peripheral visual field. Relative to an unfiltered control condition, peripheral filtering induced a decrease of the detection probability for peripheral but not for central targets (tunnel vision). Central filtering decreased the detectability of central but not of peripheral targets. Additional post hoc analyses are compatible with the interpretation that saccade amplitudes and direction are computed in partial independence. Our experimental results indicate that task-induced modulations of saccade amplitudes reflect attentional modulations.}, language = {en} } @article{SeeligRabeMalemShinitskietal.2020, author = {Seelig, Stefan A.
and Rabe, Maximilian Michael and Malem-Shinitski, Noa and Risse, Sarah and Reich, Sebastian and Engbert, Ralf}, title = {Bayesian parameter estimation for the SWIFT model of eye-movement control during reading}, series = {Journal of mathematical psychology}, volume = {95}, journal = {Journal of mathematical psychology}, publisher = {Elsevier}, address = {San Diego}, issn = {0022-2496}, doi = {10.1016/j.jmp.2019.102313}, pages = {32}, year = {2020}, abstract = {Process-oriented theories of cognition must be evaluated against time-ordered observations. Here we present a representative example for data assimilation of the SWIFT model, a dynamical model of the control of fixation positions and fixation durations during natural reading of single sentences. First, we develop and test an approximate likelihood function of the model, which is a combination of a spatial, pseudo-marginal likelihood and a temporal likelihood obtained by probability density approximation. Second, we implement a Bayesian approach to parameter inference using an adaptive Markov chain Monte Carlo procedure. Our results indicate that model parameters can be estimated reliably for individual subjects. We conclude that approximative Bayesian inference represents a considerable step forward for computational models of eye-movement control, where modeling of individual data on the basis of process-based dynamic models has not been possible so far.}, language = {en} } @article{EngbertMergenthalerSinnetal.2011, author = {Engbert, Ralf and Mergenthaler, Konstantin and Sinn, Petra and Pikovskij, Arkadij}, title = {An integrated model of fixational eye movements and microsaccades}, series = {Proceedings of the National Academy of Sciences of the United States of America}, volume = {108}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, number = {39}, publisher = {National Acad. of Sciences}, address = {Washington}, issn = {0027-8424}, doi = {10.1073/pnas.1102730108}, pages = {E765 -- E770}, year = {2011}, abstract = {When we fixate a stationary target, our eyes generate miniature (or fixational) eye movements involuntarily. These fixational eye movements are classified as slow components (physiological drift, tremor) and microsaccades, which represent rapid, small-amplitude movements. Here we propose an integrated mathematical model for the generation of slow fixational eye movements and microsaccades. The model is based on the concept of self-avoiding random walks in a potential, a process driven by a self-generated activation field. The self-avoiding walk generates persistent movements on a short timescale, whereas, on a longer timescale, the potential produces antipersistent motions that keep the eye close to an intended fixation position. We introduce microsaccades as fast movements triggered by critical activation values. As a consequence, both slow movements and microsaccades follow the same law of motion; i.e., movements are driven by the self-generated activation field. Thus, the model contributes a unified explanation of why it has been a long-standing problem to separate slow movements and microsaccades with respect to their motion-generating principles.
We conclude that the concept of a self-avoiding random walk captures fundamental properties of fixational eye movements and provides a coherent theoretical framework for two physiologically distinct movement types.}, language = {en} } @article{SchadNuthmannEngbert2012, author = {Schad, Daniel and Nuthmann, Antje and Engbert, Ralf}, title = {Your mind wanders weakly, your mind wanders deeply - objective measures reveal mindless reading at different levels}, series = {Cognition : international journal of cognitive science}, volume = {125}, journal = {Cognition : international journal of cognitive science}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0010-0277}, doi = {10.1016/j.cognition.2012.07.004}, pages = {179 -- 194}, year = {2012}, abstract = {When the mind wanders, attention turns away from the external environment and cognitive processing is decoupled from perceptual information. Mind wandering is usually treated as a dichotomy (dichotomy-hypothesis), and is often measured using self-reports. Here, we propose the levels of inattention hypothesis, which postulates attentional decoupling to graded degrees at different hierarchical levels of cognitive processing. To measure graded levels of attentional decoupling during reading we introduce the sustained attention to stimulus task (SAST), which is based on psychophysics of error detection. Under experimental conditions likely to induce mind wandering, we found that subjects were less likely to notice errors that required high-level processing for their detection as opposed to errors that only required low-level processing. Eye tracking revealed that before errors were overlooked, influences of high- and low-level linguistic variables on eye fixations were reduced in a graded fashion, indicating episodes of mindless reading at weak and deep levels. Individual fixation durations predicted overlooking of lexical errors 5 s before they occurred. Our findings support the levels of inattention hypothesis and suggest that different levels of mindless reading can be measured behaviorally in the SAST. Using eye tracking to detect mind wandering online represents a promising approach for the development of new techniques to study mind wandering and to ameliorate its negative consequences.}, language = {en} } @article{SchadNuthmannEngbert2010, author = {Schad, Daniel and Nuthmann, Antje and Engbert, Ralf}, title = {Eye movements during reading of randomly shuffled texts}, issn = {0042-6989}, year = {2010}, abstract = {In research on eye-movement control during reading, the importance of cognitive processes related to language comprehension relative to visuomotor aspects of saccade generation is the topic of an ongoing debate. Here we investigate various eye-movement measures during reading of randomly shuffled meaningless text as compared to normal meaningful text. To ensure processing of the material, readers were occasionally probed for words occurring in normal or shuffled text. For reading of shuffled text we observed longer fixation times, fewer word skippings, and more refixations than in normal reading. Shuffled-text reading further differed from normal reading in that low-frequency words were not overall fixated longer than high-frequency words. However, the frequency effect was present on long words, but was reversed for short words.
Also, consistent with our prior research we found distinct experimental effects of spatially distributed processing over several words at a time, indicating how lexical word processing affected eye movements. Based on analyses of statistical linear mixed-effect models we argue that the results are compatible with the hypothesis that the perceptual span is more strongly modulated by foveal load in the shuffled reading task than in normal reading. Results are discussed in the context of computational models of reading.}, language = {en} } @article{SchadEngbert2012, author = {Schad, Daniel and Engbert, Ralf}, title = {The zoom lens of attention simulating shuffled versus normal text reading using the SWIFT model}, series = {Visual cognition}, volume = {20}, journal = {Visual cognition}, number = {4-5}, publisher = {Wiley}, address = {Hove}, issn = {1350-6285}, doi = {10.1080/13506285.2012.670143}, pages = {391 -- 421}, year = {2012}, abstract = {Assumptions on the allocation of attention during reading are crucial for theoretical models of eye guidance. The zoom lens model of attention postulates that attentional deployment can vary from a sharp focus to a broad window. The model is closely related to the foveal load hypothesis, i.e., the assumption that the perceptual span is modulated by the difficulty of the fixated word. However, these important theoretical concepts for cognitive research have not been tested quantitatively in eye movement models. Here we show that the zoom lens model, implemented in the SWIFT model of saccade generation, captures many important patterns of eye movements. We compared the model's performance to experimental data from normal and shuffled text reading. Our results demonstrate that the zoom lens of attention might be an important concept for eye movement control in reading.}, language = {en} } @article{MalemShinitskiOpperReichetal.2020, author = {Malem-Shinitski, Noa and Opper, Manfred and Reich, Sebastian and Schwetlick, Lisa and Seelig, Stefan A. and Engbert, Ralf}, title = {A mathematical model of local and global attention in natural scene viewing}, series = {PLoS Computational Biology : a new community journal}, volume = {16}, journal = {PLoS Computational Biology : a new community journal}, number = {12}, publisher = {PLoS}, address = {San Fransisco}, issn = {1553-734X}, doi = {10.1371/journal.pcbi.1007880}, pages = {21}, year = {2020}, abstract = {Author summary
Switching between local and global attention is a general strategy in human information processing. We investigate whether this strategy is a viable approach to model sequences of fixations generated by a human observer in a free viewing task with natural scenes. Variants of the basic model are used to predict the experimental data based on Bayesian inference. Results indicate a high predictive power for both aggregated data and individual differences across observers. The combination of a novel model with state-of-the-art Bayesian methods lends support to our two-state model using local and global internal attention states for controlling eye movements.
Understanding the decision process underlying gaze control is an important question in cognitive neuroscience with applications in diverse fields ranging from psychology to computer vision. The decision for choosing an upcoming saccade target can be framed as a selection process between two states: Should the observer further inspect the information near the current gaze position (local attention) or continue with exploration of other patches of the given scene (global attention)? Here we propose and investigate a mathematical model motivated by switching between these two attentional states during scene viewing. The model is derived from a minimal set of assumptions that generates realistic eye movement behavior. We implemented a Bayesian approach for model parameter inference based on the model's likelihood function. In order to simplify the inference, we applied data augmentation methods that allowed the use of conjugate priors and the construction of an efficient Gibbs sampler. This approach turned out to be numerically efficient and permitted fitting interindividual differences in saccade statistics. Thus, the main contribution of our modeling approach is two-fold; first, we propose a new model for saccade generation in scene viewing. Second, we demonstrate the use of novel methods from Bayesian inference in the field of scan path modeling.}, language = {en} } @misc{BackhausEngbertRothkegeletal.2020, author = {Backhaus, Daniel and Engbert, Ralf and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne}, title = {Task-dependence in scene perception: Head unrestrained viewing using mobile eye-tracking}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {5}, issn = {1866-8364}, doi = {10.25932/publishup-51912}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-519124}, pages = {23}, year = {2020}, abstract = {Real-world scene perception is typically studied in the laboratory using static picture viewing with restrained head position. Consequently, the transfer of results obtained in this paradigm to real-world scenarios has been questioned. The advancement of mobile eye-trackers and the progress in image processing, however, permit a more natural experimental setup that, at the same time, maintains the high experimental control from the standard laboratory setting. We investigated eye movements while participants were standing in front of a projector screen and explored images under four specific task instructions. Eye movements were recorded with a mobile eye-tracking device and raw gaze data were transformed from head-centered into image-centered coordinates. We observed differences between tasks in temporal and spatial eye-movement parameters and found that the bias to fixate images near the center differed between tasks.
Our results demonstrate that current mobile eye-tracking technology and a highly controlled design support the study of fine-scaled task dependencies in an experimental setting that permits more natural viewing behavior than the static picture viewing paradigm.}, language = {en} } @article{BackhausEngbertRothkegeletal.2020, author = {Backhaus, Daniel and Engbert, Ralf and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne}, title = {Task-dependence in scene perception: Head unrestrained viewing using mobile eye-tracking}, series = {Journal of vision}, volume = {20}, journal = {Journal of vision}, number = {5}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/jov.20.5.3}, pages = {1 -- 21}, year = {2020}, abstract = {Real-world scene perception is typically studied in the laboratory using static picture viewing with restrained head position. Consequently, the transfer of results obtained in this paradigm to real-world scenarios has been questioned. The advancement of mobile eye-trackers and the progress in image processing, however, permit a more natural experimental setup that, at the same time, maintains the high experimental control from the standard laboratory setting. We investigated eye movements while participants were standing in front of a projector screen and explored images under four specific task instructions. Eye movements were recorded with a mobile eye-tracking device and raw gaze data were transformed from head-centered into image-centered coordinates. We observed differences between tasks in temporal and spatial eye-movement parameters and found that the bias to fixate images near the center differed between tasks. Our results demonstrate that current mobile eye-tracking technology and a highly controlled design support the study of fine-scaled task dependencies in an experimental setting that permits more natural viewing behavior than the static picture viewing paradigm.}, language = {en} } @article{RabeChandraKruegeletal.2021, author = {Rabe, Maximilian Michael and Chandra, Johan and Kr{\"u}gel, Andr{\´e} and Seelig, Stefan A. and Vasishth, Shravan and Engbert, Ralf}, title = {A bayesian approach to dynamical modeling of eye-movement control in reading of normal, mirrored, and scrambled texts}, series = {Psychological Review}, volume = {128}, journal = {Psychological Review}, number = {5}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000268}, pages = {803 -- 823}, year = {2021}, abstract = {In eye-movement control during reading, advanced process-oriented models have been developed to reproduce behavioral data. So far, model complexity and large numbers of model parameters prevented rigorous statistical inference and modeling of interindividual differences. Here we propose a Bayesian approach to both problems for one representative computational model of sentence reading (SWIFT; Engbert et al., Psychological Review, 112, 2005, pp. 777-813). We used experimental data from 36 subjects who read the text in a normal and one of four manipulated text layouts (e.g., mirrored and scrambled letters). The SWIFT model was fitted to subjects and experimental conditions individually to investigate between-subject variability.
Based on posterior distributions of model parameters, fixation probabilities and durations are reliably recovered from simulated data and reproduced for withheld empirical data, at both the experimental condition and subject levels. A subsequent statistical analysis of model parameters across reading conditions generates model-driven explanations for observable effects between conditions.}, language = {en} } @article{RisseHohensteinKliegletal.2014, author = {Risse, Sarah and Hohenstein, Sven and Kliegl, Reinhold and Engbert, Ralf}, title = {A theoretical analysis of the perceptual span based on SWIFT simulations of the n+2 boundary paradigm}, series = {Visual cognition}, volume = {22}, journal = {Visual cognition}, number = {3-4}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1350-6285}, doi = {10.1080/13506285.2014.881444}, pages = {283 -- 308}, year = {2014}, abstract = {Eye-movement experiments suggest that the perceptual span during reading is larger than the fixated word, asymmetric around the fixation position, and shrinks in size contingent on the foveal processing load. We used the SWIFT model of eye-movement control during reading to test these hypotheses and their implications under the assumption of graded parallel processing of all words inside the perceptual span. Specifically, we simulated reading in the boundary paradigm and analysed the effects of denying the model valid preview of a parafoveal word n + 2, two words to the right of fixation. Optimizing the model parameters for the valid preview condition only, we obtained span parameters with remarkably realistic estimates conforming to the empirical findings on the size of the perceptual span. More importantly, the SWIFT model generated parafoveal processing up to word n + 2 without fitting the model to such preview effects. Our results suggest that asymmetry and dynamic modulation are plausible properties of the perceptual span in a parallel word-processing model such as SWIFT. Moreover, they seem to guide the flexible distribution of processing resources during reading between foveal and parafoveal words.}, language = {en} } @misc{NuthmannVituEngbertetal.2015, author = {Nuthmann, Antje and Vitu, Fran{\c{c}}oise and Engbert, Ralf and Kliegl, Reinhold}, title = {No evidence for a saccadic range effect for visually guided and memory-guided saccades in simple saccade-targeting tasks}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {506}, issn = {1866-8364}, doi = {10.25932/publishup-41163}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411639}, pages = {27}, year = {2015}, abstract = {Saccades to single targets in peripheral vision are typically characterized by an undershoot bias. Putting this bias to a test, Kapoula [1] used a paradigm in which observers were presented with two different sets of target eccentricities that partially overlapped each other. Her data were suggestive of a saccadic range effect (SRE): There was a tendency for saccades to overshoot close targets and undershoot far targets in a block, suggesting that there was a response bias towards the center of eccentricities in a given block. Our Experiment 1 was a close replication of the original study by Kapoula [1]. In addition, we tested whether the SRE is sensitive to top-down requirements associated with the task, and we also varied the target presentation duration.
In Experiments 1 and 2, we expected to replicate the SRE for a visual discrimination task. The simple visual saccade-targeting task in Experiment 3, entailing minimal top-down influence, was expected to elicit a weaker SRE. Voluntary saccades to remembered target locations in Experiment 3 were expected to elicit the strongest SRE. Contrary to these predictions, we did not observe a SRE in any of the tasks. Our findings complement the results reported by Gillen et al. [2] who failed to find the effect in a saccade-targeting task with a very brief target presentation. Together, these results suggest that unlike arm movements, saccadic eye movements are not biased towards making saccades of a constant, optimal amplitude for the task.}, language = {en} } @article{LaubrockKlieglRolfsetal.2010, author = {Laubrock, Jochen and Kliegl, Reinhold and Rolfs, Martin and Engbert, Ralf}, title = {When do microsaccades follow spatial attention?}, issn = {1943-3921}, doi = {10.3758/APP.72.3.683}, year = {2010}, abstract = {Following up on an exchange about the relation between microsaccades and spatial attention (Horowitz, Fencsik, Fine, Yurgenson, \& Wolfe, 2007; Horowitz, Fine, Fencsik, Yurgenson, \& Wolfe, 2007; Laubrock, Engbert, Rolfs, \& Kliegl, 2007), we examine the effects of selection criteria and response modality. We show that for Posner cuing with saccadic responses, microsaccades go with attention in at least 75\% of cases (almost 90\% if probability matching is assumed) when they are first (or only) microsaccades in the cue target interval and when they occur between 200 and 400 msec after the cue. The relation between spatial attention and the direction of microsaccades drops to chance level for unselected microsaccades collected during manual-response conditions. Analyses of data from four cross-modal cuing experiments demonstrate an above-chance, intermediate link for visual cues, but no systematic relation for auditory cues. Thus, the link between spatial attention and direction of microsaccades depends on the experimental condition and time of occurrence, but it can be very strong.}, language = {en} } @article{LaubrockEngbertRolfsetal.2007, author = {Laubrock, Jochen and Engbert, Ralf and Rolfs, Martin and Kliegl, Reinhold}, title = {Microsaccades are an index of covert attention : commentary on Horowitz, Fine, Fencsik, Yurgenson and Wolfe}, issn = {0956-7976}, doi = {10.1111/j.1467-9280.2007.01904.x}, year = {2007}, language = {en} } @article{KlieglRolfsLaubrocketal.2009, author = {Kliegl, Reinhold and Rolfs, Martin and Laubrock, Jochen and Engbert, Ralf}, title = {Microsaccadic modulation of response times in spatial attention tasks}, issn = {0340-0727}, doi = {10.1007/s00426-008-0202-2}, year = {2009}, language = {en} } @article{NuthmannVituEngbertetal.2016, author = {Nuthmann, Antje and Vitu, Francoise and Engbert, Ralf and Kliegl, Reinhold}, title = {No Evidence for a Saccadic Range Effect for Visually Guided and Memory-Guided Saccades in Simple Saccade-Targeting Tasks}, series = {PLoS one}, volume = {11}, journal = {PLoS one}, publisher = {PLoS}, address = {San Fransisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0162449}, pages = {9935 -- 9943}, year = {2016}, abstract = {Saccades to single targets in peripheral vision are typically characterized by an undershoot bias. Putting this bias to a test, Kapoula [1] used a paradigm in which observers were presented with two different sets of target eccentricities that partially overlapped each other. 
Her data were suggestive of a saccadic range effect (SRE): There was a tendency for saccades to overshoot close targets and undershoot far targets in a block, suggesting that there was a response bias towards the center of eccentricities in a given block. Our Experiment 1 was a close replication of the original study by Kapoula [1]. In addition, we tested whether the SRE is sensitive to top-down requirements associated with the task, and we also varied the target presentation duration. In Experiments 1 and 2, we expected to replicate the SRE for a visual discrimination task. The simple visual saccade-targeting task in Experiment 3, entailing minimal top-down influence, was expected to elicit a weaker SRE. Voluntary saccades to remembered target locations in Experiment 3 were expected to elicit the strongest SRE. Contrary to these predictions, we did not observe a SRE in any of the tasks. Our findings complement the results reported by Gillen et al. [2] who failed to find the effect in a saccade-targeting task with a very brief target presentation. Together, these results suggest that unlike arm movements, saccadic eye movements are not biased towards making saccades of a constant, optimal amplitude for the task.}, language = {en} } @article{EngbertRabeKliegletal.2021, author = {Engbert, Ralf and Rabe, Maximilian Michael and Kliegl, Reinhold and Reich, Sebastian}, title = {Sequential data assimilation of the stochastic SEIR epidemic model for regional COVID-19 dynamics}, series = {Bulletin of mathematical biology : official journal of the Society for Mathematical Biology}, volume = {83}, journal = {Bulletin of mathematical biology : official journal of the Society for Mathematical Biology}, number = {1}, publisher = {Springer}, address = {New York}, issn = {0092-8240}, doi = {10.1007/s11538-020-00834-8}, pages = {16}, year = {2021}, abstract = {Newly emerging pandemics like COVID-19 call for predictive models to implement precisely tuned responses to limit their deep impact on society. Standard epidemic models provide a theoretically well-founded dynamical description of disease incidence. For COVID-19 with infectiousness peaking before and at symptom onset, the SEIR model explains the hidden build-up of exposed individuals which creates challenges for containment strategies. However, spatial heterogeneity raises questions about the adequacy of modeling epidemic outbreaks on the level of a whole country. Here, we show that by applying sequential data assimilation to the stochastic SEIR epidemic model, we can capture the dynamic behavior of outbreaks on a regional level. Regional modeling, with relatively low numbers of infected and demographic noise, accounts for both spatial heterogeneity and stochasticity. Based on adapted models, short-term predictions can be achieved. 
Thus, with the help of these sequential data assimilation methods, more realistic epidemic models are within reach.}, language = {en} } @article{OhlWohltatKliegletal.2016, author = {Ohl, Sven and Wohltat, Christian and Kliegl, Reinhold and Pollatos, Olga and Engbert, Ralf}, title = {Microsaccades Are Coupled to Heartbeat}, series = {The journal of neuroscience}, volume = {36}, journal = {The journal of neuroscience}, publisher = {Society for Neuroscience}, address = {Washington}, issn = {0270-6474}, doi = {10.1523/JNEUROSCI.2211-15.2016}, pages = {1237 -- 1241}, year = {2016}, abstract = {During visual fixation, the eye generates microsaccades and slower components of fixational eye movements that are part of the visual processing strategy in humans. Here, we show that ongoing heartbeat is coupled to temporal rate variations in the generation of microsaccades. Using coregistration of eye recording and ECG in humans, we tested the hypothesis that microsaccade onsets are coupled to the relative phase of the R-R intervals in heartbeats. We observed significantly more microsaccades during the early phase after the R peak in the ECG. This form of coupling between heartbeat and eye movements was substantiated by the additional finding of a coupling between heart phase and motion activity in slow fixational eye movements; i.e., retinal image slip caused by physiological drift. Our findings therefore demonstrate a coupling of the oculomotor system and ongoing heartbeat, which provides further evidence for bodily influences on visuomotor functioning.}, language = {en} } @article{SchwetlickBackhausEngbert2023, author = {Schwetlick, Lisa and Backhaus, Daniel and Engbert, Ralf}, title = {A dynamical scan-path model for task-dependence during scene viewing}, series = {Psychological review}, volume = {130}, journal = {Psychological review}, number = {3}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000379}, pages = {807 -- 840}, year = {2023}, abstract = {In real-world scene perception, human observers generate sequences of fixations to move image patches into the high-acuity center of the visual field. Models of visual attention developed over the last 25 years aim to predict two-dimensional probabilities of gaze positions for a given image via saliency maps. Recently, progress has been made on models for the generation of scan paths under the constraints of saliency as well as attentional and oculomotor restrictions. Experimental research demonstrated that task constraints can have a strong impact on viewing behavior. Here, we propose a scan-path model for both fixation positions and fixation durations, which includes influences of task instructions and interindividual differences. Based on an eye-movement experiment with four different task conditions, we estimated model parameters for each individual observer and task condition using a fully Bayesian dynamical modeling framework with a joint spatial-temporal likelihood approach and sequential estimation. Resulting parameter values demonstrate that model properties such as the attentional span are adjusted to task requirements.
Posterior predictive checks indicate that our dynamical model can reproduce task differences in scan-path statistics across individual observers.}, language = {en} } @misc{KlieglEngbert2007, author = {Kliegl, Reinhold and Engbert, Ralf}, title = {Conference Abstracts: 14th European Conference on Eye Movements ECEM2007}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56799}, year = {2007}, language = {de} } @misc{KlieglEngbert2003, author = {Kliegl, Reinhold and Engbert, Ralf}, title = {How tight is the link between lexical processing and saccade programs?}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56919}, year = {2003}, abstract = {We question the assumption of serial attention shifts and the assumption that saccade programs are initiated or canceled only after stage one of word identification. Evidence: (1) Fixation durations prior to skipped words are not consistently higher compared to those prior to nonskipped words. (2) Attentional modulation of microsaccade rate might occur after early visual processing. Saccades are probably triggered by attentional selection.}, language = {en} } @misc{EngbertKliegl2003, author = {Engbert, Ralf and Kliegl, Reinhold}, title = {The game of word skipping: Who are the competitors?}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56899}, year = {2003}, abstract = {Computational models such as E-Z Reader and SWIFT are ideal theoretical tools to test quantitatively our current understanding of eye-movement control in reading. Here we present a mathematical analysis of word skipping in the E-Z Reader model by semianalytic methods, to highlight the differences in current modeling approaches. In E-Z Reader, the word identification system must outperform the oculomotor system to induce word skipping. In SWIFT, there is competition among words to be selected as a saccade target. We conclude that it is the question of competitors in the "game" of word skipping that must be solved in eye movement research.}, language = {en} } @misc{RolfsEngbertKliegl2005, author = {Rolfs, Martin and Engbert, Ralf and Kliegl, Reinhold}, title = {Crossmodal coupling of oculomotor control and spatial attention in vision and audition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56804}, year = {2005}, abstract = {Fixational eye movements occur involuntarily during visual fixation of stationary scenes. The fastest components of these miniature eye movements are microsaccades, which can be observed about once per second. Recent studies demonstrated that microsaccades are linked to covert shifts of visual attention [e.g., Engbert \& Kliegl (2003), Vision Res 43:1035-1045]. Here, we generalized this finding in two ways. First, we used peripheral cues, rather than the centrally presented cues of earlier studies. Second, we spatially cued attention in vision and audition to visual and auditory targets. An analysis of microsaccade responses revealed an equivalent impact of visual and auditory cues on microsaccade-rate signature (i.e., an initial inhibition followed by an overshoot and a final return to the pre-cue baseline rate). With visual cues or visual targets, microsaccades were briefly aligned with cue direction and then opposite to cue direction during the overshoot epoch, probably as a result of an inhibition of an automatic saccade to the peripheral cue. With left auditory cues and auditory targets, microsaccades oriented in cue direction.
Thus, microsaccades can be used to study crossmodal integration of sensory information and to map the time course of saccade preparation during covert shifts of visual and auditory attention.}, language = {en} } @misc{KlieglRolfsLaubrocketal.2009, author = {Kliegl, Reinhold and Rolfs, Martin and Laubrock, Jochen and Engbert, Ralf}, title = {Microsaccadic Modulation of Response Times in Spatial Attention Tasks}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57098}, year = {2009}, abstract = {Covert shifts of attention are usually reflected in RT differences between responses to valid and invalid cues in the Posner spatial attention task. Such inferences about covert shifts of attention do not control for microsaccades in the cue-target interval. We analyzed the effects of microsaccade orientation on RTs in four conditions, crossing peripheral visual and auditory cues with peripheral visual and auditory discrimination targets. Reaction time was generally faster on trials without microsaccades in the cue-target interval. If microsaccades occurred, the target-location congruency of the last microsaccade in the cue-target interval interacted in a complex way with cue validity. For valid visual cues, irrespective of whether the discrimination target was visual or auditory, target-congruent microsaccades delayed RT. For invalid cues, target-incongruent microsaccades facilitated RTs for visual target discrimination, but delayed RT for auditory target discrimination. No reliable effects on RT were associated with auditory cues or with the first microsaccade in the cue-target interval. We discuss theoretical implications regarding the relation between spatial attention and oculomotor processes.}, language = {en} } @misc{EngbertNuthmannRichteretal.2005, author = {Engbert, Ralf and Nuthmann, Antje and Richter, Eike M. and Kliegl, Reinhold}, title = {SWIFT: A Dynamical Model of Saccade Generation during Reading}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57145}, year = {2005}, abstract = {Mathematical models have become an important tool for understanding the control of eye movements during reading. Main goals of the development of the SWIFT model (Engbert, Longtin, \& Kliegl, 2002) were to investigate the possibility of spatially distributed processing and to implement a general mechanism for all types of eye movements we observe in reading experiments. Here, we present an advanced version of SWIFT, which integrates properties of the oculomotor system and effects of word recognition to explain many of the experimental phenomena faced in reading research. We propose new procedures for the estimation of model parameters and for the test of the model's performance. A mathematical analysis of the dynamics of the SWIFT model is presented. Finally, within this framework, we present an analysis of the transition from parallel to serial processing.}, language = {en} } @misc{KlieglNuthmannEngbert2006, author = {Kliegl, Reinhold and Nuthmann, Antje and Engbert, Ralf}, title = {Tracking the Mind During Reading: The Influence of Past, Present, and Future Words on Fixation Durations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57225}, year = {2006}, abstract = {Reading requires the orchestration of visual, attentional, language-related, and oculomotor processing constraints.
This study replicates previous effects of frequency, predictability, and length of fixated words on fixation durations in natural reading and demonstrates new effects of these variables related to previous and next words. Results are based on fixation durations recorded from 222 persons, each reading 144 sentences. Such evidence for distributed processing of words across fixation durations challenges psycholinguistic immediacy-of-processing and eye-mind assumptions. Most of the time the mind processes several words in parallel at different perceptual and cognitive levels. Eye movements can help to unravel these processes.}, language = {en} } @article{RolfsKlieglEngbert2008, author = {Rolfs, Martin and Kliegl, Reinhold and Engbert, Ralf}, title = {Toward a model of microsaccade generation : the case of microsaccadic inhibition}, issn = {1534-7362}, doi = {10.1167/8.11.5}, year = {2008}, language = {en} } @article{EngbertLongtinKliegl2002, author = {Engbert, Ralf and Longtin, Andre and Kliegl, Reinhold}, title = {A dynamical model of saccade generation in reading based on spatially distributed lexical processing}, year = {2002}, language = {en} } @article{RichterEngbertKliegl2006, author = {Richter, Eike M. and Engbert, Ralf and Kliegl, Reinhold}, title = {Current advances in SWIFT}, issn = {1389-0417}, doi = {10.1016/j.cogsys.2005.07.003}, year = {2006}, abstract = {Models of eye movement control are very useful for gaining insights into the intricate connections of different cognitive and oculomotor subsystems involved in reading. The SWIFT model (Engbert, Longtin, \& Kliegl (2002). Vision Research, 42, 621 - 636) proposed a unified mechanism to account for all types of eye movement patterns that might be observed in reading behavior. The model is based on the notion of spatially distributed, or parallel, processing of words in a sentence. We present a refined version of SWIFT introducing a letter-based approach that proposes a processing gradient in the shape of a smooth function. We show that SWIFT extends its capabilities by accounting for distributions of landing positions.}, language = {en} } @article{RisseEngbertKliegl2008, author = {Risse, Sarah and Engbert, Ralf and Kliegl, Reinhold}, title = {Eye-movement control in reading : experimental and corpus-analysis challenges for a computational model}, isbn = {978-7-201-06107-8}, year = {2008}, language = {en} } @article{KlieglNuthmannEngbert2006, author = {Kliegl, Reinhold and Nuthmann, Antje and Engbert, Ralf}, title = {Tracking the mind during reading : the influence of past, present, and future words on fixation durations}, issn = {0096-3445}, doi = {10.1037/0096-3445.135.1.12}, year = {2006}, abstract = {Reading requires the orchestration of visual, attentional, language-related, and oculomotor processing constraints. This study replicates previous effects of frequency, predictability, and length of fixated words on fixation durations in natural reading and demonstrates new effects of these variables related to previous and next words. Results are based on fixation durations recorded from 222 persons, each reading 144 sentences. Such evidence for distributed processing of words across fixation durations challenges psycholinguistic immediacy-of-processing and eye-mind assumptions. Most of the time the mind processes several words in parallel at different perceptual and cognitive levels.
Eye movements can help to unravel these processes.}, language = {en} } @article{RolfsEngbertKliegl2005, author = {Rolfs, Martin and Engbert, Ralf and Kliegl, Reinhold}, title = {Crossmodal coupling of oculomotor control and spatial attention in vision and audition}, issn = {0014-4819}, year = {2005}, abstract = {Fixational eye movements occur involuntarily during visual fixation of stationary scenes. The fastest components of these miniature eye movements are microsaccades, which can be observed about once per second. Recent studies demonstrated that microsaccades are linked to covert shifts of visual attention. Here, we generalized this finding in two ways. First, we used peripheral cues, rather than the centrally presented cues of earlier studies. Second, we spatially cued attention in vision and audition to visual and auditory targets. An analysis of microsaccade responses revealed an equivalent impact of visual and auditory cues on microsaccade-rate signature (i.e., an initial inhibition followed by an overshoot and a final return to the pre-cue baseline rate). With visual cues or visual targets, microsaccades were briefly aligned with cue direction and then opposite to cue direction during the overshoot epoch, probably as a result of an inhibition of an automatic saccade to the peripheral cue. With left auditory cues and auditory targets, microsaccades oriented in cue direction. We argue that microsaccades can be used to study crossmodal integration of sensory information and to map the time course of saccade preparation during covert shifts of visual and auditory attention.}, language = {en} } @article{NuthmannEngbertKliegl2005, author = {Nuthmann, Antje and Engbert, Ralf and Kliegl, Reinhold}, title = {Mislocated fixations during reading and the inverted optimal viewing position effect}, year = {2005}, abstract = {Refixation probability during reading is lowest near the word center, suggestive of an optimal viewing position (OVP). Counter-intuitively, fixation durations are largest at the OVP, a result called the inverted optimal viewing position (IOVP) effect [Vitu, McConkie, Kerr, \& O'Regan (2001). Vision Research 41, 3513-3533]. Current models of eye-movement control in reading fail to reproduce the IOVP effect. We propose a simple mechanism for generating this effect based on error-correction of mislocated fixations due to saccadic errors. First, we propose an algorithm for estimating proportions of mislocated fixations from experimental data, yielding a higher probability for mislocated fixations near word boundaries. Second, we assume that mislocated fixations trigger an immediate start of a new saccade program, causing a decrease of associated durations. Thus, the IOVP effect could emerge as a result of a coupling between cognitive and oculomotor processes. (c) 2005 Elsevier Ltd. All rights reserved.}, language = {en} } @article{LiangMoshelZivotofskyetal.2005, author = {Liang, Jin-Rong and Moshel, Shay and Zivotofsky, Ari Z. and Caspi, Avi and Engbert, Ralf and Kliegl, Reinhold and Havlin, Shlomo}, title = {Scaling of horizontal and vertical fixational eye movements}, issn = {1063-651X}, year = {2005}, abstract = {Eye movements during fixation of a stationary target prevent the adaptation of the visual system to continuous illumination and inhibit fading of the image. These random, involuntary, small movements are restricted at long time scales so as to keep the target at the center of the field of view.
Here we use detrended fluctuation analysis in order to study the properties of fixational eye movements at different time scales. Results show different scaling behavior between horizontal and vertical movements. When the small ballistic movements, i.e., microsaccades, are removed, the scaling exponents in both planes become similar. Our findings suggest that microsaccades enhance the persistence at short time scales mostly in the horizontal component and much less in the vertical component. This difference may be due to the need for continuously moving the eyes in the horizontal plane, in order to match the stereoscopic image for different viewing distances.}, language = {en} }