@inproceedings{TrukenbrodBarthelmeWichmannetal.2012, author = {Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A. and Engbert, Ralf}, title = {Color does not guide eye movements: Evidence from a gaze-contingent experiment}, series = {Perception}, volume = {41}, booktitle = {Perception}, publisher = {Pion Ltd}, address = {London}, issn = {0301-0066}, pages = {88}, year = {2012}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2019, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Disentangling bottom-up versus top-down and low-level versus high-level influences on eye movements over time}, series = {Journal of Vision}, volume = {19}, journal = {Journal of Vision}, number = {3}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.3.1}, pages = {23}, year = {2019}, abstract = {Bottom-up and top-down as well as low-level and high-level factors influence where we fixate when viewing natural scenes. However, the importance of each of these factors and how they interact remains a matter of debate. Here, we disentangle these factors by analyzing their influence over time. For this purpose, we develop a saliency model that is based on the internal representation of a recent early spatial vision model to measure the low-level, bottom-up factor. To measure the influence of high-level, bottom-up features, we use a recent deep neural network-based saliency model. To account for top-down influences, we evaluate the models on two large data sets with different tasks: first, a memorization task and, second, a search task. Our results lend support to a separation of visual scene exploration into three phases: the first saccade, an initial guided exploration characterized by a gradual broadening of the fixation density, and a steady state that is reached after roughly 10 fixations. Saccade-target selection during the initial exploration and in the steady state is related to similar areas of interest, which are better predicted when including high-level features. In the search data set, fixation locations are determined predominantly by top-down processes. In contrast, the first fixation follows a different fixation density and contains a strong central fixation bias. Nonetheless, first fixations are guided strongly by image properties, and as early as 200 ms after image onset, fixations are better predicted by high-level information. We conclude that any low-level, bottom-up factors are mainly limited to the generation of the first saccade. All saccades are better explained when high-level features are considered, and later, this high-level, bottom-up control can be overruled by top-down influences.}, language = {en} } @article{TrukenbrodEngbert2012, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Eye movements in a sequential scanning task - evidence for distributed processing}, series = {Journal of Vision}, volume = {12}, journal = {Journal of Vision}, number = {1}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/12.1.5}, pages = {12}, year = {2012}, abstract = {Current models of eye movement control are derived from theories assuming serial processing of single items or from theories based on parallel processing of multiple items at a time.
This issue has persisted because most investigated paradigms generated data compatible with both serial and parallel models. Here, we study eye movements in a sequential scanning task, where stimulus n indicates the position of the next stimulus n + 1. We investigate whether eye movements are controlled by sequential attention shifts when the task requires serial order of processing. Our measures of distributed processing in the form of parafoveal-on-foveal effects, long-range modulations of target selection, and skipping saccades provide evidence against models strictly based on serial attention shifts. We conclude that our results lend support to parallel processing as a strategy for eye movement control.}, language = {en} } @article{TrukenbrodEngbert2014, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {ICAT: a computational model for the adaptive control of fixation durations}, series = {Psychonomic Bulletin \& Review}, volume = {21}, journal = {Psychonomic Bulletin \& Review}, number = {4}, publisher = {Springer}, address = {New York}, issn = {1069-9384}, doi = {10.3758/s13423-013-0575-0}, pages = {907 -- 934}, year = {2014}, abstract = {Eye movements depend on cognitive processes related to visual information processing. Much has been learned about the spatial selection of fixation locations, while the principles governing the temporal control (fixation durations) are less clear. Here, we review current theories for the control of fixation durations in tasks like visual search, scanning, scene perception, and reading, and propose a new model for the control of fixation durations. We distinguish two local principles from one global principle of control. First, an autonomous saccade timer initiates saccades after random time intervals (local-I). Second, foveal inhibition permits immediate prolongation of fixation durations by ongoing processing (local-II). Third, saccade timing is adaptive, so that the mean timer value depends on task requirements and fixation history (global). We demonstrate by numerical simulations that our model qualitatively reproduces patterns of mean fixation durations and fixation duration distributions observed in typical experiments. When combined with assumptions of saccade target selection and oculomotor control, the model accounts for both temporal and spatial aspects of eye movement control in two versions of a visual search task. We conclude that the model provides a promising framework for the control of fixation durations in saccadic tasks.}, language = {en} } @article{RothkegelTrukenbrodSchuettetal.2016, author = {Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Sch{\"u}tt, Heiko Herbert and Wichmann, Felix A. and Engbert, Ralf}, title = {Influence of initial fixation position in scene viewing}, series = {Vision Research}, volume = {129}, journal = {Vision Research}, publisher = {Elsevier}, address = {Oxford}, issn = {0042-6989}, doi = {10.1016/j.visres.2016.09.012}, pages = {33 -- 49}, year = {2016}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2017, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Reich, Sebastian and Wichmann, Felix A. and Engbert, Ralf},
title = {Likelihood-based parameter estimation and comparison of dynamical cognitive models}, series = {Psychological Review}, volume = {124}, journal = {Psychological Review}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000068}, pages = {505 -- 524}, year = {2017}, abstract = {Dynamical models of cognition play an increasingly important role in driving theoretical and experimental research in psychology. Therefore, parameter estimation, model analysis and comparison of dynamical models are of essential importance. In this article, we propose a maximum likelihood approach for model analysis in a fully dynamical framework that includes time-ordered experimental data. Our methods can be applied to dynamical models for the prediction of discrete behavior (e.g., movement onsets); in particular, we use a dynamical model of saccade generation in scene viewing as a case study for our approach. For this model, the likelihood function can be computed directly by numerical simulation, which enables more efficient parameter estimation including Bayesian inference to obtain reliable estimates and corresponding credible intervals. Using hierarchical models, inference is even possible for individual observers. Furthermore, our likelihood approach can be used to compare different models. In our example, the dynamical framework is shown to outperform nondynamical statistical models. Additionally, the likelihood-based evaluation differentiates model variants that produced indistinguishable predictions on hitherto used statistics. Our results indicate that the likelihood approach is a promising framework for dynamical cognitive models.}, language = {en} } @article{BarthelmeTrukenbrodEngbertetal.2013, author = {Barthelme, Simon and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Modeling fixation locations using spatial point processes}, series = {Journal of Vision}, volume = {13}, journal = {Journal of Vision}, number = {12}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/13.12.1}, pages = {34}, year = {2013}, abstract = {Whenever eye movements are measured, a central part of the analysis has to do with where subjects fixate and why they fixate where they do. To a first approximation, a set of fixations can be viewed as a set of points in space; this implies that fixations are spatial data and that the analysis of fixation locations can be beneficially thought of as a spatial statistics problem. We argue that thinking of fixation locations as arising from point processes is a very fruitful framework for eye-movement data, helping turn qualitative questions into quantitative ones. We provide a tutorial introduction to some of the main ideas of the field of spatial statistics, focusing especially on spatial Poisson processes. We show how point processes help relate image properties to fixation locations. In particular, we show how point processes naturally express the idea that image features' predictability for fixations may vary from one image to another.
We review other methods of analysis used in the literature, show how they relate to point process theory, and argue that thinking in terms of point processes substantially extends the range of analyses that can be performed and clarifies their interpretation.}, language = {en} } @article{SchwetlickRothkegelTrukenbrodetal.2020, author = {Schwetlick, Lisa and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Modeling the effects of perisaccadic attention on gaze statistics during scene viewing}, series = {Communications Biology}, volume = {3}, journal = {Communications Biology}, number = {1}, publisher = {Springer Nature}, address = {London}, issn = {2399-3642}, doi = {10.1038/s42003-020-01429-8}, pages = {11}, year = {2020}, abstract = {
How we perceive a visual scene depends critically on the selection of gaze positions. For this selection process, visual attention is known to play a key role in two ways. First, image features attract visual attention, a fact that is captured well by time-independent fixation models. Second, millisecond-level attentional dynamics around the time of a saccade drive our gaze from one position to the next. These two related research areas on attention are typically perceived as separate, both theoretically and experimentally. Here we link the two research areas by demonstrating that perisaccadic attentional dynamics improve predictions of scan path statistics. In a mathematical model, we integrated perisaccadic covert attention with dynamic scan path generation. Our model reproduces saccade amplitude distributions, angular statistics, intersaccadic turning angles, and their impact on fixation durations, as well as inter-individual differences, using Bayesian inference. Therefore, our results lend support to the relevance of perisaccadic attention to gaze statistics.}, language = {en} } @article{TrukenbrodEngbert2007, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Oculomotor control in a sequential search task}, series = {Vision Research}, journal = {Vision Research}, doi = {10.1016/j.visres.2007.05.010}, year = {2007}, abstract = {Using a serial search paradigm, we observed several effects of within-object fixation position on spatial and temporal control of eye movements: the preferred viewing location, the launch site effect, the optimal viewing position, and the inverted optimal viewing position of fixation duration. While these effects were first identified by eye-movement studies in reading, our approach permits an analysis of the functional relationships between the effects in a different paradigm. Our results demonstrate that the fixation position is an important predictor of the subsequent saccade by influencing both fixation duration and the selection of the next saccade target.}, language = {en} } @misc{SchuettRothkegelTrukenbrodetal.2019a, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Predicting fixation densities over time from early visual processing}, series = {Perception}, volume = {48}, journal = {Perception}, publisher = {Sage Publications}, address = {London}, issn = {0301-0066}, pages = {64 -- 65}, year = {2019}, abstract = {Bottom-up saliency is often cited as a factor driving the choice of fixation locations of human observers, based on the (partial) success of saliency models to predict fixation densities in free viewing. However, these observations are only weak evidence for a causal role of bottom-up saliency in natural viewing behaviour. To test bottom-up saliency more directly, we analyse the performance of a number of saliency models---including our own saliency model based on our recently published model of early visual processing (Sch{\"u}tt \& Wichmann, 2017, JoV)---as well as the theoretical limits for predictions over time. On free-viewing data, our model performs better than classical bottom-up saliency models, but worse than current deep-learning-based saliency models incorporating higher-level information like knowledge about objects. However, on search data, all saliency models perform worse than the optimal image-independent prediction. We observe that the fixation density in free viewing is not stationary over time, but changes over the course of a trial.
It starts with a pronounced central fixation bias on the first chosen fixation, which is nonetheless influenced by image content. Starting with the 2nd to 3rd fixation, the fixation density is already well predicted by later densities, but is more concentrated. From there, the fixation distribution broadens until it reaches a stationary distribution around the 10th fixation. Taken together, these observations argue against bottom-up saliency as a mechanistic explanation for eye movement control after the initial orienting reaction in the first one to two saccades, although we confirm the predictive value of early visual representations for fixation locations. The fixation distribution is, first, not well described by any stationary density; second, predicted better when object information is included; and, third, predicted badly by any saliency model in a search task.}, language = {en} }
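
The TrukenbrodEngbert2014 (ICAT) entry above summarizes an algorithmic model with three principles: an autonomous random saccade timer (local-I), immediate prolongation of fixations by foveal inhibition (local-II), and adaptation of the mean timer value to task requirements and fixation history (global). The Python sketch below is an editorial illustration of these three principles under simple invented assumptions (exponential timer intervals, a fixed prolongation, exponential-smoothing adaptation); it is not the authors' implementation, and every parameter value is hypothetical. BibTeX ignores text between entries, so this block does not affect processing of the file.

import random

def simulate_fixation_durations(n_fixations=1000,
                                mean_timer=250.0,    # current mean timer value (ms)
                                target_mean=300.0,   # task-demanded mean duration (ms)
                                adapt_rate=0.05,     # global: adaptation strength
                                p_difficult=0.3,     # chance of demanding foveal input
                                prolongation=120.0,  # local-II: added time (ms)
                                seed=1):
    rng = random.Random(seed)
    durations = []
    for _ in range(n_fixations):
        # local-I: the autonomous timer triggers the next saccade after a
        # random interval drawn around the current mean timer value
        duration = rng.expovariate(1.0 / mean_timer)
        # local-II: ongoing foveal processing can immediately prolong the
        # current fixation (foveal inhibition)
        if rng.random() < p_difficult:
            duration += prolongation
        durations.append(duration)
        # global: the mean timer value slowly adapts toward the duration
        # demanded by the task, reflecting fixation history
        mean_timer += adapt_rate * (target_mean - mean_timer)
    return durations

durations = simulate_fixation_durations()
print(f"mean simulated fixation duration: {sum(durations) / len(durations):.1f} ms")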
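
The BarthelmeTrukenbrodEngbertetal.2013 entry above advocates treating fixation locations as draws from a spatial point process, in particular an inhomogeneous Poisson process whose intensity reflects image properties. As a minimal sketch of that idea, the following code samples such a process on the unit square by Lewis-Shedler thinning; the Gaussian "central bias" intensity function is an invented stand-in for an image-derived intensity, not anything from the paper.

import math
import random

def intensity(x, y):
    # Invented intensity: a Gaussian bump at the image center, mimicking the
    # central fixation bias; a real analysis would derive this from an image.
    return 200.0 * math.exp(-((x - 0.5) ** 2 + (y - 0.5) ** 2) / 0.05)

def sample_inhomogeneous_poisson(lam, lam_max, seed=1):
    """Sample one realization on the unit square by thinning a homogeneous
    Poisson process with rate lam_max (lam_max must bound lam everywhere)."""
    rng = random.Random(seed)
    # Draw the homogeneous point count N ~ Poisson(lam_max) by inversion,
    # with a safety cap on the summation loop.
    n, p, acc, u = 0, math.exp(-lam_max), math.exp(-lam_max), rng.random()
    while acc < u and n < 10 * int(lam_max):
        n += 1
        p *= lam_max / n
        acc += p
    points = []
    for _ in range(n):
        x, y = rng.random(), rng.random()
        # Keep each candidate with probability lam(x, y) / lam_max (thinning).
        if rng.random() < lam(x, y) / lam_max:
            points.append((x, y))
    return points

fixations = sample_inhomogeneous_poisson(intensity, lam_max=200.0)
print(f"sampled {len(fixations)} fixation locations")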
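
Finally, the SchuettRothkegelTrukenbrodetal.2017 entry above notes that, for dynamical models predicting discrete behavior, the likelihood can be computed directly by numerical simulation and then used for parameter estimation. The sketch below illustrates that general simulation-based likelihood idea on a toy two-accumulator race model: outcome probabilities are estimated as relative frequencies over many simulations, and a grid search maximizes the resulting log-likelihood. The model, data, and parameter grid are invented for illustration and are unrelated to the paper's saccade model.

import math
import random

def simulate_choice(rate, rng):
    # Toy dynamical model: two noisy accumulators race to a threshold;
    # report which one wins (a stand-in for a discrete behavioral outcome).
    a = b = 0.0
    while a < 1.0 and b < 1.0:
        a += rate * 0.01 + rng.gauss(0.0, 0.05)
        b += (1.0 - rate) * 0.01 + rng.gauss(0.0, 0.05)
    return 0 if a >= 1.0 else 1

def log_likelihood(rate, data, n_sim=2000, seed=7):
    # Estimate outcome probabilities by relative frequency across many
    # simulations, then evaluate the log-likelihood of the observed data.
    rng = random.Random(seed)
    sims = [simulate_choice(rate, rng) for _ in range(n_sim)]
    p0 = min(max(sims.count(0) / n_sim, 1e-6), 1.0 - 1e-6)  # avoid log(0)
    return sum(math.log(p0 if d == 0 else 1.0 - p0) for d in data)

observed = [0] * 70 + [1] * 30           # invented data set: 70/30 outcomes
grid = [i / 10 for i in range(1, 10)]    # candidate parameter values
best = max(grid, key=lambda r: log_likelihood(r, observed))
print(f"maximum-likelihood parameter on the grid: {best}")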