@article{SchuettRothkegelTrukenbrodetal.2019,
  author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.},
  title = {Disentangling bottom-up versus top-down and low-level versus high-level influences on eye movements over time},
  series = {Journal of vision},
  volume = {19},
  journal = {Journal of vision},
  number = {3},
  publisher = {Association for Research in Vision and Ophthalmology},
  address = {Rockville},
  issn = {1534-7362},
  doi = {10.1167/19.3.1},
  pages = {23},
  year = {2019},
  abstract = {Bottom-up and top-down as well as low-level and high-level factors influence where we fixate when viewing natural scenes. However, the importance of each of these factors and how they interact remains a matter of debate. Here, we disentangle these factors by analyzing their influence over time. For this purpose, we develop a saliency model that is based on the internal representation of a recent early spatial vision model to measure the low-level, bottom-up factor. To measure the influence of high-level, bottom-up features, we use a recent deep neural network-based saliency model. To account for top-down influences, we evaluate the models on two large data sets with different tasks: first, a memorization task and, second, a search task. Our results lend support to a separation of visual scene exploration into three phases: the first saccade, an initial guided exploration characterized by a gradual broadening of the fixation density, and a steady state that is reached after roughly 10 fixations. Saccade-target selection during the initial exploration and in the steady state is related to similar areas of interest, which are better predicted when including high-level features. In the search data set, fixation locations are determined predominantly by top-down processes. In contrast, the first fixation follows a different fixation density and contains a strong central fixation bias. Nonetheless, first fixations are guided strongly by image properties, and as early as 200 ms after image onset, fixations are better predicted by high-level information. We conclude that any low-level, bottom-up factors are mainly limited to the generation of the first saccade. All saccades are better explained when high-level features are considered, and later, this high-level, bottom-up control can be overruled by top-down influences.},
  language = {en}
}

@article{RothkegelSchuettTrukenbrodetal.2019,
  author = {Rothkegel, Lars Oliver Martin and Sch{\"u}tt, Heiko Herbert and Trukenbrod, Hans Arne and Wichmann, Felix A. and Engbert, Ralf},
  title = {Searchers adjust their eye-movement dynamics to target characteristics in natural scenes},
  series = {Scientific reports},
  volume = {9},
  journal = {Scientific reports},
  publisher = {Nature Publ. Group},
  address = {London},
  issn = {2045-2322},
  doi = {10.1038/s41598-018-37548-w},
  pages = {12},
  year = {2019},
  abstract = {When searching a target in a natural scene, it has been shown that both the target's visual properties and similarity to the background influence whether and how fast humans are able to find it. So far, it was unclear whether searchers adjust the dynamics of their eye movements (e.g., fixation durations, saccade amplitudes) to the target they search for. In our experiment, participants searched natural scenes for six artificial targets with different spatial frequency content throughout eight consecutive sessions. High-spatial frequency targets led to smaller saccade amplitudes and shorter fixation durations than low-spatial frequency targets if target identity was known. If a saccade was programmed in the same direction as the previous saccade, fixation durations and successive saccade amplitudes were not influenced by target type. Visual saliency and empirical fixation density at the endpoints of saccades which maintain direction were comparatively low, indicating that these saccades were less selective. Our results suggest that searchers adjust their eye movement dynamics to the search target efficiently, since previous research has shown that low-spatial frequencies are visible farther into the periphery than high-spatial frequencies. We interpret the saccade direction specificity of our effects as an underlying separation into a default scanning mechanism and a selective, target-dependent mechanism.},
  language = {en}
}

@article{TrukenbrodBarthelmeWichmannetal.2019,
  author = {Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A. and Engbert, Ralf},
  title = {Spatial statistics for gaze patterns in scene viewing},
  series = {Journal of vision},
  volume = {19},
  journal = {Journal of vision},
  number = {6},
  publisher = {Association for Research in Vision and Ophthalmology},
  address = {Rockville},
  issn = {1534-7362},
  doi = {10.1167/19.6.5},
  pages = {1--19},
  year = {2019},
  abstract = {Scene viewing is used to study attentional selection in complex but still controlled environments. One of the main observations on eye movements during scene viewing is the inhomogeneous distribution of fixation locations: While some parts of an image are fixated by almost all observers and are inspected repeatedly by the same observer, other image parts remain unfixated by observers even after long exploration intervals. Here, we apply spatial point process methods to investigate the relationship between pairs of fixations. More precisely, we use the pair correlation function, a powerful statistical tool, to evaluate dependencies between fixation locations along individual scanpaths. We demonstrate that aggregation of fixation locations within 4 degrees is stronger than expected from chance. Furthermore, the pair correlation function reveals stronger aggregation of fixations when the same image is presented a second time. We use simulations of a dynamical model to show that a narrower spatial attentional span may explain differences in pair correlations between the first and the second inspection of the same image.},
  language = {en}
}