@article{MeixnerNixonLaubrock2022,
  author = {Meixner, Johannes M. and Nixon, Jessie S. and Laubrock, Jochen},
  title = {The perceptual span is dynamically adjusted in response to foveal load by beginning readers},
  series = {Journal of experimental psychology : general},
  volume = {151},
  journal = {Journal of experimental psychology : general},
  number = {6},
  publisher = {American Psychological Association},
  address = {Washington},
  issn = {0096-3445},
  doi = {10.1037/xge0001140},
  pages = {1219 -- 1232},
  year = {2022},
  abstract = {The perceptual span describes the size of the visual field from which information is obtained during a fixation in reading. Its size depends on characteristics of writing system and reader, but, according to the foveal load hypothesis, it is also adjusted dynamically as a function of lexical processing difficulty. Using the moving window paradigm to manipulate the amount of preview, here we directly test whether the perceptual span shrinks as foveal word difficulty increases. We computed the momentary size of the span from word-based eye-movement measures as a function of foveal word frequency, allowing us to separately describe the perceptual span for information affecting spatial saccade targeting and temporal saccade execution. First fixation duration and gaze duration on the upcoming (parafoveal) word N + 1 were significantly shorter when the current (foveal) word N was more frequent. We show that the word frequency effect is modulated by window size. Fixation durations on word N + 1 decreased with high-frequency words N, but only for large windows, that is, when sufficient parafoveal preview was available. This provides strong support for the foveal load hypothesis. To investigate the development of the foveal load effect, we analyzed data from three waves of a longitudinal study on the perceptual span with German children in Grades 1 to 6. Perceptual span adjustment emerged early in development at around second grade and remained stable in later grades.
We conclude that the local modulation of the perceptual span indicates a general cognitive process, perhaps an attentional gradient with rapid readjustment.},
  language = {en}
}

@misc{CajarEngbertLaubrock2022,
  author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen},
  title = {Potsdam Eye-Movement Corpus for Scene Memorization and Search With Color and Spatial-Frequency Filtering},
  series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address = {Potsdam},
  issn = {1866-8364},
  doi = {10.25932/publishup-56318},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563184},
  pages = {1 -- 7},
  year = {2022},
  language = {en}
}

@article{CajarEngbertLaubrock2022,
  author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen},
  title = {Potsdam Eye-Movement Corpus for Scene Memorization and Search With Color and Spatial-Frequency Filtering},
  series = {Frontiers in psychology / Frontiers Research Foundation},
  volume = {13},
  journal = {Frontiers in psychology / Frontiers Research Foundation},
  publisher = {Frontiers Research Foundation},
  address = {Lausanne, Switzerland},
  issn = {1664-1078},
  doi = {10.3389/fpsyg.2022.850482},
  pages = {1 -- 7},
  year = {2022},
  language = {en}
}

@article{TsengLaubrockBateman2021,
  author = {Tseng, Chiao-I and Laubrock, Jochen and Bateman, John A.},
  title = {The impact of multimodal cohesion on attention and interpretation in film},
  series = {Discourse, context \& media},
  volume = {44},
  journal = {Discourse, context \& media},
  publisher = {Elsevier},
  address = {Amsterdam},
  issn = {2211-6958},
  doi = {10.1016/j.dcm.2021.100544},
  pages = {15},
  year = {2021},
  abstract = {This article presents results of an exploratory investigation combining multimodal cohesion analysis and eye-tracking studies. Multimodal cohesion, as a tool of multimodal discourse analysis, goes beyond linguistic cohesive mechanisms to enable the construction of cross-modal discourse structures that systematically relate technical details of audio, visual and verbal modalities. Patterns of multimodal cohesion from these discourse structures were used to design eye-tracking experiments and questionnaires in order to empirically investigate how auditory and visual cohesive cues affect attention and comprehension. We argue that the cross-modal structures of cohesion revealed by our method offer a strong methodology for addressing empirical questions concerning viewers' comprehension of narrative settings and the comparative salience of visual, verbal and audio cues. Analyses are presented of the beginning of Hitchcock's The Birds (1963) and a sketch from Monty Python filmed in 1971.
Our approach balances the narrative-based issue of how narrative elements in film guide meaning interpretation and the recipient-based question of where a film viewer's attention is directed during viewing and how this affects comprehension.},
  language = {en}
}

@misc{FelisattiLaubrockShakietal.2020,
  author = {Felisatti, Arianna and Laubrock, Jochen and Shaki, Samuel and Fischer, Martin H.},
  title = {Commentary},
  series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  number = {620},
  issn = {1866-8364},
  doi = {10.25932/publishup-46041},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-460413},
  pages = {6},
  year = {2020},
  language = {en}
}

@article{FelisattiLaubrockShakietal.2020,
  author = {Felisatti, Arianna and Laubrock, Jochen and Shaki, Samuel and Fischer, Martin H.},
  title = {Commentary},
  series = {Frontiers in Human Neuroscience},
  volume = {14},
  journal = {Frontiers in Human Neuroscience},
  publisher = {Frontiers Research Foundation},
  address = {Lausanne},
  issn = {1662-5161},
  doi = {10.3389/fnhum.2020.00099},
  pages = {4},
  year = {2020},
  language = {en}
}

@article{PanLaubrockYan2020,
  author = {Pan, Jinger and Laubrock, Jochen and Yan, Ming},
  title = {Phonological consistency effects in Chinese sentence reading},
  series = {Scientific studies of reading},
  volume = {25},
  journal = {Scientific studies of reading},
  number = {4},
  publisher = {Routledge, Taylor \& Francis Group},
  address = {Abingdon},
  issn = {1088-8438},
  doi = {10.1080/10888438.2020.1789146},
  pages = {335 -- 350},
  year = {2020},
  abstract = {In two eye-tracking experiments, we investigated the processing of information about phonological consistency of Chinese phonograms during sentence reading. In Experiment 1, we adopted the error disruption paradigm in silent reading and found significant effects of phonological consistency and homophony in the foveal vision, but only in a late processing stage. Adding oral reading to Experiment 2, we found both effects shifted to earlier indices of parafoveal processing. Specifically, low-consistency characters led to a better homophonic foveal recovery effect in Experiment 1 and stronger homophonic preview benefits in Experiment 2. These findings suggest that phonological consistency information can be obtained during sentence reading, and compared to the low-consistency previews the high-consistency previews are processed faster, which leads to greater interference to the recognition of target characters.},
  language = {en}
}

@article{FelisattiLaubrockShakietal.2020,
  author = {Felisatti, Arianna and Laubrock, Jochen and Shaki, Samuel and Fischer, Martin H.},
  title = {A biological foundation for spatial-numerical associations},
  series = {Annals of the New York Academy of Sciences},
  volume = {1477},
  journal = {Annals of the New York Academy of Sciences},
  number = {1},
  publisher = {Wiley},
  address = {Hoboken},
  issn = {0077-8923},
  doi = {10.1111/nyas.14418},
  pages = {44 -- 53},
  year = {2020},
  abstract = {"Left" and "right" coordinates control our spatial behavior and even influence abstract thoughts. For number concepts, horizontal spatial-numerical associations (SNAs) have been widely documented: we associate few with left and many with right. Importantly, increments are universally coded on the right side even in preverbal humans and nonhuman animals, thus questioning the fundamental role of directional cultural habits, such as reading or finger counting.
Here, we propose a biological, nonnumerical mechanism for the origin of SNAs on the basis of asymmetric tuning of animal brains for different spatial frequencies (SFs). The resulting selective visual processing predicts both universal SNAs and their context-dependence. We support our proposal by analyzing the stimuli used to document SNAs in newborns for their SF content. As predicted, the SFs contained in visual patterns with few versus many elements preferentially engage right versus left brain hemispheres, respectively, thus predicting left- versus rightward behavioral biases. Our "brain's asymmetric frequency tuning" hypothesis explains the perceptual origin of horizontal SNAs for nonsymbolic visual numerosities and might be extensible to the auditory domain.},
  language = {en}
}

@article{FelisattiAagtenMurphyLaubrocketal.2020,
  author = {Felisatti, Arianna and Aagten-Murphy, David and Laubrock, Jochen and Shaki, Samuel and Fischer, Martin H.},
  title = {The brain's asymmetric frequency tuning},
  series = {Symmetry / Molecular Diversity Preservation International (MDPI)},
  volume = {12},
  journal = {Symmetry / Molecular Diversity Preservation International (MDPI)},
  number = {12},
  publisher = {MDPI},
  address = {Basel},
  issn = {2073-8994},
  doi = {10.3390/sym12122083},
  pages = {25},
  year = {2020},
  abstract = {To construct a coherent multi-modal percept, vertebrate brains extract low-level features (such as spatial and temporal frequencies) from incoming sensory signals. However, because frequency processing is lateralized with the right hemisphere favouring low frequencies while the left favours higher frequencies, this introduces asymmetries between the hemispheres. Here, we describe how this lateralization shapes the development of several cognitive domains, ranging from visuo-spatial and numerical cognition to language, social cognition, and even aesthetic appreciation, and leads to the emergence of asymmetries in behaviour. We discuss the neuropsychological and educational implications of these emergent asymmetries and suggest future research approaches.},
  language = {en}
}

@article{CajarEngbertLaubrock2020,
  author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen},
  title = {How spatial frequencies and color drive object search in real-world scenes},
  series = {Journal of vision},
  volume = {20},
  journal = {Journal of vision},
  number = {7},
  publisher = {Association for Research in Vision and Ophthalmology},
  address = {Rockville},
  issn = {1534-7362},
  doi = {10.1167/jov.20.7.8},
  pages = {16},
  year = {2020},
  abstract = {When studying how people search for objects in scenes, the inhomogeneity of the visual field is often ignored. Due to physiological limitations, peripheral vision is blurred and mainly uses coarse-grained information (i.e., low spatial frequencies) for selecting saccade targets, whereas high-acuity central vision uses fine-grained information (i.e., high spatial frequencies) for analysis of details. Here we investigated how spatial frequencies and color affect object search in real-world scenes. Using gaze-contingent filters, we attenuated high or low frequencies in central or peripheral vision while viewers searched color or grayscale scenes. Results showed that peripheral filters and central high-pass filters hardly affected search accuracy, whereas accuracy dropped drastically with central low-pass filters. Peripheral filtering increased the time to localize the target by decreasing saccade amplitudes and increasing number and duration of fixations.
The use of coarse-grained information in the periphery was limited to color scenes. Central filtering increased the time to verify target identity instead, especially with low-pass filters. We conclude that peripheral vision is critical for object localization and central vision is critical for object identification. Visual guidance during peripheral object localization is dominated by low-frequency color information, whereas high-frequency information, relatively independent of color, is most important for object identification in central vision.},
  language = {en}
}