@article{PanYanLaubrock2020,
  author    = {Pan, Jinger and Yan, Ming and Laubrock, Jochen},
  title     = {Semantic preview benefit and cost},
  series    = {Cognition : international journal of cognitive science},
  volume    = {205},
  journal   = {Cognition : international journal of cognitive science},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0010-0277},
  doi       = {10.1016/j.cognition.2020.104452},
  pages     = {10},
  year      = {2020},
  abstract  = {How is semantic information in the mental lexicon accessed and selected during reading? Readers process information of both the foveal and parafoveal words. Recent eye-tracking studies hint at bi-phasic lexical activation dynamics, demonstrating that semantically related parafoveal previews can either facilitate, or interfere with lexical processing of target words in comparison to unrelated previews, with the size and direction of the effect depending on exposure time to parafoveal previews. However, evidence to date is only correlational, because exposure time was determined by participants' pre-target fixation durations. Here we experimentally controlled parafoveal preview exposure duration using a combination of the gaze-contingent fast-priming and boundary paradigms. We manipulated preview duration and examined the time course of parafoveal semantic activation during the oral reading of Chinese sentences in three experiments. Semantic previews led to faster lexical access of target words than unrelated previews only when the previews were presented briefly (80 ms in Experiments 1 and 3). Longer exposure time (100 ms or 150 ms) eliminated semantic preview effects, and full preview without duration limit resulted in preview cost, i.e., a reversal of preview benefit. Our results indicate that high-level semantic information can be obtained from parafoveal words and the size and direction of the parafoveal semantic effect depends on the level of lexical activation.},
  language  = {en},
}

@article{FelisattiAagtenMurphyLaubrocketal.2020,
  author    = {Felisatti, Arianna and Aagten-Murphy, David and Laubrock, Jochen and Shaki, Samuel and Fischer, Martin H.},
  title     = {The brain's asymmetric frequency tuning},
  series    = {Symmetry / Molecular Diversity Preservation International (MDPI)},
  volume    = {12},
  journal   = {Symmetry / Molecular Diversity Preservation International (MDPI)},
  number    = {12},
  publisher = {MDPI},
  address   = {Basel},
  issn      = {2073-8994},
  doi       = {10.3390/sym12122083},
  pages     = {25},
  year      = {2020},
  abstract  = {To construct a coherent multi-modal percept, vertebrate brains extract low-level features (such as spatial and temporal frequencies) from incoming sensory signals. However, because frequency processing is lateralized with the right hemisphere favouring low frequencies while the left favours higher frequencies, this introduces asymmetries between the hemispheres. Here, we describe how this lateralization shapes the development of several cognitive domains, ranging from visuo-spatial and numerical cognition to language, social cognition, and even aesthetic appreciation, and leads to the emergence of asymmetries in behaviour. We discuss the neuropsychological and educational implications of these emergent asymmetries and suggest future research approaches.},
  language  = {en},
}

@article{TsengLaubrockBateman2021,
  author    = {Tseng, Chiao-I and Laubrock, Jochen and Bateman, John A.},
  title     = {The impact of multimodal cohesion on attention and interpretation in film},
  series    = {Discourse, context \& media},
  volume    = {44},
  journal   = {Discourse, context \& media},
  publisher = {Amsterdam [u.a.]},
  address   = {Oxford},
  issn      = {2211-6958},
  doi       = {10.1016/j.dcm.2021.100544},
  pages     = {15},
  year      = {2021},
  abstract  = {This article presents results of an exploratory investigation combining multimodal cohesion analysis and eye-tracking studies. Multimodal cohesion, as a tool of multimodal discourse analysis, goes beyond linguistic cohesive mechanisms to enable the construction of cross-modal discourse structures that systematically relate technical details of audio, visual and verbal modalities. Patterns of multimodal cohesion from these discourse structures were used to design eye-tracking experiments and questionnaires in order to empirically investigate how auditory and visual cohesive cues affect attention and comprehension. We argue that the cross-modal structures of cohesion revealed by our method offer a strong methodology for addressing empirical questions concerning viewers' comprehension of narrative settings and the comparative salience of visual, verbal and audio cues. Analyses are presented of the beginning of Hitchcock's The Birds (1963) and a sketch from Monty Python filmed in 1971. Our approach balances the narrative-based issue of how narrative elements in film guide meaning interpretation and the recipient-based question of where a film viewer's attention is directed during viewing and how this affects comprehension.},
  language  = {en},
}

@article{PanLaubrockYan2020,
  author    = {Pan, Jinger and Laubrock, Jochen and Yan, Ming},
  title     = {Phonological consistency effects in {Chinese} sentence reading},
  series    = {Scientific Studies of Reading},
  volume    = {25},
  journal   = {Scientific Studies of Reading},
  number    = {4},
  publisher = {Routledge, Taylor \& Francis Group},
  address   = {Abingdon},
  issn      = {1088-8438},
  doi       = {10.1080/10888438.2020.1789146},
  pages     = {335--350},
  year      = {2020},
  abstract  = {In two eye-tracking experiments, we investigated the processing of information about phonological consistency of Chinese phonograms during sentence reading. In Experiment 1, we adopted the error disruption paradigm in silent reading and found significant effects of phonological consistency and homophony in the foveal vision, but only in a late processing stage. Adding oral reading to Experiment 2, we found both effects shifted to earlier indices of parafoveal processing. Specifically, low-consistency characters led to a better homophonic foveal recovery effect in Experiment 1 and stronger homophonic preview benefits in Experiment 2. These findings suggest that phonological consistency information can be obtained during sentence reading, and compared to the low-consistency previews the high-consistency previews are processed faster, which leads to greater interference to the recognition of target characters.},
  language  = {en},
}

@article{FelisattiLaubrockShakietal.2020,
  author    = {Felisatti, Arianna and Laubrock, Jochen and Shaki, Samuel and Fischer, Martin H.},
  title     = {A biological foundation for spatial-numerical associations},
  series    = {Annals of the New York Academy of Sciences},
  volume    = {1477},
  journal   = {Annals of the New York Academy of Sciences},
  number    = {1},
  publisher = {Wiley},
  address   = {Hoboken},
  issn      = {0077-8923},
  doi       = {10.1111/nyas.14418},
  pages     = {44--53},
  year      = {2020},
  abstract  = {"Left" and "right" coordinates control our spatial behavior and even influence abstract thoughts. For number concepts, horizontal spatial-numerical associations (SNAs) have been widely documented: we associate few with left and many with right. Importantly, increments are universally coded on the right side even in preverbal humans and nonhuman animals, thus questioning the fundamental role of directional cultural habits, such as reading or finger counting. Here, we propose a biological, nonnumerical mechanism for the origin of SNAs on the basis of asymmetric tuning of animal brains for different spatial frequencies (SFs). The resulting selective visual processing predicts both universal SNAs and their context-dependence. We support our proposal by analyzing the stimuli used to document SNAs in newborns for their SF content. As predicted, the SFs contained in visual patterns with few versus many elements preferentially engage right versus left brain hemispheres, respectively, thus predicting left-versus rightward behavioral biases. Our "brain's asymmetric frequency tuning" hypothesis explains the perceptual origin of horizontal SNAs for nonsymbolic visual numerosities and might be extensible to the auditory domain.},
  language  = {en},
}

@article{CajarEngbertLaubrock2020,
  author    = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen},
  title     = {How spatial frequencies and color drive object search in real-world scenes},
  series    = {Journal of vision},
  volume    = {20},
  journal   = {Journal of vision},
  number    = {7},
  publisher = {Association for Research in Vision and Ophthalmology},
  address   = {Rockville},
  issn      = {1534-7362},
  doi       = {10.1167/jov.20.7.8},
  pages     = {16},
  year      = {2020},
  abstract  = {When studying how people search for objects in scenes, the inhomogeneity of the visual field is often ignored. Due to physiological limitations, peripheral vision is blurred and mainly uses coarse-grained information (i.e., low spatial frequencies) for selecting saccade targets, whereas high-acuity central vision uses fine-grained information (i.e., high spatial frequencies) for analysis of details. Here we investigated how spatial frequencies and color affect object search in real-world scenes. Using gaze-contingent filters, we attenuated high or low frequencies in central or peripheral vision while viewers searched color or grayscale scenes. Results showed that peripheral filters and central high-pass filters hardly affected search accuracy, whereas accuracy dropped drastically with central low-pass filters. Peripheral filtering increased the time to localize the target by decreasing saccade amplitudes and increasing number and duration of fixations. The use of coarse-grained information in the periphery was limited to color scenes. Central filtering increased the time to verify target identity instead, especially with low-pass filters. We conclude that peripheral vision is critical for object localization and central vision is critical for object identification. Visual guidance during peripheral object localization is dominated by low-frequency color information, whereas high-frequency information, relatively independent of color, is most important for object identification in central vision.},
  language  = {en},
}

@article{MeixnerNixonLaubrock2022,
  author    = {Meixner, Johannes M. and Nixon, Jessie S. and Laubrock, Jochen},
  title     = {The perceptual span is dynamically adjusted in response to foveal load by beginning readers},
  series    = {Journal of experimental psychology : general},
  volume    = {151},
  journal   = {Journal of experimental psychology : general},
  number    = {6},
  publisher = {American Psychological Association},
  address   = {Washington},
  issn      = {0096-3445},
  doi       = {10.1037/xge0001140},
  pages     = {1219--1232},
  year      = {2022},
  abstract  = {The perceptual span describes the size of the visual field from which information is obtained during a fixation in reading. Its size depends on characteristics of writing system and reader, but-according to the foveal load hypothesis-it is also adjusted dynamically as a function of lexical processing difficulty. Using the moving window paradigm to manipulate the amount of preview, here we directly test whether the perceptual span shrinks as foveal word difficulty increases. We computed the momentary size of the span from word-based eye-movement measures as a function of foveal word frequency, allowing us to separately describe the perceptual span for information affecting spatial saccade targeting and temporal saccade execution. First fixation duration and gaze duration on the upcoming (parafoveal) word N + 1 were significantly shorter when the current (foveal) word N was more frequent. We show that the word frequency effect is modulated by window size. Fixation durations on word N + 1 decreased with high-frequency words N, but only for large windows, that is, when sufficient parafoveal preview was available. This provides strong support for the foveal load hypothesis. To investigate the development of the foveal load effect, we analyzed data from three waves of a longitudinal study on the perceptual span with German children in Grades 1 to 6. Perceptual span adjustment emerged early in development at around second grade and remained stable in later grades. We conclude that the local modulation of the perceptual span indicates a general cognitive process, perhaps an attentional gradient with rapid readjustment.},
  language  = {en},
}

@article{PanYanLaubrock2017,
  author    = {Pan, Jinger and Yan, Ming and Laubrock, Jochen},
  title     = {Perceptual span in oral reading},
  series    = {Scientific Studies of Reading},
  volume    = {21},
  journal   = {Scientific Studies of Reading},
  publisher = {Routledge, Taylor \& Francis Group},
  address   = {Abingdon},
  issn      = {1088-8438},
  doi       = {10.1080/10888438.2017.1283694},
  pages     = {254--263},
  year      = {2017},
  abstract  = {The present study explores the perceptual span, that is, the physical extent of the area from which useful visual information is obtained during a single fixation, during oral reading of Chinese sentences. Characters outside a window of legible text were replaced by visually similar characters. Results show that the influence of window size on the perceptual span was consistent across different fixation and oculomotor measures. To maintain normal reading behavior when reading aloud, it was necessary to have information provided from three characters to the right of the fixation. Together with findings from previous research, our findings suggest that the physical size of the perceptual span is smaller when reading aloud than in silent reading. This is in agreement with previous studies in English, suggesting that the mechanisms causing the reduced span in oral reading have a common base that generalizes across languages and writing systems.},
  language  = {en},
}

@article{LaubrockEngbertCajar2017,
  author    = {Laubrock, Jochen and Engbert, Ralf and Cajar, Anke},
  title     = {Gaze-contingent manipulation of the {FVF} demonstrates the importance of fixation duration for explaining search behavior},
  series    = {Behavioral and brain sciences : an international journal of current research and theory with open peer commentary},
  volume    = {40},
  journal   = {Behavioral and brain sciences : an international journal of current research and theory with open peer commentary},
  publisher = {Cambridge Univ. Press},
  address   = {New York},
  issn      = {0140-525X},
  doi       = {10.1017/S0140525X16000145},
  pages     = {31--32},
  year      = {2017},
  abstract  = {Hulleman \& Olivers' (H\&O's) model introduces variation of the functional visual field (FVF) for explaining visual search behavior. Our research shows how the FVF can be studied using gaze-contingent displays and how FVF variation can be implemented in models of gaze control. Contrary to H\&O, we believe that fixation duration is an important factor when modeling visual search behavior.},
  language  = {en},
}

@misc{CajarEngbertLaubrock2022,
  author    = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen},
  title     = {{Potsdam} Eye-Movement Corpus for Scene Memorization and Search With Color and Spatial-Frequency Filtering},
  series    = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  journal   = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address   = {Potsdam},
  issn      = {1866-8364},
  doi       = {10.25932/publishup-56318},
  url       = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-563184},
  pages     = {1--7},
  year      = {2022},
  language  = {en},
}