@article{RisseKliegl2012, author = {Risse, Sarah and Kliegl, Reinhold}, title = {Evidence for delayed Parafoveal-on-Foveal effects from word n+2 in reading}, series = {Journal of experimental psychology : Human perception and performance}, volume = {38}, journal = {Journal of experimental psychology : Human perception and performance}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0096-1523}, doi = {10.1037/a0027735}, pages = {1026 -- 1042}, year = {2012}, abstract = {During reading information is acquired from word(s) beyond the word that is currently looked at. It is still an open question whether such parafoveal information can influence the current viewing of a word, and if so, whether such parafoveal-on-foveal effects are attributable to distributed processing or to mislocated fixations which occur when the eyes are directed at a parafoveal word but land on another word instead. In two display-change experiments, we orthogonally manipulated the preview and target difficulty of word n+2 to investigate the role of mislocated fixations on the previous word n+1. When the eyes left word n, an easy or difficult word n+2 preview was replaced by an easy or difficult n+2 target word. In Experiment 1, n+2 processing difficulty was manipulated by means of word frequency (i.e., easy high-frequency vs. difficult low-frequency word n+2). In Experiment 2, we varied the visual familiarity of word n+2 (i.e., easy lower-case vs. difficult alternating-case writing). Fixations on the short word n+1, which were likely to be mislocated, were nevertheless not influenced by the difficulty of the adjacent word n+2, the hypothesized target of the mislocated fixation. Instead word n+1 was influenced by the preview difficulty of word n+2, representing a delayed parafoveal-on-foveal effect. 
The results challenge the mislocated-fixation hypothesis as an explanation of parafoveal-on-foveal effects and provide new insight into the complex spatial and temporal effect structure of processing inside the perceptual span during reading.}, language = {en} } @article{TrukenbrodEngbert2012, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Eye movements in a sequential scanning task - evidence for distributed processing}, series = {Journal of vision}, volume = {12}, journal = {Journal of vision}, number = {1}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/12.1.5}, pages = {12}, year = {2012}, abstract = {Current models of eye movement control are derived from theories assuming serial processing of single items or from theories based on parallel processing of multiple items at a time. This issue has persisted because most investigated paradigms generated data compatible with both serial and parallel models. Here, we study eye movements in a sequential scanning task, where stimulus n indicates the position of the next stimulus n + 1. We investigate whether eye movements are controlled by sequential attention shifts when the task requires serial order of processing. Our measures of distributed processing in the form of parafoveal-on-foveal effects, long-range modulations of target selection, and skipping saccades provide evidence against models strictly based on serial attention shifts. 
We conclude that our results lend support to parallel processing as a strategy for eye movement control.}, language = {en} } @article{BarthelmeTrukenbrodEngbertetal.2013, author = {Barthelme, Simon and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Modeling fixation locations using spatial point processes}, series = {Journal of vision}, volume = {13}, journal = {Journal of vision}, number = {12}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/13.12.1}, pages = {34}, year = {2013}, abstract = {Whenever eye movements are measured, a central part of the analysis has to do with where subjects fixate and why they fixated where they fixated. To a first approximation, a set of fixations can be viewed as a set of points in space; this implies that fixations are spatial data and that the analysis of fixation locations can be beneficially thought of as a spatial statistics problem. We argue that thinking of fixation locations as arising from point processes is a very fruitful framework for eye-movement data, helping turn qualitative questions into quantitative ones. We provide a tutorial introduction to some of the main ideas of the field of spatial statistics, focusing especially on spatial Poisson processes. We show how point processes help relate image properties to fixation locations. In particular we show how point processes naturally express the idea that image features' predictability for fixations may vary from one image to another. 
We review other methods of analysis used in the literature, show how they relate to point process theory, and argue that thinking in terms of point processes substantially extends the range of analyses that can be performed and clarify their interpretation.}, language = {en} } @article{LuoYanZhou2013, author = {Luo, Yingyi and Yan, Ming and Zhou, Xiaolin}, title = {Prosodic boundaries delay the processing of upcoming lexical information during silent sentence reading}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {39}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {3}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0029182}, pages = {915 -- 930}, year = {2013}, abstract = {Prosodic boundaries can be used to guide syntactic parsing in both spoken and written sentence comprehension, but it is unknown whether the processing of prosodic boundaries affects the processing of upcoming lexical information. In 3 eye-tracking experiments, participants read silently sentences that allow for 2 possible syntactic interpretations when there is no comma or other cue specifying which interpretation should be taken. In Experiments 1 and 2, participants heard a low-pass filtered auditory version of the sentence, which provided a prosodic boundary cue prior to each sentence. In Experiment 1, we found that the boundary cue helped syntactic disambiguation after the cue and led to longer fixation durations on regions right before the cue than on identical regions without prosodic boundary information. In Experiments 2 and 3, we used a gaze-contingent display-change paradigm to manipulate the parafoveal visibility of the first constituent character of the target word after the disambiguating position. 
Results of Experiment 2 showed that previewing the first character significantly reduced the reading time of the target word, but this preview benefit was greatly reduced when the prosodic boundary cue was introduced at this position. In Experiment 3, instead of the acoustic cues, a visually presented comma was inserted at the disambiguating position in each sentence. Results showed that the comma effect on lexical processing was essentially the same as the effect of prosodic boundary cue. These findings demonstrate that processing a prosodic boundary could impair the processing of parafoveal information during sentence reading.}, language = {en} } @article{HohensteinKliegl2014, author = {Hohenstein, Sven and Kliegl, Reinhold}, title = {Semantic preview benefit during reading}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {40}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {1}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0033670}, pages = {166 -- 190}, year = {2014}, abstract = {Word features in parafoveal vision influence eye movements during reading. The question of whether readers extract semantic information from parafoveal words was studied in 3 experiments by using a gaze-contingent display change technique. Subjects read German sentences containing 1 of several preview words that were replaced by a target word during the saccade to the preview (boundary paradigm). In the 1st experiment the preview word was semantically related or unrelated to the target. Fixation durations on the target were shorter for semantically related than unrelated previews, consistent with a semantic preview benefit. 
In the 2nd experiment, half the sentences were presented following the rules of German spelling (i.e., previews and targets were printed with an initial capital letter), and the other half were presented completely in lowercase. A semantic preview benefit was obtained under both conditions. In the 3rd experiment, we introduced 2 further preview conditions, an identical word and a pronounceable nonword, while also manipulating the text contrast. Whereas the contrast had negligible effects, fixation durations on the target were reliably different for all 4 types of preview. Semantic preview benefits were greater for pretarget fixations closer to the boundary (large preview space) and, although not as consistently, for long pretarget fixation durations (long preview time). The results constrain theoretical proposals about eye movement control in reading.}, language = {en} } @article{RisseKliegl2014, author = {Risse, Sarah and Kliegl, Reinhold}, title = {Dissociating preview validity and preview difficulty in parafoveal processing of word n+1 during reading}, series = {Journal of experimental psychology : Human perception and performance}, volume = {40}, journal = {Journal of experimental psychology : Human perception and performance}, number = {2}, publisher = {American Psychological Association}, address = {Washington}, issn = {0096-1523}, doi = {10.1037/a0034997}, pages = {653 -- 668}, year = {2014}, abstract = {Many studies have shown that previewing the next word n + 1 during reading leads to substantial processing benefit (e.g., shorter word viewing times) when this word is eventually fixated. However, evidence of such preprocessing in fixations on the preceding word n when in fact the information about the preview is acquired is far less consistent. A recent study suggested that such effects may be delayed into fixations on the next word n + 1 (Risse \& Kliegl, 2012). 
To investigate the time course of parafoveal information-acquisition on the control of eye movements during reading, we conducted 2 gaze-contingent display-change experiments and orthogonally manipulated the processing difficulty (i.e., word frequency) of an n + 1 preview word and its validity relative to the target word. Preview difficulty did not affect fixation durations on the pretarget word n but on the target word n + 1. In fact, the delayed preview-difficulty effect was almost of the same size as the preview benefit associated with the n + 1 preview validity. Based on additional results from quantile-regression analyses on the time course of the 2 preview effects, we discuss consequences as to the integration of foveal and parafoveal information and potential implications for computational models of eye guidance in reading.}, language = {en} } @article{FernandezShalomKliegletal.2014, author = {Fernandez, Gerardo and Shalom, Diego E. and Kliegl, Reinhold and Sigman, Mariano}, title = {Eye movements during reading proverbs and regular sentences: the incoming word predictability effect}, series = {Language, cognition and neuroscience}, volume = {29}, journal = {Language, cognition and neuroscience}, number = {3}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {2327-3798}, doi = {10.1080/01690965.2012.760745}, pages = {260 -- 273}, year = {2014}, language = {en} } @article{CunningsPattersonFelser2015, author = {Cunnings, Ian and Patterson, Clare and Felser, Claudia}, title = {Structural constraints on pronoun binding and coreference: evidence from eye movements during reading}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00840}, pages = {18}, year = {2015}, abstract = {A number of recent studies have investigated how syntactic and non-syntactic constraints combine to cue memory 
retrieval during anaphora resolution. In this paper we investigate how syntactic constraints and gender congruence interact to guide memory retrieval during the resolution of subject pronouns. Subject pronouns are always technically ambiguous, and the application of syntactic constraints on their interpretation depends on properties of the antecedent that is to be retrieved. While pronouns can freely corefer with non-quantified referential antecedents, linking a pronoun to a quantified antecedent is only possible in certain syntactic configurations via variable binding. We report the results from a judgment task and three online reading comprehension experiments investigating pronoun resolution with quantified and non-quantified antecedents. Results from both the judgment task and participants' eye movements during reading indicate that comprehenders freely allow pronouns to corefer with non-quantified antecedents, but that retrieval of quantified antecedents is restricted to specific syntactic environments. We interpret our findings as indicating that syntactic constraints constitute highly weighted cues to memory retrieval during anaphora resolution.}, language = {en} } @article{LaubrockKliegl2015, author = {Laubrock, Jochen and Kliegl, Reinhold}, title = {The eye-voice span during reading aloud}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.01437}, pages = {19}, year = {2015}, abstract = {Although eye movements during reading are modulated by cognitive processing demands, they also reflect visual sampling of the input, and possibly preparation of output for speech or the inner voice. By simultaneously recording eye movements and the voice during reading aloud, we obtained an output measure that constrains the length of time spent on cognitive processing. 
Here we investigate the dynamics of the eye-voice span (EVS), the distance between eye and voice. We show that the EVS is regulated immediately during fixation of a word by either increasing fixation duration or programming a regressive eye movement against the reading direction. EVS size at the beginning of a fixation was positively correlated with the likelihood of regressions and refixations. Regression probability was further increased if the EVS was still large at the end of a fixation: if adjustment of fixation duration did not sufficiently reduce the EVS during a fixation, then a regression rather than a refixation followed with high probability. We further show that the EVS can help understand cognitive influences on fixation duration during reading: in mixed model analyses, the EVS was a stronger predictor of fixation durations than either word frequency or word length. The EVS modulated the influence of several other predictors on single fixation durations (SFDs). For example, word-N frequency effects were larger with a large EVS, especially when word N-1 frequency was low. Finally, a comparison of SFDs during oral and silent reading showed that reading is governed by similar principles in both reading modes, although EVS maintenance and articulatory processing also cause some differences. In summary, the EVS is regulated by adjusting fixation duration and/or by programming a regressive eye movement when the EVS gets too large. 
Overall, the EVS appears to be directly related to updating of the working memory buffer during reading.}, language = {en} } @article{FelserPattersonCunnings2015, author = {Felser, Claudia and Patterson, Clare and Cunnings, Ian}, title = {Structural constraints on pronoun binding and coreference: Evidence from eye movements during reading}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, number = {840}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00840}, year = {2015}, abstract = {A number of recent studies have investigated how syntactic and non-syntactic constraints combine to cue memory retrieval during anaphora resolution. In this paper we investigate how syntactic constraints and gender congruence interact to guide memory retrieval during the resolution of subject pronouns. Subject pronouns are always technically ambiguous, and the application of syntactic constraints on their interpretation depends on properties of the antecedent that is to be retrieved. While pronouns can freely corefer with non-quantified referential antecedents, linking a pronoun to a quantified antecedent is only possible in certain syntactic configurations via variable binding. We report the results from a judgment task and three online reading comprehension experiments investigating pronoun resolution with quantified and non-quantified antecedents. Results from both the judgment task and participants' eye movements during reading indicate that comprehenders freely allow pronouns to corefer with non-quantified antecedents, but that retrieval of quantified antecedents is restricted to specific syntactic environments. We interpret our findings as indicating that syntactic constraints constitute highly weighted cues to memory retrieval during anaphora resolution.}, language = {en} } @article{HartmannMastFischer2015, author = {Hartmann, Matthias and Mast, Fred W. 
and Fischer, Martin H.}, title = {Spatial biases during mental arithmetic: evidence from eye movements on a blank screen}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00012}, pages = {8}, year = {2015}, abstract = {While the influence of spatial-numerical associations in number categorization tasks has been well established, their role in mental arithmetic is less clear. It has been hypothesized that mental addition leads to rightward and upward shifts of spatial attention (along the "mental number line"), whereas subtraction leads to leftward and downward shifts. We addressed this hypothesis by analyzing spontaneous eye movements during mental arithmetic. Participants solved verbally presented arithmetic problems (e.g., 2 + 7, 8-3) aloud while looking at a blank screen. We found that eye movements reflected spatial biases in the ongoing mental operation: Gaze position shifted more upward when participants solved addition compared to subtraction problems, and the horizontal gaze position was partly determined by the magnitude of the operands. Interestingly, the difference between addition and subtraction trials was driven by the operator (plus vs. minus) but was not influenced by the computational process. 
Thus, our results do not support the idea of a mental movement toward the solution during arithmetic but indicate a semantic association between operation and space.}, language = {en} } @article{YanZhouShuetal.2015, author = {Yan, Ming and Zhou, Wei and Shu, Hua and Kliegl, Reinhold}, title = {Perceptual span depends on font size during the reading of {Chinese} sentences}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {41}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {1}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0038097}, pages = {209 -- 219}, year = {2015}, abstract = {The present study explored the perceptual span (i.e., the physical extent of an area from which useful visual information is extracted during a single fixation) during the reading of Chinese sentences in 2 experiments. In Experiment 1, we tested whether the rightward span can go beyond 3 characters when visually similar masks were used. Results showed that Chinese readers needed at least 4 characters to the right of fixation to maintain a normal reading behavior when visually similar masks were used and when characters were displayed in small fonts, indicating that the span is dynamically influenced by masking materials. In Experiments 2 and 3, we asked whether the perceptual span varies as a function of font size in spaced (German) and unspaced (Chinese) scripts. Results clearly suggest perceptual span depends on font size in Chinese, but we failed to find such evidence for German. We propose that the perceptual span in Chinese is flexible; it is strongly constrained by its language-specific properties such as high information density and lack of word spacing. 
Implications for saccade-target selection during the reading of Chinese sentences are discussed.}, language = {en} } @article{EngbertTrukenbrodBarthelmeetal.2015, author = {Engbert, Ralf and Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A.}, title = {Spatial statistics and attentional dynamics in scene viewing}, series = {Journal of vision}, volume = {15}, journal = {Journal of vision}, number = {1}, publisher = {Association for Research in Vision and Opthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/15.1.14}, pages = {17}, year = {2015}, abstract = {In humans and in foveated animals visual acuity is highly concentrated at the center of gaze, so that choosing where to look next is an important example of online, rapid decision-making. Computational neuroscientists have developed biologically-inspired models of visual attention, termed saliency maps, which successfully predict where people fixate on average. Using point process theory for spatial statistics, we show that scanpaths contain, however, important statistical structure, such as spatial clustering on top of distributions of gaze positions. Here, we develop a dynamical model of saccadic selection that accurately predicts the distribution of gaze positions as well as spatial clustering along individual scanpaths. Our model relies on activation dynamics via spatially-limited (foveated) access to saliency information, and, second, a leaky memory process controlling the re-inspection of target regions. 
This theoretical framework models a form of context-dependent decision-making, linking neural dynamics of attention to behavioral gaze data.}, language = {en} } @article{LaubrockKliegl2015a, author = {Laubrock, Jochen and Kliegl, Reinhold}, title = {The eye-voice span during reading aloud}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, number = {1432}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.01432}, year = {2015}, abstract = {Although eye movements during reading are modulated by cognitive processing demands, they also reflect visual sampling of the input, and possibly preparation of output for speech or the inner voice. By simultaneously recording eye movements and the voice during reading aloud, we obtained an output measure that constrains the length of time spent on cognitive processing. Here we investigate the dynamics of the eye-voice span (EVS), the distance between eye and voice. We show that the EVS is regulated immediately during fixation of a word by either increasing fixation duration or programming a regressive eye movement against the reading direction. EVS size at the beginning of a fixation was positively correlated with the likelihood of regressions and refixations. Regression probability was further increased if the EVS was still large at the end of a fixation: if adjustment of fixation duration did not sufficiently reduce the EVS during a fixation, then a regression rather than a refixation followed with high probability. We further show that the EVS can help understand cognitive influences on fixation duration during reading: in mixed model analyses, the EVS was a stronger predictor of fixation durations than either word frequency or word length. The EVS modulated the influence of several other predictors on single fixation durations (SFDs). 
For example, word-N frequency effects were larger with a large EVS, especially when word N-1 frequency was low. Finally, a comparison of SFDs during oral and silent reading showed that reading is governed by similar principles in both reading modes, although EVS maintenance and articulatory processing also cause some differences. In summary, the EVS is regulated by adjusting fixation duration and/or by programming a regressive eye movement when the EVS gets too large. Overall, the EVS appears to be directly related to updating of the working memory buffer during reading.}, language = {en} } @article{OhlWohltatKliegletal.2016, author = {Ohl, Sven and Wohltat, Christian and Kliegl, Reinhold and Pollatos, Olga and Engbert, Ralf}, title = {Microsaccades Are Coupled to Heartbeat}, series = {The journal of neuroscience}, volume = {36}, journal = {The journal of neuroscience}, publisher = {Society for Neuroscience}, address = {Washington}, issn = {0270-6474}, doi = {10.1523/JNEUROSCI.2211-15.2016}, pages = {1237 -- 1241}, year = {2016}, abstract = {During visual fixation, the eye generates microsaccades and slower components of fixational eye movements that are part of the visual processing strategy in humans. Here, we show that ongoing heartbeat is coupled to temporal rate variations in the generation of microsaccades. Using coregistration of eye recording and ECG in humans, we tested the hypothesis that microsaccade onsets are coupled to the relative phase of the R-R intervals in heartbeats. We observed significantly more microsaccades during the early phase after the R peak in the ECG. This form of coupling between heartbeat and eye movements was substantiated by the additional finding of a coupling between heart phase and motion activity in slow fixational eye movements; i.e., retinal image slip caused by physiological drift. 
Our findings therefore demonstrate a coupling of the oculomotor system and ongoing heartbeat, which provides further evidence for bodily influences on visuomotor functioning.}, language = {en} } @article{RothkegelTrukenbrodSchuettetal.2017, author = {Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Sch{\"u}tt, Heiko Herbert and Wichmann, Felix A. and Engbert, Ralf}, title = {Temporal evolution of the central fixation bias in scene viewing}, series = {Journal of vision}, volume = {17}, journal = {Journal of vision}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/17.13.3}, pages = {1626 -- 1638}, year = {2017}, abstract = {When watching the image of a natural scene on a computer screen, observers initially move their eyes toward the center of the image---a reliable experimental finding termed central fixation bias. This systematic tendency in eye guidance likely masks attentional selection driven by image properties and top-down cognitive processes. Here, we show that the central fixation bias can be reduced by delaying the initial saccade relative to image onset. In four scene-viewing experiments we manipulated observers' initial gaze position and delayed their first saccade by a specific time interval relative to the onset of an image. We analyzed the distance to image center over time and show that the central fixation bias of initial fixations was significantly reduced after delayed saccade onsets. We additionally show that selection of the initial saccade target strongly depended on the first saccade latency. A previously published model of saccade generation was extended with a central activation map on the initial fixation whose influence declined with increasing saccade latency. This extension was sufficient to replicate the central fixation bias from our experiments. 
Our results suggest that the central fixation bias is generated by default activation as a response to the sudden image onset and that this default activation pattern decreases over time. Thus, it may often be preferable to use a modified version of the scene viewing paradigm that decouples image onset from the start signal for scene exploration to explicitly reduce the central fixation bias.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2017, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Reich, Sebastian and Wichmann, Felix A. and Engbert, Ralf}, title = {Likelihood-based parameter estimation and comparison of dynamical cognitive models}, series = {Psychological Review}, volume = {124}, journal = {Psychological Review}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000068}, pages = {505 -- 524}, year = {2017}, abstract = {Dynamical models of cognition play an increasingly important role in driving theoretical and experimental research in psychology. Therefore, parameter estimation, model analysis and comparison of dynamical models are of essential importance. In this article, we propose a maximum likelihood approach for model analysis in a fully dynamical framework that includes time-ordered experimental data. Our methods can be applied to dynamical models for the prediction of discrete behavior (e.g., movement onsets); in particular, we use a dynamical model of saccade generation in scene viewing as a case study for our approach. For this model, the likelihood function can be computed directly by numerical simulation, which enables more efficient parameter estimation including Bayesian inference to obtain reliable estimates and corresponding credible intervals. Using hierarchical models inference is even possible for individual observers. Furthermore, our likelihood approach can be used to compare different models. 
In our example, the dynamical framework is shown to outperform nondynamical statistical models. Additionally, the likelihood based evaluation differentiates model variants, which produced indistinguishable predictions on hitherto used statistics. Our results indicate that the likelihood approach is a promising framework for dynamical cognitive models.}, language = {en} } @article{WernerRaabFischer2018, author = {Werner, Karsten and Raab, Markus and Fischer, Martin H.}, title = {Moving arms}, series = {Thinking \& Reasoning}, volume = {25}, journal = {Thinking \& Reasoning}, number = {2}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1354-6783}, doi = {10.1080/13546783.2018.1494630}, pages = {171 -- 191}, year = {2018}, abstract = {Embodied cognition postulates a bi-directional link between the human body and its cognitive functions. Whether this holds for higher cognitive functions such as problem solving is unknown. We predicted that arm movement manipulations performed by the participants could affect the problem-solving solutions. We tested this prediction in quantitative reasoning tasks that allowed two solutions to each problem (addition or subtraction). In two studies with healthy adults (N=53 and N=50), we found an effect of problem-congruent movements on problem solutions. 
Consistent with embodied cognition, sensorimotor information gained via right or left arm movements affects the solution in different types of problem-solving tasks.}, language = {en} } @article{SchottervonderMalsburgLeinenger2019, author = {Schotter, Elizabeth Roye and von der Malsburg, Titus Raban and Leinenger, Mallorie}, title = {Forced Fixations, Trans-Saccadic Integration, and Word Recognition}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {45}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/xlm0000617}, pages = {677 -- 688}, year = {2019}, abstract = {Recent studies using the gaze-contingent boundary paradigm reported a reversed preview benefit---shorter fixations on a target word when an unrelated preview was easier to process than the fixated target (Schotter \& Leinenger, 2016). This is explained via forced fixations---short fixations on words that would ideally be skipped (because lexical processing has progressed enough) but could not be because saccade planning reached a point of no return. This contrasts with accounts of preview effects via trans-saccadic integration---shorter fixations on a target word when the preview is more similar to it (see Cutter, Drieghe, \& Liversedge, 2015). In addition, if the previewed word---not the fixated target---determines subsequent eye movements, is it also this word that enters the linguistic processing stream? We tested these accounts by having 24 subjects read 150 sentences in the boundary paradigm in which both the preview and target were initially plausible but later one, both, or neither became implausible, providing an opportunity to probe which one was linguistically encoded. In an intervening buffer region, both words were plausible, providing an opportunity to investigate trans-saccadic integration. 
The frequency of the previewed word affected progressive saccades (i.e., forced fixations) as well as when trans-saccadic integration failure increased regressions, but only the implausibility of the target word affected semantic encoding. These data support a hybrid account of saccadic control (Reingold, Reichle, Glaholt, \& Sheridan, 2012) driven by incomplete (often parafoveal) word recognition, which occurs prior to complete (often foveal) word recognition.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2019, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Disentangling bottom-up versus top-down and low-level versus high-level influences on eye movements over time}, series = {Journal of vision}, volume = {19}, journal = {Journal of vision}, number = {3}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.3.1}, pages = {23}, year = {2019}, abstract = {Bottom-up and top-down as well as low-level and high-level factors influence where we fixate when viewing natural scenes. However, the importance of each of these factors and how they interact remains a matter of debate. Here, we disentangle these factors by analyzing their influence over time. For this purpose, we develop a saliency model that is based on the internal representation of a recent early spatial vision model to measure the low-level, bottom-up factor. To measure the influence of high-level, bottom-up features, we use a recent deep neural network-based saliency model. To account for top-down influences, we evaluate the models on two large data sets with different tasks: first, a memorization task and, second, a search task. 
Our results lend support to a separation of visual scene exploration into three phases: the first saccade, an initial guided exploration characterized by a gradual broadening of the fixation density, and a steady state that is reached after roughly 10 fixations. Saccade-target selection during the initial exploration and in the steady state is related to similar areas of interest, which are better predicted when including high-level features. In the search data set, fixation locations are determined predominantly by top-down processes. In contrast, the first fixation follows a different fixation density and contains a strong central fixation bias. Nonetheless, first fixations are guided strongly by image properties, and as early as 200 ms after image onset, fixations are better predicted by high-level information. We conclude that any low-level, bottom-up factors are mainly limited to the generation of the first saccade. All saccades are better explained when high-level features are considered, and later, this high-level, bottom-up control can be overruled by top-down influences.}, language = {en} } @article{CajarEngbertLaubrock2020, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {How spatial frequencies and color drive object search in real-world scenes}, series = {Journal of vision}, volume = {20}, journal = {Journal of vision}, number = {7}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/jov.20.7.8}, pages = {16}, year = {2020}, abstract = {When studying how people search for objects in scenes, the inhomogeneity of the visual field is often ignored. Due to physiological limitations, peripheral vision is blurred and mainly uses coarse-grained information (i.e., low spatial frequencies) for selecting saccade targets, whereas high-acuity central vision uses fine-grained information (i.e., high spatial frequencies) for analysis of details. 
Here we investigated how spatial frequencies and color affect object search in real-world scenes. Using gaze-contingent filters, we attenuated high or low frequencies in central or peripheral vision while viewers searched color or grayscale scenes. Results showed that peripheral filters and central high-pass filters hardly affected search accuracy, whereas accuracy dropped drastically with central low-pass filters. Peripheral filtering increased the time to localize the target by decreasing saccade amplitudes and increasing number and duration of fixations. The use of coarse-grained information in the periphery was limited to color scenes. Central filtering increased the time to verify target identity instead, especially with low-pass filters. We conclude that peripheral vision is critical for object localization and central vision is critical for object identification. Visual guidance during peripheral object localization is dominated by low-frequency color information, whereas high-frequency information, relatively independent of color, is most important for object identification in central vision.}, language = {en} } @article{SeeligRabeMalemShinitskietal.2020, author = {Seelig, Stefan A. and Rabe, Maximilian Michael and Malem-Shinitski, Noa and Risse, Sarah and Reich, Sebastian and Engbert, Ralf}, title = {Bayesian parameter estimation for the SWIFT model of eye-movement control during reading}, series = {Journal of mathematical psychology}, volume = {95}, journal = {Journal of mathematical psychology}, publisher = {Elsevier}, address = {San Diego}, issn = {0022-2496}, doi = {10.1016/j.jmp.2019.102313}, pages = {32}, year = {2020}, abstract = {Process-oriented theories of cognition must be evaluated against time-ordered observations. Here we present a representative example for data assimilation of the SWIFT model, a dynamical model of the control of fixation positions and fixation durations during natural reading of single sentences. 
First, we develop and test an approximate likelihood function of the model, which is a combination of a spatial, pseudo-marginal likelihood and a temporal likelihood obtained by probability density approximation. Second, we implement a Bayesian approach to parameter inference using an adaptive Markov chain Monte Carlo procedure. Our results indicate that model parameters can be estimated reliably for individual subjects. We conclude that approximative Bayesian inference represents a considerable step forward for computational models of eye-movement control, where modeling of individual data on the basis of process-based dynamic models has not been possible so far.}, language = {en} } @article{AdamElsner2020, author = {Adam, Maurits and Elsner, Birgit}, title = {The impact of salient action effects on 6-, 7-, and 11-month-olds' goal-predictive gaze shifts for a human grasping action}, series = {PLOS ONE}, volume = {15}, journal = {PLOS ONE}, number = {10}, publisher = {Public Library of Science}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0240165}, pages = {18}, year = {2020}, abstract = {When infants observe a human grasping action, experience-based accounts predict that all infants familiar with grasping actions should be able to predict the goal regardless of additional agency cues such as an action effect. Cue-based accounts, however, suggest that infants use agency cues to identify and predict action goals when the action or the agent is not familiar. From these accounts, we hypothesized that younger infants would need additional agency cues such as a salient action effect to predict the goal of a human grasping action, whereas older infants should be able to predict the goal regardless of agency cues. In three experiments, we presented 6-, 7-, and 11-month-olds with videos of a manual grasping action presented either with or without an additional salient action effect (Exp. 
1 and 2), or we presented 7-month-olds with videos of a mechanical claw performing a grasping action presented with a salient action effect (Exp. 3). The 6-month-olds showed tracking gaze behavior, and the 11-month-olds showed predictive gaze behavior, regardless of the action effect. However, the 7-month-olds showed predictive gaze behavior in the action-effect condition, but tracking gaze behavior in the no-action-effect condition and in the action-effect condition with a mechanical claw. The results therefore support the idea that salient action effects are especially important for infants' goal predictions from 7 months on, and that this facilitating influence of action effects is selective for the observation of human hands.}, language = {en} } @article{ParshinaLaurinavichyuteSekerina2021, author = {Parshina, Olga and Laurinavichyute, Anna and Sekerina, Irina A.}, title = {Eye-movement benchmarks in heritage language reading}, series = {Bilingualism : language and cognition}, volume = {24}, journal = {Bilingualism : language and cognition}, number = {1}, publisher = {Cambridge University Press}, address = {Cambridge}, issn = {1366-7289}, doi = {10.1017/S136672892000019X}, pages = {69 -- 82}, year = {2021}, abstract = {This eye-tracking study establishes basic benchmarks of eye movements during reading in heritage language (HL) by Russian-speaking adults and adolescents of high (n = 21) and low proficiency (n = 27). Heritage speakers (HSs) read sentences in Cyrillic, and their eye movements were compared to those of Russian monolingual skilled adult readers, 8-year-old children and L2 learners. Reading patterns of HSs revealed longer mean fixation durations, lower skipping probabilities, and higher regressive saccade rates than in monolingual adults. High-proficient HSs were more similar to monolingual children, while low-proficient HSs performed on par with L2 learners. 
Low-proficient HSs differed from high-proficient HSs in exhibiting lower skipping probabilities, higher fixation counts, and larger frequency effects. Taken together, our findings are consistent with the weaker links account of bilingual language processing as well as the divergent attainment theory of HL.}, language = {en} } @article{MeixnerNixonLaubrock2022, author = {Meixner, Johannes M. and Nixon, Jessie S. and Laubrock, Jochen}, title = {The perceptual span is dynamically adjusted in response to foveal load by beginning readers}, series = {Journal of experimental psychology : general}, volume = {151}, journal = {Journal of experimental psychology : general}, number = {6}, publisher = {American Psychological Association}, address = {Washington}, issn = {0096-3445}, doi = {10.1037/xge0001140}, pages = {1219 -- 1232}, year = {2022}, abstract = {The perceptual span describes the size of the visual field from which information is obtained during a fixation in reading. Its size depends on characteristics of writing system and reader, but-according to the foveal load hypothesis-it is also adjusted dynamically as a function of lexical processing difficulty. Using the moving window paradigm to manipulate the amount of preview, here we directly test whether the perceptual span shrinks as foveal word difficulty increases. We computed the momentary size of the span from word-based eye-movement measures as a function of foveal word frequency, allowing us to separately describe the perceptual span for information affecting spatial saccade targeting and temporal saccade execution. First fixation duration and gaze duration on the upcoming (parafoveal) word N + 1 were significantly shorter when the current (foveal) word N was more frequent. We show that the word frequency effect is modulated by window size. Fixation durations on word N + 1 decreased with high-frequency words N, but only for large windows, that is, when sufficient parafoveal preview was available. 
This provides strong support for the foveal load hypothesis. To investigate the development of the foveal load effect, we analyzed data from three waves of a longitudinal study on the perceptual span with German children in Grades 1 to 6. Perceptual span adjustment emerged early in development at around second grade and remained stable in later grades. We conclude that the local modulation of the perceptual span indicates a general cognitive process, perhaps an attentional gradient with rapid readjustment.}, language = {en} } @article{CajarEngbertLaubrock2022, author = {Cajar, Anke and Engbert, Ralf and Laubrock, Jochen}, title = {Potsdam Eye-Movement Corpus for Scene Memorization and Search With Color and Spatial-Frequency Filtering}, series = {Frontiers in psychology / Frontiers Research Foundation}, volume = {13}, journal = {Frontiers in psychology / Frontiers Research Foundation}, publisher = {Frontiers Research Foundation}, address = {Lausanne, Schweiz}, issn = {1664-1078}, doi = {10.3389/fpsyg.2022.850482}, pages = {1 -- 7}, year = {2022}, language = {en} }