@article{YanZhouShuetal.2015, author = {Yan, Ming and Zhou, Wei and Shu, Hua and Kliegl, Reinhold}, title = {Perceptual span depends on font size during the reading of Chinese sentences}, series = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, volume = {41}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, number = {1}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0038097}, pages = {209 -- 219}, year = {2015}, abstract = {The present study explored the perceptual span (i.e., the physical extent of an area from which useful visual information is extracted during a single fixation) during the reading of Chinese sentences in 3 experiments. In Experiment 1, we tested whether the rightward span can go beyond 3 characters when visually similar masks were used. Results showed that Chinese readers needed at least 4 characters to the right of fixation to maintain normal reading behavior when visually similar masks were used and when characters were displayed in small fonts, indicating that the span is dynamically influenced by masking materials. In Experiments 2 and 3, we asked whether the perceptual span varies as a function of font size in spaced (German) and unspaced (Chinese) scripts. Results clearly suggest that the perceptual span depends on font size in Chinese, but we failed to find such evidence for German. We propose that the perceptual span in Chinese is flexible; it is strongly constrained by language-specific properties such as high information density and the lack of word spacing. Implications for saccade-target selection during the reading of Chinese sentences are discussed.}, language = {en} } @phdthesis{Wotschack2009, author = {Wotschack, Christiane}, title = {Eye movements in reading strategies: how reading strategies modulate effects of distributed processing and oculomotor control}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-021-2}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-36846}, school = {Universit{\"a}t Potsdam}, pages = {213}, year = {2009}, abstract = {Throughout its empirical research history, eye movement research has always been aware of the differences in reading behavior induced by individual differences and task demands. This work introduces a novel, comprehensive concept of reading strategy, comprising individual differences in reading style and reading skill as well as reader goals. In a series of sentence-reading experiments recording eye movements, the influence of reading strategies on reader- and word-level effects assuming distributed processing was investigated. Results provide evidence for strategic, top-down influences on eye movement control that extend our understanding of eye guidance in reading.}, language = {en} } @article{WernerRaabFischer2018, author = {Werner, Karsten and Raab, Markus and Fischer, Martin H.}, title = {Moving arms}, series = {Thinking \& Reasoning}, volume = {25}, journal = {Thinking \& Reasoning}, number = {2}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1354-6783}, doi = {10.1080/13546783.2018.1494630}, pages = {171 -- 191}, year = {2018}, abstract = {Embodied cognition postulates a bi-directional link between the human body and its cognitive functions. Whether this holds for higher cognitive functions such as problem solving is unknown.
We predicted that arm movement manipulations performed by the participants could affect the problem-solving solutions. We tested this prediction in quantitative reasoning tasks that allowed two solutions to each problem (addition or subtraction). In two studies with healthy adults (N=53 and N=50), we found an effect of problem-congruent movements on problem solutions. Consistent with embodied cognition, sensorimotor information gained via right or left arm movements affects the solution in different types of problem-solving tasks.}, language = {en} } @misc{WernerRaabFischer2018a, author = {Werner, Karsten and Raab, Markus and Fischer, Martin H.}, title = {Moving arms}, series = {Postprints der Universit{\"a}t Potsdam: Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam: Humanwissenschaftliche Reihe}, number = {488}, issn = {1866-8364}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-420579}, pages = {22}, year = {2018}, abstract = {Embodied cognition postulates a bi-directional link between the human body and its cognitive functions. Whether this holds for higher cognitive functions such as problem solving is unknown. We predicted that arm movement manipulations performed by the participants could affect the problem-solving solutions. We tested this prediction in quantitative reasoning tasks that allowed two solutions to each problem (addition or subtraction). In two studies with healthy adults (N=53 and N=50), we found an effect of problem-congruent movements on problem solutions. Consistent with embodied cognition, sensorimotor information gained via right or left arm movements affects the solution in different types of problem-solving tasks.}, language = {en} } @article{TrukenbrodEngbert2012, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Eye movements in a sequential scanning task: evidence for distributed processing}, series = {Journal of Vision}, volume = {12}, journal = {Journal of Vision}, number = {1}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/12.1.5}, pages = {12}, year = {2012}, abstract = {Current models of eye movement control are derived from theories assuming serial processing of single items or from theories based on parallel processing of multiple items at a time. This issue has persisted because most investigated paradigms generated data compatible with both serial and parallel models. Here, we study eye movements in a sequential scanning task, where stimulus n indicates the position of the next stimulus n + 1. We investigate whether eye movements are controlled by sequential attention shifts when the task requires serial order of processing. Our measures of distributed processing, in the form of parafoveal-on-foveal effects, long-range modulations of target selection, and skipping saccades, provide evidence against models strictly based on serial attention shifts.
We conclude that our results lend support to parallel processing as a strategy for eye movement control.}, language = {en} } @misc{SinnEngbert2011, author = {Sinn, Petra and Engbert, Ralf}, title = {Saccadic facilitation by modulation of microsaccades in natural backgrounds}, series = {Postprints der Universit{\"a}t Potsdam: Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam: Humanwissenschaftliche Reihe}, number = {595}, issn = {1866-8364}, doi = {10.25932/publishup-43181}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431817}, pages = {7}, year = {2011}, abstract = {Saccades move objects of interest into the center of the visual field for high-acuity visual analysis. White, Stritzke, and Gegenfurtner (Current Biology, 18, 124-128, 2008) have shown that saccadic latencies in the context of a structured background are much shorter than those with an unstructured background at equal levels of visibility. This effect has been explained by a possible preactivation of the saccadic circuitry whenever a structured background acts as a mask for potential saccade targets. Here, we show that background textures modulate rates of microsaccades during visual fixation. First, after a display change, structured backgrounds induce a stronger decrease of microsaccade rates than do uniform backgrounds. Second, we demonstrate that the occurrence of a microsaccade in a critical time window can delay a subsequent saccadic response. Taken together, our findings suggest that microsaccades contribute to the saccadic facilitation effect due to a modulation of microsaccade rates by properties of the background.}, language = {en} } @article{SeeligRabeMalemShinitskietal.2020, author = {Seelig, Stefan A. and Rabe, Maximilian Michael and Malem-Shinitski, Noa and Risse, Sarah and Reich, Sebastian and Engbert, Ralf}, title = {Bayesian parameter estimation for the SWIFT model of eye-movement control during reading}, series = {Journal of Mathematical Psychology}, volume = {95}, journal = {Journal of Mathematical Psychology}, publisher = {Elsevier}, address = {San Diego}, issn = {0022-2496}, doi = {10.1016/j.jmp.2019.102313}, pages = {32}, year = {2020}, abstract = {Process-oriented theories of cognition must be evaluated against time-ordered observations. Here we present a representative example of data assimilation for the SWIFT model, a dynamical model of the control of fixation positions and fixation durations during natural reading of single sentences. First, we develop and test an approximate likelihood function of the model, which is a combination of a spatial, pseudo-marginal likelihood and a temporal likelihood obtained by probability density approximation. Second, we implement a Bayesian approach to parameter inference using an adaptive Markov chain Monte Carlo procedure. Our results indicate that model parameters can be estimated reliably for individual subjects. We conclude that approximate Bayesian inference represents a considerable step forward for computational models of eye-movement control, where modeling of individual data on the basis of process-based dynamic models has not been possible so far.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2017, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Reich, Sebastian and Wichmann, Felix A.
and Engbert, Ralf}, title = {Likelihood-based parameter estimation and comparison of dynamical cognitive models}, series = {Psychological Review}, volume = {124}, journal = {Psychological Review}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000068}, pages = {505 -- 524}, year = {2017}, abstract = {Dynamical models of cognition play an increasingly important role in driving theoretical and experimental research in psychology. Therefore, parameter estimation, model analysis, and comparison of dynamical models are of essential importance. In this article, we propose a maximum likelihood approach for model analysis in a fully dynamical framework that includes time-ordered experimental data. Our methods can be applied to dynamical models for the prediction of discrete behavior (e.g., movement onsets); in particular, we use a dynamical model of saccade generation in scene viewing as a case study for our approach. For this model, the likelihood function can be computed directly by numerical simulation, which enables more efficient parameter estimation, including Bayesian inference, to obtain reliable estimates and corresponding credible intervals. Using hierarchical models, inference is even possible for individual observers. Furthermore, our likelihood approach can be used to compare different models. In our example, the dynamical framework is shown to outperform nondynamical statistical models. Additionally, the likelihood-based evaluation differentiates model variants that produced indistinguishable predictions on hitherto used statistics. Our results indicate that the likelihood approach is a promising framework for dynamical cognitive models.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2019, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Disentangling bottom-up versus top-down and low-level versus high-level influences on eye movements over time}, series = {Journal of Vision}, volume = {19}, journal = {Journal of Vision}, number = {3}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.3.1}, pages = {23}, year = {2019}, abstract = {Bottom-up and top-down as well as low-level and high-level factors influence where we fixate when viewing natural scenes. However, the importance of each of these factors and how they interact remains a matter of debate. Here, we disentangle these factors by analyzing their influence over time. For this purpose, we develop a saliency model that is based on the internal representation of a recent early spatial vision model to measure the low-level, bottom-up factor. To measure the influence of high-level, bottom-up features, we use a recent deep neural network-based saliency model. To account for top-down influences, we evaluate the models on two large data sets with different tasks: first, a memorization task and, second, a search task. Our results lend support to a separation of visual scene exploration into three phases: the first saccade, an initial guided exploration characterized by a gradual broadening of the fixation density, and a steady state that is reached after roughly 10 fixations. Saccade-target selection during the initial exploration and in the steady state is related to similar areas of interest, which are better predicted when including high-level features.
In the search data set, fixation locations are determined predominantly by top-down processes. In contrast, the first fixation follows a different fixation density and contains a strong central fixation bias. Nonetheless, first fixations are guided strongly by image properties, and as early as 200 ms after image onset, fixations are better predicted by high-level information. We conclude that any low-level, bottom-up factors are mainly limited to the generation of the first saccade. All saccades are better explained when high-level features are considered, and later, this high-level, bottom-up control can be overruled by top-down influences.}, language = {en} } @article{SchottervonderMalsburgLeinenger2019, author = {Schotter, Elizabeth Roye and von der Malsburg, Titus Raban and Leinenger, Mallorie}, title = {Forced fixations, trans-saccadic integration, and word recognition}, series = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, volume = {45}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/xlm0000617}, pages = {677 -- 688}, year = {2019}, abstract = {Recent studies using the gaze-contingent boundary paradigm reported a reversed preview benefit: shorter fixations on a target word when an unrelated preview was easier to process than the fixated target (Schotter \& Leinenger, 2016). This is explained via forced fixations: short fixations on words that would ideally be skipped (because lexical processing has progressed enough) but could not be, because saccade planning had reached a point of no return. This contrasts with accounts of preview effects via trans-saccadic integration: shorter fixations on a target word when the preview is more similar to it (see Cutter, Drieghe, \& Liversedge, 2015). In addition, if the previewed word, not the fixated target, determines subsequent eye movements, is it also this word that enters the linguistic processing stream? We tested these accounts by having 24 subjects read 150 sentences in the boundary paradigm, in which both the preview and the target were initially plausible but later one, both, or neither became implausible, providing an opportunity to probe which one was linguistically encoded. In an intervening buffer region, both words were plausible, providing an opportunity to investigate trans-saccadic integration. The frequency of the previewed word affected progressive saccades (i.e., forced fixations) as well as when trans-saccadic integration failure increased regressions, but only the implausibility of the target word affected semantic encoding. These data support a hybrid account of saccadic control (Reingold, Reichle, Glaholt, \& Sheridan, 2012) driven by incomplete (often parafoveal) word recognition, which occurs prior to complete (often foveal) word recognition.}, language = {en} }