@phdthesis{Ong2007, author = {Ong, James Kwan Yau}, title = {The predictability problem}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15025}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {We investigate whether the subjective measure of cloze predictability can be estimated by a combination of objective measures (semantic and n-gram measures) that are based on the statistical properties of text corpora. The semantic measures are derived either by querying Internet search engines or by applying Latent Semantic Analysis, whereas the word n-gram measures are based solely on the results of Internet search engines. We further examine the role of cloze predictability in SWIFT, a model of eye-movement control, and consider whether other parameters could replace the predictability parameter. Our results suggest that a computational model that computes predictability values must not only take into account measures that represent a word's relatedness to its context; the presence of a measure of unrelatedness is equally important. However, even though only relatedness measures are available here, SWIFT should perform just as well when we replace cloze predictability with our measures.}, language = {en} } @article{MeixnerNixonLaubrock2022, author = {Meixner, Johannes M. and Nixon, Jessie S. and Laubrock, Jochen}, title = {The perceptual span is dynamically adjusted in response to foveal load by beginning readers}, series = {Journal of experimental psychology : general}, volume = {151}, journal = {Journal of experimental psychology : general}, number = {6}, publisher = {American Psychological Association}, address = {Washington}, issn = {0096-3445}, doi = {10.1037/xge0001140}, pages = {1219 -- 1232}, year = {2022}, abstract = {The perceptual span describes the size of the visual field from which information is obtained during a fixation in reading. Its size depends on characteristics of writing system and reader, but, according to the foveal load hypothesis, it is also adjusted dynamically as a function of lexical processing difficulty. Using the moving window paradigm to manipulate the amount of preview, here we directly test whether the perceptual span shrinks as foveal word difficulty increases. We computed the momentary size of the span from word-based eye-movement measures as a function of foveal word frequency, allowing us to separately describe the perceptual span for information affecting spatial saccade targeting and temporal saccade execution. First fixation duration and gaze duration on the upcoming (parafoveal) word N + 1 were significantly shorter when the current (foveal) word N was more frequent. We show that the word frequency effect is modulated by window size. Fixation durations on word N + 1 decreased with high-frequency words N, but only for large windows, that is, when sufficient parafoveal preview was available. This provides strong support for the foveal load hypothesis. To investigate the development of the foveal load effect, we analyzed data from three waves of a longitudinal study on the perceptual span with German children in Grades 1 to 6. Perceptual span adjustment emerged early in development at around second grade and remained stable in later grades.
We conclude that the local modulation of the perceptual span indicates a general cognitive process, perhaps an attentional gradient with rapid readjustment.}, language = {en} } @article{AdamElsner2020, author = {Adam, Maurits and Elsner, Birgit}, title = {The impact of salient action effects on 6-, 7-, and 11-month-olds' goal-predictive gaze shifts for a human grasping action}, series = {PLOS ONE}, volume = {15}, journal = {PLOS ONE}, number = {10}, publisher = {Public Library of Science}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0240165}, pages = {18}, year = {2020}, abstract = {When infants observe a human grasping action, experience-based accounts predict that all infants familiar with grasping actions should be able to predict the goal regardless of additional agency cues such as an action effect. Cue-based accounts, however, suggest that infants use agency cues to identify and predict action goals when the action or the agent is not familiar. From these accounts, we hypothesized that younger infants would need additional agency cues such as a salient action effect to predict the goal of a human grasping action, whereas older infants should be able to predict the goal regardless of agency cues. In three experiments, we presented 6-, 7-, and 11-month-olds with videos of a manual grasping action presented either with or without an additional salient action effect (Exp. 1 and 2), or we presented 7-month-olds with videos of a mechanical claw performing a grasping action presented with a salient action effect (Exp. 3). The 6-month-olds showed tracking gaze behavior, and the 11-month-olds showed predictive gaze behavior, regardless of the action effect. However, the 7-month-olds showed predictive gaze behavior in the action-effect condition, but tracking gaze behavior in the no-action-effect condition and in the action-effect condition with a mechanical claw. The results therefore support the idea that salient action effects are especially important for infants' goal predictions from 7 months on, and that this facilitating influence of action effects is selective for the observation of human hands.}, language = {en} } @article{LaubrockKliegl2015, author = {Laubrock, Jochen and Kliegl, Reinhold}, title = {The eye-voice span during reading aloud}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, number = {1432}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.01432}, year = {2015}, abstract = {Although eye movements during reading are modulated by cognitive processing demands, they also reflect visual sampling of the input, and possibly preparation of output for speech or the inner voice. By simultaneously recording eye movements and the voice during reading aloud, we obtained an output measure that constrains the length of time spent on cognitive processing. Here we investigate the dynamics of the eye-voice span (EVS), the distance between eye and voice. We show that the EVS is regulated immediately during fixation of a word by either increasing fixation duration or programming a regressive eye movement against the reading direction. EVS size at the beginning of a fixation was positively correlated with the likelihood of regressions and refixations.
Regression probability was further increased if the EVS was still large at the end of a fixation: if adjustment of fixation duration did not sufficiently reduce the EVS during a fixation, then a regression rather than a refixation followed with high probability. We further show that the EVS can help understand cognitive influences on fixation duration during reading: in mixed model analyses, the EVS was a stronger predictor of fixation durations than either word frequency or word length. The EVS modulated the influence of several other predictors on single fixation durations (SFDs). For example, word-N frequency effects were larger with a large EVS, especially when word N-1 frequency was low. Finally, a comparison of SFDs during oral and silent reading showed that reading is governed by similar principles in both reading modes, although EVS maintenance and articulatory processing also cause some differences. In summary, the EVS is regulated by adjusting fixation duration and/or by programming a regressive eye movement when the EVS gets too large. Overall, the EVS appears to be directly related to updating of the working memory buffer during reading.}, language = {en} }
@misc{LaubrockKliegl2015, author = {Laubrock, Jochen and Kliegl, Reinhold}, title = {The eye-voice span during reading aloud}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-86904}, year = {2015}, abstract = {Although eye movements during reading are modulated by cognitive processing demands, they also reflect visual sampling of the input, and possibly preparation of output for speech or the inner voice. By simultaneously recording eye movements and the voice during reading aloud, we obtained an output measure that constrains the length of time spent on cognitive processing. Here we investigate the dynamics of the eye-voice span (EVS), the distance between eye and voice. We show that the EVS is regulated immediately during fixation of a word by either increasing fixation duration or programming a regressive eye movement against the reading direction. EVS size at the beginning of a fixation was positively correlated with the likelihood of regressions and refixations. Regression probability was further increased if the EVS was still large at the end of a fixation: if adjustment of fixation duration did not sufficiently reduce the EVS during a fixation, then a regression rather than a refixation followed with high probability. We further show that the EVS can help understand cognitive influences on fixation duration during reading: in mixed model analyses, the EVS was a stronger predictor of fixation durations than either word frequency or word length. The EVS modulated the influence of several other predictors on single fixation durations (SFDs). For example, word-N frequency effects were larger with a large EVS, especially when word N-1 frequency was low. Finally, a comparison of SFDs during oral and silent reading showed that reading is governed by similar principles in both reading modes, although EVS maintenance and articulatory processing also cause some differences. In summary, the EVS is regulated by adjusting fixation duration and/or by programming a regressive eye movement when the EVS gets too large. Overall, the EVS appears to be directly related to updating of the working memory buffer during reading.}, language = {en} } @misc{KupermanDambacherNuthmannetal.2010, author = {Kuperman, Victor and Dambacher, Michael and Nuthmann, Antje and Kliegl, Reinhold}, title = {The effect of word position on eye-movements in sentence and paragraph reading}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56828}, year = {2010}, abstract = {The present study explores the role of the word position-in-text in sentence and paragraph reading. Three eye-movement data sets based on the reading of Dutch and German unrelated sentences reveal a sizeable, replicable increase in reading times over several words in the beginning and the end of sentences. The data from the paragraph-based English-language Dundee corpus replicate the pattern and also indicate that the increase in inspection times is driven by the visual boundaries of the text organized in lines, rather than by syntactic sentence boundaries. We argue that this effect is independent of several established lexical, contextual and oculomotor predictors of eye-movement behavior.
We also provide evidence that the effect of word position-in-text has two independent components: a start-up effect arguably caused by a strategic oculomotor program of saccade planning over the line of text, and a wrap-up effect originating in cognitive processes of comprehension and semantic integration.}, language = {en} } @phdthesis{Mergenthaler2009, author = {Mergenthaler, Konstantin K.}, title = {The control of fixational eye movements}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-29397}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {In normal everyday viewing, we perform large eye movements (saccades) and miniature or fixational eye movements. Most of our visual perception occurs while we are fixating. However, our eyes are perpetually in motion. Properties of these fixational eye movements, which are partly controlled by the brainstem, change depending on the task and the visual conditions. Currently, fixational eye movements are poorly understood because they serve the two contradictory functions of gaze stabilization and counteraction of retinal fatigue. In this dissertation, we investigate the spatial and temporal properties of time series of eye position acquired from participants staring at a tiny fixation dot or at a completely dark screen (with the instruction to fixate a remembered stimulus); these time series were acquired with high spatial and temporal resolution. First, we suggest an advanced algorithm to separate the slow phases (named drift) and fast phases (named microsaccades) of these movements, which are considered to play different roles in perception. On the basis of this identification, we investigate and compare the temporal scaling properties of the complete time series and those time series where the microsaccades are removed. For the time series obtained during fixations on a stimulus, we were able to show that they deviate from Brownian motion. On short time scales, eye movements are governed by persistent behavior and on longer time scales, by anti-persistent behavior. The crossover point between these two regimes remains unchanged by the removal of microsaccades but is different in the horizontal and the vertical components of the eyes. Other analyses target the properties of the microsaccades, e.g., the rate and amplitude distributions, and we investigate whether microsaccades are triggered dynamically, as a result of earlier events in the drift, or completely randomly. The results obtained from using a simple box-count measure contradict the hypothesis of a purely random generation of microsaccades (Poisson process). Second, we set up a model for the slow part of the fixational eye movements. The model is based on a delayed random walk approach within the velocity-related equation, which allows us to use the data to determine control loop durations; these durations appear to be different for the vertical and horizontal components of the eye movements. The model is also motivated by the known physiological representation of saccade generation; the difference between horizontal and vertical components concurs with the spatially separated representation of saccade generating regions. Furthermore, the control loop durations in the model suggest an external feedback loop for the horizontal but not for the vertical component, which is consistent with the fact that an internal feedback loop in the neurophysiology has only been identified for the vertical component.
Finally, we confirmed the scaling properties of the model by semi-analytical calculations. In conclusion, we were able to identify several properties of the different parts of fixational eye movements and propose a model approach that is in accordance with the described neurophysiology and described limitations of fixational eye movement control.}, language = {en} } @phdthesis{Nuthmann2005, author = {Nuthmann, Antje}, title = {The "where" and "when" of eye fixations in reading}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7931}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {To investigate eye-movement control in reading, the present thesis examined three phenomena related to the eyes' landing position within words: (1) the optimal viewing position (OVP), (2) the preferred viewing location (PVL), and (3) the Fixation-Duration Inverted-Optimal Viewing Position (IOVP) Effect. Based on a corpus-analytical approach (Exp. 1), the influence of the variables word length, launch site distance, and word frequency was systematically explored. In addition, five experimental manipulations were conducted. First, word center was identified as the OVP, that is, the position within a word where refixation probability is minimal. With increasing launch site distance, however, the OVP was found to move towards the word beginning. Several possible causes of refixations were discussed. The issue of refixation saccade programming was extensively investigated, suggesting that pre-planned and directly controlled refixation saccades coexist. Second, PVL curves, that is, landing position distributions, show that the eyes are systematically deviated from the OVP, due to visuomotor constraints. By far the largest influence on mean and standard deviation of the Gaussian PVL curve was exhibited by launch site distance. Third, it was investigated how fixation durations vary as a function of landing position. The IOVP effect was replicated: Fixations located at word center are longer than those falling near the edges of a word. The effect of word frequency and/or launch site distance on the IOVP function mainly consisted in a vertical displacement of the curve. The Fixation-Duration IOVP effect is intriguing because word center (the OVP) would appear to be the best place to fixate and process a word. A critical part of the current work was devoted to investigating the origin of the effect. It was suggested that the IOVP effect arises as a consequence of mislocated fixations, i.e., fixations on unintended words, which are caused by saccadic errors. An algorithm for estimating the proportion of mislocated fixations from empirical data was developed, based on extrapolations of landing position distributions beyond word boundaries. As a new central theoretical claim, it was suggested that a new saccade program is started immediately if the intended target word is missed. On average, this will lead to decreased durations for mislocated fixations. Because mislocated fixations were shown to be most prevalent at the beginning and end of words, the proposed mechanism generated the inverted U-shape for fixation durations when computed as a function of landing position.
The proposed mechanism for generating the effect is generally compatible with both oculomotor and cognitive models of eye-movement control in reading.}, subject = {Allgemeine Psychologie}, language = {en} } @article{RothkegelTrukenbrodSchuettetal.2017, author = {Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Sch{\"u}tt, Heiko Herbert and Wichmann, Felix A. and Engbert, Ralf}, title = {Temporal evolution of the central fixation bias in scene viewing}, series = {Journal of vision}, volume = {17}, journal = {Journal of vision}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/17.13.3}, pages = {1626 -- 1638}, year = {2017}, abstract = {When watching the image of a natural scene on a computer screen, observers initially move their eyes toward the center of the image—a reliable experimental finding termed central fixation bias. This systematic tendency in eye guidance likely masks attentional selection driven by image properties and top-down cognitive processes. Here, we show that the central fixation bias can be reduced by delaying the initial saccade relative to image onset. In four scene-viewing experiments we manipulated observers' initial gaze position and delayed their first saccade by a specific time interval relative to the onset of an image. We analyzed the distance to image center over time and show that the central fixation bias of initial fixations was significantly reduced after delayed saccade onsets. We additionally show that selection of the initial saccade target strongly depended on the first saccade latency. A previously published model of saccade generation was extended with a central activation map on the initial fixation whose influence declined with increasing saccade latency. This extension was sufficient to replicate the central fixation bias from our experiments. Our results suggest that the central fixation bias is generated by default activation as a response to the sudden image onset and that this default activation pattern decreases over time. Thus, it may often be preferable to use a modified version of the scene viewing paradigm that decouples image onset from the start signal for scene exploration to explicitly reduce the central fixation bias.}, language = {en} } @misc{FelserPattersonCunnings2015, author = {Felser, Claudia and Patterson, Clare and Cunnings, Ian}, title = {Structural constraints on pronoun binding and coreference: Evidence from eye movements during reading}, series = {Frontiers in psychology}, journal = {Frontiers in psychology}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78650}, year = {2015}, abstract = {A number of recent studies have investigated how syntactic and non-syntactic constraints combine to cue memory retrieval during anaphora resolution. In this paper we investigate how syntactic constraints and gender congruence interact to guide memory retrieval during the resolution of subject pronouns. Subject pronouns are always technically ambiguous, and the application of syntactic constraints on their interpretation depends on properties of the antecedent that is to be retrieved. While pronouns can freely corefer with non-quantified referential antecedents, linking a pronoun to a quantified antecedent is only possible in certain syntactic configurations via variable binding.
We report the results from a judgment task and three online reading comprehension experiments investigating pronoun resolution with quantified and non-quantified antecedents. Results from both the judgment task and participants' eye movements during reading indicate that comprehenders freely allow pronouns to corefer with non-quantified antecedents, but that retrieval of quantified antecedents is restricted to specific syntactic environments. We interpret our findings as indicating that syntactic constraints constitute highly weighted cues to memory retrieval during anaphora resolution.}, language = {en} } @article{CunningsPattersonFelser2015, author = {Cunnings, Ian and Patterson, Clare and Felser, Claudia}, title = {Structural constraints on pronoun binding and coreference: evidence from eye movements during reading}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00840}, pages = {18}, year = {2015}, abstract = {A number of recent studies have investigated how syntactic and non-syntactic constraints combine to cue memory retrieval during anaphora resolution. In this paper we investigate how syntactic constraints and gender congruence interact to guide memory retrieval during the resolution of subject pronouns. Subject pronouns are always technically ambiguous, and the application of syntactic constraints on their interpretation depends on properties of the antecedent that is to be retrieved. While pronouns can freely corefer with non-quantified referential antecedents, linking a pronoun to a quantified antecedent is only possible in certain syntactic configurations via variable binding. We report the results from a judgment task and three online reading comprehension experiments investigating pronoun resolution with quantified and non-quantified antecedents. Results from both the judgment task and participants' eye movements during reading indicate that comprehenders freely allow pronouns to corefer with non-quantified antecedents, but that retrieval of quantified antecedents is restricted to specific syntactic environments. We interpret our findings as indicating that syntactic constraints constitute highly weighted cues to memory retrieval during anaphora resolution.}, language = {en} }
@article{EngbertTrukenbrodBarthelmeetal.2015, author = {Engbert, Ralf and Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A.}, title = {Spatial statistics and attentional dynamics in scene viewing}, series = {Journal of vision}, volume = {15}, journal = {Journal of vision}, number = {1}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/15.1.14}, pages = {17}, year = {2015}, abstract = {In humans and in foveated animals, visual acuity is highly concentrated at the center of gaze, so that choosing where to look next is an important example of online, rapid decision-making. Computational neuroscientists have developed biologically-inspired models of visual attention, termed saliency maps, which successfully predict where people fixate on average. Using point process theory for spatial statistics, we show that scanpaths contain, however, important statistical structure, such as spatial clustering on top of distributions of gaze positions. Here, we develop a dynamical model of saccadic selection that accurately predicts the distribution of gaze positions as well as spatial clustering along individual scanpaths. Our model relies, first, on activation dynamics via spatially-limited (foveated) access to saliency information and, second, on a leaky memory process controlling the re-inspection of target regions. This theoretical framework models a form of context-dependent decision-making, linking neural dynamics of attention to behavioral gaze data.}, language = {en} } @article{HartmannMastFischer2015, author = {Hartmann, Matthias and Mast, Fred W. and Fischer, Martin H.}, title = {Spatial biases during mental arithmetic: evidence from eye movements on a blank screen}, series = {Frontiers in psychology}, volume = {6}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00012}, pages = {8}, year = {2015}, abstract = {While the influence of spatial-numerical associations in number categorization tasks has been well established, their role in mental arithmetic is less clear. It has been hypothesized that mental addition leads to rightward and upward shifts of spatial attention (along the "mental number line"), whereas subtraction leads to leftward and downward shifts. We addressed this hypothesis by analyzing spontaneous eye movements during mental arithmetic. Participants solved verbally presented arithmetic problems (e.g., 2 + 7, 8-3) aloud while looking at a blank screen. We found that eye movements reflected spatial biases in the ongoing mental operation: Gaze position shifted more upward when participants solved addition compared to subtraction problems, and the horizontal gaze position was partly determined by the magnitude of the operands.
Interestingly, the difference between addition and subtraction trials was driven by the operator (plus vs. minus) but was not influenced by the computational process. Thus, our results do not support the idea of a mental movement toward the solution during arithmetic but indicate a semantic association between operation and space.}, language = {en} } @misc{HartmannMastFischer2015, author = {Hartmann, Matthias and Mast, Fred W. and Fischer, Martin H.}, title = {Spatial biases during mental arithmetic}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {426}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406504}, pages = {8}, year = {2015}, abstract = {While the influence of spatial-numerical associations in number categorization tasks has been well established, their role in mental arithmetic is less clear. It has been hypothesized that mental addition leads to rightward and upward shifts of spatial attention (along the "mental number line"), whereas subtraction leads to leftward and downward shifts. We addressed this hypothesis by analyzing spontaneous eye movements during mental arithmetic. Participants solved verbally presented arithmetic problems (e.g., 2 + 7, 8-3) aloud while looking at a blank screen. We found that eye movements reflected spatial biases in the ongoing mental operation: Gaze position shifted more upward when participants solved addition compared to subtraction problems, and the horizontal gaze position was partly determined by the magnitude of the operands. Interestingly, the difference between addition and subtraction trials was driven by the operator (plus vs. minus) but was not influenced by the computational process. Thus, our results do not support the idea of a mental movement toward the solution during arithmetic but indicate a semantic association between operation and space.}, language = {en} } @phdthesis{Ohl2013, author = {Ohl, Sven}, title = {Small eye movements during fixation : the case of postsaccadic fixation and preparatory influences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69862}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Describing human eye movement behavior as an alternating sequence of saccades and fixations turns out to be an oversimplification because the eyes continue to move during fixation. Small-amplitude saccades (e.g., microsaccades) are typically observed 1-2 times per second during fixation. Research on microsaccades came in two waves. Early studies on microsaccades were dominated by the question whether microsaccades affect visual perception, and by studies on the role of microsaccades in the process of fixation control. The lack of evidence for a unique role of microsaccades led to a very critical view on the importance of microsaccades. Over the last years, microsaccades moved into focus again, revealing many interactions with perception, oculomotor control and cognition, as well as intriguing new insights into the neurophysiological implementation of microsaccades. In contrast to early studies on microsaccades, recent findings on microsaccades were accompanied by the development of models of microsaccade generation. 
While the exact generating mechanisms vary between the models, they still share the assumption that microsaccades are generated in a topographically organized saccade motor map that includes a representation for small-amplitude saccades in the center of the map (with its neurophysiological implementation in the rostral pole of the superior colliculus). In the present thesis I criticize the fact that models of microsaccade generation are based exclusively on results obtained during prolonged presaccadic fixation. I argue that microsaccades should also be studied in a more natural situation, namely the fixation following large saccadic eye movements. Studying postsaccadic fixation offers a new window to falsify models that aim to account for the generation of small eye movements. I demonstrate that error signals (visual and extra-retinal), as well as non-error signals such as target eccentricity, influence the characteristics of small-amplitude eye movements. These findings require a modification of a model introduced by Rolfs, Kliegl and Engbert (2008) in order to account for the generation of small-amplitude saccades during postsaccadic fixation. Moreover, I present a promising type of survival analysis that allowed me to examine time-dependent influences on postsaccadic eye movements. In addition, I examined the interplay of postsaccadic eye movements and postsaccadic location judgments, highlighting the need to include postsaccadic eye movements as a covariate in the analyses of location judgments in the presented paradigm. As a second goal, I tested model predictions concerning preparatory influences on microsaccade generation during presaccadic fixation. The observation that the preparatory set significantly influenced microsaccade rate supports the critical model assumption that increased fixation-related activity results in a larger number of microsaccades. In the present thesis I present important influences on the generation of small-amplitude saccades during fixation. These eye movements constitute a rich oculomotor behavior which still poses many research questions. Certainly, small-amplitude saccades represent an interesting source of information and will continue to influence future studies on perception and cognition.}, language = {en} } @article{HohensteinKliegl2014, author = {Hohenstein, Sven and Kliegl, Reinhold}, title = {Semantic preview benefit during reading}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {40}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {1}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0033670}, pages = {166 -- 190}, year = {2014}, abstract = {Word features in parafoveal vision influence eye movements during reading. The question of whether readers extract semantic information from parafoveal words was studied in 3 experiments by using a gaze-contingent display change technique. Subjects read German sentences containing 1 of several preview words that were replaced by a target word during the saccade to the preview (boundary paradigm). In the 1st experiment the preview word was semantically related or unrelated to the target. Fixation durations on the target were shorter for semantically related than unrelated previews, consistent with a semantic preview benefit.
In the 2nd experiment, half the sentences were presented following the rules of German spelling (i.e., previews and targets were printed with an initial capital letter), and the other half were presented completely in lowercase. A semantic preview benefit was obtained under both conditions. In the 3rd experiment, we introduced 2 further preview conditions, an identical word and a pronounceable nonword, while also manipulating the text contrast. Whereas the contrast had negligible effects, fixation durations on the target were reliably different for all 4 types of preview. Semantic preview benefits were greater for pretarget fixations closer to the boundary (large preview space) and, although not as consistently, for long pretarget fixation durations (long preview time). The results constrain theoretical proposals about eye movement control in reading.}, language = {en} } @misc{SinnEngbert2011, author = {Sinn, Petra and Engbert, Ralf}, title = {Saccadic facilitation by modulation of microsaccades in natural backgrounds}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {595}, issn = {1866-8364}, doi = {10.25932/publishup-43181}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431817}, pages = {7}, year = {2011}, abstract = {Saccades move objects of interest into the center of the visual field for high-acuity visual analysis. White, Stritzke, and Gegenfurtner (Current Biology, 18, 124-128, 2008) have shown that saccadic latencies in the context of a structured background are much shorter than those with an unstructured background at equal levels of visibility. This effect has been explained by possible preactivation of the saccadic circuitry whenever a structured background acts as a mask for potential saccade targets. Here, we show that background textures modulate rates of microsaccades during visual fixation. First, after a display change, structured backgrounds induce a stronger decrease of microsaccade rates than do uniform backgrounds. Second, we demonstrate that the occurrence of a microsaccade in a critical time window can delay a subsequent saccadic response. Taken together, our findings suggest that microsaccades contribute to the saccadic facilitation effect, due to a modulation of microsaccade rates by properties of the background.}, language = {en} } @article{LuoYanZhou2013, author = {Luo, Yingyi and Yan, Ming and Zhou, Xiaolin}, title = {Prosodic boundaries delay the processing of upcoming lexical information during silent sentence reading}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {39}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, number = {3}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/a0029182}, pages = {915 -- 930}, year = {2013}, abstract = {Prosodic boundaries can be used to guide syntactic parsing in both spoken and written sentence comprehension, but it is unknown whether the processing of prosodic boundaries affects the processing of upcoming lexical information. In 3 eye-tracking experiments, participants read silently sentences that allow for 2 possible syntactic interpretations when there is no comma or other cue specifying which interpretation should be taken. 
In Experiments 1 and 2, participants heard a low-pass filtered auditory version of the sentence, which provided a prosodic boundary cue prior to each sentence. In Experiment 1, we found that the boundary cue helped syntactic disambiguation after the cue and led to longer fixation durations on regions right before the cue than on identical regions without prosodic boundary information. In Experiments 2 and 3, we used a gaze-contingent display-change paradigm to manipulate the parafoveal visibility of the first constituent character of the target word after the disambiguating position. Results of Experiment 2 showed that previewing the first character significantly reduced the reading time of the target word, but this preview benefit was greatly reduced when the prosodic boundary cue was introduced at this position. In Experiment 3, instead of the acoustic cues, a visually presented comma was inserted at the disambiguating position in each sentence. Results showed that the comma effect on lexical processing was essentially the same as the effect of prosodic boundary cue. These findings demonstrate that processing a prosodic boundary could impair the processing of parafoveal information during sentence reading.}, language = {en} }