@phdthesis{Seelig2021,
  author   = {Seelig, Stefan},
  title    = {Parafoveal processing of lexical information during reading},
  doi      = {10.25932/publishup-50874},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-508743},
  school   = {Universit{\"a}t Potsdam},
  pages    = {xi, 113},
  year     = {2021},
  abstract = {During sentence reading the eyes quickly jump from word to word to sample visual information with the high acuity of the fovea. Lexical properties of the currently fixated word are known to affect the duration of the fixation, reflecting an interaction of word processing with oculomotor planning. While low level properties of words in the parafovea can likewise affect the current fixation duration, results concerning the influence of lexical properties have been ambiguous (Drieghe, Rayner, \& Pollatsek, 2008; Kliegl, Nuthmann, \& Engbert, 2006). Experimental investigations of such lexical parafoveal-on-foveal effects using the boundary paradigm have instead shown, that lexical properties of parafoveal previews affect fixation durations on the upcoming target words (Risse \& Kliegl, 2014). However, the results were potentially confounded with effects of preview validity. The notion of parafoveal processing of lexical information challenges extant models of eye movements during reading. Models containing serial word processing assumptions have trouble explaining such effects, as they usually couple successful word processing to saccade planning, resulting in skipping of the parafoveal word. Although models with parallel word processing are less restricted, in the SWIFT model (Engbert, Longtin, \& Kliegl, 2002) only processing of the foveal word can directly influence the saccade latency. Here we combine the results of a boundary experiment (Chapter 2) with a predictive modeling approach using the SWIFT model, where we explore mechanisms of parafoveal inhibition in a simulation study (Chapter 4). We construct a likelihood function for the SWIFT model (Chapter 3) and utilize the experimental data in a Bayesian approach to parameter estimation (Chapter 3 \& 4). The experimental results show a substantial effect of parafoveal preview frequency on fixation durations on the target word, which can be clearly distinguished from the effect of preview validity. Using the eye movement data from the participants, we demonstrate the feasibility of the Bayesian approach even for a small set of estimated parameters, by comparing summary statistics of experimental and simulated data. Finally, we can show that the SWIFT model can account for the lexical preview effects, when a mechanism for parafoveal inhibition is added. The effects of preview validity were modeled best, when processing dependent saccade cancellation was added for invalid trials. In the simulation study only the control condition of the experiment was used for parameter estimation, allowing for cross validation. Simultaneously the number of free parameters was increased. High correlations of summary statistics demonstrate the capabilities of the parameter estimation approach. Taken together, the results advocate for a better integration of experimental data into computational modeling via parameter estimation.},
  language = {en},
}

@phdthesis{Rothkegel2018,
  author   = {Rothkegel, Lars Oliver Martin},
  title    = {Human scanpaths in natural scene viewing and natural scene search},
  doi      = {10.25932/publishup-42000},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-420005},
  school   = {Universit{\"a}t Potsdam},
  pages    = {xv, 131},
  year     = {2018},
  abstract = {Understanding how humans move their eyes is an important part for understanding the functioning of the visual system. Analyzing eye movements from observations of natural scenes on a computer screen is a step to understand human visual behavior in the real world. When analyzing eye-movement data from scene-viewing experiments, the important questions are where (fixation locations), how long (fixation durations) and when (ordering of fixations) participants fixate on an image. By answering these questions, computational models can be developed which predict human scanpaths. Models serve as a tool to understand the underlying cognitive processes while observing an image, especially the allocation of visual attention. The goal of this thesis is to provide new contributions to characterize and model human scanpaths on natural scenes. The results from this thesis will help to understand and describe certain systematic eye-movement tendencies, which are mostly independent of the image. One eye-movement tendency I focus on throughout this thesis is the tendency to fixate more in the center of an image than on the outer parts, called the central fixation bias. Another tendency, which I will investigate thoroughly, is the characteristic distribution of angles between successive eye movements. The results serve to evaluate and improve a previously published model of scanpath generation from our laboratory, the SceneWalk model. Overall, six experiments were conducted for this thesis which led to the following five core results: i) A spatial inhibition of return can be found in scene-viewing data. This means that locations which have already been fixated are afterwards avoided for a certain time interval (Chapter 2). ii) The initial fixation position when observing an image has a long-lasting influence of up to five seconds on further scanpath progression (Chapter 2 \& 3). iii) The often described central fixation bias on images depends strongly on the duration of the initial fixation. Long-lasting initial fixations lead to a weaker central fixation bias than short fixations (Chapter 2 \& 3). iv) Human observers adjust their basic eye-movement parameters, like fixation durations and saccade amplitudes, to the visual properties of a target they look for in visual search (Chapter 4). v) The angle between two adjacent saccades is an indicator for the selectivity of the upcoming saccade target (Chapter 4). All results emphasize the importance of systematic behavioral eye-movement tendencies and dynamic aspects of human scanpaths in scene viewing.},
  language = {en},
}

@phdthesis{Trukenbrod2012,
  author   = {Trukenbrod, Hans Arne},
  title    = {Temporal and spatial aspects of eye-movement control: from reading to scanning},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70206},
  school   = {Universit{\"a}t Potsdam},
  year     = {2012},
  abstract = {Eye movements are a powerful tool to examine cognitive processes. However, in most paradigms little is known about the dynamics present in sequences of saccades and fixations. In particular, the control of fixation durations has been widely neglected in most tasks. As a notable exception, both spatial and temporal aspects of eye-movement control have been thoroughly investigated during reading. There, the scientific discourse was dominated by three controversies, (i), the role of oculomotor vs. cognitive processing on eye-movement control, (ii) the serial vs. parallel processing of words, and, (iii), the control of fixation durations. The main purpose of this thesis was to investigate eye movements in tasks that require sequences of fixations and saccades. While reading phenomena served as a starting point, we examined eye guidance in non-reading tasks with the aim to identify general principles of eye-movement control. In addition, the investigation of eye movements in non-reading tasks helped refine our knowledge about eye-movement control during reading. Our approach included the investigation of eye movements in non-reading experiments as well as the evaluation and development of computational models. I present three main results: First, oculomotor phenomena during reading can also be observed in non-reading tasks (Chapter 2 \& 4). Oculomotor processes determine the fixation position within an object. The fixation position, in turn, modulates both the next saccade target and the current fixation duration. Second, predictions of eye-movement models based on sequential attention shifts were falsified (Chapter 3). In fact, our results suggest that distributed processing of multiple objects forms the basis of eye-movement control. Third, fixation durations are under asymmetric control (Chapter 4). While increasing processing demands immediately prolong fixation durations, decreasing processing demands reduce fixation durations only with a temporal delay. We propose a computational model ICAT to account for asymmetric control. In this model, an autonomous timer initiates saccades after random time intervals independent of ongoing processing. However, processing demands that are higher than expected inhibit the execution of the next saccade and, thereby, prolong the current fixation. On the other hand, lower processing demands will not affect the duration before the next saccade is executed. Since the autonomous timer adjusts to expected processing demands from fixation to fixation, a decrease in processing demands may lead to a temporally delayed reduction of fixation durations. In an extended version of ICAT, we evaluated its performance while simulating both temporal and spatial aspects of eye-movement control. The eye-movement phenomena investigated in this thesis have now been observed in a number of different tasks, which suggests that they represent general principles of eye guidance. I propose that distributed processing of the visual input forms the basis of eye-movement control, while fixation durations are controlled by the principles outlined in ICAT. In addition, oculomotor control contributes considerably to the variability observed in eye movements. Interpretations for the relation between eye movements and cognition strongly benefit from a precise understanding of this interplay.},
  language = {en},
}