@article{BrandtKobeleHoehle2014,
  author    = {Brandt-Kobele, Oda-Christina and H{\"o}hle, Barbara},
  title     = {The detection of subject-verb agreement violations by {German-speaking} children: An eye-tracking study},
  series    = {Lingua : international review of general linguistics},
  volume    = {144},
  journal   = {Lingua : international review of general linguistics},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0024-3841},
  doi       = {10.1016/j.lingua.2013.12.008},
  pages     = {7--20},
  year      = {2014},
  abstract  = {This study examines the processing of sentences with and without subject verb agreement violations in German-speaking children at three and five years of age. An eye-tracking experiment was conducted to measure whether children's looking behavior was influenced by the grammaticality of the test sentences. The older group of children turned their gaze faster towards a target picture and looked longer at it when the object noun referring to the target was presented in a grammatical sentence with subject verb agreement compared to when the object noun was presented in a sentence in which an agreement violation occurred. The younger group of children displayed less conclusive results, with a tendency to look longer but not faster towards the target picture in the grammatical compared to the ungrammatical condition. This is the first experimental evidence that German-speaking five-year-old children are sensitive to subject verb agreement and violations thereof. Our results additionally substantiate that the eye-tracking paradigm is suitable to examine children's sensitivity to subtle grammatical violations.},
  language  = {en}
}

@article{BosHanneWartenburgeretal.2014,
  author    = {Bos, Laura S. and Hanne, Sandra and Wartenburger, Isabell and Bastiaanse, Roelien},
  title     = {Losing track of time? Processing of time reference inflection in agrammatic and healthy speakers of {German}},
  series    = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience},
  volume    = {65},
  journal   = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience},
  publisher = {Elsevier},
  address   = {Oxford},
  issn      = {0028-3932},
  doi       = {10.1016/j.neuropsychologia.2014.10.026},
  pages     = {180--190},
  year      = {2014},
  abstract  = {Background: Individuals with agrammatic aphasia (IWAs) have problems with grammatical decoding of tense inflection. However, these difficulties depend on the time frame that the tense refers to. Verb morphology with reference to the past is more difficult than with reference to the non-past, because a link needs to be made to the past event in discourse, as captured in the PAst Discourse Linking Hypothesis (PADILIH; Bastiaanse, R., Bamyaci, E., Hsu, C., Lee, J., Yarbay Duman, T., Thompson, C. K., 2011. Time reference in agrammatic aphasia: A cross-linguistic study. J. Neurolinguist. 24, 652-673). With respect to reference to the (non-discourse-linked) future, data so far indicate that IWAs experience less difficulties as compared to past time reference (Bastiaanse, R., Bamyaci, E., Hsu, C., Lee, J., Yarbay Duman, T., Thompson, C. K., 2011. Time reference in agrammatic aphasia: A cross-linguistic study. J. Neurolinguist. 24, 652-673), supporting the assumptions of the PADILIH. Previous online studies of time reference in aphasia used methods such as reaction times analysis (e.g., Faroqi-Shah, Y., Dickey, M. W., 2009. On-line processing of tense and temporality in agrammatic aphasia. Brain Lang. 108, 97-111). So far, no such study used eye-tracking, even though this technique can bring additional insights (Burchert, F., Hanne, S., Vasishth, S., 2013. Sentence comprehension disorders in aphasia: the concept of chance performance revisited. Aphasiology 27, 112-125, doi:10.1080/02687038.2012.730603). Aims: This study investigated (1) whether processing of future and past time reference inflection differs between non-brain-damaged individuals (NBDs) and IWAs, and (2) underlying mechanisms of time reference comprehension failure by IWAs. Results and discussion: NBDs scored at ceiling and significantly higher than the IWAs. IWAs had below-ceiling performance on the future condition, and both participant groups were faster to respond to the past than to the future condition. These differences are attributed to a pre-existing preference to look at a past picture, which has to be overcome. Eye movement patterns suggest that both groups interpret future time reference similarly, while IWAs show a delay relative to NBDs in interpreting past time reference inflection. The eye tracking results support the PADILIH, because processing reference to the past in discourse syntax requires additional resources and thus, is problematic and delayed for people with aphasia. (C) 2014 Elsevier Ltd. All rights reserved.},
  language  = {en}
}

@article{HanneBurchertDeBleseretal.2015,
  author    = {Hanne, Sandra and Burchert, Frank and De Bleser, Ria and Vasishth, Shravan},
  title     = {Sentence comprehension and morphological cues in aphasia: What eye-tracking reveals about integration and prediction},
  series    = {Journal of neurolinguistics : an international journal for the study of brain function in language behavior and experience},
  volume    = {34},
  journal   = {Journal of neurolinguistics : an international journal for the study of brain function in language behavior and experience},
  publisher = {Elsevier},
  address   = {Oxford},
  issn      = {0911-6044},
  doi       = {10.1016/j.jneuroling.2014.12.003},
  pages     = {83--111},
  year      = {2015},
  abstract  = {Comprehension of non-canonical sentences can be difficult for individuals with aphasia (IWA). It is still unclear to which extent morphological cues like case marking or verb inflection may influence IWA's performance or even help to override deficits in sentence comprehension. Until now, studies have mainly used offline methods to draw inferences about syntactic deficits and, so far, only a few studies have looked at online syntactic processing in aphasia. We investigated sentence processing in German-speaking IWA by combining an offline (sentence-picture matching) and an online (eye-tracking in the visual-world paradigm) method. Our goal was to determine whether IWA are capable of using inflectional morphology (number-agreement markers on verbs and case markers in noun phrases) as a cue to sentence interpretation. We report results of two visual-world experiments using German reversible SVO and OVS sentences. In each study, there were eight IWA and 20 age-matched controls. Experiment 1 targeted the role of unambiguous case morphology, while Experiment 2 looked at processing of number-agreement cues at the verb in case-ambiguous sentences. IWA showed deficits in using both types of morphological markers as a cue to non-canonical sentence interpretation and the results indicate that in aphasia, processing of case-marking cues is more vulnerable as compared to verb-agreement morphology. We ascribe this finding to the higher cue reliability of agreement cues, which renders them more resistant against impairments in aphasia. However, the online data revealed that IWA are in principle capable of successfully computing morphological cues, but the integration of morphological information is delayed as compared to age-matched controls. Furthermore, we found striking differences between controls and IWA regarding subject-before-object parsing predictions. While in case-unambiguous sentences IWA showed evidence for early subject-before-object parsing commitments, they exhibited no straightforward subject-first prediction in case-ambiguous sentences, although controls did so for ambiguous structures. IWA delayed their parsing decisions in case-ambiguous sentences until unambiguous morphological information, such as a subject-verb-number-agreement cue, was available. We attribute the results for IWA to deficits in predictive processes based on morphosyntactic cues during sentence comprehension. The results indicate that IWA adopt a wait-and-see strategy and initiate prediction of upcoming syntactic structure only when unambiguous case or agreement cues are available. (C) 2015 Elsevier Ltd. All rights reserved.},
  language  = {en}
}

@article{GaroufiStaudteKolleretal.2016,
  author    = {Garoufi, Konstantina and Staudte, Maria and Koller, Alexander and Crocker, Matthew W.},
  title     = {Exploiting Listener Gaze to Improve Situated Communication in Dynamic Virtual Environments},
  series    = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society},
  volume    = {40},
  journal   = {Cognitive science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken},
  issn      = {0364-0213},
  doi       = {10.1111/cogs.12298},
  pages     = {1671--1703},
  year      = {2016},
  abstract  = {Beyond the observation that both speakers and listeners rapidly inspect the visual targets of referring expressions, it has been argued that such gaze may constitute part of the communicative signal. In this study, we investigate whether a speaker may, in principle, exploit listener gaze to improve communicative success. In the context of a virtual environment where listeners follow computer-generated instructions, we provide two kinds of support for this claim. First, we show that listener gaze provides a reliable real-time index of understanding even in dynamic and complex environments, and on a per-utterance basis. Second, we show that a language generation system that uses listener gaze to provide rapid feedback improves overall task performance in comparison with two systems that do not use gaze. Aside from demonstrating the utility of listener gaze in situated communication, our findings open the door to new methods for developing and evaluating multi-modal models of situated interaction.},
  language  = {en}
}

@article{vonderMalsburgAngele2017,
  author    = {von der Malsburg, Titus Raban and Angele, Bernhard},
  title     = {False positives and other statistical errors in standard analyses of eye movements in reading},
  series    = {Journal of memory and language},
  volume    = {94},
  journal   = {Journal of memory and language},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2016.10.003},
  pages     = {119--133},
  year      = {2017},
  abstract  = {In research on eye movements in reading, it is common to analyze a number of canonical dependent measures to study how the effects of a manipulation unfold over time. Although this gives rise to the well-known multiple comparisons problem, i.e. an inflated probability that the null hypothesis is incorrectly rejected (Type I error), it is accepted standard practice not to apply any correction procedures. Instead, there appears to be a widespread belief that corrections are not necessary because the increase in false positives is too small to matter. To our knowledge, no formal argument has ever been presented to justify this assumption. Here, we report a computational investigation of this issue using Monte Carlo simulations. Our results show that, contrary to conventional wisdom, false positives are increased to unacceptable levels when no corrections are applied. Our simulations also show that counter-measures like the Bonferroni correction keep false positives in check while reducing statistical power only moderately. Hence, there is little reason why such corrections should not be made a standard requirement. Further, we discuss three statistical illusions that can arise when statistical power is low, and we show how power can be improved to prevent these illusions. In sum, our work renders a detailed picture of the various types of statistical errors that can occur in studies of reading behavior and we provide concrete guidance about how these errors can be avoided. (C) 2016 Elsevier Inc. All rights reserved.},
  language  = {en}
}

@article{KocJanuchtaHoefflerThomaetal.2017,
  author    = {Koc-Januchta, Marta and H{\"o}ffler, Tim and Thoma, Gun-Brit and Prechtl, Helmut and Leutner, Detlev},
  title     = {Visualizers versus verbalizers},
  series    = {Computers in human behavior},
  volume    = {68},
  journal   = {Computers in human behavior},
  publisher = {Elsevier},
  address   = {Oxford},
  issn      = {0747-5632},
  doi       = {10.1016/j.chb.2016.11.028},
  pages     = {170--179},
  year      = {2017},
  abstract  = {This study was conducted in order to examine the differences between visualizers and verbalizers in the way they gaze at pictures and texts while learning. Using a collection of questionnaires, college students were classified according to their visual or verbal cognitive style and were asked to learn about two different, in terms of subject and type of knowledge, topics by means of text-picture combinations. Eye-tracking was used to investigate their gaze behavior. The results show that visualizers spent significantly more time inspecting pictures than verbalizers, while verbalizers spent more time inspecting texts. Results also suggest that both visualizers' and verbalizers' way of learning is active but mostly within areas providing the source of information in line with their cognitive style (pictures or text). Verbalizers tended to enter non-informative, irrelevant areas of pictures sooner than visualizers. The comparison of learning outcomes showed that the group of visualizers achieved better results than the group of verbalizers on a comprehension test.},
  language  = {en}
}

@article{SoechtingTrapp2020,
  author    = {S{\"o}chting, Maximilian and Trapp, Matthias},
  title     = {Controlling image-stylization techniques using eye tracking},
  series    = {Science and Technology Publications},
  journal   = {Science and Technology Publications},
  publisher = {Springer},
  address   = {Berlin},
  issn      = {2184-4321},
  pages     = {10},
  year      = {2020},
  abstract  = {With the spread of smart phones capable of taking high-resolution photos and the development of high-speed mobile data infrastructure, digital visual media is becoming one of the most important forms of modern communication. With this development, however, also comes a devaluation of images as a media form with the focus becoming the frequency at which visual content is generated instead of the quality of the content. In this work, an interactive system using image-abstraction techniques and an eye tracking sensor is presented, which allows users to experience diverting and dynamic artworks that react to their eye movement. The underlying modular architecture enables a variety of different interaction techniques that share common design principles, making the interface as intuitive as possible. The resulting experience allows users to experience a game-like interaction in which they aim for a reward, the artwork, while being held under constraints, e.g., not blinking. The conscious eye movements that are required by some interaction techniques hint at an interesting, possible future extension for this work into the field of relaxation exercises and concentration training.},
  language  = {en}
}

@article{TsengLaubrockBateman2021,
  author    = {Tseng, Chiao-I and Laubrock, Jochen and Bateman, John A.},
  title     = {The impact of multimodal cohesion on attention and interpretation in film},
  series    = {Discourse, context \& media},
  volume    = {44},
  journal   = {Discourse, context \& media},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {2211-6958},
  doi       = {10.1016/j.dcm.2021.100544},
  pages     = {15},
  year      = {2021},
  abstract  = {This article presents results of an exploratory investigation combining multimodal cohesion analysis and eye-tracking studies. Multimodal cohesion, as a tool of multimodal discourse analysis, goes beyond linguistic cohesive mechanisms to enable the construction of cross-modal discourse structures that systematically relate technical details of audio, visual and verbal modalities. Patterns of multimodal cohesion from these discourse structures were used to design eye-tracking experiments and questionnaires in order to empirically investigate how auditory and visual cohesive cues affect attention and comprehension. We argue that the cross-modal structures of cohesion revealed by our method offer a strong methodology for addressing empirical questions concerning viewers' comprehension of narrative settings and the comparative salience of visual, verbal and audio cues. Analyses are presented of the beginning of Hitchcock's The Birds (1963) and a sketch from Monty Python filmed in 1971. Our approach balances the narrative-based issue of how narrative elements in film guide meaning interpretation and the recipient-based question of where a film viewer's attention is directed during viewing and how this affects comprehension.},
  language  = {en}
}