@phdthesis{Mertzen2022, author = {Mertzen, Daniela}, title = {A cross-linguistic investigation of similarity-based interference in sentence comprehension}, doi = {10.25932/publishup-55668}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-556685}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 129}, year = {2022}, abstract = {The aim of this dissertation was to conduct a larger-scale cross-linguistic empirical investigation of similarity-based interference effects in sentence comprehension. Interference studies can offer valuable insights into the mechanisms that are involved in long-distance dependency completion. Many studies have investigated similarity-based interference effects, showing that syntactic and semantic information are employed during long-distance dependency formation (e.g., Arnett \& Wagers, 2017; Cunnings \& Sturt, 2018; Van Dyke, 2007; Van Dyke \& Lewis, 2003; Van Dyke \& McElree, 2011). Nevertheless, there are some important open questions in the interference literature that are critical to our understanding of the constraints involved in dependency resolution. The first research question concerns the relative timing of syntactic and semantic interference in online sentence comprehension. Only a few interference studies have investigated this question, and, to date, there is not enough data to draw conclusions with regard to their time course (Van Dyke, 2007; Van Dyke \& McElree, 2011). Our first cross-linguistic study explores the relative timing of syntactic and semantic interference in two eye-tracking reading experiments that implement the study design used in Van Dyke (2007). The first experiment tests English sentences. The second, larger-sample experiment investigates the two interference types in German. Overall, the data suggest that syntactic and semantic interference can arise simultaneously during retrieval. The second research question concerns a special case of semantic interference: We investigate whether cue-based retrieval interference can be caused by semantically similar items which are not embedded in a syntactic structure. This second interference study builds on a landmark study by Van Dyke \& McElree (2006). Their study design is unique in that it is able to pin down the source of interference as a consequence of cue overload during retrieval, when semantic retrieval cues do not uniquely match the retrieval target. Unlike most other interference studies, this design is able to rule out encoding interference as an alternative explanation. Encoding accounts postulate that it is not cue overload at the retrieval site but the erroneous encoding of similar linguistic items in memory that leads to interference (Lewandowsky et al., 2008; Oberauer \& Kliegl, 2006). While Van Dyke \& McElree (2006) reported cue-based retrieval interference from sentence-external distractors, the evidence for this effect was weak. A subsequent study did not show interference of this type (Van Dyke et al., 2014). Given these inconclusive findings, further research is necessary to investigate semantic cue-based retrieval interference. The second study in this dissertation provides a larger-scale cross-linguistic investigation of cue-based retrieval interference from sentence-external items. Three larger-sample eye-tracking studies in English, German, and Russian tested cue-based interference in the online processing of filler-gap dependencies.
This study further extends the previous research by investigating interference in each language under varying task demands (Logačev \& Vasishth, 2016; Swets et al., 2008). Overall, we see some very modest support for proactive cue-based retrieval interference in English. Unexpectedly, this was observed only under a low task demand. In German and Russian, there is some evidence against the interference effect. It is possible that interference is attenuated in languages with richer case marking. In sum, the cross-linguistic experiments on the time course of syntactic and semantic interference from sentence-internal distractors support existing evidence of syntactic and semantic interference during sentence comprehension. Our data further show that both types of interference effects can arise simultaneously. Our cross-linguistic experiments investigating semantic cue-based retrieval interference from sentence-external distractors suggest that this type of interference may arise only in specific linguistic contexts.}, language = {en} } @article{TamasiMckeanGafosetal.2018, author = {Tamasi, Katalin and Mckean, Cristina and Gafos, Adamantios I. and H{\"o}hle, Barbara}, title = {Children's gradient sensitivity to phonological mismatch}, series = {Journal of child language}, volume = {46}, journal = {Journal of child language}, number = {1}, publisher = {Cambridge Univ. Press}, address = {New York}, issn = {0305-0009}, doi = {10.1017/S0305000918000259}, pages = {1 -- 23}, year = {2018}, abstract = {In a preferential looking paradigm, we studied how children's looking behavior and pupillary response were modulated by the degree of phonological mismatch between the correct label of a target referent and its manipulated form. We manipulated degree of mismatch by introducing one or more featural changes to the target label. Both looking behavior and pupillary response were sensitive to degree of mismatch, corroborating previous studies that found differential responses in one or the other measure. Using time-course analyses, we present for the first time results demonstrating full separability among conditions (detecting a difference not only between one vs. more, but also between two and three featural changes). Furthermore, the correct labels and small featural changes were associated with stable target preference, while large featural changes were associated with oscillating looking behavior, suggesting significant shifts in looking preference over time. These findings further support and extend the notion that early words are represented in great detail, containing subphonemic information.}, language = {en} } @article{RichterPaulHoehleetal.2020, author = {Richter, Maria and Paul, Mariella and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Common Ground Information Affects Reference Resolution}, series = {Frontiers in Psychology}, volume = {11}, journal = {Frontiers in Psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.565651}, pages = {17}, year = {2020}, abstract = {One of the most important social cognitive skills in humans is the ability to "put oneself in someone else's shoes," that is, to take another person's perspective. In socially situated communication, perspective taking enables the listener to arrive at a meaningful interpretation of what is said (sentence meaning) and what is meant (speaker's meaning) by the speaker.
To successfully decode the speaker's meaning, the listener has to take into account which information he/she and the speaker share in their common ground (CG). We here further investigated competing accounts about when and how CG information affects language comprehension by means of reaction time (RT) measures, accuracy data, event-related potentials (ERPs), and eye-tracking. Early integration accounts would predict that CG information is considered immediately and would hence not expect to find costs of CG integration. Late integration accounts would predict a rather late and effortful integration of CG information during the parsing process that might be reflected in integration or updating costs. Other accounts predict the simultaneous integration of privileged ground (PG) and CG perspectives. We used a computerized version of the referential communication game with object triplets of different sizes presented visually in CG or PG. In critical trials (i.e., conflict trials), CG information had to be integrated while privileged information had to be suppressed. Listeners mastered the integration of CG (response accuracy 99.8\%). Yet, slower RTs and enhanced late positivities in the ERPs showed that CG integration had its costs. Moreover, eye-tracking data indicated an early anticipation of referents in CG but an inability to suppress looks to the privileged competitor, resulting in later and longer looks to targets in those trials in which CG information had to be considered. Our data therefore support accounts that foresee an early anticipation of referents to be in CG but a rather late and effortful integration if conflicting information has to be processed. We show that both perspectives, PG and CG, contribute to socially situated language processing and discuss the data with reference to theoretical accounts and recent findings on the use of CG information for reference resolution.}, language = {en} } @misc{RichterPaulHoehleetal.2020, author = {Richter, Maria and Paul, Mariella and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Common Ground Information Affects Reference Resolution}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {698}, issn = {1866-8364}, doi = {10.25932/publishup-49060}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490607}, pages = {19}, year = {2020}, abstract = {One of the most important social cognitive skills in humans is the ability to "put oneself in someone else's shoes," that is, to take another person's perspective. In socially situated communication, perspective taking enables the listener to arrive at a meaningful interpretation of what is said (sentence meaning) and what is meant (speaker's meaning) by the speaker. To successfully decode the speaker's meaning, the listener has to take into account which information he/she and the speaker share in their common ground (CG). We here further investigated competing accounts about when and how CG information affects language comprehension by means of reaction time (RT) measures, accuracy data, event-related potentials (ERPs), and eye-tracking. Early integration accounts would predict that CG information is considered immediately and would hence not expect to find costs of CG integration.
Late integration accounts would predict a rather late and effortful integration of CG information during the parsing process that might be reflected in integration or updating costs. Other accounts predict the simultaneous integration of privileged ground (PG) and CG perspectives. We used a computerized version of the referential communication game with object triplets of different sizes presented visually in CG or PG. In critical trials (i.e., conflict trials), CG information had to be integrated while privileged information had to be suppressed. Listeners mastered the integration of CG (response accuracy 99.8\%). Yet, slower RTs and enhanced late positivities in the ERPs showed that CG integration had its costs. Moreover, eye-tracking data indicated an early anticipation of referents in CG but an inability to suppress looks to the privileged competitor, resulting in later and longer looks to targets in those trials in which CG information had to be considered. Our data therefore support accounts that foresee an early anticipation of referents to be in CG but a rather late and effortful integration if conflicting information has to be processed. We show that both perspectives, PG and CG, contribute to socially situated language processing and discuss the data with reference to theoretical accounts and recent findings on the use of CG information for reference resolution.}, language = {en} } @phdthesis{BrandtKobele2014, author = {Brandt-Kobele, Oda-Christina}, title = {Comprehension of verb inflection in German-speaking children}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-216-2}, issn = {1869-3822}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62046}, school = {Universit{\"a}t Potsdam}, pages = {xx, 325}, year = {2014}, abstract = {Previous studies on the acquisition of verb inflection in normally developing children have revealed an astonishing pattern: children use correctly inflected verbs in their own speech but fail to make use of verb inflections when comprehending sentences uttered by others. Thus, a three-year-old might well be able to say something like 'The cat sleeps on the bed', but fails to understand that the same sentence, when uttered by another person, refers to only one sleeping cat but not more than one. The previous studies that have examined children's comprehension of verb inflections have employed a variant of a picture selection task in which the child was asked to explicitly indicate (via pointing) what semantic meaning she had inferred from the test sentence. Recent research on other linguistic structures, such as pronouns or focus particles, has indicated that earlier comprehension abilities can be found when methods are used that do not require an explicit reaction, like preferential looking tasks. This dissertation aimed to examine whether children are truly not able to understand the connection between the verb form and the meaning of the sentence subject until the age of five years or whether earlier comprehension can be found when a different measure, preferential looking, is used. Additionally, children's processing of subject-verb agreement violations was examined.
The three experiments of this thesis that examined children's comprehension of verb inflections revealed the following: German-speaking three- to four-year-old children looked more to a picture showing one actor when hearing a sentence with a singular inflected verb but only when their eye gaze was tracked and they did not have to perform a picture selection task. When they were asked to point to the matching picture, they performed at chance level. This pattern indicates asymmetries in children's language performance even within the receptive modality. The fourth experiment examined sensitivity to subject-verb agreement violations and did not reveal evidence for sensitivity toward agreement violations in three- and four-year-old children, but only found that children's looking patterns were influenced by the grammatical violations at the age of five. The results from these experiments are discussed in relation to the existence of a production-comprehension asymmetry in the use of verb inflections and children's underlying grammatical knowledge.}, language = {en} } @misc{SoechtingTrapp2020, author = {S{\"o}chting, Maximilian and Trapp, Matthias}, title = {Controlling image-stylization techniques using eye tracking}, series = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, journal = {Postprints der Universit{\"a}t Potsdam : Reihe der Digital Engineering Fakult{\"a}t}, number = {7}, doi = {10.25932/publishup-52471}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-524717}, pages = {12}, year = {2020}, abstract = {With the spread of smart phones capable of taking high-resolution photos and the development of high-speed mobile data infrastructure, digital visual media is becoming one of the most important forms of modern communication. With this development, however, also comes a devaluation of images as a media form with the focus becoming the frequency at which visual content is generated instead of the quality of the content. In this work, an interactive system using image-abstraction techniques and an eye tracking sensor is presented, which allows users to experience diverting and dynamic artworks that react to their eye movement. The underlying modular architecture enables a variety of different interaction techniques that share common design principles, making the interface as intuitive as possible. The resulting experience allows users to experience a game-like interaction in which they aim for a reward, the artwork, while being held under constraints, e.g., not blinking. The conscious eye movements that are required by some interaction techniques hint at an interesting possible future extension for this work into the field of relaxation exercises and concentration training.}, language = {en} } @article{LagoSloggettSchlueteretal.2017, author = {Lago, Sol and Sloggett, Shayne and Schl{\"u}ter, Zoe and Chow, Wing Yee and Williams, Alexander and Lau, Ellen and Phillips, Colin}, title = {Coreference and Antecedent Representation Across Languages}, series = {Journal of experimental psychology : Learning, memory, and cognition}, volume = {43}, journal = {Journal of experimental psychology : Learning, memory, and cognition}, publisher = {American Psychological Association}, address = {Washington}, issn = {0278-7393}, doi = {10.1037/xlm0000343}, pages = {795 -- 817}, year = {2017}, language = {en} } @article{SafaviHusainVasishth2016, author = {Safavi, Molood S.
and Husain, Samar and Vasishth, Shravan}, title = {Dependency Resolution Difficulty Increases with Distance in Persian Separable Complex Predicates}, series = {Frontiers in psychology}, volume = {7}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2016.00403}, pages = {1 -- 15}, year = {2016}, abstract = {Delaying the appearance of a verb in a noun-verb dependency tends to increase processing difficulty at the verb; one explanation for this locality effect is decay and/or interference of the noun in working memory. Surprisal, an expectation-based account, predicts that delaying the appearance of a verb either renders it no more predictable or more predictable, leading respectively to a prediction of no effect of distance or a facilitation. Recently, Husain et al. (2014) suggested that when the exact identity of the upcoming verb is predictable (strong predictability), increasing argument-verb distance leads to facilitation effects, which is consistent with surprisal; but when the exact identity of the upcoming verb is not predictable (weak predictability), locality effects are seen. We investigated Husain et al.'s proposal using Persian complex predicates (CPs), which consist of a non-verbal element—a noun in the current study—and a verb. In CPs, once the noun has been read, the exact identity of the verb is highly predictable (strong predictability); this was confirmed using a sentence completion study. In two self-paced reading (SPR) and two eye-tracking (ET) experiments, we delayed the appearance of the verb by interposing a relative clause (Experiments 1 and 3) or a long PP (Experiments 2 and 4). We also included a simple Noun-Verb predicate configuration with the same distance manipulation; here, the exact identity of the verb was not predictable (weak predictability). Thus, the design crossed Predictability Strength and Distance. We found that, consistent with surprisal, the verb in the strong predictability conditions was read faster than in the weak predictability conditions. Furthermore, greater verb-argument distance led to slower reading times; strong predictability did not neutralize or attenuate the locality effects. As regards the effect of distance on dependency resolution difficulty, these four experiments present evidence in favor of working memory accounts of argument-verb dependency resolution, and against the surprisal-based expectation account of Levy (2008). However, another expectation-based measure, entropy, which was computed using the offline sentence completion data, predicts reading times in Experiment 1 but not in the other experiments. Because participants tend to produce more ungrammatical continuations in the long-distance condition in Experiment 1, we suggest that forgetting due to memory overload leads to greater entropy at the verb.}, language = {en} } @misc{SafaviHusainVasishth2016, author = {Safavi, Molood S. and Husain, Samar and Vasishth, Shravan}, title = {Dependency Resolution Difficulty Increases with Distance in Persian Separable Complex Predicates}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90728}, pages = {1 -- 15}, year = {2016}, abstract = {Delaying the appearance of a verb in a noun-verb dependency tends to increase processing difficulty at the verb; one explanation for this locality effect is decay and/or interference of the noun in working memory. 
Surprisal, an expectation-based account, predicts that delaying the appearance of a verb either renders it no more predictable or more predictable, leading respectively to a prediction of no effect of distance or a facilitation. Recently, Husain et al. (2014) suggested that when the exact identity of the upcoming verb is predictable (strong predictability), increasing argument-verb distance leads to facilitation effects, which is consistent with surprisal; but when the exact identity of the upcoming verb is not predictable (weak predictability), locality effects are seen. We investigated Husain et al.'s proposal using Persian complex predicates (CPs), which consist of a non-verbal element—a noun in the current study—and a verb. In CPs, once the noun has been read, the exact identity of the verb is highly predictable (strong predictability); this was confirmed using a sentence completion study. In two self-paced reading (SPR) and two eye-tracking (ET) experiments, we delayed the appearance of the verb by interposing a relative clause (Experiments 1 and 3) or a long PP (Experiments 2 and 4). We also included a simple Noun-Verb predicate configuration with the same distance manipulation; here, the exact identity of the verb was not predictable (weak predictability). Thus, the design crossed Predictability Strength and Distance. We found that, consistent with surprisal, the verb in the strong predictability conditions was read faster than in the weak predictability conditions. Furthermore, greater verb-argument distance led to slower reading times; strong predictability did not neutralize or attenuate the locality effects. As regards the effect of distance on dependency resolution difficulty, these four experiments present evidence in favor of working memory accounts of argument-verb dependency resolution, and against the surprisal-based expectation account of Levy (2008). However, another expectation-based measure, entropy, which was computed using the offline sentence completion data, predicts reading times in Experiment 1 but not in the other experiments. Because participants tend to produce more ungrammatical continuations in the long-distance condition in Experiment 1, we suggest that forgetting due to memory overload leads to greater entropy at the verb.}, language = {en} } @article{YangWangYanetal.2012, author = {Yang, Jiongjiong and Wang, Aobing and Yan, Ming and Zhu, Zijian and Chen, Cheng and Wang, Yizhou}, title = {Distinct processing for pictures of animals and objects: Evidence from eye movements}, series = {Emotion : a new journal from the American Psychological Association}, volume = {12}, journal = {Emotion : a new journal from the American Psychological Association}, number = {3}, publisher = {American Psychological Association}, address = {Washington}, issn = {1528-3542}, doi = {10.1037/a0026848}, pages = {540 -- 551}, year = {2012}, abstract = {Many studies have suggested that emotional stimuli orient and engage attention. There is also evidence that animate stimuli, such as those from humans and animals, cause attentional bias. However, categorical and emotional factors are usually mixed, and it is unclear to what extent human context influences attentional allocation. To address this issue, we tracked participants' eye movements while they viewed pictures with animals and inanimate images (i.e., category) as focal objects.
These pictures had either negative or neutral emotional valence, and either human body parts or nonhuman parts were near the focal objects (i.e., context). The picture's valence, arousal, position, size, and most of the low-level visual features were matched across categories. The results showed that nonhuman animals were more likely to be attended to and to be attended to for longer times than inanimate objects. The same pattern held for the human contexts (vs. nonhuman contexts). The effects of emotional valence, category, and context interacted. Specifically, in images with a negative valence, focal animals and objects with human context had comparable numbers of gaze fixations and gaze duration. These results highlighted the attentional bias to animate parts of a picture and clarified that the effects of category, valence, and picture context interacted to influence attentional allocation.}, language = {en} }