@article{TsaiKlieglYan2012,
  author    = {Tsai, Jie-Li and Kliegl, Reinhold and Yan, Ming},
  title     = {Parafoveal semantic information extraction in traditional {Chinese} reading},
  journal   = {Acta psychologica : international journal of psychonomics},
  volume    = {141},
  number    = {1},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0001-6918},
  doi       = {10.1016/j.actpsy.2012.06.004},
  pages     = {17--23},
  year      = {2012},
  abstract  = {Semantic information extraction from the parafovea has been reported only in simplified Chinese for a special subset of characters and its generalizability has been questioned. This study uses traditional Chinese, which differs from simplified Chinese in visual complexity and in mapping semantic forms, to demonstrate access to parafoveal semantic information during reading of this script. Preview duration modulates various types (identical, phonological, and unrelated) of parafoveal information extraction. Parafoveal semantic extraction is more elusive in English; therefore, we conclude that such effects in Chinese are presumably caused by substantial cross-language differences from alphabetic scripts. The property of Chinese characters carrying rich lexical information in a small region provides the possibility of semantic extraction in the parafovea.},
  language  = {en},
}

@article{YanRisseZhouetal.2012,
  author    = {Yan, Ming and Risse, Sarah and Zhou, Xiaolin and Kliegl, Reinhold},
  title     = {Preview fixation duration modulates identical and semantic preview benefit in {Chinese} reading},
  journal   = {Reading and writing : an interdisciplinary journal},
  volume    = {25},
  number    = {5},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {0922-4777},
  doi       = {10.1007/s11145-010-9274-7},
  pages     = {1093--1111},
  year      = {2012},
  abstract  = {Semantic preview benefit from parafoveal words is critical for proposals of distributed lexical processing during reading. Semantic preview benefit has been demonstrated for Chinese reading with the boundary paradigm in which unrelated or semantically related previews of a target word N + 1 are replaced by the target word once the eyes cross an invisible boundary located after word N (Yan et al., 2009); for the target word in position N + 2, only identical compared to unrelated-word preview led to shorter fixation times on the target word (Yan et al., in press). A reanalysis of these data reveals that identical and semantic preview benefits depend on preview duration (i.e., the fixation duration on the preboundary word). Identical preview benefit from word N + 1 increased with preview duration. The identical preview benefit was also significant for N + 2, but did not significantly interact with preview duration. The previously reported semantic preview benefit from word N + 1 was mainly due to single- or first-fixation durations following short previews. We discuss implications for notions of serial attention shifts and parallel distributed processing of words during reading.},
  language  = {en},
}

@article{YanZhouShuetal.2012,
  author    = {Yan, Ming and Zhou, Wei and Shu, Hua and Kliegl, Reinhold},
  title     = {Lexical and sublexical semantic preview benefits in {Chinese} reading},
  journal   = {Journal of experimental psychology : Learning, memory, and cognition},
  volume    = {38},
  number    = {4},
  publisher = {American Psychological Association},
  address   = {Washington},
  issn      = {0278-7393},
  doi       = {10.1037/a0026935},
  pages     = {1069--1075},
  year      = {2012},
  abstract  = {Semantic processing from parafoveal words is an elusive phenomenon in alphabetic languages, but it has been demonstrated only for a restricted set of noncompound Chinese characters. Using the gaze-contingent boundary paradigm, this experiment examined whether parafoveal lexical and sublexical semantic information was extracted from compound preview characters. Results generalized parafoveal semantic processing to this representative set of Chinese characters and extended the parafoveal processing to radical (sublexical) level semantic information extraction. Implications for notions of parafoveal information extraction during Chinese reading are discussed.},
  language  = {en},
}

@article{YangWangYanetal.2012,
  author    = {Yang, Jiongjiong and Wang, Aobing and Yan, Ming and Zhu, Zijian and Chen, Cheng and Wang, Yizhou},
  title     = {Distinct processing for pictures of animals and objects: Evidence from eye movements},
  journal   = {Emotion : a new journal from the American Psychological Association},
  volume    = {12},
  number    = {3},
  publisher = {American Psychological Association},
  address   = {Washington},
  issn      = {1528-3542},
  doi       = {10.1037/a0026848},
  pages     = {540--551},
  year      = {2012},
  abstract  = {Many studies have suggested that emotional stimuli orient and engage attention. There is also evidence that animate stimuli, such as those from humans and animals, cause attentional bias. However, categorical and emotional factors are usually mixed, and it is unclear to what extent human context influences attentional allocation. To address this issue, we tracked participants' eye movements while they viewed pictures with animals and inanimate images (i.e., category) as focal objects. These pictures had either negative or neutral emotional valence, and either human body parts or nonhuman parts were near the focal objects (i.e., context). The picture's valence, arousal, position, size, and most of the low-level visual features were matched across categories. The results showed that nonhuman animals were more likely to be attended to and to be attended to for longer times than inanimate objects. The same pattern held for the human contexts (vs. nonhuman contexts). The effects of emotional valence, category, and context interacted. Specifically, in images with a negative valence, focal animals and objects with human context had comparable numbers of gaze fixations and gaze duration. These results highlighted the attentional bias to animate parts of a picture and clarified that the effects of category, valence, and picture context interacted to influence attentional allocation.},
  language  = {en},
}