% Reformatted: one field per line, consistent field order, pages with `--`
% (no spaces). Redundant `series` fields (exact duplicates of `journal`,
% an export artifact) removed. Citation keys unchanged.

@article{ElsnerPfeiferParkeretal.2013,
  author    = {Elsner, Birgit and Pfeifer, Caroline and Parker, Charlene and Hauf, Petra},
  title     = {Infants' perception of actions and situational constraints - an eye-tracking study},
  journal   = {Journal of Experimental Child Psychology},
  volume    = {116},
  number    = {2},
  pages     = {428--442},
  year      = {2013},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0022-0965},
  doi       = {10.1016/j.jecp.2012.11.014},
  abstract  = {Rational action understanding requires that infants evaluate the efficiency of a movement in achieving a goal with respect to situational constraints. In contrast, recent accounts have highlighted the impact of perceptual characteristics of the demonstrated movement or constraints to explain infants' behavior in so-called rational imitation tasks. The current study employed eye tracking to investigate how 13- to 15-month-old infants distribute their visual attention to different aspects of an action demonstration. In three tasks (touchlight, house, and obstacle), infants watched videos in which a model performed an unusual action while she was or was not restricted by situational constraints. Infants' overall looking to the demonstration as well as looking to four segments of the video (initial segment, constraint demonstration, action performance, and final segment) and to specific areas (constraint area of interest [AOI] and action AOI) was analyzed. Overall, infants looked longer at the demonstration in the constraint condition compared with the no-constraint condition. The condition differences occurred in the two video segments where the constraint or action was displayed and were especially profound for the constraint AOI. These findings indicate that infants processed the situational constraints. However, the pattern of condition differences varied slightly in the three tasks. In sum, the data imply that infants process perceptual characteristics of the movement or constraints and that low-level perceptual processes interact with higher level cognitive processes in infants' action perception.},
  language  = {en},
}

@article{RothkegelTrukenbrodSchuettetal.2016,
  author    = {Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Sch{\"u}tt, Heiko Herbert and Wichmann, Felix A. and Engbert, Ralf},
  title     = {Influence of initial fixation position in scene viewing},
  journal   = {Vision Research: An International Journal for Functional Aspects of Vision},
  volume    = {129},
  pages     = {33--49},
  year      = {2016},
  publisher = {Elsevier},
  address   = {Oxford},
  issn      = {0042-6989},
  doi       = {10.1016/j.visres.2016.09.012},
  language  = {en},
}

@article{MyachykovEllisCangelosietal.2013,
  author    = {Myachykov, Andriy and Ellis, Rob and Cangelosi, Angelo and Fischer, Martin H.},
  title     = {Visual and linguistic cues to graspable objects},
  journal   = {Experimental Brain Research},
  volume    = {229},
  number    = {4},
  pages     = {545--559},
  year      = {2013},
  publisher = {Springer},
  address   = {New York},
  issn      = {0014-4819},
  doi       = {10.1007/s00221-013-3616-z},
  abstract  = {Two experiments investigated (1) how activation of manual affordances is triggered by visual and linguistic cues to manipulable objects and (2) whether graspable object parts play a special role in this process. Participants pressed a key to categorize manipulable target objects copresented with manipulable distractor objects on a computer screen. Three factors were varied in Experiment 1: (1) the target's and (2) the distractor's handles' orientation congruency with the lateral manual response and (3) the Visual Focus on one of the objects. In Experiment 2, a linguistic cue factor was added to these three factors-participants heard the name of one of the two objects prior to the target display onset. Analysis of participants' motor and oculomotor behaviour confirmed that perceptual and linguistic cues potentiated activation of grasp affordances. Both target- and distractor-related affordance effects were modulated by the presence of visual and linguistic cues. However, a differential visual attention mechanism subserved activation of compatibility effects associated with target and distractor objects. We also registered an independent implicit attention attraction effect from objects' handles, suggesting that graspable parts automatically attract attention during object viewing. This effect was further amplified by visual but not linguistic cues, thus providing initial evidence for a recent hypothesis about differential roles of visual and linguistic information in potentiating stable and variable affordances (Borghi in Language and action in cognitive neuroscience. Psychology Press, London, 2012).},
  language  = {en},
}