@misc{ElsnerAdam2020, author = {Elsner, Birgit and Adam, Maurits}, title = {Infants' goal prediction for simple action events}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {1}, issn = {1866-8364}, doi = {10.25932/publishup-51665}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-516657}, pages = {45 -- 62}, year = {2020}, abstract = {Looking times and gaze behavior indicate that infants can predict the goal state of an observed simple action event (e.g., object-directed grasping) already in the first year of life. The present paper mainly focuses on infants' predictive gaze-shifts toward the goal of an ongoing action. For this, infants need to generate a forward model of the to-be-obtained goal state and to disengage their gaze from the moving agent at a time when information about the action event is still incomplete. By about 6 months of age, infants show goal-predictive gaze-shifts, but mainly for familiar actions that they can perform themselves (e.g., grasping) and for familiar agents (e.g., a human hand). Therefore, some theoretical models have highlighted close relations between infants' ability for action-goal prediction and their motor development and/or emerging action experience. Recent research indicates that infants can also predict action goals of familiar simple actions performed by non-human agents (e.g., object-directed grasping by a mechanical claw) when these agents display agency cues, such as self-propelled movement, equifinality of goal approach, or production of a salient action effect. This paper provides a review on relevant findings and theoretical models, and proposes that the impacts of action experience and of agency cues can be explained from an action-event perspective. In particular, infants' goal-predictive gaze-shifts are seen as resulting from an interplay between bottom-up processing of perceptual information and top-down influences exerted by event schemata that store information about previously executed or observed actions.}, language = {en} } @article{ElsnerAdam2021, author = {Elsner, Birgit and Adam, Maurits}, title = {Infants' goal prediction for simple action events}, series = {Topics in cognitive science / Cognitive Science Society}, volume = {13}, journal = {Topics in cognitive science / Cognitive Science Society}, number = {1}, publisher = {Wiley}, address = {Oxford}, issn = {1756-8765}, doi = {10.1111/tops.12494}, pages = {45 -- 62}, year = {2021}, abstract = {Looking times and gaze behavior indicate that infants can predict the goal state of an observed simple action event (e.g., object-directed grasping) already in the first year of life. The present paper mainly focuses on infants' predictive gaze-shifts toward the goal of an ongoing action. For this, infants need to generate a forward model of the to-be-obtained goal state and to disengage their gaze from the moving agent at a time when information about the action event is still incomplete. By about 6 months of age, infants show goal-predictive gaze-shifts, but mainly for familiar actions that they can perform themselves (e.g., grasping) and for familiar agents (e.g., a human hand). Therefore, some theoretical models have highlighted close relations between infants' ability for action-goal prediction and their motor development and/or emerging action experience. 
Recent research indicates that infants can also predict action goals of familiar simple actions performed by non-human agents (e.g., object-directed grasping by a mechanical claw) when these agents display agency cues, such as self-propelled movement, equifinality of goal approach, or production of a salient action effect. This paper provides a review on relevant findings and theoretical models, and proposes that the impacts of action experience and of agency cues can be explained from an action-event perspective. In particular, infants' goal-predictive gaze-shifts are seen as resulting from an interplay between bottom-up processing of perceptual information and top-down influences exerted by event schemata that store information about previously executed or observed actions.}, language = {en} } @article{AdamReitenbachElsner2017, author = {Adam, Maurits and Reitenbach, Ivanina and Elsner, Birgit}, title = {Agency cues and 11-month-olds' and adults' anticipation of action goals}, series = {Cognitive Development}, volume = {43}, journal = {Cognitive Development}, publisher = {Elsevier}, address = {New York}, issn = {0885-2014}, doi = {10.1016/j.cogdev.2017.02.008}, pages = {37 -- 48}, year = {2017}, abstract = {For the processing of goal-directed actions, some accounts emphasize the importance of experience with the action or the agent. Other accounts stress the importance of agency cues. We investigated the impact of agency cues on 11-month-olds' and adults' goal anticipation for a grasping-action performed by a mechanical claw. With an eyetracker, we measured anticipations in two conditions, where the claw was displayed either with or without agency cues. In two experiments, 11-month-olds were predictive when agency cues were present, but reactive when no agency cues were presented. Adults were predictive in both conditions. Furthermore, 11-month-olds rapidly learned to predict the goal in the agency condition, but not in the mechanical condition. Adults' predictions did not change across trials in the agency condition, but decelerated in the mechanical condition. Thus, agency cues and own action experience are important for infants' and adults' online processing of goal-directed actions by non-human agents.}, language = {en} } @article{AdamElsner2018, author = {Adam, Maurits and Elsner, Birgit}, title = {Action effects foster 11-month-olds' prediction of action goals for a non-human agent}, series = {Infant behavior \& development : an international and interdisciplinary journal}, volume = {53}, journal = {Infant behavior \& development : an international and interdisciplinary journal}, publisher = {Elsevier}, address = {New York}, issn = {0163-6383}, doi = {10.1016/j.infbeh.2018.09.002}, pages = {49 -- 55}, year = {2018}, abstract = {Action effects have been stated to be important for infants' processing of goal-directed actions. In this study, 11-month-olds showed equally fast predictive gaze shifts to a claw's action goal when the grasping action was presented either with three agency cues (self-propelled movement, equifinality of goal achievement and a salient action effect) or with only a salient action effect, but infants showed tracking gaze when the claw showed only self-propelled movement and equifinality of goal achievement. 
The results suggest that action effects, compared to purely kinematic cues, seem to be especially important for infants' online processing of goal-directed actions.}, language = {en} } @article{AdamElsner2020, author = {Adam, Maurits and Elsner, Birgit}, title = {The impact of salient action effects on 6-, 7-, and 11-month-olds' goal-predictive gaze shifts for a human grasping action}, series = {PLOS ONE}, volume = {15}, journal = {PLOS ONE}, number = {10}, publisher = {Public Library of Science}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0240165}, pages = {18}, year = {2020}, abstract = {When infants observe a human grasping action, experience-based accounts predict that all infants familiar with grasping actions should be able to predict the goal regardless of additional agency cues such as an action effect. Cue-based accounts, however, suggest that infants use agency cues to identify and predict action goals when the action or the agent is not familiar. From these accounts, we hypothesized that younger infants would need additional agency cues such as a salient action effect to predict the goal of a human grasping action, whereas older infants should be able to predict the goal regardless of agency cues. In three experiments, we presented 6-, 7-, and 11-month-olds with videos of a manual grasping action presented either with or without an additional salient action effect (Exp. 1 and 2), or we presented 7-month-olds with videos of a mechanical claw performing a grasping action presented with a salient action effect (Exp. 3). The 6-month-olds showed tracking gaze behavior, and the 11-month-olds showed predictive gaze behavior, regardless of the action effect. However, the 7-month-olds showed predictive gaze behavior in the action-effect condition, but tracking gaze behavior in the no-action-effect condition and in the action-effect condition with a mechanical claw. The results therefore support the idea that salient action effects are especially important for infants' goal predictions from 7 months on, and that this facilitating influence of action effects is selective for the observation of human hands.}, language = {en} } @article{AdamReitenbachPapenmeieretal.2016, author = {Adam, Maurits and Reitenbach, Ivanina and Papenmeier, Frank and Gredeb{\"a}ck, Gustaf and Elsner, Claudia and Elsner, Birgit}, title = {Goal saliency boosts infants' action prediction for human manual actions, but not for mechanical claws}, series = {Infant behavior \& development : an international and interdisciplinary journal}, volume = {44}, journal = {Infant behavior \& development : an international and interdisciplinary journal}, publisher = {Elsevier}, address = {New York}, issn = {0163-6383}, doi = {10.1016/j.infbeh.2016.05.001}, pages = {29 -- 37}, year = {2016}, abstract = {Previous research indicates that infants' prediction of the goals of observed actions is influenced by own experience with the type of agent performing the action (i.e., human hand vs. non-human agent) as well as by action-relevant features of goal objects (e.g., object size). The present study investigated the combined effects of these factors on 12-month-olds' action prediction. Infants' (N = 49) goal-directed gaze shifts were recorded as they observed 14 trials in which either a human hand or a mechanical claw reached for a small goal area (low-saliency goal) or a large goal area (high-saliency goal). 
Only infants who had observed the human hand reaching for a high-saliency goal fixated the goal object ahead of time, and they rapidly learned to predict the action goal across trials. By contrast, infants in all other conditions did not track the observed action in a predictive manner, and their gaze shifts to the action goal did not change systematically across trials. Thus, high-saliency goals seem to boost infants' predictive gaze shifts during the observation of human manual actions, but not of actions performed by a mechanical device. This supports the assumption that infants' action predictions are based on interactive effects of action-relevant object features (e.g., size) and own action experience.}, language = {en} } @article{EiteljoergeAdamElsneretal.2019, author = {Eiteljoerge, Sarah Fe Vivian and Adam, Maurits and Elsner, Birgit and Mani, Nivedita}, title = {Consistency of co-occurring actions influences young children's word learning}, series = {Royal Society Open Science}, volume = {6}, journal = {Royal Society Open Science}, number = {8}, publisher = {Royal Society}, address = {London}, issn = {2054-5703}, doi = {10.1098/rsos.190097}, pages = {17}, year = {2019}, abstract = {Communication with young children is often multimodal in nature, involving, for example, language and actions. The simultaneous presentation of information from both domains may boost language learning by highlighting the connection between an object and a word, owing to temporal overlap in the presentation of multimodal input. However, the overlap is not merely temporal but can also covary in the extent to which particular actions co-occur with particular words and objects, e.g. carers typically produce a hopping action when talking about rabbits and a snapping action for crocodiles. The frequency with which actions and words co-occur in the presence of the referents of these words may also impact young children's word learning. We, therefore, examined the extent to which consistency in the co-occurrence of particular actions and words impacted children's learning of novel word-object associations. Children (18 months, 30 months and 36-48 months) and adults were presented with two novel objects and heard their novel labels while different actions were performed on these objects, such that the particular actions and word-object pairings always co-occurred (Consistent group) or varied across trials (Inconsistent group). At test, participants saw both objects and heard one of the labels to examine whether participants recognized the target object upon hearing its label. Growth curve models revealed that 18-month-olds did not learn words for objects in either condition, and 30-month-old and 36- to 48-month-old children learned words for objects only in the Consistent condition, in contrast to adults who learned words for objects independent of the actions presented. Thus, consistency in the multimodal input influenced word learning in early childhood but not in adulthood. In terms of a dynamic systems account of word learning, our study shows how multimodal learning settings interact with the child's perceptual abilities to shape the learning experience.}, language = {en} } @article{AdamGumbschButzetal.2021, author = {Adam, Maurits and Gumbsch, Christian and Butz, Martin V. 
and Elsner, Birgit}, title = {The impact of action effects on infants' predictive gaze shifts for a non-human grasping action at 7, 11, and 18 months}, series = {Frontiers in psychology}, volume = {12}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2021.695550}, pages = {10}, year = {2021}, abstract = {During the observation of goal-directed actions, infants usually predict the goal at an earlier age when the agent is familiar (e.g., human hand) compared to unfamiliar (e.g., mechanical claw). These findings implicate a crucial role of the developing agentive self for infants' processing of others' action goals. Recent theoretical accounts suggest that predictive gaze behavior relies on an interplay between infants' agentive experience (top-down processes) and perceptual information about the agent and the action-event (bottom-up information; e.g., agency cues). The present study examined 7-, 11-, and 18-month-old infants' predictive gaze behavior for a grasping action performed by an unfamiliar tool, depending on infants' age-related action knowledge about tool-use and the display of the agency cue of producing a salient action effect. The results are in line with the notion of a systematic interplay between experience-based top-down processes and cue-based bottom-up information: Regardless of the salient action effect, predictive gaze shifts did not occur in the 7-month-olds (least experienced age group), but did occur in the 18-month-olds (most experienced age group). In the 11-month-olds, however, predictive gaze shifts occurred only when a salient action effect was presented. This sheds new light on how the developing agentive self, in interplay with available agency cues, supports infants' action-goal prediction also for observed tool-use actions.}, language = {en} } @phdthesis{Adam2019, author = {Adam, Maurits}, title = {Action-goal predictions in infancy}, school = {Universit{\"a}t Potsdam}, pages = {137}, year = {2019}, language = {en} } @article{EiteljoergeAdamElsneretal.2019a, author = {Eiteljoerge, Sarah F. V. and Adam, Maurits and Elsner, Birgit and Mani, Nivedita}, title = {Word-object and action-object association learning across early development}, series = {PLOS ONE}, volume = {14}, journal = {PLOS ONE}, number = {8}, publisher = {Public Library of Science}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0220317}, pages = {22}, year = {2019}, abstract = {Successful communication often involves comprehension of both spoken language and observed actions with and without objects. Even very young infants can learn associations between actions and objects as well as between words and objects. However, in daily life, children are usually confronted with both kinds of input simultaneously. Choosing the critical information to attend to in such situations might help children structure the input, and thereby, allow for successful learning. In the current study, we, therefore, investigated the developmental time course of children's and adults' word and action learning when given the opportunity to learn both word-object and action-object associations for the same object. All participants went through a learning phase and a test phase. In the learning phase, they were presented with two novel objects which were associated with a distinct novel name (e.g., "Look, a Tanu") and a distinct novel action (e.g., moving up and down while tilting sideways). 
In the test phase, participants were presented with both objects on screen in a baseline phase, then either heard one of the two labels or saw one of the two actions in a prime phase, and then saw the two objects again on screen in a recognition phase. Throughout the trial, participants' target looking was recorded to investigate whether participants looked at the target object upon hearing its label or seeing its action, and thus, would show learning of the word-object and action-object associations. Growth curve analyses revealed that 12-month-olds showed modest learning of action-object associations, 36-month-olds learned word-object associations, and adults learned word-object and action-object associations. These results highlight how children attend to the different information types from the two modalities through which communication is addressed to them. Over time, with increased exposure to systematic word-object mappings, children attend less to action-object mappings, with the latter potentially being mediated by word-object learning even in adulthood. Thus, choosing between different kinds of input that may be more relevant in their rich environment encompassing different modalities might help learning at different points in development.}, language = {en} }