@article{MarimonTarterHofmannVerissimoetal.2021, author = {Marimon Tarter, Mireia and Hofmann, Andrea and Ver{\'i}ssimo, Joao Marques and M{\"a}nnel, Claudia and Friederici, Angela Dorkas and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Children's Learning of Non-adjacent Dependencies Using a Web-Based Computer Game Setting}, series = {Frontiers in Psychology}, volume = {12}, journal = {Frontiers in Psychology}, publisher = {Frontiers}, address = {Lausanne, Switzerland}, issn = {1664-1078}, doi = {10.3389/fpsyg.2021.734877}, pages = {1 -- 15}, year = {2021}, abstract = {Infants show impressive speech decoding abilities and detect acoustic regularities that highlight the syntactic relations of a language, often coded via non-adjacent dependencies (NADs, e.g., is singing). It has been claimed that infants learn NADs implicitly and associatively through passive listening and that there is a shift from effortless associative learning to a more controlled learning of NADs after the age of 2 years, potentially driven by the maturation of the prefrontal cortex. To investigate whether older children are able to learn NADs, Lammertink et al. (2019) recently developed a word-monitoring serial reaction time (SRT) task and showed that 6-11-year-old children learned the NADs, as their reaction times (RTs) increased when they were presented with violated NADs. In the current study we adapted their experimental paradigm and tested NAD learning in a younger group of 52 children aged 4-8 years in a remote, web-based, game-like setting (whack-a-mole). Children were exposed to Italian phrases containing NADs and had to monitor the occurrence of a target syllable, which was the second element of the NAD. After exposure, children did a "Stem Completion" task in which they were presented with the first element of the NAD and had to choose the second element of the NAD to complete the stimuli. Our findings show that, despite large variability in the data, children aged 4-8 years are sensitive to NADs; they show the expected differences in RTs in the SRT task and could transfer the NAD rule to the Stem Completion task. We discuss these results with respect to the development of NAD learning in childhood and the practical impact and limitations of collecting these data in a web-based setting.}, language = {en} } @article{PetroneTruckenbrodtWellmannetal.2017, author = {Petrone, Caterina and Truckenbrodt, Hubert and Wellmann, Caroline and Holzgrefe-Lang, Julia and Wartenburger, Isabell and H{\"o}hle, Barbara}, title = {Prosodic boundary cues in German}, series = {Journal of Phonetics}, volume = {61}, journal = {Journal of Phonetics}, publisher = {Elsevier}, address = {London}, issn = {0095-4470}, doi = {10.1016/j.wocn.2017.01.002}, pages = {71 -- 92}, year = {2017}, abstract = {This study investigates prosodic phrasing of bracketed lists in German. We analyze variation in pauses, phrase-final lengthening and f0 in speech production and how these cues affect boundary perception. In line with the literature, it was found that pauses are often used to signal intonation phrase boundaries, while final lengthening and f0 are employed across different levels of the prosodic hierarchy. Deviations from expectations based on the standard syntax-prosody mapping are interpreted in terms of task-specific effects. That is, we argue that speakers add/delete prosodic boundaries to enhance the phonological contrast between different bracketings in the experimental task.
In perception, three experiments were run, in which we tested only single cues (but temporally distributed at different locations of the sentences). Results from identification tasks and reaction time measurements indicate that pauses lead to a more abrupt shift in listeners' prosodic judgments, while f0 and final lengthening are exploited in a more gradient manner. Hence, pauses, final lengthening and f0 have an impact on boundary perception, though listeners show different sensitivity to the three acoustic cues.}, language = {en} } @article{RichterPaulHoehleetal.2020, author = {Richter, Maria and Paul, Mariella and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Common Ground Information Affects Reference Resolution}, series = {Frontiers in Psychology}, volume = {11}, journal = {Frontiers in Psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.565651}, pages = {17}, year = {2020}, abstract = {One of the most important social cognitive skills in humans is the ability to "put oneself in someone else's shoes," that is, to take another person's perspective. In socially situated communication, perspective taking enables the listener to arrive at a meaningful interpretation of what is said (sentence meaning) and what is meant (speaker's meaning) by the speaker. To successfully decode the speaker's meaning, the listener has to take into account which information he/she and the speaker share in their common ground (CG). Here, we further investigated competing accounts of when and how CG information affects language comprehension by means of reaction time (RT) measures, accuracy data, event-related potentials (ERPs), and eye-tracking. Early integration accounts would predict that CG information is considered immediately and would hence not expect to find costs of CG integration. Late integration accounts would predict a rather late and effortful integration of CG information during the parsing process that might be reflected in integration or updating costs. Other accounts predict the simultaneous integration of privileged ground (PG) and CG perspectives. We used a computerized version of the referential communication game with object triplets of different sizes presented visually in CG or PG. In critical trials (i.e., conflict trials), CG information had to be integrated while privileged information had to be suppressed. Listeners mastered the integration of CG (response accuracy 99.8\%). Yet, slower RTs and enhanced late positivities in the ERPs showed that CG integration had its costs. Moreover, eye-tracking data indicated an early anticipation of referents in CG but an inability to suppress looks to the privileged competitor, resulting in later and longer looks to targets in those trials in which CG information had to be considered. Our data therefore support accounts that assume an early anticipation of referents in CG but a rather late and effortful integration when conflicting information has to be processed.
We show that both perspectives, PG and CG, contribute to socially situated language processing and discuss the data with reference to theoretical accounts and recent findings on the use of CG information for reference resolution.}, language = {en} } @misc{RichterPaulHoehleetal.2020a, author = {Richter, Maria and Paul, Mariella and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Common Ground Information Affects Reference Resolution}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {698}, issn = {1866-8364}, doi = {10.25932/publishup-49060}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490607}, pages = {19}, year = {2020}, abstract = {One of the most important social cognitive skills in humans is the ability to "put oneself in someone else's shoes," that is, to take another person's perspective. In socially situated communication, perspective taking enables the listener to arrive at a meaningful interpretation of what is said (sentence meaning) and what is meant (speaker's meaning) by the speaker. To successfully decode the speaker's meaning, the listener has to take into account which information he/she and the speaker share in their common ground (CG). Here, we further investigated competing accounts of when and how CG information affects language comprehension by means of reaction time (RT) measures, accuracy data, event-related potentials (ERPs), and eye-tracking. Early integration accounts would predict that CG information is considered immediately and would hence not expect to find costs of CG integration. Late integration accounts would predict a rather late and effortful integration of CG information during the parsing process that might be reflected in integration or updating costs. Other accounts predict the simultaneous integration of privileged ground (PG) and CG perspectives. We used a computerized version of the referential communication game with object triplets of different sizes presented visually in CG or PG. In critical trials (i.e., conflict trials), CG information had to be integrated while privileged information had to be suppressed. Listeners mastered the integration of CG (response accuracy 99.8\%). Yet, slower RTs and enhanced late positivities in the ERPs showed that CG integration had its costs. Moreover, eye-tracking data indicated an early anticipation of referents in CG but an inability to suppress looks to the privileged competitor, resulting in later and longer looks to targets in those trials in which CG information had to be considered. Our data therefore support accounts that assume an early anticipation of referents in CG but a rather late and effortful integration when conflicting information has to be processed.
We show that both perspectives, PG and CG, contribute to socially situated language processing and discuss the data with reference to theoretical accounts and recent findings on the use of CG information for reference resolution.}, language = {en} } @article{HolzgrefeLangWellmannHoehleetal.2018, author = {Holzgrefe-Lang, Julia and Wellmann, Caroline and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Infants' Processing of Prosodic Cues}, series = {Language and Speech}, volume = {61}, journal = {Language and Speech}, number = {1}, publisher = {Sage Publ.}, address = {London}, issn = {0023-8309}, doi = {10.1177/0023830917730590}, pages = {153 -- 169}, year = {2018}, abstract = {Infants as young as six months are sensitive to prosodic phrase boundaries marked by three acoustic cues: pitch change, final lengthening, and pause. Behavioral studies suggest that a language-specific weighting of these cues develops during the first year of life; recent work on German revealed that eight-month-olds, unlike six-month-olds, are capable of perceiving a prosodic boundary on the basis of pitch change and final lengthening only. The present study uses event-related potentials (ERPs) to investigate the neuro-cognitive development of prosodic cue perception in German-learning infants. In adults' ERPs, prosodic boundary perception is clearly reflected by the so-called Closure Positive Shift (CPS). To date, there is mixed evidence on whether an infant CPS exists that signals early prosodic cue perception, or whether the CPS emerges only later; the latter would imply that infants' brain responses to prosodic boundaries reflect acoustic, low-level pause detection. We presented six- and eight-month-olds with stimuli containing either no boundary cues, only a pitch cue, or a combination of both pitch change and final lengthening. For both age groups, responses to the former two conditions did not differ, while brain responses to prosodic boundaries cued by pitch change and final lengthening showed a positivity that we interpret as a CPS-like infant ERP component. This hints at an early sensitivity to prosodic boundaries that cannot be based exclusively on pause detection. Instead, infants' brain responses indicate an early ability to exploit subtle, relational prosodic cues in speech perception, presumably even earlier than could be concluded from previous behavioral results.}, language = {en} }