@article{WellmannHolzgrefeLangTruckenbrodtetal.2012, author = {Wellmann, Caroline and Holzgrefe-Lang, Julia and Truckenbrodt, Hubert and Wartenburger, Isabell and H{\"o}hle, Barbara}, title = {How each prosodic boundary cue matters: evidence from German infants}, series = {Frontiers in psychology}, volume = {3}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2012.00580}, pages = {13}, year = {2012}, abstract = {Previous studies have revealed that infants aged 6-10 months are able to use the acoustic correlates of major prosodic boundaries, that is, pitch change, preboundary lengthening, and pause, for the segmentation of the continuous speech signal. Moreover, investigations with American-English- and Dutch-learning infants suggest that processing prosodic boundary markings involves a weighting of these cues. This weighting seems to develop with increasing exposure to the native language and to underlie crosslinguistic variation. In the following, we report the results of four experiments using the headturn preference procedure to explore the perception of prosodic boundary cues in German infants. We presented 8-month-old infants with a sequence of names in two different prosodic groupings, with or without boundary markers. Infants discriminated both sequences when the boundary was marked by all three cues (Experiment 1) and when it was marked by a pitch change and preboundary lengthening in combination (Experiment 2). The presence of a pitch change (Experiment 3) or preboundary lengthening (Experiment 4) as single cues did not lead to a successful discrimination. Our results indicate that pause is not a necessary cue for German infants. Pitch change and preboundary lengthening in combination, but not as single cues, are sufficient. Hence, by 8 months infants rely only on a convergence of boundary markers. Comparisons with adults' performance on the same stimulus materials suggest that the pattern observed with the 8-month-olds is already consistent with that of adults. We discuss our findings with respect to crosslinguistic variation and the development of a language-specific prosodic cue weighting.}, language = {en} } @article{RichterPaulHoehleetal.2020, author = {Richter, Maria and Paul, Mariella and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Common Ground Information Affects Reference Resolution}, series = {Frontiers in Psychology}, volume = {11}, journal = {Frontiers in Psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.565651}, pages = {17}, year = {2020}, abstract = {One of the most important social cognitive skills in humans is the ability to "put oneself in someone else's shoes," that is, to take another person's perspective. In socially situated communication, perspective taking enables the listener to arrive at a meaningful interpretation of what is said (sentence meaning) and what is meant (speaker's meaning) by the speaker. To successfully decode the speaker's meaning, the listener has to take into account which information he/she and the speaker share in their common ground (CG). We here further investigated competing accounts about when and how CG information affects language comprehension by means of reaction time (RT) measures, accuracy data, event-related potentials (ERPs), and eye-tracking. 
Early integration accounts would predict that CG information is considered immediately and would hence not expect to find costs of CG integration. Late integration accounts would predict a rather late and effortful integration of CG information during the parsing process that might be reflected in integration or updating costs. Other accounts predict the simultaneous integration of privileged ground (PG) and CG perspectives. We used a computerized version of the referential communication game with object triplets of different sizes presented visually in CG or PG. In critical trials (i.e., conflict trials), CG information had to be integrated while privileged information had to be suppressed. Listeners mastered the integration of CG (response accuracy 99.8\%). Yet, slower RTs and enhanced late positivities in the ERPs showed that CG integration had its costs. Moreover, eye-tracking data indicated an early anticipation of referents in CG but an inability to suppress looks to the privileged competitor, resulting in later and longer looks to targets in those trials in which CG information had to be considered. Our data therefore support accounts that foresee an early anticipation of referents in CG but a rather late and effortful integration if conflicting information has to be processed. We show that both perspectives, PG and CG, contribute to socially situated language processing and discuss the data with reference to theoretical accounts and recent findings on the use of CG information for reference resolution.}, language = {en} } @misc{RichterPaulHoehleetal.2020, author = {Richter, Maria and Paul, Mariella and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Common Ground Information Affects Reference Resolution}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {698}, issn = {1866-8364}, doi = {10.25932/publishup-49060}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-490607}, pages = {19}, year = {2020}, abstract = {One of the most important social cognitive skills in humans is the ability to "put oneself in someone else's shoes," that is, to take another person's perspective. In socially situated communication, perspective taking enables the listener to arrive at a meaningful interpretation of what is said (sentence meaning) and what is meant (speaker's meaning) by the speaker. To successfully decode the speaker's meaning, the listener has to take into account which information he/she and the speaker share in their common ground (CG). We here further investigated competing accounts about when and how CG information affects language comprehension by means of reaction time (RT) measures, accuracy data, event-related potentials (ERPs), and eye-tracking. Early integration accounts would predict that CG information is considered immediately and would hence not expect to find costs of CG integration. Late integration accounts would predict a rather late and effortful integration of CG information during the parsing process that might be reflected in integration or updating costs. Other accounts predict the simultaneous integration of privileged ground (PG) and CG perspectives. We used a computerized version of the referential communication game with object triplets of different sizes presented visually in CG or PG. In critical trials (i.e., conflict trials), CG information had to be integrated while privileged information had to be suppressed. 
Listeners mastered the integration of CG (response accuracy 99.8\%). Yet, slower RTs and enhanced late positivities in the ERPs showed that CG integration had its costs. Moreover, eye-tracking data indicated an early anticipation of referents in CG but an inability to suppress looks to the privileged competitor, resulting in later and longer looks to targets in those trials in which CG information had to be considered. Our data therefore support accounts that foresee an early anticipation of referents in CG but a rather late and effortful integration if conflicting information has to be processed. We show that both perspectives, PG and CG, contribute to socially situated language processing and discuss the data with reference to theoretical accounts and recent findings on the use of CG information for reference resolution.}, language = {en} } @article{PetroneTruckenbrodtWellmannetal.2017, author = {Petrone, Caterina and Truckenbrodt, Hubert and Wellmann, Caroline and Holzgrefe-Lang, Julia and Wartenburger, Isabell and H{\"o}hle, Barbara}, title = {Prosodic boundary cues in German}, series = {Journal of phonetics}, volume = {61}, journal = {Journal of phonetics}, publisher = {Elsevier}, address = {London}, issn = {0095-4470}, doi = {10.1016/j.wocn.2017.01.002}, pages = {71 -- 92}, year = {2017}, abstract = {This study investigates prosodic phrasing of bracketed lists in German. We analyze variation in pauses, phrase-final lengthening and f0 in speech production and how these cues affect boundary perception. In line with the literature, it was found that pauses are often used to signal intonation phrase boundaries, while final lengthening and f0 are employed across different levels of the prosodic hierarchy. Deviations from expectations based on the standard syntax-prosody mapping are interpreted in terms of task-specific effects. That is, we argue that speakers add/delete prosodic boundaries to enhance the phonological contrast between different bracketings in the experimental task. In perception, three experiments were run, in which we tested only single cues (but temporally distributed at different locations of the sentences). Results from identification tasks and reaction time measurements indicate that pauses lead to a more abrupt shift in listeners' prosodic judgments, while f0 and final lengthening are exploited in a more gradient manner. Hence, pauses, final lengthening and f0 have an impact on boundary perception, though listeners show different sensitivity to the three acoustic cues.}, language = {en} } @misc{MarimonTarterHofmannVerissimoetal.2021, author = {Marimon Tarter, Mireia and Hofmann, Andrea and Ver{\'i}ssimo, Joao Marques and M{\"a}nnel, Claudia and Friederici, Angela Dorkas and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Children's Learning of Non-adjacent Dependencies Using a Web-Based Computer Game Setting}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, volume = {12}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1866-8364}, doi = {10.25932/publishup-55083}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550834}, pages = {1 -- 15}, year = {2021}, abstract = {Infants show impressive speech decoding abilities and detect acoustic regularities that highlight the syntactic relations of a language, often coded via non-adjacent dependencies (NADs, e.g., is singing). 
It has been claimed that infants learn NADs implicitly and associatively through passive listening and that there is a shift from effortless associative learning to a more controlled learning of NADs after the age of 2 years, potentially driven by the maturation of the prefrontal cortex. To investigate if older children are able to learn NADs, Lammertink et al. (2019) recently developed a word-monitoring serial reaction time (SRT) task and could show that 6-11-year-old children learned the NADs, as their reaction times (RTs) increased when they were presented with violated NADs. In the current study we adapted their experimental paradigm and tested NAD learning in a younger group of 52 children between the ages of 4 and 8 years in a remote, web-based, game-like setting (whack-a-mole). Children were exposed to Italian phrases containing NADs and had to monitor the occurrence of a target syllable, which was the second element of the NAD. After exposure, children did a "Stem Completion" task in which they were presented with the first element of the NAD and had to choose the second element of the NAD to complete the stimuli. Our findings show that, despite large variability in the data, children aged 4-8 years are sensitive to NADs; they show the expected differences in RTs in the SRT task and could transfer the NAD rule to the Stem Completion task. We discuss these results with respect to the development of NAD learning in childhood and the practical impact and limitations of collecting these data in a web-based setting.}, language = {en} } @article{MarimonTarterHofmannVerissimoetal.2021, author = {Marimon Tarter, Mireia and Hofmann, Andrea and Ver{\'i}ssimo, Joao Marques and M{\"a}nnel, Claudia and Friederici, Angela Dorkas and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Children's Learning of Non-adjacent Dependencies Using a Web-Based Computer Game Setting}, series = {Frontiers in Psychology}, volume = {12}, journal = {Frontiers in Psychology}, publisher = {Frontiers}, address = {Lausanne, Switzerland}, issn = {1664-1078}, doi = {10.3389/fpsyg.2021.734877}, pages = {1 -- 15}, year = {2021}, abstract = {Infants show impressive speech decoding abilities and detect acoustic regularities that highlight the syntactic relations of a language, often coded via non-adjacent dependencies (NADs, e.g., is singing). It has been claimed that infants learn NADs implicitly and associatively through passive listening and that there is a shift from effortless associative learning to a more controlled learning of NADs after the age of 2 years, potentially driven by the maturation of the prefrontal cortex. To investigate if older children are able to learn NADs, Lammertink et al. (2019) recently developed a word-monitoring serial reaction time (SRT) task and could show that 6-11-year-old children learned the NADs, as their reaction times (RTs) increased when they were presented with violated NADs. In the current study we adapted their experimental paradigm and tested NAD learning in a younger group of 52 children between the ages of 4 and 8 years in a remote, web-based, game-like setting (whack-a-mole). Children were exposed to Italian phrases containing NADs and had to monitor the occurrence of a target syllable, which was the second element of the NAD. After exposure, children did a "Stem Completion" task in which they were presented with the first element of the NAD and had to choose the second element of the NAD to complete the stimuli. 
Our findings show that, despite large variability in the data, children aged 4-8 years are sensitive to NADs; they show the expected differences in RTs in the SRT task and could transfer the NAD rule to the Stem Completion task. We discuss these results with respect to the development of NAD learning in childhood and the practical impact and limitations of collecting these data in a web-based setting.}, language = {en} } @article{HolzgrefeLangWellmannPetroneetal.2013, author = {Holzgrefe-Lang, Julia and Wellmann, Caroline and Petrone, Caterina and Truckenbrodt, Hubert and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Brain response to prosodic boundary cues depends on boundary position}, series = {Frontiers in psychology}, volume = {4}, journal = {Frontiers in psychology}, number = {28}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2013.00421}, pages = {14}, year = {2013}, abstract = {Prosodic information is crucial for spoken language comprehension and especially for syntactic parsing, because prosodic cues guide the hearer's syntactic analysis. The time course and mechanisms of this interplay of prosody and syntax are not yet well understood. In particular, there is an ongoing debate whether local prosodic cues are taken into account automatically or whether they are processed in relation to the global prosodic context in which they appear. The present study explores whether the perception of a prosodic boundary is affected by its position within an utterance. In an event-related potential (ERP) study we tested if the brain response evoked by the prosodic boundary differs when the boundary occurs early in a list of three names connected by conjunctions (i.e., after the first name) as compared to later in the utterance (i.e., after the second name). A closure positive shift (CPS), marking the processing of a prosodic phrase boundary, was elicited for stimuli with a late boundary, but not for stimuli with an early boundary. This result is further evidence for an immediate integration of prosodic information into the parsing of an utterance. In addition, it shows that the processing of prosodic boundary cues depends on the previously processed information from the preceding prosodic context.}, language = {en} } @article{HolzgrefeLangWellmannPetroneetal.2016, author = {Holzgrefe-Lang, Julia and Wellmann, Caroline and Petrone, Caterina and Raeling, Romy and Truckenbrodt, Hubert and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {How pitch change and final lengthening cue boundary perception in German: converging evidence from ERPs and prosodic judgements}, series = {Language, cognition and neuroscience}, volume = {31}, journal = {Language, cognition and neuroscience}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {2327-3798}, doi = {10.1080/23273798.2016.1157195}, pages = {904 -- 920}, year = {2016}, abstract = {This study examines the role of pitch and final lengthening in German intonation phrase boundary (IPB) perception. Since a prosody-related event-related potential (ERP) component termed Closure Positive Shift reflects the processing of major prosodic boundaries, we combined ERP and behavioural measures (i.e. a prosodic judgement task) to systematically test the impact of sole and combined cue occurrences on IPB perception. 
In two experiments we investigated whether adult listeners perceived an IPB in acoustically manipulated speech material that contained none, one, or two of the prosodic boundary cues. Both ERP and behavioural results suggest that pitch and final lengthening cues have to occur in combination to trigger IPB perception. Hence, the combination of behavioural and electrophysiological measures provides a comprehensive insight into prosodic boundary cue perception in German and leads to an argument in favour of interrelated cues from the frequency (i.e. pitch change) and the time (i.e. final lengthening) domain.}, language = {en} } @article{HolzgrefeLangWellmannHoehleetal.2018, author = {Holzgrefe-Lang, Julia and Wellmann, Caroline and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Infants' Processing of Prosodic Cues}, series = {Language and speech}, volume = {61}, journal = {Language and speech}, number = {1}, publisher = {Sage Publ.}, address = {London}, issn = {0023-8309}, doi = {10.1177/0023830917730590}, pages = {153 -- 169}, year = {2018}, abstract = {Infants as young as six months are sensitive to prosodic phrase boundaries marked by three acoustic cues: pitch change, final lengthening, and pause. Behavioral studies suggest that a language-specific weighting of these cues develops during the first year of life; recent work on German revealed that eight-month-olds, unlike six-month-olds, are capable of perceiving a prosodic boundary on the basis of pitch change and final lengthening only. The present study uses Event-Related Potentials (ERPs) to investigate the neuro-cognitive development of prosodic cue perception in German-learning infants. In adults' ERPs, prosodic boundary perception is clearly reflected by the so-called Closure Positive Shift (CPS). To date, there is mixed evidence on whether an infant CPS exists that signals early prosodic cue perception, or whether the CPS emerges only later—the latter implying that infantile brain responses to prosodic boundaries reflect acoustic, low-level pause detection. We presented six- and eight-month-olds with stimuli containing either no boundary cues, only a pitch cue, or a combination of both pitch change and final lengthening. For both age groups, responses to the former two conditions did not differ, while brain responses to prosodic boundaries cued by pitch change and final lengthening showed a positivity that we interpret as a CPS-like infant ERP component. This hints at an early sensitivity to prosodic boundaries that cannot exclusively be based on pause detection. Instead, infants' brain responses indicate an early ability to exploit subtle, relational prosodic cues in speech perception—presumably even earlier than could be concluded from previous behavioral results.}, language = {en} }