@article{KrasotkinaGoetzHoehleetal.2021, author = {Krasotkina, Anna and G{\"o}tz, Antonia and H{\"o}hle, Barbara and Schwarzer, Gudrun}, title = {Perceptual narrowing in face- and speech-perception domains in infancy}, series = {Infant behavior \& development : an international and interdisciplinary journal}, volume = {64}, journal = {Infant behavior \& development : an international and interdisciplinary journal}, publisher = {Elsevier}, address = {New York}, issn = {0163-6383}, doi = {10.1016/j.infbeh.2021.101607}, pages = {9}, year = {2021}, abstract = {During the first year of life, infants undergo a process known as perceptual narrowing, which reduces their sensitivity to classes of stimuli which the infants do not encounter in their environment. It has been proposed that perceptual narrowing for faces and speech may be driven by shared domain-general processes. To investigate this theory, our study longitudinally tested 50 German Caucasian infants with respect to these domains first at 6 months of age followed by a second testing at 9 months of age. We used an infant-controlled habituation-dishabituation paradigm to test the infants' ability to discriminate among other-race Asian faces and non-native Cantonese speech tones, as well as same-race Caucasian faces as a control. We found that while at 6 months of age infants could discriminate among all stimuli, by 9 months of age they could no longer discriminate among other-race faces or non-native tones. However, infants could discriminate among same-race stimuli both at 6 and at 9 months of age. These results demonstrate that the same infants undergo perceptual narrowing for both other-race faces and non-native speech tones between the ages of 6 and 9 months. 
This parallel development of perceptual narrowing occurring in both the face and speech perception modalities over the same period of time lends support to the domain-general theory of perceptual narrowing in face and speech perception.}, language = {en} } @article{BollAvetisyanBhataraHoehle2020, author = {Boll-Avetisyan, Natalie and Bhatara, Anjali and H{\"o}hle, Barbara}, title = {Processing of rhythm in speech and music in adult dyslexia}, series = {Brain Sciences}, volume = {10}, journal = {Brain Sciences}, number = {5}, publisher = {MDPI}, address = {Basel}, issn = {2076-3425}, doi = {10.3390/brainsci10050261}, pages = {26}, year = {2020}, abstract = {Recent studies have suggested that musical rhythm perception ability can affect the phonological system. The most prevalent causal account for developmental dyslexia is the phonological deficit hypothesis. As rhythm is a subpart of phonology, we hypothesized that reading deficits in dyslexia are associated with rhythm processing in speech and in music. In a rhythmic grouping task, adults with diagnosed dyslexia and age-matched controls listened to speech streams with syllables alternating in intensity, duration, or neither, and indicated whether they perceived a strong-weak or weak-strong rhythm pattern. Additionally, their reading and musical rhythm abilities were measured. Results showed that adults with dyslexia had lower musical rhythm abilities than adults without dyslexia. Moreover, lower musical rhythm ability was associated with lower reading ability in dyslexia. However, speech grouping by adults with dyslexia was not impaired when musical rhythm perception ability was controlled: like adults without dyslexia, they showed consistent preferences. However, rhythmic grouping was predicted by musical rhythm perception ability, irrespective of dyslexia. The results suggest associations among musical rhythm perception ability, speech rhythm perception, and reading ability. 
This highlights the importance of considering individual variability to better understand dyslexia and raises the possibility that musical rhythm perception ability is a key to phonological and reading acquisition.}, language = {en} } @misc{BollAvetisyanBhataraHoehle2020a, author = {Boll-Avetisyan, Natalie and Bhatara, Anjali and H{\"o}hle, Barbara}, title = {Processing of rhythm in speech and music in adult dyslexia}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {630}, issn = {1866-8364}, doi = {10.25932/publishup-46978}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-469781}, pages = {28}, year = {2020}, abstract = {Recent studies have suggested that musical rhythm perception ability can affect the phonological system. The most prevalent causal account for developmental dyslexia is the phonological deficit hypothesis. As rhythm is a subpart of phonology, we hypothesized that reading deficits in dyslexia are associated with rhythm processing in speech and in music. In a rhythmic grouping task, adults with diagnosed dyslexia and age-matched controls listened to speech streams with syllables alternating in intensity, duration, or neither, and indicated whether they perceived a strong-weak or weak-strong rhythm pattern. Additionally, their reading and musical rhythm abilities were measured. Results showed that adults with dyslexia had lower musical rhythm abilities than adults without dyslexia. Moreover, lower musical rhythm ability was associated with lower reading ability in dyslexia. However, speech grouping by adults with dyslexia was not impaired when musical rhythm perception ability was controlled: like adults without dyslexia, they showed consistent preferences. However, rhythmic grouping was predicted by musical rhythm perception ability, irrespective of dyslexia. 
The results suggest associations among musical rhythm perception ability, speech rhythm perception, and reading ability. This highlights the importance of considering individual variability to better understand dyslexia and raises the possibility that musical rhythm perception ability is a key to phonological and reading acquisition.}, language = {en} } @article{GhaffarvandMokariGafosWilliams2020, author = {Ghaffarvand-Mokari, Payam and Gafos, Adamantios I. and Williams, Daniel}, title = {Perceptuomotor compatibility effects in vowels}, series = {Attention, perception, \& psychophysics}, volume = {82}, journal = {Attention, perception, \& psychophysics}, number = {5}, publisher = {Springer}, address = {New York}, issn = {1943-3921}, doi = {10.3758/s13414-020-02014-1}, pages = {2751 -- 2764}, year = {2020}, abstract = {Perceptuomotor compatibility between phonemically identical spoken and perceived syllables has been found to speed up response times (RTs) in speech production tasks. However, research on compatibility effects between perceived and produced stimuli at the subphonemic level is limited. Using a cue-distractor task, we investigated the effects of phonemic and subphonemic congruency in pairs of vowels. On each trial, a visual cue prompted individuals to produce a response vowel, and after the visual cue appeared a distractor vowel was auditorily presented while speakers were planning to produce the response vowel. The results revealed effects on RTs due to phonemic congruency (same vs. different vowels) between the response and distractor vowels, which resemble effects previously seen for consonants. Beyond phonemic congruency, we assessed how RTs are modulated as a function of the degree of subphonemic similarity between the response and distractor vowels. Higher similarity between the response and distractor in terms of phonological distance-defined by number of mismatching phonological features-resulted in faster RTs. 
However, the exact patterns of RTs varied across response-distractor vowel pairs. We discuss how different assumptions about phonological feature representations may account for the different patterns observed in RTs across response-distractor pairs. Our findings on the effects of perceived stimuli on produced speech at a more detailed level of representation than phonemic identity necessitate a more direct and specific formulation of the perception-production link. Additionally, these results extend previously reported perceptuomotor interactions mainly involving consonants to vowels.}, language = {en} } @misc{KrasotkinaGoetzHoehleetal.2018a, author = {Krasotkina, Anna and G{\"o}tz, Antonia and H{\"o}hle, Barbara and Schwarzer, Gudrun}, title = {Perceptual narrowing in speech and face recognition}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {639}, issn = {1866-8364}, doi = {10.25932/publishup-45918}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459180}, pages = {7}, year = {2018}, abstract = {During the first year of life, infants undergo perceptual narrowing in the domains of speech and face perception. This is typically characterized by improvements in infants' abilities in discriminating among stimuli of familiar types, such as native speech tones and same-race faces. Simultaneously, infants begin to decline in their ability to discriminate among stimuli of types with which they have little experience, such as nonnative tones and other-race faces. The similarity in time-frames during which perceptual narrowing seems to occur in the domains of speech and face perception has led some researchers to hypothesize that the perceptual narrowing in these domains could be driven by shared domain-general processes. 
To explore this hypothesis, we tested 53 Caucasian 9-month-old infants from monolingual German households on their ability to discriminate among non-native Cantonese speech tones, as well among same-race German faces and other-race Chinese faces. We tested the infants using an infant-controlled habituation-dishabituation paradigm, with infants' preferences for looking at novel stimuli versus the habituated stimuli (dishabituation scores) acting as indicators of discrimination ability. As expected for their age, infants were able to discriminate between same-race faces, but not between other-race faces or non-native speech tones. Most interestingly, we found that infants' dishabituation scores for the non-native speech tones and other-race faces showed significant positive correlations, while the dishabituation scores for non-native speech tones and same-race faces did not. These results therefore support the hypothesis that shared domain-general mechanisms may drive perceptual narrowing in the domains of speech and face perception.}, language = {en} } @article{KrasotkinaGoetzHoehleetal.2018, author = {Krasotkina, Anna and G{\"o}tz, Antonia and H{\"o}hle, Barbara and Schwarzer, Gudrun}, title = {Perceptual narrowing in speech and face recognition}, series = {Frontiers in psychology}, volume = {9}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2018.01711}, pages = {5}, year = {2018}, abstract = {During the first year of life, infants undergo perceptual narrowing in the domains of speech and face perception. This is typically characterized by improvements in infants' abilities in discriminating among stimuli of familiar types, such as native speech tones and same-race faces. Simultaneously, infants begin to decline in their ability to discriminate among stimuli of types with which they have little experience, such as nonnative tones and other-race faces. 
The similarity in time-frames during which perceptual narrowing seems to occur in the domains of speech and face perception has led some researchers to hypothesize that the perceptual narrowing in these domains could be driven by shared domain-general processes. To explore this hypothesis, we tested 53 Caucasian 9-month-old infants from monolingual German households on their ability to discriminate among non-native Cantonese speech tones, as well among same-race German faces and other-race Chinese faces. We tested the infants using an infant-controlled habituation-dishabituation paradigm, with infants' preferences for looking at novel stimuli versus the habituated stimuli (dishabituation scores) acting as indicators of discrimination ability. As expected for their age, infants were able to discriminate between same-race faces, but not between other-race faces or non-native speech tones. Most interestingly, we found that infants' dishabituation scores for the non-native speech tones and other-race faces showed significant positive correlations, while the dishabituation scores for non-native speech tones and same-race faces did not. 
These results therefore support the hypothesis that shared domain-general mechanisms may drive perceptual narrowing in the domains of speech and face perception.}, language = {en} } @misc{WilliamsEscuderoGafos2018, author = {Williams, Daniel and Escudero, Paola and Gafos, Adamantios I.}, title = {Perceptual sensitivity to spectral change in Australian English close front vowels}, series = {19th annual conference of the international speech communication association (INTERSPEECH 2018), VOLS 1-6: Speech research for emerging markets in multilingual societies}, journal = {19th annual conference of the international speech communication association (INTERSPEECH 2018), VOLS 1-6: Speech research for emerging markets in multilingual societies}, publisher = {ISCA-International Speech Communication Association}, address = {Baixas}, isbn = {978-1-5108-7221-9}, issn = {2308-457X}, doi = {10.21437/Interspeech.2018-2505}, pages = {1442 -- 1446}, year = {2018}, abstract = {Speech scientists have long noted that the qualities of naturally-produced vowels do not remain constant over their durations regardless of being nominally "monophthongs" or "diphthongs". Recent acoustic corpora show that there are consistent patterns of first (F1) and second (F2) formant frequency change across different vowel categories. The three Australian English (AusE) close front vowels /i:, 1, i/ provide a striking example: while their midpoint or mean F1 and F2 frequencies are virtually identical, their spectral change patterns distinctly differ. 
The results indicate that, despite the distinct patterns of spectral change of AusE /i:, i, la/ in production, its perceptual relevance is not uniform, but rather vowel-category dependent.}, language = {en} } @article{HolzgrefeLangWellmannHoehleetal.2018, author = {Holzgrefe-Lang, Julia and Wellmann, Caroline and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Infants' Processing of Prosodic Cues}, series = {Language and speech}, volume = {61}, journal = {Language and speech}, number = {1}, publisher = {Sage Publ.}, address = {London}, issn = {0023-8309}, doi = {10.1177/0023830917730590}, pages = {153 -- 169}, year = {2018}, abstract = {Infants as young as six months are sensitive to prosodic phrase boundaries marked by three acoustic cues: pitch change, final lengthening, and pause. Behavioral studies suggest that a language-specific weighting of these cues develops during the first year of life; recent work on German revealed that eight-month-olds, unlike six-month-olds, are capable of perceiving a prosodic boundary on the basis of pitch change and final lengthening only. The present study uses Event-Related Potentials (ERPs) to investigate the neuro-cognitive development of prosodic cue perception in German-learning infants. In adults' ERPs, prosodic boundary perception is clearly reflected by the so-called Closure Positive Shift (CPS). To date, there is mixed evidence on whether an infant CPS exists that signals early prosodic cue perception, or whether the CPS emerges only later—the latter implying that infantile brain responses to prosodic boundaries reflect acoustic, low-level pause detection. We presented six- and eight-month-olds with stimuli containing either no boundary cues, only a pitch cue, or a combination of both pitch change and final lengthening. 
For both age groups, responses to the former two conditions did not differ, while brain responses to prosodic boundaries cued by pitch change and final lengthening showed a positivity that we interpret as a CPS-like infant ERP component. This hints at an early sensitivity to prosodic boundaries that cannot exclusively be based on pause detection. Instead, infants' brain responses indicate an early ability to exploit subtle, relational prosodic cues in speech perception—presumably even earlier than could be concluded from previous behavioral results.}, language = {en} } @article{BollAvetisyanBhataraHoehle2017, author = {Boll-Avetisyan, Natalie and Bhatara, Anjali and H{\"o}hle, Barbara}, title = {Effects of musicality on the perception of rhythmic structure in speech}, series = {Laboratory phonology}, volume = {8}, journal = {Laboratory phonology}, number = {1}, publisher = {Ubiquity Press}, address = {London}, issn = {1868-6346}, doi = {10.5334/labphon.91}, pages = {16}, year = {2017}, abstract = {Language and music share many rhythmic properties, such as variations in intensity and duration leading to repeating patterns. Perception of rhythmic properties may rely on cognitive networks that are shared between the two domains. If so, then variability in speech rhythm perception may relate to individual differences in musicality. To examine this possibility, the present study focuses on rhythmic grouping, which is assumed to be guided by a domain-general principle, the Iambic/Trochaic law, stating that sounds alternating in intensity are grouped as strong-weak, and sounds alternating in duration are grouped as weak-strong. German listeners completed a grouping task: They heard streams of syllables alternating in intensity, duration, or neither, and had to indicate whether they perceived a strong-weak or weak-strong pattern. Moreover, their music perception abilities were measured, and they filled out a questionnaire reporting their productive musical experience. 
Results showed that better musical rhythm perception - ability was associated with more consistent rhythmic grouping of speech, while melody perception - ability and productive musical experience were not. This suggests shared cognitive procedures in the perception of rhythm in music and speech. Also, the results highlight the relevance of - considering individual differences in musicality when aiming to explain variability in prosody perception.}, language = {en} } @phdthesis{HolzgrefeLang2017, author = {Holzgrefe-Lang, Julia}, title = {Prosodic phrase boundary perception in adults and infants}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405943}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 141}, year = {2017}, abstract = {Prosody is a rich source of information that heavily supports spoken language comprehension. In particular, prosodic phrase boundaries divide the continuous speech stream into chunks reflecting the semantic and syntactic structure of an utterance. This chunking or prosodic phrasing plays a critical role in both spoken language processing and language acquisition. Aiming at a better understanding of the underlying processing mechanisms and their acquisition, the present work investigates factors that influence prosodic phrase boundary perception in adults and infants. Using the event-related potential (ERP) technique, three experimental studies examined the role of prosodic context (i.e., phrase length) in German phrase boundary perception and of the main prosodic boundary cues, namely pitch change, final lengthening, and pause. With regard to the boundary cues, the dissertation focused on the questions which cues or cue combination are essential for the perception of a prosodic boundary and on whether and how this cue weighting develops during infancy. Using ERPs is advantageous because the technique captures the immediate impact of (linguistic) information during on-line processing. 
Moreover, as it can be applied independently of specific task demands or an overt response performance, it can be used with both infants and adults. ERPs are particularly suitable to study the time course and underlying mechanisms of boundary perception, because a specific ERP component, the Closure Positive Shift (CPS) is well established as neuro-physiological indicator of prosodic boundary perception in adults. The results of the three experimental studies first underpin that the prosodic context plays an immediate role in the processing of prosodic boundary information. Moreover, the second study reveals that adult listeners perceive a prosodic boundary also on the basis of a sub-set of the boundary cues available in the speech signal. Both ERP and simultaneously collected behavioral data (i.e., prosodic judgements) suggest that the combination of pitch change and final lengthening triggers boundary perception; however, when presented as single cues, neither pitch change nor final lengthening were sufficient. Finally, testing six- and eight-month-old infants shows that the early sensitivity for prosodic information is reflected in a brain response resembling the adult CPS. For both age groups, brain responses to prosodic boundaries cued by pitch change and final lengthening revealed a positivity that can be interpreted as a CPS-like infant ERP component. In contrast, but comparable to the adults' response pattern, pitch change as a single cue does not provoke an infant CPS. 
These results show that infant phrase boundary perception is not exclusively based on pause detection and hint at an early ability to exploit subtle, relational prosodic cues in speech perception.}, language = {en} } @article{SidiropoulosDeBleserAblingeretal.2015, author = {Sidiropoulos, Kyriakos and De Bleser, Ria and Ablinger, Irene and Ackermann, Hermann}, title = {The relationship between verbal and nonverbal auditory signal processing in conduction aphasia: behavioral and anatomical evidence for common decoding mechanisms}, series = {Neurocase : the neural basis of cognition}, volume = {21}, journal = {Neurocase : the neural basis of cognition}, number = {3}, publisher = {Routledge, Taylor \& Francis Group}, address = {Abingdon}, issn = {1355-4794}, doi = {10.1080/13554794.2014.902471}, pages = {377 -- 393}, year = {2015}, abstract = {The processing of nonverbal auditory stimuli has not yet been sufficiently investigated in patients with aphasia. On the basis of a duration discrimination task, we examined whether patients with left-sided cerebrovascular lesions were able to perceive time differences in the scale of approximately 150ms. Further linguistic and memory-related tasks were used to characterize more exactly the relationships in the performances between auditory nonverbal task and selective linguistic or mnemonic disturbances. All examined conduction aphasics showed increased thresholds in the duration discrimination task. The low thresholds on this task were in a strong correlative relation to the reduced performances in repetition and working memory task. This was interpreted as an indication of a pronounced disturbance in integrating auditory verbal information into a long-term window (sampling disturbance) resulting in an additional load of working memory. In order to determine the lesion topography of patients with sampling disturbances, the anatomical and psychophysical data were correlated on the basis of a voxelwise statistical approach. 
It was found that tissue damage extending through the insula, the posterior superior temporal gyrus, and the supramarginal gyrus causes impairments in sequencing of time-sensitive information.}, language = {en} } @article{HolzgrefeLangWellmannPetroneetal.2013, author = {Holzgrefe-Lang, Julia and Wellmann, Caroline and Petrone, Caterina and Truckenbrodt, Hubert and H{\"o}hle, Barbara and Wartenburger, Isabell}, title = {Brain response to prosodic boundary cues depends on boundary position}, series = {Frontiers in psychology}, volume = {4}, journal = {Frontiers in psychology}, number = {28}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2013.00421}, pages = {14}, year = {2013}, abstract = {Prosodic information is crucial for spoken language comprehension and especially for syntactic parsing, because prosodic cues guide the hearer's syntactic analysis. The time course and mechanisms of this interplay of prosody and syntax are not yet well-understood. In particular, there is an ongoing debate whether local prosodic cues are taken into account automatically or whether they are processed in relation to the global prosodic context in which they appear. The present study explores whether the perception of a prosodic boundary is affected by its position within an utterance. In an event-related potential (ERP) study we tested if the brain response evoked by the prosodic boundary differs when the boundary occurs early in a list of three names connected by conjunctions (i.e., after the first name) as compared to later in the utterance (i.e., after the second name). A closure positive shift (CPS)-marking the processing of a prosodic phrase boundary-was elicited for stimuli with a late boundary, but not for stimuli with an early boundary. This result is further evidence for an immediate integration of prosodic information into the parsing of an utterance. 
In addition, it shows that the processing of prosodic boundary cues depends on the previously processed information from the preceding prosodic context.}, language = {en} } @article{WellmannHolzgrefeLangTruckenbrodtetal.2012, author = {Wellmann, Caroline and Holzgrefe-Lang, Julia and Truckenbrodt, Hubert and Wartenburger, Isabell and H{\"o}hle, Barbara}, title = {How each prosodic boundary cue matters: evidence from German infants}, series = {Frontiers in psychology}, volume = {3}, journal = {Frontiers in psychology}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2012.00580}, pages = {13}, year = {2012}, abstract = {Previous studies have revealed that infants aged 6-10 months are able to use the acoustic correlates of major prosodic boundaries, that is, pitch change, preboundary lengthening, and pause, for the segmentation of the continuous speech signal. Moreover, investigations with American-English- and Dutch-learning infants suggest that processing prosodic boundary markings involves a weighting of these cues. This weighting seems to develop with increasing exposure to the native language and to underlie crosslinguistic variation. In the following, we report the results of four experiments using the headturn preference procedure to explore the perception of prosodic boundary cues in German infants. We presented 8-month-old infants with a sequence of names in two different prosodic groupings, with or without boundary markers. Infants discriminated both sequences when the boundary was marked by all three cues (Experiment 1) and when it was marked by a pitch change and preboundary lengthening in combination (Experiment 2). The presence of a pitch change (Experiment 3) or preboundary lengthening (Experiment 4) as single cues did not lead to a successful discrimination. Our results indicate that pause is not a necessary cue for German infants. 
Pitch change and preboundary lengthening in combination, but not as single cues, are sufficient. Hence, by 8 months infants only rely on a convergence of boundary markers. Comparisons with adults' performance on the same stimulus materials suggest that the pattern observed with the 8-month-olds is already consistent with that of adults. We discuss our findings with respect to crosslinguistic variation and the development of a language-specific prosodic cue weighting.}, language = {en} } @article{TelkemeyerRossiNierhausetal.2011, author = {Telkemeyer, Silke and Rossi, Sonja and Nierhaus, Till and Steinbrink, Jens and Obrig, Hellmuth and Wartenburger, Isabell}, title = {Acoustic processing of temporally modulated sounds in infants: evidence from a combined near-infrared spectroscopy and EEG study}, series = {Frontiers in psychology}, volume = {2}, journal = {Frontiers in psychology}, number = {2}, publisher = {Frontiers Research Foundation}, address = {Lausanne}, issn = {1664-1078}, doi = {10.3389/fpsyg.2011.00062}, pages = {14}, year = {2011}, abstract = {Speech perception requires rapid extraction of the linguistic content from the acoustic signal. The ability to efficiently process rapid changes in auditory information is important for decoding speech and thereby crucial during language acquisition. Investigating functional networks of speech perception in infancy might elucidate neuronal ensembles supporting perceptual abilities that gate language acquisition. Interhemispheric specializations for language have been demonstrated in infants. How these asymmetries are shaped by basic temporal acoustic properties is under debate. We recently provided evidence that newborns process non-linguistic sounds sharing temporal features with language in a differential and lateralized fashion. 
The present study used the same material while measuring brain responses of 6 and 3 month old infants using simultaneous recordings of electroencephalography (EEG) and near-infrared spectroscopy (NIRS). NIRS reveals that the lateralization observed in newborns remains constant over the first months of life. While fast acoustic modulations elicit bilateral neuronal activations, slow modulations lead to right-lateralized responses. Additionally, auditory-evoked potentials and oscillatory EEG responses show differential responses for fast and slow modulations indicating a sensitivity for temporal acoustic variations. Oscillatory responses reveal an effect of development, that is, 6 but not 3 month old infants show stronger theta-band desynchronization for slowly modulated sounds. Whether this developmental effect is due to increasing fine-grained perception for spectrotemporal sounds in general remains speculative. Our findings support the notion that a more general specialization for acoustic properties can be considered the basis for lateralization of speech perception. The results show that concurrent assessment of vascular based imaging and electrophysiological responses have great potential in the research on language acquisition.}, language = {en} }