@phdthesis{Ziehe2005, author = {Ziehe, Andreas}, title = {Blind source separation based on joint diagonalization of matrices with applications in biomedical signal processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5694}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {This thesis is concerned with the solution of the blind source separation problem (BSS). The BSS problem occurs frequently in various scientific and technical applications. In essence, it consists in separating meaningful underlying components out of a mixture of a multitude of superimposed signals. In the recent research literature there are two related approaches to the BSS problem: The first is known as Independent Component Analysis (ICA), where the goal is to transform the data such that the components become as independent as possible. The second is based on the notion of diagonality of certain characteristic matrices derived from the data. Here the goal is to transform the matrices such that they become as diagonal as possible. In this thesis we study the latter method of approximate joint diagonalization (AJD) to achieve a solution of the BSS problem. After an introduction to the general setting, the thesis provides an overview on particular choices for the set of target matrices that can be used for BSS by joint diagonalization. As the main contribution of the thesis, new algorithms for approximate joint diagonalization of several matrices with non-orthogonal transformations are developed. These newly developed algorithms will be tested on synthetic benchmark datasets and compared to other previous diagonalization algorithms. 
Applications of the BSS methods to biomedical signal processing are discussed and exemplified with real-life data sets of multi-channel biomagnetic recordings.}, subject = {Signaltrennung}, language = {en} } @phdthesis{VenturaBort2020, author = {Ventura-Bort, Carlos}, title = {Temporo-spatial dynamics of the impact of emotional contexts on visual processing and memory}, doi = {10.25932/publishup-55023}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-550236}, school = {Universit{\"a}t Potsdam}, pages = {208}, year = {2020}, abstract = {It has frequently been observed that single emotional events are not only more efficiently processed, but also better remembered, and form longer-lasting memory traces than neutral material. However, when emotional information is perceived as a part of a complex event, such as in the context of or in relation to other events and/or source details, the modulatory effects of emotion are less clear. The present work aims to investigate how emotional, contextual source information modulates the initial encoding and subsequent long-term retrieval of associated neutral material (item memory) and contextual source details (contextual source memory). To do so, a two-task experiment was used, consisting of an incidental encoding task in which neutral objects were displayed over different contextual background scenes which varied in emotional content (unpleasant, pleasant, and neutral), and a delayed retrieval task (1 week), in which previously-encoded objects and new ones were presented. In a series of studies, behavioral indices (Studies 2, 3, and 5), event-related potentials (ERPs; Studies 1-4), and functional magnetic resonance imaging (Study 5) were used to investigate whether emotional contexts can rapidly tune the visual processing of associated neutral information (Study 1) and modulate long-term item memory (Study 2), how different recognition memory processes (familiarity vs. 
recollection) contribute to these emotion effects on item and contextual source memory (Study 3), whether the emotional effects of item memory can also be observed during spontaneous retrieval (Study 4), and which brain regions underpin the modulatory effects of emotional contexts on item and contextual source memory (Study 5). In Study 1, it was observed that emotional contexts by means of emotional associative learning, can rapidly alter the processing of associated neutral information. Neutral items associated with emotional contexts (i.e. emotional associates) compared to neutral ones, showed enhanced perceptual and more elaborate processing after one single pairing, as indexed by larger amplitudes in the P100 and LPP components, respectively. Study 2 showed that emotional contexts produce longer-lasting memory effects, as evidenced by better item memory performance and larger ERP Old/New differences for emotional associates. In Study 3, a mnemonic differentiation was observed between item and contextual source memory which was modulated by emotion. Item memory was driven by familiarity, independently of emotional contexts during encoding, whereas contextual source memory was driven by recollection, and better for emotional material. As in Study 2, enhancing effects of emotional contexts for item memory were observed in ERPs associated with recollection processes. Likewise, for contextual source memory, a pronounced recollection-related ERP enhancement was observed for exclusively emotional contexts. Study 4 showed that the long-term recollection enhancement of emotional contexts on item memory can be observed even when retrieval is not explicitly attempted, as measured with ERPs, suggesting that the emotion enhancing effects on memory are not related to the task embedded during recognition, but to the motivational relevance of the triggering event. 
In Study 5, it was observed that enhancing effects of emotional contexts on item and contextual source memory involve stronger engagement of the brain's regions which are associated with memory recollection, including areas of the medial temporal lobe, posterior parietal cortex, and prefrontal cortex. Taken together, these findings suggest that emotional contexts rapidly modulate the initial processing of associated neutral information and the subsequent, long-term item and contextual source memories. The enhanced memory effects of emotional contexts are strongly supported by recollection rather than familiarity processes, and are shown to be triggered when retrieval is both explicitly and spontaneously attempted. These results provide new insights into the modulatory role of emotional information on the visual processing and the long-term recognition memory of complex events. The present findings are integrated into the current theoretical models and future ventures are discussed.}, language = {en} } @phdthesis{Ribback2003, author = {Ribback, Sven}, title = {Psychophysiologische Untersuchung mentaler Beanspruchung in simulierten Mensch-Maschine-Interaktionen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000833}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {In der vorliegenden Untersuchung wurde ein arbeitspsychologisches Problem thematisiert, das in Mensch-Maschine-Systemen auftritt. In Mensch-Maschine-Systemen werden Informationen in kodierter Form ausgetauscht. Diese inhaltlich verk{\"u}rzte Informations{\"u}bertragung hat den Vorteil, keine lange Zustandsbeschreibung zu ben{\"o}tigen, so dass der Mensch auf die ver{\"a}nderten Zust{\"a}nde schnell und effizient reagieren kann. Dies wird aber nur dann erm{\"o}glicht, wenn der Mensch die kodierten Informationen (Kodes) vorher erlernten Bedeutungen zuordnen kann. 
Je nach Art der kodierten Informationen (visuelle, akustische oder alphanumerische Signale) wurden Gestaltungsempfehlungen f{\"u}r Kodealphabete entwickelt. F{\"u}r Operateure resultiert die mentale Belastung durch Dekodierungsprozesse vor allem aus dem Umfang des Kodealphabetes (Anzahl von Kodezeichen), der wahrnehmungsm{\"a}ßigen Gestaltung der Kodes und den Regeln {\"u}ber die Zuordnung von Bedeutungen zu Kodezeichen. Die Entscheidung {\"u}ber die G{\"u}te von Kodealphabeten geschieht in der Arbeitspsychologie in der Regel {\"u}ber Leistungsindikatoren. Dies sind {\"u}blicherweise die zur Dekodierung der Kodes ben{\"o}tigte Zeit und dabei auftretende Zuordnungsfehler. Psychophysiologische Daten werden oft nicht herangezogen. Fraglich ist allerdings, ob Zeiten und Fehler allein verl{\"a}ssliche Indikatoren f{\"u}r den kognitiven Aufwand bei Dekodierungsprozessen sind, da im hochge{\"u}bten Zustand bei gleichen Alphabetl{\"a}ngen, aber unterschiedlicher Kodezeichengestaltung sich h{\"a}ufig die mittleren Dekodierungszeiten zwischen Kodealphabeten nicht signifikant unterscheiden und Fehler {\"u}berhaupt nicht auftreten. Die in der vorliegenden Arbeit postulierte Notwendigkeit der Ableitung von Biosignalen gr{\"u}ndet sich auf die Annahme, dass mit ihrer Hilfe zus{\"a}tzliche Informationen {\"u}ber die mentale Beanspruchung bei Dekodierungsprozessen gewonnen werden k{\"o}nnen, die mit der Erhebung von Leistungsdaten nicht erfasst werden. Denn gerade dann, wenn sich die Leistungsdaten zweier Kodealphabete nicht unterscheiden, k{\"o}nnen psychophysiologische Daten unterschiedliche Aspekte mentaler Beanspruchung erfassen, die mit Hilfe von Leistungsdaten nicht bestimmt werden k{\"o}nnen. 
Daher wird in Erweiterung des etablierten Untersuchungsansatzes vorgeschlagen, Biosignale als dritten Datenbereich, neben Leistungsdaten und subjektiven Daten mentaler Beanspruchung, abzuleiten, um zus{\"a}tzliche Informationen {\"u}ber die mentale Beanspruchung bei Dekodierungsprozessen zu erhalten. Diese Annahme sollte mit Hilfe der Ableitung von Biosignalen {\"u}berpr{\"u}ft werden. Der Begriff mentaler Beanspruchung wird in der bisherigen Literatur nur unzureichend definiert und differenziert. Daher wird zur Untersuchung dieses Konzepts, die wissenschaftliche Literatur ber{\"u}cksichtigend, ein erweitertes Modell mentaler Beanspruchung vorgestellt. Dabei wird die mentale Beanspruchung abgegrenzt von der emotionalen Beanspruchung. Mentale Beanspruchung wird weiterhin unterschieden in psychomotorische, perzeptive und kognitive Beanspruchung. Diese Aspekte mentaler Beanspruchung werden jeweils vom psychomotorischen, perzeptiven oder kognitiven Aufwand der zu bearbeitenden Aufgabe ausgel{\"o}st. In der vorliegenden Untersuchung wurden zwei zentrale Fragestellungen untersucht: Einerseits wurde die Analyse der anwendungsbezogenen Frage fokussiert, inwieweit psychophysiologische Indikatoren mentaler Beanspruchung {\"u}ber die Leistungsdaten (Dekodierungszeiten und Fehleranzahl) hinaus, zus{\"a}tzliche Informationen zur Bestimmung der G{\"u}te von Kodealphabeten liefern. Andererseits wurde der Forschungsaspekt untersucht, inwieweit psychophysiologische Indikatoren mentaler Beanspruchung die zur Dekodierung notwendigen perzeptiven und kognitiven Aspekte mentaler Beanspruchung differenzieren k{\"o}nnen. Emotionale Beanspruchung war nicht Gegenstand der Analysen, weshalb in der Operationalisierung versucht wurde, sie weitgehend zu vermeiden. Psychomotorische Beanspruchung als dritter Aspekt mentaler Beanspruchung (neben perzeptiver und kognitiver Beanspruchung) wurde f{\"u}r beide Experimentalgruppen weitgehend konstant gehalten. 
In Lernexperimenten hatten zwei anhand eines Lern- und Ged{\"a}chtnistests homogenisierte Stichproben jeweils die Bedeutung von 54 Kodes eines Kodealphabets zu erwerben. Dabei wurde jeder der zwei unabh{\"a}ngigen Stichproben ein anderes Kodealphabet vorgelegt, wobei sich die Kodealphabete hinsichtlich Buchstabenanzahl (Kodel{\"a}nge) und anzuwendender Zuordnungsregeln unterschieden. Damit differierten die Kodealphabete im perzeptiven und kognitiven Aspekt mentaler Beanspruchung. Die Kombination der Abk{\"u}rzungen entsprach den in einer Feuerwehrleitzentrale verwendeten (Kurzbeschreibungen von Notfallsituationen). In der Lernphase wurden den Probanden zun{\"a}chst die Kodealphabete geblockt mit ihren Bedeutungen pr{\"a}sentiert. Anschließend wurden die Kodes (ohne deren Bedeutung) in sechs aufeinanderfolgenden Pr{\"u}fphasen randomisiert einzeln dargeboten, wobei die Probanden instruiert waren, die Bedeutung der jeweiligen Kodes in ein Mikrofon zu sprechen. W{\"a}hrend des gesamten Experiments wurden, neben Leistungsdaten (Dekodierungszeiten und Fehleranzahl) und subjektiven Daten {\"u}ber die mentale Beanspruchung im Verlauf der Experimente, folgende zentralnerv{\"o}se und peripherphysiologische Biosignale abgeleitet: Blutdruck, Herzrate, phasische und tonische elektrodermale Aktivit{\"a}t und Elektroenzephalogramm. Aus ihnen wurden zun{\"a}chst 13 peripherphysiologische und 7 zentralnerv{\"o}se Parameter berechnet, von denen 7 peripherphysiologische und 3 zentralnerv{\"o}se Parameter die statistischen Voraussetzungen (Einschlusskriterien) soweit erf{\"u}llten, dass sie in die inferenzstatistische Datenanalyse einbezogen wurden. Leistungsdaten und subjektive Beanspruchungseinsch{\"a}tzungen der Versuchsdurchg{\"a}nge wurden zu den psychophysiologischen Parametern in Beziehung gesetzt. Die Befunde zeigen, dass mittels der psychophysiologischen Daten zus{\"a}tzliche Erkenntnisse {\"u}ber den kognitiven Aufwand gewonnen werden k{\"o}nnen. 
Als weitere Analyse wurden die Kodes post hoc in zwei neue Kodealphabete eingeteilt. Ziel dieser Analyse war es, die Unterschiede zwischen beiden Kodealphabeten zu erh{\"o}hen, um deutlichere reizbezogene psychophysiologische Unterschiede in den EEG-Daten zwischen den Kodealphabeten zu erhalten. Dazu wurden diejenigen, hinsichtlich ihrer Bedeutung, parallelen Kodes in beiden Kodealphabeten ausgew{\"a}hlt, die sich in der Dekodierungszeit maximal voneinander unterschieden. Eine erneute Analyse der EEG-Daten erbrachte jedoch keine Verbesserung der Ergebnisse. Drei Hauptergebnisse bez{\"u}glich der psychophysiologischen Parameter konnten festgestellt werden: Das erste Ergebnis ist f{\"u}r die psychophysiologische Methodik bedeutsam. Viele psychophysiologische Parameter unterschieden zwischen den Pr{\"u}fphasen und zeigen damit eine hinreichende Sensitivit{\"a}t zur Untersuchung mentaler Beanspruchung bei Dekodierungsprozessen an. Dazu geh{\"o}ren die Anzahl der spontanen Hautleitwertsreaktionen, die Amplitude der Hautleitwertsreaktionen, das Hautleitwertsniveau, die Herzrate, die Herzratendifferenz und das Beta-2-Band des EEG. Diese Parameter zeigen einen {\"a}hnlichen Verlauf wie die Leistungsdaten. Dies zeigt, dass es m{\"o}glich ist, die hier operationalisierte Art mentaler Beanspruchung in Form von Dekodierungsprozessen psychophysiologisch zu analysieren. Ein zweites Ergebnis betrifft die M{\"o}glichkeit, Unterschiede mentaler Beanspruchung zwischen beiden Gruppen psychophysiologisch abzubilden: Das Hautleitwertsniveau und das Theta-Frequenzband des Spontan-EEG zeigten Unterschiede zwischen beiden Stichproben von der ersten Pr{\"u}fphase an. Diese Parameter indizieren unterschiedlichen kognitiven Aufwand in beiden Stichproben {\"u}ber alle Pr{\"u}fphasen. 
Das wichtigste Ergebnis betrifft die Frage nach einem Informationsgewinn bei Einsatz psychophysiologischer Methoden zur Bewertung der G{\"u}te von Kodealphabeten: Einen tats{\"a}chlichen Informationsgewinn gegen{\"u}ber den Leistungsdaten zeigte die Amplitude der elektrodermalen Aktivit{\"a}t und die Herzraten-Differenz an. Denn in den sp{\"a}teren Pr{\"u}fphasen, wenn sich die Leistungsdaten beider Kodealphabete nicht mehr unterschieden, konnten unterschiedliche Auspr{\"a}gungen dieser psychophysiologischen Parameter zwischen beiden Kodealphabeten verzeichnet werden. Damit konnten unterschiedliche Aspekte mentaler Beanspruchung in beiden Kodealphabeten in den sp{\"a}teren Pr{\"u}fphasen erfasst werden, in denen sich die Leistungsdaten nicht mehr unterschieden. Alle drei Ergebnisse zeigen, dass es, trotz erheblichen technischen und methodischen Aufwands, sinnvoll erscheint, bei der Charakterisierung mentaler Belastungen und f{\"u}r die Gestaltung von Kodealphabeten auch psychophysiologische Daten heranzuziehen, da zus{\"a}tzliche Informationen {\"u}ber den perzeptiven und kognitiven Dekodierungsaufwand gewonnen werden k{\"o}nnen.}, language = {de} } @phdthesis{Patzwald2020, author = {Patzwald, Christiane}, title = {Actions through the lens of communicative cues}, school = {Universit{\"a}t Potsdam}, pages = {156}, year = {2020}, abstract = {The PhD thesis entitled "Actions through the lens of communicative cues. The influence of verbal cues and emotional cues on action processing and action selection in the second year of life" is based on four studies, which examined the cognitive integration of another person's communicative cues (i.e., verbal cues, emotional cues) with behavioral cues in 18- and 24-month-olds. 
In the context of social learning of instrumental actions, it was investigated how the intention-related coherence of either a verbally announced action intention or an emotionally signaled action evaluation with an action demonstration influenced infants' neuro-cognitive processing (Study I) and selection (Studies II, III, IV) of a novel object-directed action. Developmental research has shown that infants benefit from another's behavioral cues (e.g., action effect, persistency, selectivity) to infer the underlying goal or intention, respectively, of an observed action (e.g., Cannon \& Woodward, 2012; Woodward, 1998). Particularly action effects support infants in distinguishing perceptual action features (e.g., target object identity, movement trajectory, final target object state) from conceptual action features such as goals and intentions. However, less is known about infants' ability to cognitively integrate another's behavioral cues with additional action-related communicative cues. There is some evidence showing that in the second year of life, infants selectively imitate a novel action that is verbally ("There!") or emotionally (positive expression) marked as aligning with the model's action intention over an action that is verbally ("Whoops!") or emotionally (negative expression) marked as unintentional (Carpenter, Akhtar, \& Tomasello, 1998; Olineck \& Poulin-Dubois, 2005, 2009; Repacholi, 2009; Repacholi, Meltzoff, Toub, \& Ruba, 2016). Yet, it is currently unclear which role the specific intention-related coherence of a communicative cue with a behavioral cue plays in infants' action processing and action selection that is, whether the communicative cue confirms, contrasts, clarifies, or is unrelated to the behavioral cue. 
Notably, by using both verbal cues and emotional cues, we examined not only two domains of communicative cues but also two qualitatively distinct relations between behavioral cues on the one hand and communicative cues on the other hand. More specifically, a verbal cue has the potential to communicate an action intention in the absence of an action demonstration and thus a prior-intention (Searle, 1983), whereas an emotional cue evaluates an ongoing or past action demonstration and thus signals an intention-in-action (Searle, 1983). In a first research focus, this thesis examined infants' capacity to cognitively integrate another's intention-related communicative cues and behavioral cues, and also focused on the role of the social cues' coherence in infants' action processing and action selection. In a second research focus, and to gain more elaborate insights into how the sub-processes of social learning (attention, encoding, response; cf. Bandura, 1977) are involved in this coherence-sensitive integrative processing, we employed a multi-measures approach. More specifically, we used Electroencephalography (EEG) and looking times to examine how the cues' coherence influenced the compound of attention and encoding, and imitation (including latencies to first-touch and first-action) to address the compound of encoding and response. Based on the action-reconstruction account (Csibra, 2007), we predicted that infants use extra-motor information (i.e., communicative cues) together with behavioral cues to reconstruct another's action intention. Accordingly, we expected infants to possess a flexibly organized internal action hierarchy, which they adapt according to the cues' coherence that is, according to what they inferred to be the overarching action goal. 
More specifically, in a social-learning situation that comprised an adult model, who demonstrated an action on a novel object that offered two actions, we expected the demonstrated action to lead infants' action hierarchy when the communicative (i.e., verbal, emotional) cue conveyed similar (confirming coherence) or no additional (un-related coherence) intention-related information relative to the behavioral cue. In terms of action selection, this action hierarchy should become evident in a selective imitation of the demonstrated action. However, when the communicative cue questioned (contrasting coherence) the behaviorally implied action goal or was the only cue conveying meaningful intention-related information (clarifying coherence), the verbally/emotionally intended action should ascend infants' action hierarchy. Consequently, infants' action selection should align with the verbally/emotionally intended action (goal emulation). Notably, these predictions oppose the direct-matching perspective (Rizzolatti \& Craighero, 2004), according to which the observation of another's action directly resonates with the observer's motor repertoire, with this motor resonance enabling the identification of the underlying action goal. Importantly, the direct-matching perspective predicts a rather inflexible action hierarchy inasmuch as the process of goal identification should solely rely on the behavioral cue, irrespective of the behavioral cue's coherence with extra-motor intention-related information, as it may be conveyed via communicative cues. As to the role of verbal cues, Study I used EEG to examine the influence of a confirming (Congruent) versus contrasting (Incongruent) coherence of a verbal action intention with the same action demonstration on 18-month-olds' conceptual action processing (as measured via mid-latency mean negative ERP amplitude) and motor activation (as measured via central mu-frequency band power). 
The action was demonstrated on a novel object that offered two action alternatives from a neutral position. We expected mid-latency ERP negativity to be enhanced in Incongruent compared to Congruent, because past EEG research has demonstrated enhanced conceptual processing for stimuli that mismatched rather than matched the semantic context (Friedrich \& Friederici, 2010; Kaduk et al., 2016). Regarding motor activation, Csibra (2007) posited that the identification of a clear action goal constitutes a crucial basis for motor activation to occur. We therefore predicted reduced mu power (indicating enhanced motor activation) for Congruent than Incongruent, because in Congruent, the cues' match provides unequivocal information about the model's action goal, whereas in Incongruent, the conflict may render the model's action goal more unclear. Unexpectedly, in the entire sample, 18-month-olds' mid-latency ERP negativity during the observation of the same action demonstration did not differ significantly depending on whether this action was congruent or incongruent with the model's verbal action intention. Yet, post hoc analyses revealed the presence of two subgroups of infants, each of which exhibited significantly different mid-latency ERP negativity for Congruent versus Incongruent, but in opposing directions. The subgroups differed in their productive action-related language skills, with the linguistically more advanced infants exhibiting the expected response pattern of enhanced ERP mean negativity in Incongruent than Congruent, indicating enhanced conceptual processing of an action demonstration that was contrasted rather than confirmed by the verbal action context. As expected, central mu power in the entire sample was reduced in Congruent relative to Incongruent, indicating enhanced motor activation when the action demonstration was preceded by a confirming relative to a contrasting verbal action intention. 
This finding may indicate the covert preparation for a preferential imitation of the congruent relative to the incongruent action (Filippi et al., 2016; Frey \& Gerry, 2006). Overall, these findings are in line with the action-reconstruction account (Csibra, 2007), because they suggest a coherence-sensitive attention to and encoding of the same perceptual features of another's behavior and thus a cognitive integration of intention-related verbal cues and behavioral cues. Yet, because the subgroup constellation in infants' ERPs was only discovered post hoc, future research is clearly required to substantiate this finding. Also, future research should validate our interpretation that enhanced motor activation may reflect an electrophysiological marker of subsequent imitation by employing EEG and imitation in a within-subjects design. Study II built on Study I by investigating the impact of coherence of a verbal cue and a behavioral cue on 18- and 24-month-olds' action selection in an imitation study. When infants of both age groups observed a confirming (Congruent) or unrelated (Pseudo-word: action demonstration was associated with novel verb-like cue) coherence, they selectively imitated the demonstrated action over the not demonstrated, alternative action, with no difference between these two conditions. These findings suggest that, as expected, infants' action hierarchy was led by the demonstrated action when the verbal cue provided similar (Congruent) or no additional (Pseudo-word) intention-related information relative to a meaningful behavioral cue. These findings support the above-mentioned interpretation that enhanced motor activation during action observation may reflect a covert preparation for imitation (Study I). Interestingly, infants did not seem to benefit from the intention-highlighting effect of the verbal cue in Congruent, suggesting that the verbal cue had an unspecific (e.g., attention-guiding) effect on infants' action selection. 
Contrary, when infants observed a contrasting (Incongruent) or clarifying (Failed-attempt: model failed to manipulate the object but verbally announced a certain action intention) coherence, their action selection varied with age and also varied across the course of the experiment (block 1 vs. block 2). More specifically, the 24-month-olds made stronger use of the verbal cue for their action selection in block 1 than did the 18-month-olds. However, while the 18-month-olds' use of the verbal cue increased across blocks, particularly in Incongruent, the 24-month-olds' use of the verbal cue decreased across blocks. Overall, these results suggest that, as expected, infants' action hierarchy in Incongruent (both age groups) and Failed-attempt (only 24-month-olds) drew on the verbal action intention, because in both age groups, infants emulated the verbal intention about as often as they imitated the demonstrated action or even emulated the verbal action intention preferentially. Yet, these findings were confined to certain blocks. It may be argued that the younger age group had a harder time inferring and emulating the intended, yet never observed action, because this requirement is more demanding in cognitive and motor terms. These demands may explain why the 18-month-olds needed some time to take account of the verbal action intention. Contrary, it seems that the 24-month-olds, although demonstrating their principle capacity to take account of the verbal cue in block 1, lost trust in the model's verbal cue, maybe because the verbal cue did not have predictive value for the model's actual behavior. Supporting this interpretation, research on selective trust has demonstrated that already infants evaluate another's reliability or competence, respectively, based on how that model handles familiar objects (behavioral reliability) or labels familiar objects (verbal reliability; for reviews, see Mills, 2013; Poulin-Dubois \& Brosseau-Liard, 2016). 
Relatedly, imitation research has demonstrated that the interpersonal aspects of a social-learning situation gain increasing relevance for infants during the second year of life (Gell{\'e}n \& Buttelmann, 2019; Matheson, Moore, \& Akhtar, 2013; Uzgiris, 1981). It may thus be argued that when the 24-month-olds were repeatedly faced with a verbally unreliable model, they de-evaluated the verbal cue as signaling the model's action intention and instead relied more heavily on alternative cues such as the behavioral cue (Incongruent) or the action context (e.g., object affordances, salience; Failed-attempt). Infants' first-action latencies were higher in Incongruent and Failed-attempt than in both Congruent and Pseudo-word, and were also higher in Failed-attempt than in Incongruent. These latency-findings thus indicate that situations involving a meaningful verbal cue that deviated from the behavioral cue are cognitively more demanding, resulting in a delayed initiation of a behavioral response. In sum, the findings of Study II suggest that both age groups were highly flexible in their integration of a verbal cue and behavioral cue. Moreover, our results do not indicate a general superiority of either cue. Instead, it seems to depend on the informational gain conveyed by the verbal cue whether it exerts a specific, intention-highlighting effect (Incongruent, Failed-attempt) or an unspecific (e.g., attention-guiding) effect (Congruent, Pseudo-word). Studies III and IV investigated the impact of another's action-related emotional cues on 18-month-olds' action selection. In Study III, infants observed a model, who demonstrated two actions on a novel object in direct succession, and who combined one of the two actions with a positive (happy) emotional expression and the other action with a negative (sad) emotional expression. As expected, infants imitated the positively emoted (PE) action more often than the negatively emoted (NE) action. 
This preference arose from an increase in infants' readiness to perform the PE action from the baseline period (prior to the action demonstrations) to the test period (following the action demonstrations), rather than from a decrease in readiness to perform the NE action. The positive cue thus had a stronger behavior-regulating effect than the negative cue. Notably, infants' more general object-directed behavior in terms of first-touch latencies remained unaffected by the emotional cues' valence, indicating that infants had linked the emotional cues specifically to the corresponding action and not the object as a whole (Repacholi, 2009). Also, infants' looking times during the action demonstration did not differ significantly as a function of emotional valence and were characterized by a predominant attentional focus to the action/object rather than to the model's face. Together with the findings on infants' first-touch latencies, these results indicate a sensitivity for the notion that emotions can have very specific referents (referential specificity; Martin, Maza, McGrath, \& Phelps, 2014). Together, Study III provided evidence for selective imitation based on another's intention-related (particularly positive) emotional cues in an action-selection task, and thus indicates that infants' action hierarchy flexibly responds to another's emotional evaluation of observed actions. According to Repacholi (2009), we suggest that infants used the model's emotional evaluation to re-appraise the corresponding action (effect), for instance in terms of desirability. Study IV followed up on Study III by investigating the role of the negative emotional cue for infants' action selection in more detail. 
Specifically, we investigated whether a contrasting (negative) emotional cue alone would be sufficient to differentially rank the two actions along infants' action hierarchy or whether instead infants require direct information about the model's action intention (in the form of a confirming action-emotion pair) to align their action selection with the emotional cues. Also, we examined whether the absence of a direct behavior-regulating effect of the negative cue in Study III was due to the negative cue itself or to the concurrently available positive cue masking the negative cue's potential effect. To this end, we split the demonstration of the two action-emotion pairs across two trials. In each trial, one action was thus demonstrated and emoted (PE, NE action), and one action was not demonstrated and un-emoted (UE action). For trial 1, we predicted that infants, who observed a PE action demonstration, would selectively imitate the PE action, whereas infants, who observed a NE action demonstration would selectively emulate the UE action. As to trial 2, we expected the complementary action-emotion pair to provide additional clarifying information as the model's emotional evaluation of both actions, which should either lead to adaptive perseveration (if infants' action selection in trial 1 had already drawn on the emotional cue) or adaptive change (if infants' action selection in trial 1 signaled a disregard of the emotional cue). As to trial 1, our findings revealed that, as expected, infants imitated the PE action more often than they emulated the UE action. Like in Study III, this selectivity arose from an increase in infants' propensity to perform the PE action from baseline to trial 1. Also like in Study III, infants performed the NE action about equally often in baseline and trial 1, which speaks against a direct behavior-regulating effect of the negative cue also when presented in isolation. 
However, after a NE action demonstration, infants emulated the UE action more often in trial 1 than in baseline, suggesting an indirect behavior-regulating effect of the negative cue. Yet, this indirect effect did not yield a selective emulation of the UE action, because infants performed both action alternatives about equally often in trial 1. Unexpectedly, infants' action selection in trial 2 was unaffected by the emotional cue. Instead, infants perseverated their action selection of trial 1 in trial 2, irrespective of whether it was adaptive or non-adaptive with respect to the model's emotional evaluation of the action. It seems that infants changed their strategy across trials, from an initial adherence to the emotional (particularly positive) cue, towards bringing about a salient action effect (Marcovich \& Zelazo, 2009). In sum, Studies III and IV indicate a dynamic interplay of different action-selection strategies, depending on valence and presentation order. Apparently, at least in infancy, action reconstruction as one basis for selective action performance reaches its limits when infants can only draw on indirect intention-related information (i.e., which action should be avoided). Overall, our findings favor the action-reconstruction account (Csibra, 2007), according to which actions are flexibly organized along a hierarchy, depending on inferential processes based on extra-motor intention-related information. At the same time, the findings question the direct-matching hypothesis (Rizzolatti \& Craighero, 2004), according to which the identification (and pursuit) of action goals hinges on a direct simulation of another's behavioral cues. Based on the studies' findings, a preliminary working model is introduced, which seeks to integrate the two theoretical accounts by conceptualizing the routes that activation induced by social cues may take to eventually influence an infant's action selection. 
Our findings indicate that it is useful to strive for a differentiated conceptualization of communicative cues, because they seem to operate at different places within the process of cue integration, depending on their potential to convey direct intention-related information. Moreover, we suggest that there is bidirectional exchange within each compound of adjacent sub-processes (i.e., between attention and encoding, and encoding and response), and between the compounds. Hence, our findings highlight the benefits of a multi-measures approach when studying the development of infants' social-cognitive abilities, because it provides a more comprehensive picture of how the concerted use of social cues from different domains influences infants' processing and selection of instrumental actions. Finally, this thesis points to potential future directions to substantiate our current interpretation of the findings. Moreover, an extension to additional kinds of coherence is suggested to get closer to infants' everyday-world of experience.}, language = {en} } @phdthesis{Metzner2015, author = {Metzner, Paul-Philipp}, title = {Eye movements and brain responses in natural reading}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82806}, school = {Universit{\"a}t Potsdam}, pages = {xv, 160}, year = {2015}, abstract = {Intuitively, it is clear that neural processes and eye movements in reading are closely connected, but only few studies have investigated both signals simultaneously. Instead, the usual approach is to record them in separate experiments and to subsequently consolidate the results. However, studies using this approach have shown that it is feasible to coregister eye movements and EEG in natural reading and contributed greatly to the understanding of oculomotor processes in reading. The present thesis builds upon that work, assessing to what extent coregistration can be helpful for sentence processing research. 
In the first study, we explore how well coregistration is suited to study subtle effects common to psycholinguistic experiments by investigating the effect of distance on dependency resolution. The results demonstrate that researchers must improve the signal-to-noise ratio to uncover more subdued effects in coregistration. In the second study, we compare oscillatory responses in different presentation modes. Using robust effects from world knowledge violations, we show that the generation and retrieval of memory traces may differ between natural reading and word-by-word presentation. In the third study, we bridge the gap between our knowledge of behavioral and neural responses to integration difficulties in reading by analyzing the EEG in the context of regressive saccades. We find the P600, a neural indicator of recovery processes, when readers make a regressive saccade in response to integration difficulties. The results in the present thesis demonstrate that coregistration can be a useful tool for the study of sentence processing. However, they also show that it may not be suitable for some questions, especially if they involve subtle effects.}, language = {en} } @phdthesis{Dornhege2006, author = {Dornhege, Guido}, title = {Increasing information transfer rates for brain-computer interfacing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7690}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The goal of a Brain-Computer Interface (BCI) consists of the development of a unidirectional interface between a human and a computer to allow control of a device only via brain signals. While the BCI systems of almost all other groups require the user to be trained over several weeks or even months, the group of Prof. Dr. Klaus-Robert M{\"u}ller in Berlin and Potsdam, which I belong to, was one of the first research groups in this field which used machine learning techniques on a large scale. 
The adaptivity of the processing system to the individual brain patterns of the subject confers huge advantages for the user. Thus BCI research is considered a hot topic in machine learning and computer science. It requires interdisciplinary cooperation between disparate fields such as neuroscience, since only by combining machine learning and signal processing techniques based on neurophysiological knowledge will the largest progress be made. In this work I particularly deal with my part of this project, which lies mainly in the area of computer science. I have considered the following three main points: Establishing a performance measure based on information theory: I have critically illuminated the assumptions of Shannon's information transfer rate for application in a BCI context. By establishing suitable coding strategies I was able to show that this theoretical measure approximates quite well to what is practically achievable. Transfer and development of suitable signal processing and machine learning techniques: One substantial component of my work was to develop several machine learning and signal processing algorithms to improve the efficiency of a BCI. Based on the neurophysiological knowledge that several independent EEG features can be observed for some mental states, I have developed a method for combining different and maybe independent features which improved performance. In some cases the performance of the combination algorithm outperforms the best single performance by more than 50 \%. Furthermore, I have theoretically and practically addressed via the development of suitable algorithms the question of the optimal number of classes which should be used for a BCI. It transpired that with BCI performances reported so far, three or four different mental states are optimal. 
For another extension I have combined ideas from signal processing with those of machine learning since a high gain can be achieved if the temporal filtering, i.e., the choice of frequency bands, is automatically adapted to each subject individually. Implementation of the Berlin brain computer interface and realization of suitable experiments: Finally a further substantial component of my work was to realize an online BCI system which includes the developed methods, but is also flexible enough to allow the simple realization of new algorithms and ideas. So far, bitrates of up to 40 bits per minute have been achieved with this system by absolutely untrained users which, compared to results of other groups, is highly successful.}, subject = {Kybernetik}, language = {en} } @phdthesis{BreakellFernandez2016, author = {Breakell Fernandez, Leigh}, title = {Investigating word order processing using pupillometry and event-related potentials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91438}, school = {Universit{\"a}t Potsdam}, pages = {x, 122}, year = {2016}, abstract = {In this thesis sentence processing was investigated using a psychophysiological measure known as pupillometry as well as Event-Related Potentials (ERP). The scope of the thesis was broad, investigating the processing of several different movement constructions with native speakers of English and second language learners of English, as well as word order and case marking in German-speaking adults and children. Pupillometry and ERP allowed us to test competing linguistic theories and use novel methodologies to investigate the processing of word order. 
In doing so we also aimed to establish pupillometry as an effective way to investigate the processing of word order thus broadening the methodological spectrum.}, language = {en} } @phdthesis{Allefeld2004, author = {Allefeld, Carsten}, title = {Phase synchronization analysis of event-related brain potentials in language processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001873}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Das Forschungsthema Synchronisation bildet einen Schnittpunkt von Nichtlinearer Dynamik und Neurowissenschaft. So hat zum einen neurobiologische Forschung gezeigt, daß die Synchronisation neuronaler Aktivit{\"a}t einen wesentlichen Aspekt der Funktionsweise des Gehirns darstellt. Zum anderen haben Fortschritte in der physikalischen Theorie zur Entdeckung des Ph{\"a}nomens der Phasensynchronisation gef{\"u}hrt. Eine dadurch motivierte Datenanalysemethode, die Phasensynchronisations-Analyse, ist bereits mit Erfolg auf empirische Daten angewandt worden. Die vorliegende Dissertation kn{\"u}pft an diese konvergierenden Forschungslinien an. Ihren Gegenstand bilden methodische Beitr{\"a}ge zur Fortentwicklung der Phasensynchronisations-Analyse, sowie deren Anwendung auf ereigniskorrelierte Potentiale, eine besonders in den Kognitionswissenschaften wichtige Form von EEG-Daten. Die methodischen Beitr{\"a}ge dieser Arbeit bestehen zum ersten in einer Reihe spezialisierter statistischer Tests auf einen Unterschied der Synchronisationsst{\"a}rke in zwei verschiedenen Zust{\"a}nden eines Systems zweier Oszillatoren. Zweitens wird im Hinblick auf den viel-kanaligen Charakter von EEG-Daten ein Ansatz zur multivariaten Phasensynchronisations-Analyse vorgestellt. 
Zur empirischen Untersuchung neuronaler Synchronisation wurde ein klassisches Experiment zur Sprachverarbeitung repliziert, in dem der Effekt einer semantischen Verletzung im Satzkontext mit demjenigen der Manipulation physischer Reizeigenschaften (Schriftfarbe) verglichen wird. Hier zeigt die Phasensynchronisations-Analyse eine Verringerung der globalen Synchronisationsst{\"a}rke f{\"u}r die semantische Verletzung sowie eine Verst{\"a}rkung f{\"u}r die physische Manipulation. Im zweiten Fall l{\"a}ßt sich der global beobachtete Synchronisationseffekt mittels der multivariaten Analyse auf die Interaktion zweier symmetrisch gelegener Gehirnareale zur{\"u}ckf{\"u}hren. Die vorgelegten Befunde zeigen, daß die physikalisch motivierte Methode der Phasensynchronisations-Analyse einen wesentlichen Beitrag zur Untersuchung ereigniskorrelierter Potentiale in den Kognitionswissenschaften zu leisten vermag.}, language = {en} }