@article{Rabovsky2020,
  author = {Rabovsky, Milena},
  title = {Change in a probabilistic representation of meaning can account for N400 effects on articles: a neural network model},
  series = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience},
  volume = {143},
  journal = {Neuropsychologia : an international journal in behavioural and cognitive neuroscience},
  publisher = {Elsevier},
  address = {Oxford},
  issn = {0028-3932},
  doi = {10.1016/j.neuropsychologia.2020.107466},
  pages = {7},
  year = {2020},
  abstract = {Increased N400 amplitudes on indefinite articles (a/an) incompatible with expected nouns have been initially taken as strong evidence for probabilistic pre-activation of phonological word forms, and recently been intensely debated because they have been difficult to replicate. Here, these effects are simulated using a neural network model of sentence comprehension that we previously used to simulate a broad range of empirical N400 effects. The model produces the effects when the cue validity of the articles concerning upcoming noun meaning in the learning environment is high, but fails to produce the effects when the cue validity of the articles is low due to adjectives presented between articles and nouns during training. These simulations provide insight into one of the factors potentially contributing to the small size of the effects in empirical studies and generate predictions for cross-linguistic differences in article induced N400 effects based on articles' cue validity. The model accounts for article induced N400 effects without assuming pre-activation of word forms, and instead simulates these effects as the stimulus-induced change in a probabilistic representation of meaning corresponding to an implicit semantic prediction error.},
  language = {en}
}
@article{RabovskyMcClelland2020,
  author = {Rabovsky, Milena and McClelland, James L.},
  title = {Quasi-compositional mapping from form to meaning},
  series = {Philosophical transactions of the Royal Society of London : B, Biological sciences},
  volume = {375},
  journal = {Philosophical transactions of the Royal Society of London : B, Biological sciences},
  number = {1791},
  publisher = {Royal Society},
  address = {London},
  issn = {0962-8436},
  doi = {10.1098/rstb.2019.0313},
  pages = {9},
  year = {2020},
  abstract = {We argue that natural language can be usefully described as quasi-compositional and we suggest that deep learning-based neural language models bear long-term promise to capture how language conveys meaning. We also note that a successful account of human language processing should explain both the outcome of the comprehension process and the continuous internal processes underlying this performance. These points motivate our discussion of a neural network model of sentence comprehension, the Sentence Gestalt model, which we have used to account for the N400 component of the event-related brain potential (ERP), which tracks meaning processing as it happens in real time. The model, which shares features with recent deep learning-based language models, simulates N400 amplitude as the automatic update of a probabilistic representation of the situation or event described by the sentence, corresponding to a temporal difference learning signal at the level of meaning. We suggest that this process happens relatively automatically, and that sometimes a more-controlled attention-dependent process is necessary for successful comprehension, which may be reflected in the subsequent P600 ERP component. We relate this account to current deep learning models as well as classic linguistic theory, and use it to illustrate a domain general perspective on some specific linguistic operations postulated based on compositional analyses of natural language. This article is part of the theme issue 'Towards mechanistic models of meaning composition'.},
  language = {en}
}

@misc{Rabovsky2020a,
  author = {Rabovsky, Milena},
  title = {Change in a probabilistic representation of meaning can account for N400 effects on articles: a neural network model},
  series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe},
  issn = {1866-8364},
  doi = {10.25932/publishup-52698},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-526988},
  pages = {9},
  year = {2020},
  abstract = {Increased N400 amplitudes on indefinite articles (a/an) incompatible with expected nouns have been initially taken as strong evidence for probabilistic pre-activation of phonological word forms, and recently been intensely debated because they have been difficult to replicate. Here, these effects are simulated using a neural network model of sentence comprehension that we previously used to simulate a broad range of empirical N400 effects.
The model produces the effects when the cue validity of the articles concerning upcoming noun meaning in the learning environment is high, but fails to produce the effects when the cue validity of the articles is low due to adjectives presented between articles and nouns during training. These simulations provide insight into one of the factors potentially contributing to the small size of the effects in empirical studies and generate predictions for cross-linguistic differences in article induced N400 effects based on articles' cue validity. The model accounts for article induced N400 effects without assuming pre-activation of word forms, and instead simulates these effects as the stimulus-induced change in a probabilistic representation of meaning corresponding to an implicit semantic prediction error.},
  language = {en}
}