@article{KuhlmannKollerSatta2015,
  author    = {Kuhlmann, Marco and Koller, Alexander and Satta, Giorgio},
  title     = {Lexicalization and Generative Power in {CCG}},
  journal   = {Computational Linguistics},
  volume    = {41},
  number    = {2},
  publisher = {MIT Press},
  address   = {Cambridge},
  issn      = {0891-2017},
  doi       = {10.1162/COLI_a_00219},
  pages     = {215--247},
  year      = {2015},
  abstract  = {The weak equivalence of Combinatory Categorial Grammar (CCG) and Tree-Adjoining Grammar (TAG) is a central result of the literature on mildly context-sensitive grammar formalisms. However, the categorial formalism for which this equivalence has been established differs significantly from the versions of CCG that are in use today. In particular, it allows restriction of combinatory rules on a per grammar basis, whereas modern CCG assumes a universal set of rules, isolating all cross-linguistic variation in the lexicon. In this article we investigate the formal significance of this difference. Our main result is that lexicalized versions of the classical CCG formalism are strictly less powerful than TAG.},
  language  = {en}
}

@article{GaroufiStaudteKolleretal.2016,
  author    = {Garoufi, Konstantina and Staudte, Maria and Koller, Alexander and Crocker, Matthew W.},
  title     = {Exploiting Listener Gaze to Improve Situated Communication in Dynamic Virtual Environments},
  journal   = {Cognitive Science : a multidisciplinary journal of anthropology, artificial intelligence, education, linguistics, neuroscience, philosophy, psychology ; journal of the Cognitive Science Society},
  volume    = {40},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken},
  issn      = {0364-0213},
  doi       = {10.1111/cogs.12298},
  pages     = {1671--1703},
  year      = {2016},
  abstract  = {Beyond the observation that both speakers and listeners rapidly inspect the visual targets of referring expressions, it has been argued that such gaze may constitute part of the communicative signal. In this study, we investigate whether a speaker may, in principle, exploit listener gaze to improve communicative success. In the context of a virtual environment where listeners follow computer-generated instructions, we provide two kinds of support for this claim. First, we show that listener gaze provides a reliable real-time index of understanding even in dynamic and complex environments, and on a per-utterance basis. Second, we show that a language generation system that uses listener gaze to provide rapid feedback improves overall task performance in comparison with two systems that do not use gaze. Aside from demonstrating the utility of listener gaze in situated communication, our findings open the door to new methods for developing and evaluating multi-modal models of situated interaction.},
  language  = {en}
}

@article{GaroufiKoller2014,
  author    = {Garoufi, Konstantina and Koller, Alexander},
  title     = {Generation of Effective Referring Expressions in Situated Context},
  journal   = {Language, Cognition and Neuroscience},
  volume    = {29},
  number    = {8},
  publisher = {Routledge, Taylor \& Francis Group},
  address   = {Abingdon},
  issn      = {2327-3798},
  doi       = {10.1080/01690965.2013.847190},
  pages     = {986--1001},
  year      = {2014},
  abstract  = {In task-oriented communication, references often need to be effective in their distinctive function, that is, help the hearer identify the referent correctly and as effortlessly as possible. However, it can be challenging for computational or empirical studies to capture referential effectiveness. Empirical findings indicate that human-produced references are not always optimally effective, and that their effectiveness may depend on different aspects of the situational context that can evolve dynamically over the course of an interaction. On this basis, we propose a computational model of effective reference generation which distinguishes speaker behaviour according to its helpfulness to the hearer in a certain situation, and explicitly aims at modelling highly helpful speaker behaviour rather than speaker behaviour invariably. Our model, which extends the planning-based paradigm of sentence generation with a statistical account of effectiveness, can adapt to the situational context by making this distinction newly for each new reference. We find that the generated references resemble those of effective human speakers more closely than references of baseline models, and that they are resolved correctly more often than those of other models participating in a shared-task evaluation with human hearers. Finally, we argue that the model could serve as a methodological framework for computational and empirical research on referential effectiveness.},
  language  = {en}
}

@article{DembergKellerKoller2013,
  author    = {Demberg, Vera and Keller, Frank and Koller, Alexander},
  title     = {Incremental, Predictive Parsing with Psycholinguistically Motivated {Tree-Adjoining} Grammar},
  journal   = {Computational Linguistics},
  volume    = {39},
  number    = {4},
  publisher = {MIT Press},
  address   = {Cambridge},
  issn      = {0891-2017},
  doi       = {10.1162/COLI_a_00160},
  pages     = {1025--1066},
  year      = {2013},
  abstract  = {Psycholinguistic research shows that key properties of the human sentence processor are incrementality, connectedness (partial structures contain no unattached nodes), and prediction (upcoming syntactic structure is anticipated). There is currently no broad-coverage parsing model with these properties, however. In this article, we present the first broad-coverage probabilistic parser for PLTAG, a variant of TAG that supports all three requirements. We train our parser on a TAG-transformed version of the Penn Treebank and show that it achieves performance comparable to existing TAG parsers that are incremental but not predictive. We also use our PLTAG model to predict human reading times, demonstrating a better fit on the Dundee eye-tracking corpus than a standard surprisal model.},
  language  = {en}
}