@inproceedings{LueckingRieserStaudacher2006,
  author   = {L{\"u}cking, Andy and Rieser, Hannes and Staudacher, Marc},
  title    = {Multi-modal integration for gesture and speech},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-10393},
  year     = {2006},
  abstract = {Demonstratives, in particular gestures that "only" accompany speech, are not a big issue in current theories of grammar. If we deal with gestures, fixing their function is one big problem; the other is how to integrate the representations originating from different channels and, ultimately, how to determine their composite meanings. The growing interest in multi-modal settings, computer simulations, human-machine interfaces and VR applications increases the need for theories of multimodal structures and events. In our workshop contribution we focus on the integration of multimodal contents and investigate different approaches dealing with this problem, such as Johnston et al. (1997), Johnston (1998), Johnston and Bangalore (2000), Chierchia (1995), Asher (2005), and Rieser (2005).},
  language = {en}
}

@inproceedings{LueckingRieserStaudacher2006a,
  author   = {L{\"u}cking, Andy and Rieser, Hannes and Staudacher, Marc},
  title    = {SDRT and multi-modal situated communication},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-10348},
  year     = {2006},
  abstract = {Classical SDRT (Asher and Lascarides, 2003) discussed essential features of dialogue such as adjacency pairs, corrections, and updating. Recent work in SDRT (Asher, 2002, 2005) aims at the description of natural dialogue. We use this work to model situated communication, i.e. dialogue in which sub-sentential utterances and gestures (pointing and grasping) are used as conventional modes of communication. We show that, in addition to cognitive modelling in SDRT capturing mental states and speech-act related goals, special postulates are needed to extract meaning out of contexts. Gestural meaning anchors discourse referents in contextually given domains. Both sorts of meaning are fused with the meaning of fragments to arrive at fully developed dialogue moves. Once this task is accomplished, the standard SDRT machinery (tagged SDRSs, rhetorical relations, the update mechanism, and the Maximize Discourse Coherence constraint) generates coherent structures. In sum, meanings from different verbal and non-verbal sources are assembled using extended SDRT to form coherent wholes.},
  language = {en}
}

@inproceedings{KranstedtLueckingPfeifferetal2006,
  author   = {Kranstedt, Alfred and L{\"u}cking, Andy and Pfeiffer, Thies and Rieser, Hannes and Staudacher, Marc},
  title    = {Measuring and reconstructing pointing in visual contexts},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-10362},
  year     = {2006},
  abstract = {We describe an experiment to gather original data on geometrical aspects of pointing. In particular, we focus on the concept of the pointing cone, a geometrical model of a pointing's extension. In our setting we employed methodological and technical procedures of a new type to integrate data from annotations as well as from tracker recordings, combining exact information on position and orientation with raters' classifications. Our first results seem to challenge classical linguistic and philosophical theories of demonstration in that they advise separating pointings from reference.},
  language = {en}
}