@article{ZaikinRosenblumScheffczyketal.1997, author = {Zaikin, Alexei A. and Rosenblum, Michael and Scheffczyk, Christian and Engbert, Ralf and Krampe, Ralf-Thomas and Kurths, J{\"u}rgen}, title = {Modeling qualitative changes in bimanual movements}, year = {1997}, language = {en} } @article{WidmannEngbertSchroeger2014, author = {Widmann, Andreas and Engbert, Ralf and Schr{\"o}ger, Erich}, title = {Microsaccadic responses indicate fast categorization of sounds: A novel approach to study auditory cognition}, series = {The journal of neuroscience}, volume = {34}, journal = {The journal of neuroscience}, number = {33}, publisher = {Society for Neuroscience}, address = {Washington}, issn = {0270-6474}, doi = {10.1523/JNEUROSCI.1568-14.2014}, pages = {11152 -- 11158}, year = {2014}, abstract = {The mental chronometry of the human brain's processing of sounds to be categorized as targets has intensively been studied in cognitive neuroscience. According to current theories, a series of successive stages consisting of the registration, identification, and categorization of the sound has to be completed before participants are able to report the sound as a target by button press after approximately 300-500 ms. Here we use miniature eye movements as a tool to study the categorization of a sound as a target or nontarget, indicating that an initial categorization is present already after 80-100 ms. During visual fixation, the rate of microsaccades, the fastest components of miniature eye movements, is transiently modulated after auditory stimulation. In two experiments, we measured microsaccade rates in human participants in an auditory three-tone oddball paradigm (including rare nontarget sounds) and observed a difference in the microsaccade rates between targets and nontargets as early as 142 ms after sound onset. This finding was replicated in a third experiment with directed saccades measured in a paradigm in which tones had to be matched to score-like visual symbols. Considering the delays introduced by (motor) signal transmission and data analysis constraints, the brain must have differentiated target from nontarget sounds as fast as 80-100 ms after sound onset in both paradigms. We suggest that predictive information processing for expected input makes higher cognitive attributes, such as a sound's identity and category, available already during early sensory processing. The measurement of eye movements is thus a promising approach to investigate hearing.}, language = {en} }
@article{WarschburgerCalvanoRichteretal.2015, author = {Warschburger, Petra and Calvano, Claudia and Richter, Eike M. and Engbert, Ralf}, title = {Analysis of Attentional Bias towards Attractive and Unattractive Body Regions among Overweight Males and Females: An Eye-Movement Study}, series = {PLoS one}, volume = {10}, journal = {PLoS one}, number = {10}, publisher = {PLoS}, address = {San Francisco}, issn = {1932-6203}, doi = {10.1371/journal.pone.0140813}, pages = {17}, year = {2015}, abstract = {Background Body image distortion is highly prevalent among overweight individuals. Whilst there is evidence that body-dissatisfied women and those suffering from disordered eating show a negative attentional bias towards their own unattractive body parts and others' attractive body parts, little is known about visual attention patterns in the area of obesity and with respect to males. Since eating disorders and obesity share common features in terms of distorted body image and body dissatisfaction, the aim of this study was to examine whether overweight men and women show a similar attentional bias. Methods/Design We analyzed eye movements in 30 overweight individuals (18 females) and 28 normal-weight individuals (16 females) with respect to the participants' own pictures as well as gender- and BMI-matched control pictures (front and back view). Additionally, we assessed body image and disordered eating using validated questionnaires. Discussion The overweight sample rated their own body as less attractive and showed a more disturbed body image. Contrary to our assumptions, they focused significantly longer on attractive compared to unattractive regions of both their own and the control body. For one's own body, this was more pronounced for women. A higher weight status and more frequent body checking predicted attentional bias towards attractive body parts. We found that overweight adults exhibit an unexpected and stable pattern of selective attention, with a distinctive focus on their own attractive body regions despite higher levels of body dissatisfaction.
This positive attentional bias may either be an indicator of a more pronounced pattern of attentional avoidance or a self-enhancing strategy. Further research is warranted to clarify these results.}, language = {en} } @article{TrukenbrodEngbert2012, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Eye movements in a sequential scanning task - evidence for distributed processing}, series = {Journal of vision}, volume = {12}, journal = {Journal of vision}, number = {1}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/12.1.5}, pages = {12}, year = {2012}, abstract = {Current models of eye movement control are derived from theories assuming serial processing of single items or from theories based on parallel processing of multiple items at a time. This issue has persisted because most investigated paradigms generated data compatible with both serial and parallel models. Here, we study eye movements in a sequential scanning task, where stimulus n indicates the position of the next stimulus n + 1. We investigate whether eye movements are controlled by sequential attention shifts when the task requires serial order of processing. Our measures of distributed processing in the form of parafoveal-on-foveal effects, long-range modulations of target selection, and skipping saccades provide evidence against models strictly based on serial attention shifts. We conclude that our results lend support to parallel processing as a strategy for eye movement control.}, language = {en} } @article{TrukenbrodEngbert2007, author = {Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Oculomotor control in a sequential search task}, doi = {10.1016/j.visres.2007.05.010}, year = {2007}, abstract = {Using a serial search paradigm, we observed several effects of within-object fixation position on spatial and temporal control of eye movements: the preferred viewing location, launch site effect, the optimal viewing position, and the inverted optimal viewing position of fixation duration. While these effects were first identified by eye-movement studies in reading, our approach permits an analysis of the functional relationships between the effects in a different paradigm. Our results demonstrate that the fixation position is an important predictor of the subsequent saccade by influencing both fixation duration and the selection of the next saccade target.}, language = {en} } @article{TrukenbrodBarthelmeWichmannetal.2019, author = {Trukenbrod, Hans Arne and Barthelme, Simon and Wichmann, Felix A. and Engbert, Ralf}, title = {Spatial statistics for gaze patterns in scene viewing}, series = {Journal of vision}, volume = {19}, journal = {Journal of vision}, number = {5}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.6.5}, pages = {1 -- 19}, year = {2019}, abstract = {Scene viewing is used to study attentional selection in complex but still controlled environments. One of the main observations on eye movements during scene viewing is the inhomogeneous distribution of fixation locations: While some parts of an image are fixated by almost all observers and are inspected repeatedly by the same observer, other image parts remain unfixated by observers even after long exploration intervals. Here, we apply spatial point process methods to investigate the relationship between pairs of fixations.
More precisely, we use the pair correlation function, a powerful statistical tool, to evaluate dependencies between fixation locations along individual scanpaths. We demonstrate that aggregation of fixation locations within 4 degrees is stronger than expected from chance. Furthermore, the pair correlation function reveals stronger aggregation of fixations when the same image is presented a second time. We use simulations of a dynamical model to show that a narrower spatial attentional span may explain differences in pair correlations between the first and the second inspection of the same image.}, language = {en} } @article{StarzynskiEngbert2009, author = {Starzynski, Christian and Engbert, Ralf}, title = {Noise-enhanced target discrimination under the influence of fixational eye movements and external noise}, issn = {1054-1500}, doi = {10.1063/1.3098950}, year = {2009}, abstract = {Active motor processes are present in many sensory systems to enhance perception. In the human visual system, miniature eye movements are produced involuntarily and unconsciously when we fixate a stationary target. These fixational eye movements represent self-generated noise which serves important perceptual functions. Here we investigate fixational eye movements under the influence of external noise. In a two-choice discrimination task, the target stimulus performed a random walk with varying noise intensity. We observe noise-enhanced discrimination of the target stimulus characterized by a U-shaped curve of manual response times as a function of the diffusion constant of the stimulus. Based on the experiments, we develop a stochastic information-accumulator model for stimulus discrimination in a noisy environment. Our results provide a new explanation for the constructive role of fixational eye movements in visual perception.}, language = {en} } @article{SinnEngbert2011, author = {Sinn, Petra and Engbert, Ralf}, title = {Saccadic facilitation by modulation of microsaccades in natural backgrounds}, series = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, volume = {73}, journal = {Attention, perception, \& psychophysics : AP\&P ; a journal of the Psychonomic Society, Inc.}, number = {4}, publisher = {Springer}, address = {New York}, issn = {1943-3921}, doi = {10.3758/s13414-011-0107-9}, pages = {1029 -- 1033}, year = {2011}, abstract = {Saccades move objects of interest into the center of the visual field for high-acuity visual analysis. White, Stritzke, and Gegenfurtner (Current Biology, 18, 124-128, 2008) have shown that saccadic latencies in the context of a structured background are much shorter than those with an unstructured background at equal levels of visibility. This effect has been explained by possible preactivation of the saccadic circuitry whenever a structured background acts as a mask for potential saccade targets. Here, we show that background textures modulate rates of microsaccades during visual fixation. First, after a display change, structured backgrounds induce a stronger decrease of microsaccade rates than do uniform backgrounds. Second, we demonstrate that the occurrence of a microsaccade in a critical time window can delay a subsequent saccadic response. 
Taken together, our findings suggest that microsaccades contribute to the saccadic facilitation effect, due to a modulation of microsaccade rates by properties of the background.}, language = {en} } @article{SinnEngbert2016, author = {Sinn, Petra and Engbert, Ralf}, title = {Small saccades versus microsaccades: Experimental distinction and model-based unification}, series = {Vision research : an international journal for functional aspects of vision.}, volume = {118}, journal = {Vision research : an international journal for functional aspects of vision.}, publisher = {Elsevier}, address = {Oxford}, issn = {0042-6989}, doi = {10.1016/j.visres.2015.05.012}, pages = {132 -- 143}, year = {2016}, abstract = {Natural vision is characterized by alternating sequences of rapid gaze shifts (saccades) and fixations. During fixations, microsaccades and slower drift movements occur spontaneously, so that the eye is never motionless. Theoretical models of fixational eye movements predict that microsaccades are dynamically coupled to slower drift movements generated immediately before microsaccades, which might be used as a criterion to distinguish microsaccades from small voluntary saccades. Here we investigate a sequential scanning task, where participants generate goal-directed saccades and microsaccades with overlapping amplitude distributions. We show that properties of microsaccades are correlated with precursory drift motion, while amplitudes of goal-directed saccades do not depend on previous drift epochs. We develop and test a mathematical model that integrates goal-directed and fixational eye movements, including microsaccades. Using model simulations, we reproduce the experimental finding of correlations within fixational eye movement components (i.e., between physiological drift and microsaccades) but not between goal-directed saccades and fixational drift motion. These results lend support to a functional difference between microsaccades and goal-directed saccades, while, at the same time, both types of behavior may be part of an oculomotor continuum that is quantitatively described by our mathematical model.}, language = {en} }
@article{SeeligRisseEngbert2021, author = {Seelig, Stefan and Risse, Sarah and Engbert, Ralf}, title = {Predictive modeling of parafoveal information processing during reading}, series = {Scientific reports}, volume = {11}, journal = {Scientific reports}, number = {1}, publisher = {Nature Portfolio}, address = {Berlin}, issn = {2045-2322}, doi = {10.1038/s41598-021-92140-z}, pages = {9}, year = {2021}, abstract = {Skilled reading requires information processing of the fixated and the not-yet-fixated words to generate precise control of gaze. Over the last 30 years, experimental research provided evidence that word processing is distributed across the perceptual span, which permits recognition of the fixated (foveal) word as well as preview of parafoveal words to the right of fixation. However, theoretical models have been unable to differentiate the specific influences of foveal and parafoveal information on saccade control. Here we show how parafoveal word difficulty modulates spatial and temporal control of gaze in a computational model to reproduce experimental results. In a fully Bayesian framework, we estimated model parameters for different models of parafoveal processing and carried out large-scale predictive simulations and model comparisons for a gaze-contingent reading experiment. We conclude that mathematical modeling of data from gaze-contingent experiments permits the precise identification of pathways from parafoveal information processing to gaze control, uncovering potential mechanisms underlying the parafoveal contribution to eye-movement control.}, language = {en} } @article{SeeligRabeMalemShinitskietal.2020, author = {Seelig, Stefan A. and Rabe, Maximilian Michael and Malem-Shinitski, Noa and Risse, Sarah and Reich, Sebastian and Engbert, Ralf}, title = {Bayesian parameter estimation for the SWIFT model of eye-movement control during reading}, series = {Journal of mathematical psychology}, volume = {95}, journal = {Journal of mathematical psychology}, publisher = {Elsevier}, address = {San Diego}, issn = {0022-2496}, doi = {10.1016/j.jmp.2019.102313}, pages = {32}, year = {2020}, abstract = {Process-oriented theories of cognition must be evaluated against time-ordered observations. Here we present a representative example for data assimilation of the SWIFT model, a dynamical model of the control of fixation positions and fixation durations during natural reading of single sentences. First, we develop and test an approximate likelihood function of the model, which is a combination of a spatial, pseudo-marginal likelihood and a temporal likelihood obtained by probability density approximation. Second, we implement a Bayesian approach to parameter inference using an adaptive Markov chain Monte Carlo procedure. Our results indicate that model parameters can be estimated reliably for individual subjects.
We conclude that approximative Bayesian inference represents a considerable step forward for computational models of eye-movement control, where modeling of individual data on the basis of process-based dynamic models has not been possible so far.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2017, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Reich, Sebastian and Wichmann, Felix A. and Engbert, Ralf}, title = {Likelihood-based parameter estimation and comparison of dynamical cognitive models}, series = {Psychological Review}, volume = {124}, journal = {Psychological Review}, number = {4}, publisher = {American Psychological Association}, address = {Washington}, issn = {0033-295X}, doi = {10.1037/rev0000068}, pages = {505 -- 524}, year = {2017}, abstract = {Dynamical models of cognition play an increasingly important role in driving theoretical and experimental research in psychology. Therefore, parameter estimation, model analysis and comparison of dynamical models are of essential importance. In this article, we propose a maximum likelihood approach for model analysis in a fully dynamical framework that includes time-ordered experimental data. Our methods can be applied to dynamical models for the prediction of discrete behavior (e.g., movement onsets); in particular, we use a dynamical model of saccade generation in scene viewing as a case study for our approach. For this model, the likelihood function can be computed directly by numerical simulation, which enables more efficient parameter estimation including Bayesian inference to obtain reliable estimates and corresponding credible intervals. Using hierarchical models, inference is even possible for individual observers. Furthermore, our likelihood approach can be used to compare different models. In our example, the dynamical framework is shown to outperform nondynamical statistical models. Additionally, the likelihood-based evaluation differentiates model variants, which produced indistinguishable predictions on hitherto used statistics. Our results indicate that the likelihood approach is a promising framework for dynamical cognitive models.}, language = {en} } @article{SchuettRothkegelTrukenbrodetal.2019, author = {Sch{\"u}tt, Heiko Herbert and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf and Wichmann, Felix A.}, title = {Disentangling bottom-up versus top-down and low-level versus high-level influences on eye movements over time}, series = {Journal of vision}, volume = {19}, journal = {Journal of vision}, number = {3}, publisher = {Association for Research in Vision and Ophthalmology}, address = {Rockville}, issn = {1534-7362}, doi = {10.1167/19.3.1}, pages = {23}, year = {2019}, abstract = {Bottom-up and top-down as well as low-level and high-level factors influence where we fixate when viewing natural scenes. However, the importance of each of these factors and how they interact remains a matter of debate. Here, we disentangle these factors by analyzing their influence over time. For this purpose, we develop a saliency model that is based on the internal representation of a recent early spatial vision model to measure the low-level, bottom-up factor. To measure the influence of high-level, bottom-up features, we use a recent deep neural network-based saliency model. To account for top-down influences, we evaluate the models on two large data sets with different tasks: first, a memorization task and, second, a search task.
Our results lend support to a separation of visual scene exploration into three phases: the first saccade, an initial guided exploration characterized by a gradual broadening of the fixation density, and a steady state that is reached after roughly 10 fixations. Saccade-target selection during the initial exploration and in the steady state is related to similar areas of interest, which are better predicted when including high-level features. In the search data set, fixation locations are determined predominantly by top-down processes. In contrast, the first fixation follows a different fixation density and contains a strong central fixation bias. Nonetheless, first fixations are guided strongly by image properties, and as early as 200 ms after image onset, fixations are better predicted by high-level information. We conclude that any low-level, bottom-up factors are mainly limited to the generation of the first saccade. All saccades are better explained when high-level features are considered, and later, this high-level, bottom-up control can be overruled by top-down influences.}, language = {en} } @article{SchwetlickRothkegelTrukenbrodetal.2020, author = {Schwetlick, Lisa and Rothkegel, Lars Oliver Martin and Trukenbrod, Hans Arne and Engbert, Ralf}, title = {Modeling the effects of perisaccadic attention on gaze statistics during scene viewing}, series = {Communications biology}, volume = {3}, journal = {Communications biology}, number = {1}, publisher = {Springer Nature}, address = {London}, issn = {2399-3642}, doi = {10.1038/s42003-020-01429-8}, pages = {11}, year = {2020}, abstract = {Lisa Schwetlick et al. present a computational model linking visual scan path generation in scene viewing to physiological and experimental work on perisaccadic covert attention, the act of attending to an object visually without obviously moving the eyes toward it. They find that integrating covert attention into predictive models of visual scan paths greatly improves the model's agreement with experimental data.
How we perceive a visual scene depends critically on the selection of gaze positions. For this selection process, visual attention is known to play a key role in two ways. First, image-features attract visual attention, a fact that is captured well by time-independent fixation models. Second, millisecond-level attentional dynamics around the time of saccades drive our gaze from one position to the next. These two related research areas on attention are typically perceived as separate, both theoretically and experimentally. Here we link the two research areas by demonstrating that perisaccadic attentional dynamics improve predictions on scan path statistics. In a mathematical model, we integrated perisaccadic covert attention with dynamic scan path generation. Our model reproduces saccade amplitude distributions, angular statistics, intersaccadic turning angles, and their impact on fixation durations as well as inter-individual differences using Bayesian inference. Therefore, our results lend support to the relevance of perisaccadic attention to gaze statistics.}, language = {en} } @article{SchiekScheffczykEngbertetal.1997, author = {Schiek, Michael and Scheffczyk, Christian and Engbert, Ralf and Kurths, J{\"u}rgen and Krampe, Ralf-Thomas and Kliegl, Reinhold and Drepper, Friedhelm R.}, title = {Symbolic dynamics of physiological synchronisation : examples from bimanual movements and cardiorespiratory interaction}, year = {1997}, abstract = {Key words: Nonlinear time series analysis, symbolic dynamics, phase transitions, physiological data, biological synchronization, production of polyrhythms, cardiorespiratory interaction, variation of control parameter}, language = {en} } @article{SchiekDrepperEngbertetal.1998, author = {Schiek, Michael and Drepper, Friedhelm R. and Engbert, Ralf and Suder, Katrin and Abel, Hans-Henning}, title = {Cardiorespiratory Synchronization}, isbn = {3-540-63481-9}, year = {1998}, abstract = {The complex behaviour of cardiorespiratory dynamics is shown to be related to the interaction between several physiological oscillators. This study is based on electrocardiogram and respiratory flow data obtained from 3 different subjects during paced breathing at 10 different pacing cycle lengths ranging from 5 s to 12 s. Two different methods ideally suited for the analysis of synchronization pattern of coupled oscillators are applied: 1. Symbolic dynamics based on symbol coding adapted for the detection of respiratory modulation of cardiac parasympathetic activity discloses two regimes of different synchronization behaviour within the frequency area corresponding to the Arnold tongue of 1:1 frequency-locking between respiratory flow and respiratory heartbeat variation (respiratory sinus arrhythmia). 2. The analysis of the phase shift between respiratory flow and respiratory sinus arrhythmia indicates that synchronization is not a static but a dynamic phenomenon. The observed dependence of the phase shift on respiratory cycle length shows large inter-individual variation.
These findings turn out to be further hints for the existence of an additional central oscillator in the frequency range of respiration interacting with the central respiratory oscillator driving mechanical respiration.}, language = {en} } @article{ScheffczykKrampeEngbertetal.1997, author = {Scheffczyk, Christian and Krampe, Ralf-Thomas and Engbert, Ralf and Rosenblum, Michael and Kurths, J{\"u}rgen and Kliegl, Reinhold}, title = {Tempo-induced transitions in polyrhythmic hand movements}, year = {1997}, abstract = {We investigate the cognitive control in polyrhythmic hand movements as a model paradigm for bimanual coordination. Using a symbolic coding of the recorded time series, we demonstrate the existence of qualitative transitions induced by experimental manipulation of the tempo. A nonlinear model with delayed feedback control is proposed, which accounts for these dynamical transitions in terms of bifurcations resulting from variation of the external control parameter. Furthermore, it is shown that transitions can also be observed due to fluctuations in the timing control level. We conclude that the complexity of coordinated bimanual movements results from interactions between nonlinear control mechanisms with delayed feedback and stochastic timing components.}, language = {en} } @article{ScheffczykEngbertKurthsetal.1995, author = {Scheffczyk, Christian and Engbert, Ralf and Kurths, J{\"u}rgen and Krampe, Ralf-Thomas and Kliegl, Reinhold}, title = {Nonlinear Phenomena in Polyrhythmic Hand Movements}, isbn = {981-02-2689-6}, year = {1995}, abstract = {In this paper we apply symbolic transformations as a visualisation technique for analysing rhythm production. It is shown that qualitative information can be extracted from the experimental data. This approach may provide new insights into the organisation of temporal order by the brain on different levels of description. A simple phenomenological model for the explanation of the observed phenomena is proposed.}, language = {en} }