@article{SchuttWichmann2017,
  author    = {Schutt, Heiko Herbert and Wichmann, Felix A.},
  title     = {An image-computable psychophysical spatial vision model},
  journal   = {Journal of Vision},
  volume    = {17},
  number    = {12},
  pages     = {35},
  year      = {2017},
  publisher = {Association for Research in Vision and Ophthalmology},
  address   = {Rockville},
  issn      = {1534-7362},
  doi       = {10.1167/17.12.12},
  abstract  = {A large part of classical visual psychophysics was concerned with the fundamental question of how pattern information is initially encoded in the human visual system. From these studies a relatively standard model of early spatial vision emerged, based on spatial frequency and orientation-specific channels followed by an accelerating nonlinearity and divisive normalization: contrast gain-control. Here we implement such a model in an image-computable way, allowing it to take arbitrary luminance images as input. Testing our implementation on classical psychophysical data, we find that it explains contrast detection data including the ModelFest data, contrast discrimination data, and oblique masking data, using a single set of parameters. Leveraging the advantage of an image-computable model, we test our model against a recent dataset using natural images as masks. We find that the model explains these data reasonably well, too. To explain data obtained at different presentation durations, our model requires different parameters to achieve an acceptable fit. In addition, we show that contrast gain-control with the fitted parameters results in a very sparse encoding of luminance information, in line with notions from efficient coding. Translating the standard early spatial vision model to be image-computable resulted in two further insights: First, the nonlinear processing requires a denser sampling of spatial frequency and orientation than optimal coding suggests. Second, the normalization needs to be fairly local in space to fit the data obtained with natural image masks. Finally, our image-computable model can serve as tool in future quantitative analyses: It allows optimized stimuli to be used to test the model and variants of it, with potential applications as an image-quality metric. In addition, it may serve as a building block for models of higher level processing.},
  language  = {en},
}

@article{LaubrockKinder2014,
  author    = {Laubrock, Jochen and Kinder, Annette},
  title     = {Incidental sequence learning in a motion coherence discrimination task: How response learning affects perception},
  journal   = {Journal of Experimental Psychology: Human Perception and Performance},
  volume    = {40},
  number    = {5},
  pages     = {1963--1977},
  year      = {2014},
  publisher = {American Psychological Association},
  address   = {Washington},
  issn      = {0096-1523},
  doi       = {10.1037/a0037315},
  abstract  = {The serial reaction time task (SRTT) is a standard task used to investigate incidental sequence learning. Whereas incidental learning of motor sequences is well-established, few and disputed results support learning of perceptual sequences. Here we adapt a motion coherence discrimination task (Newsome \& Pare, 1988) to the sequence learning paradigm. The new task has 2 advantages: (a) the stimulus is presented at fixation, thereby obviating overt eye movements, and (b) by varying coherence a perceptual threshold measure is available in addition to the performance measure of RT. Results from 3 experiments show that action relevance of the sequence is necessary for sequence learning to occur, that the amount of sequence knowledge varies with the ease of encoding the motor sequence, and that sequence knowledge, once acquired, has the ability to modify perceptual thresholds.},
  language  = {en},
}