@incollection{StoberSternin2018,
  author    = {Stober, Sebastian and Sternin, Avital},
  title     = {Decoding Music Perception and Imagination Using Deep-Learning Techniques},
  booktitle = {Signal Processing and Machine Learning for Brain-Machine Interfaces},
  volume    = {114},
  publisher = {Institution of Engineering and Technology},
  address   = {London},
  isbn      = {978-1-78561-399-9},
  doi       = {10.1049/PBCE114E},
  pages     = {271--299},
  year      = {2018},
  abstract  = {Deep learning is a sub-field of machine learning that has recently gained substantial popularity in various domains such as computer vision, automatic speech recognition, natural language processing, and bioinformatics. Deep-learning techniques are able to learn complex feature representations from raw signals and thus also have potential to improve signal processing in the context of brain-computer interfaces (BCIs). However, they typically require large amounts of data for training - much more than what can often be provided with reasonable effort when working with brain activity recordings of any kind. In order to still leverage the power of deep-learning techniques with limited available data, special care needs to be taken when designing the BCI task, defining the structure of the deep model, and choosing the training method. This chapter presents example approaches for the specific scenario of music-based brain-computer interaction through electroencephalography - in the hope that these will prove to be valuable in different settings as well. We explain important decisions for the design of the BCI task and their impact on the models and training techniques that can be used. Furthermore, we present and compare various pre-training techniques that aim to improve the signal-to-noise ratio. Finally, we discuss approaches to interpret the trained models.},
  language  = {en},
}

@article{Stober2017,
  author    = {Stober, Sebastian},
  title     = {Toward Studying Music Cognition with Information Retrieval Techniques: Lessons Learned from the {OpenMIIR} Initiative},
  journal   = {Frontiers in Psychology},
  volume    = {8},
  publisher = {Frontiers Research Foundation},
  address   = {Lausanne},
  issn      = {1664-1078},
  doi       = {10.3389/fpsyg.2017.01255},
  pages     = {17},
  year      = {2017},
  abstract  = {As an emerging sub-field of music information retrieval (MIR), music imagery information retrieval (MIIR) aims to retrieve information from brain activity recorded during music cognition-such as listening to or imagining music pieces. This is a highly interdisciplinary endeavor that requires expertise in MIR as well as cognitive neuroscience and psychology. The OpenMIIR initiative strives to foster collaborations between these fields to advance the state of the art in MIIR. As a first step, electroencephalography (EEG) recordings of music perception and imagination have been made publicly available, enabling MIR researchers to easily test and adapt their existing approaches for music analysis like fingerprinting, beat tracking or tempo estimation on this new kind of data. This paper reports on first results of MIIR experiments using these OpenMIIR datasets and points out how these findings could drive new research in cognitive neuroscience.},
  language  = {en},
}

@incollection{Stober2017a,
  author    = {Stober, Sebastian},
  title     = {Model-Based Frameworks for User Adapted Information Exploration},
  booktitle = {Companion Technology: A Paradigm Shift in Human-Technology Interaction},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-319-43664-7},
  pages     = {37--56},
  year      = {2017},
  language  = {en},
}

@article{Stober2017b,
  author        = {Stober, Sebastian},
  title         = {Toward Studying Music Cognition with Information Retrieval Techniques},
  journal       = {Frontiers in Psychology},
  volume        = {8},
  publisher     = {Frontiers Research Foundation},
  address       = {Lausanne},
  issn          = {1664-1078},
  doi           = {10.3389/fpsyg.2017.01255},
  year          = {2017},
  abstract      = {As an emerging sub-field of music information retrieval (MIR), music imagery information retrieval (MIIR) aims to retrieve information from brain activity recorded during music cognition-such as listening to or imagining music pieces. This is a highly inter-disciplinary endeavor that requires expertise in MIR as well as cognitive neuroscience and psychology. The OpenMIIR initiative strives to foster collaborations between these fields to advance the state of the art in MIIR. As a first step, electroencephalography (EEG) recordings of music perception and imagination have been made publicly available, enabling MIR researchers to easily test and adapt their existing approaches for music analysis like fingerprinting, beat tracking or tempo estimation on this new kind of data. This paper reports on first results of MIIR experiments using these OpenMIIR datasets and points out how these findings could drive new research in cognitive neuroscience.},
  internal-note = {NOTE(review): near-duplicate of Stober2017 (same DOI 10.3389/fpsyg.2017.01255, truncated title, missing pages) - consider merging and dropping this entry},
  language      = {en},
}