@article{EiserbeckEngeRabovskyetal.2022,
  author    = {Eiserbeck, Anna and Enge, Alexander and Rabovsky, Milena and Abdel Rahman, Rasha},
  title     = {Electrophysiological chronometry of graded consciousness during the attentional blink},
  journal   = {Cerebral Cortex},
  volume    = {32},
  number    = {6},
  pages     = {1244--1259},
  year      = {2022},
  publisher = {Oxford University Press},
  address   = {New York, NY},
  issn      = {1047-3211},
  doi       = {10.1093/cercor/bhab289},
  abstract  = {One of the ongoing debates about visual consciousness is whether it can be considered as an all-or-none or a graded phenomenon. While there is increasing evidence for the existence of graded states of conscious awareness based on paradigms such as visual masking, only little and mixed evidence is available for the attentional blink paradigm, specifically in regard to electrophysiological measures. Thereby, the all-or-none pattern reported in some attentional blink studies might have originated from specifics of the experimental design, suggesting the need to examine the generalizability of results. In the present event-related potential (ERP) study (N = 32), visual awareness of T2 face targets was assessed via subjective visibility ratings on a perceptual awareness scale in combination with ERPs time-locked to T2 onset (components P1, N1, N2, and P3). Furthermore, a classification task preceding visibility ratings allowed to track task performance. The behavioral results indicate a graded rather than an all-or-none pattern of visual awareness. Corresponding graded differences in the N1, N2, and P3 components were observed for the comparison of visibility levels. These findings suggest that conscious perception during the attentional blink can occur in a graded fashion.},
  language  = {en},
}

@article{AdolfsHoqueShprits2022,
  author    = {Adolfs, Marjolijn and Hoque, Mohammed Mainul and Shprits, Yuri Y.},
  title     = {Storm-time relative total electron content modelling using machine learning techniques},
  journal   = {Remote Sensing},
  volume    = {14},
  number    = {23},
  pages     = {17},
  year      = {2022},
  publisher = {MDPI},
  address   = {Basel},
  issn      = {2072-4292},
  doi       = {10.3390/rs14236155},
  abstract  = {Accurately predicting total electron content (TEC) during geomagnetic storms is still a challenging task for ionospheric models. In this work, a neural-network (NN)-based model is proposed which predicts relative TEC with respect to the preceding 27-day median TEC, during storm time for the European region (with longitudes 30 degrees W--50 degrees E and latitudes 32.5 degrees N--70 degrees N). The 27-day median TEC (referred to as median TEC), latitude, longitude, universal time, storm time, solar radio flux index F10.7, global storm index SYM-H and geomagnetic activity index Hp30 are used as inputs and the output of the network is the relative TEC. The relative TEC can be converted to the actual TEC knowing the median TEC. The median TEC is calculated at each grid point over the European region considering data from the last 27 days before the storm using global ionosphere maps (GIMs) from international GNSS service (IGS) sources. A storm event is defined when the storm time disturbance index Dst drops below -50 nanotesla. The model was trained with storm-time relative TEC data from the time period of 1998 until 2019 (2015 is excluded) and contains 365 storms. Unseen storm data from 33 storm events during 2015 and 2020 were used to test the model. The UQRG GIMs were used because of their high temporal resolution (15 min) compared to other products from different analysis centers. The NN-based model predictions show the seasonal behavior of the storms including positive and negative storm phases during winter and summer, respectively, and show a mixture of both phases during equinoxes. The model's performance was also compared with the Neustrelitz TEC model (NTCM) and the NN-based quiet-time TEC model, both developed at the German Aerospace Agency (DLR). The storm model has a root mean squared error (RMSE) of 3.38 TEC units (TECU), which is an improvement by 1.87 TECU compared to the NTCM, where an RMSE of 5.25 TECU was found. This improvement corresponds to a performance increase by 35.6\%. The storm-time model outperforms the quiet-time model by 1.34 TECU, which corresponds to a performance increase by 28.4\% from 4.72 to 3.38 TECU. The quiet-time model was trained with Carrington averaged TEC and, therefore, is ideal to be used as an input instead of the GIM derived 27-day median. We found an improvement by 0.8 TECU which corresponds to a performance increase by 17\% from 4.72 to 3.92 TECU for the storm-time model using the quiet-time-model predicted TEC as an input compared to solely using the quiet-time model.},
  language  = {en},
}

@article{LosterKoumarelasNaumann2021,
  author    = {Loster, Michael and Koumarelas, Ioannis and Naumann, Felix},
  title     = {Knowledge transfer for entity resolution with {Siamese} neural networks},
  journal   = {ACM Journal of Data and Information Quality},
  volume    = {13},
  number    = {1},
  pages     = {25},
  year      = {2021},
  publisher = {Association for Computing Machinery},
  address   = {New York},
  issn      = {1936-1955},
  doi       = {10.1145/3410157},
  abstract  = {The integration of multiple data sources is a common problem in a large variety of applications. Traditionally, handcrafted similarity measures are used to discover, merge, and integrate multiple representations of the same entity---duplicates---into a large homogeneous collection of data. Often, these similarity measures do not cope well with the heterogeneity of the underlying dataset. In addition, domain experts are needed to manually design and configure such measures, which is both time-consuming and requires extensive domain expertise. We propose a deep Siamese neural network, capable of learning a similarity measure that is tailored to the characteristics of a particular dataset. With the properties of deep learning methods, we are able to eliminate the manual feature engineering process and thus considerably reduce the effort required for model construction. In addition, we show that it is possible to transfer knowledge acquired during the deduplication of one dataset to another, and thus significantly reduce the amount of data required to train a similarity measure. We evaluated our method on multiple datasets and compare our approach to state-of-the-art deduplication methods. Our approach outperforms competitors by up to +26 percent F-measure, depending on task and dataset. In addition, we show that knowledge transfer is not only feasible, but in our experiments led to an improvement in F-measure of up to +4.7 percent.},
  language  = {en},
}