@comment{Cleaned repository export: one field per line, pages use "--",
  removed `series` fields that duplicated `journal`, and normalised the
  catalog-style journal strings to the journals' canonical names.}

@article{JaegerGraupnerPelchenetal.2018,
  author    = {Jaeger, David and Graupner, Hendrik and Pelchen, Chris and Cheng, Feng and Meinel, Christoph},
  title     = {Fast Automated Processing and Evaluation of Identity Leaks},
  journal   = {International Journal of Parallel Programming},
  volume    = {46},
  number    = {2},
  pages     = {441--470},
  year      = {2018},
  publisher = {Springer},
  address   = {New York},
  issn      = {0885-7458},
  doi       = {10.1007/s10766-016-0478-6},
  abstract  = {The relevance of identity data leaks on the Internet is more present than ever. Almost every week we read about leakage of databases with more than a million users in the news. Smaller but not less dangerous leaks happen even multiple times a day. The public availability of such leaked data is a major threat to the victims, but also creates the opportunity to learn not only about security of service providers but also the behavior of users when choosing passwords. Our goal is to analyze this data and generate knowledge that can be used to increase security awareness and security, respectively. This paper presents a novel approach to the processing and analysis of a vast majority of bigger and smaller leaks. We evolved from a semi-manual to a fully automated process that requires a minimum of human interaction. Our contribution is the concept and a prototype implementation of a leak processing workflow that includes the extraction of digital identities from structured and unstructured leak-files, the identification of hash routines and a quality control to ensure leak authenticity. By making use of parallel and distributed programming, we are able to make leaks almost immediately available for analysis and notification after they have been published. Based on the data collected, this paper reveals how easy it is for criminals to collect lots of passwords, which are plain text or only weakly hashed. We publish those results and hope to increase not only security awareness of Internet users but also security on a technical level on the service provider side.},
  language  = {en},
}

@article{LogacevVasishth2016,
  author    = {Logacev, Pavel and Vasishth, Shravan},
  title     = {A Multiple-Channel Model of Task-Dependent Ambiguity Resolution in Sentence Comprehension},
  journal   = {Cognitive Science},
  volume    = {40},
  pages     = {266--298},
  year      = {2016},
  publisher = {Wiley-Blackwell},
  address   = {Hoboken},
  issn      = {0364-0213},
  doi       = {10.1111/cogs.12228},
  abstract  = {Traxler, Pickering, and Clifton (1998) found that ambiguous sentences are read faster than their unambiguous counterparts. This so-called ambiguity advantage has presented a major challenge to classical theories of human sentence comprehension (parsing) because its most prominent explanation, in the form of the unrestricted race model (URM), assumes that parsing is non-deterministic. Recently, Swets, Desmet, Clifton, and Ferreira (2008) have challenged the URM. They argue that readers strategically underspecify the representation of ambiguous sentences to save time, unless disambiguation is required by task demands. When disambiguation is required, however, readers assign sentences full structure—and Swets et al. provide experimental evidence to this end. On the basis of their findings, they argue against the URM and in favor of a model of task-dependent sentence comprehension. We show through simulations that the Swets et al. data do not constitute evidence for task-dependent parsing because they can be explained by the URM. However, we provide decisive evidence from a German self-paced reading study consistent with Swets et al.'s general claim about task-dependent parsing. Specifically, we show that under certain conditions, ambiguous sentences can be read more slowly than their unambiguous counterparts, suggesting that the parser may create several parses, when required. Finally, we present the first quantitative model of task-driven disambiguation that subsumes the URM, and we show that it can explain both Swets et al.'s results and our findings.},
  language  = {en},
}

@article{GoetheOberauerKliegl2016,
  author    = {G{\"o}the, Katrin and Oberauer, Klaus and Kliegl, Reinhold},
  title     = {Eliminating dual-task costs by minimizing crosstalk between tasks: The role of modality and feature pairings},
  journal   = {Cognition},
  volume    = {150},
  pages     = {92--108},
  year      = {2016},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0010-0277},
  doi       = {10.1016/j.cognition.2016.02.003},
  abstract  = {We tested the independent influences of two content-based factors on dual-task costs, and on the parallel processing ability: The pairing of S-R modalities and the pairing of relevant features between stimuli and responses of two tasks. The two pairing factors were realized across four dual-task groups. Within each group the two tasks comprised two different stimulus modalities (visual and auditory), two different relevant stimulus features (spatial and verbal) and two response modalities (manual and vocal). Pairings of S-R modalities (standard: visual-manual and auditory-vocal, non-standard: visual-vocal and auditory manual) and feature pairings (standard: spatial-manual and verbal-vocal, non-standard: spatial-vocal and verbal-manual) varied across groups. All participants practiced their respective dual-task combination in a paradigm with simultaneous stimulus onset before being transferred to a psychological refractory period paradigm varying stimulus-onset asynchrony. A comparison at the end of practice revealed similar dual-task costs and similar pairing effects in both paradigms. Dual-task costs depended on modality and feature pairings. Groups training with non-standard feature pairings (i.e., verbal stimulus features mapped to spatially separated response keys, or spatial stimulus features mapped to verbal responses) and non-standard modality pairings (i.e., auditory stimulus mapped to manual response, or visual stimulus mapped to vocal responses) had higher dual-task costs than respective standard pairings. In contrast, irrespective of modality pairing dual-task costs virtually disappeared with standard feature pairings after practice in both paradigms. The results can be explained by crosstalk between feature-binding processes for the two tasks. Crosstalk was present for non-standard but absent for standard feature pairings. Therefore, standard feature pairings enabled parallel processing at the end of practice. (C) 2016 Elsevier B.V. All rights reserved.},
  language  = {en},
}