@article{WeymarBradleySegeetal.2018,
  author    = {Weymar, Mathias and Bradley, Margaret M. and Sege, Christopher T. and Lang, Peter J.},
  title     = {Neural activation and memory for natural scenes},
  series    = {Psychophysiology : journal of the Society for Psychophysiological Research},
  volume    = {55},
  journal   = {Psychophysiology : journal of the Society for Psychophysiological Research},
  number    = {10},
  publisher = {Wiley},
  address   = {Hoboken},
  issn      = {0048-5772},
  doi       = {10.1111/psyp.13197},
  pages     = {12},
  year      = {2018},
  abstract  = {Stimulus repetition elicits either enhancement or suppression in neural activity, and a recent fMRI meta-analysis of repetition effects for visual stimuli (Kim, 2017) reported cross-stimulus repetition enhancement in medial and lateral parietal cortex, as well as regions of prefrontal, temporal, and posterior cingulate cortex. Repetition enhancement was assessed here for repeated and novel scenes presented in the context of either an explicit episodic recognition task or an implicit judgment task, in order to study the role of spontaneous retrieval of episodic memories. Regardless of whether episodic memory was explicitly probed or not, repetition enhancement was found in medial posterior parietal (precuneus/cuneus), lateral parietal cortex (angular gyrus), as well as in medial prefrontal cortex (frontopolar), which did not differ by task. Enhancement effects in the posterior cingulate cortex were significantly larger during explicit compared to implicit task, primarily due to a lack of functional activity for new scenes. Taken together, the data are consistent with an interpretation that medial and (ventral) lateral parietal cortex are associated with spontaneous episodic retrieval, whereas posterior cingulate cortical regions may reflect task or decision processes.},
  language  = {en}
}

@phdthesis{Samaras2016,
  author   = {Samaras, Stefanos},
  title    = {Microphysical retrieval of non-spherical aerosol particles using regularized inversion of multi-wavelength lidar data},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396528},
  school   = {Universit{\"a}t Potsdam},
  pages    = {xiv, 190},
  year     = {2016},
  abstract = {Numerous reports of relatively rapid climate changes over the past century make a clear case of the impact of aerosols and clouds, identified as sources of largest uncertainty in climate projections. Earth's radiation balance is altered by aerosols depending on their size, morphology and chemical composition. Competing effects in the atmosphere can be further studied by investigating the evolution of aerosol microphysical properties, which are the focus of the present work. The aerosol size distribution, the refractive index, and the single scattering albedo are commonly used such properties linked to aerosol type, and radiative forcing. Highly advanced lidars (light detection and ranging) have reduced aerosol monitoring and optical profiling into a routine process. Lidar data have been widely used to retrieve the size distribution through the inversion of the so-called Lorenz-Mie model (LMM). This model offers a reasonable treatment for spherically approximated particles, it no longer provides, though, a viable description for other naturally occurring arbitrarily shaped particles, such as dust particles. On the other hand, non-spherical geometries as simple as spheroids reproduce certain optical properties with enhanced accuracy. Motivated by this, we adapt the LMM to accommodate the spheroid-particle approximation introducing the notion of a two-dimensional (2D) shape-size distribution. Inverting only a few optical data points to retrieve the shape-size distribution is classified as a non-linear ill-posed problem. A brief mathematical analysis is presented which reveals the inherent tendency towards highly oscillatory solutions, explores the available options for a generalized solution through regularization methods and quantifies the ill-posedness. The latter will improve our understanding on the main cause fomenting instability in the produced solution spaces. The new approach facilitates the exploitation of additional lidar data points from depolarization measurements, associated with particle non-sphericity. However, the generalization of LMM vastly increases the complexity of the problem. The underlying theory for the calculation of the involved optical cross sections (T-matrix theory) is computationally so costly, that would limit a retrieval analysis to an unpractical point. Moreover the discretization of the model equation by a 2D collocation method, proposed in this work, involves double integrations which are further time consuming. We overcome these difficulties by using precalculated databases and a sophisticated retrieval software (SphInX: Spheroidal Inversion eXperiments) especially developed for our purposes, capable of performing multiple-dataset inversions and producing a wide range of microphysical retrieval outputs. Hybrid regularization in conjunction with minimization processes is used as a basis for our algorithms. Synthetic data retrievals are performed simulating various atmospheric scenarios in order to test the efficiency of different regularization methods. The gap in contemporary literature in providing full sets of uncertainties in a wide variety of numerical instances is of major concern here. For this, the most appropriate methods are identified through a thorough analysis on an overall-behavior basis regarding accuracy and stability. The general trend of the initial size distributions is captured in our numerical experiments and the reconstruction quality depends on data error level. Moreover, the need for more or less depolarization points is explored for the first time from the point of view of the microphysical retrieval. Finally, our approach is tested in various measurement cases giving further insight for future algorithm improvements.},
  language = {en}
}

@comment{The following three Paape entries describe versions of the same work
(the two articles share DOI 10.3389/fpsyg.2016.00027; the @misc is the
repository postprint). They originally all carried the duplicate key
"Paape2016", which is an error for BibTeX and Biber. The first keeps the
original key so existing citations still resolve; the others are
disambiguated with a/b suffixes. Consider consolidating them.}

@article{Paape2016,
  author    = {Paape, Dario L. J. F.},
  title     = {Filling the Silence: Reactivation, not Reconstruction},
  series    = {Frontiers in psychology},
  volume    = {7},
  journal   = {Frontiers in psychology},
  publisher = {Frontiers Research Foundation},
  address   = {Lausanne},
  issn      = {1664-1078},
  doi       = {10.3389/fpsyg.2016.00027},
  pages     = {18},
  year      = {2016},
  abstract  = {In a self-paced reading experiment, we investigated the processing of sluicing constructions ("sluices") whose antecedent contained a known garden path structure in German. Results showed decreased processing times for sluices with garden-path antecedents as well as a disadvantage for antecedents with non-canonical word order downstream from the ellipsis site. A post-hoc analysis showed the garden-path advantage also to be present in the region right before the ellipsis site. While no existing account of ellipsis processing explicitly predicted the results, we argue that they are best captured by combining a local antecedent mismatch effect with memory trace reactivation through reanalysis.},
  language  = {en}
}

@article{Paape2016a,
  author    = {Paape, Dario L. J. F.},
  title     = {Filling the Silence},
  series    = {Frontiers in psychology},
  volume    = {7},
  journal   = {Frontiers in psychology},
  publisher = {Frontiers Research Foundation},
  address   = {Lausanne},
  issn      = {1664-1078},
  doi       = {10.3389/fpsyg.2016.00027},
  pages     = {1--18},
  year      = {2016},
  abstract  = {In a self-paced reading experiment, we investigated the processing of sluicing constructions ("sluices") whose antecedent contained a known garden-path structure in German. Results showed decreased processing times for sluices with garden-path antecedents as well as a disadvantage for antecedents with non-canonical word order downstream from the ellipsis site. A post-hoc analysis showed the garden-path advantage also to be present in the region right before the ellipsis site. While no existing account of ellipsis processing explicitly predicted the results, we argue that they are best captured by combining a local antecedent mismatch effect with memory trace reactivation through reanalysis.},
  language  = {en}
}

@misc{Paape2016b,
  author   = {Paape, Dario L. J. F.},
  title    = {Filling the Silence},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90480},
  year     = {2016},
  abstract = {In a self-paced reading experiment, we investigated the processing of sluicing constructions ("sluices") whose antecedent contained a known garden-path structure in German. Results showed decreased processing times for sluices with garden-path antecedents as well as a disadvantage for antecedents with non-canonical word order downstream from the ellipsis site. A post-hoc analysis showed the garden-path advantage also to be present in the region right before the ellipsis site. While no existing account of ellipsis processing explicitly predicted the results, we argue that they are best captured by combining a local antecedent mismatch effect with memory trace reactivation through reanalysis.},
  language = {en}
}

@misc{MuellerBoeckmannKolgotinetal.2016,
  author   = {M{\"u}ller, Detlef and B{\"o}ckmann, Christine and Kolgotin, Alexei and Schneidenbach, Lars and Chemyakin, Eduard and Rosemann, Julia and Znak, Pavel and Romanov, Anton},
  title    = {Microphysical particle properties derived from inversion algorithms developed in the framework of {EARLINET}},
  series   = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe},
  journal  = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe},
  number   = {565},
  issn     = {1866-8372},
  doi      = {10.25932/publishup-41193},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411934},
  pages    = {29},
  year     = {2016},
  abstract = {We present a summary on the current status of two inversion algorithms that are used in EARLINET (European Aerosol Research Lidar Network) for the inversion of data collected with EARLINET multiwavelength Raman lidars. These instruments measure backscatter coefficients at 355, 532, and 1064 nm, and extinction coefficients at 355 and 532 nm. Development of these two algorithms started in 2000 when EARLINET was founded. The algorithms are based on a manually controlled inversion of optical data which allows for detailed sensitivity studies. The algorithms allow us to derive particle effective radius as well as volume and surface area concentration with comparably high confidence. The retrieval of the real and imaginary parts of the complex refractive index still is a challenge in view of the accuracy required for these parameters in climate change studies in which light absorption needs to be known with high accuracy. It is an extreme challenge to retrieve the real part with an accuracy better than 0.05 and the imaginary part with accuracy better than 0.005-0.1 or +/- 50 \%. Single-scattering albedo can be computed from the retrieved microphysical parameters and allows us to categorize aerosols into high-and low-absorbing aerosols. On the basis of a few exemplary simulations with synthetic optical data we discuss the current status of these manually operated algorithms, the potentially achievable accuracy of data products, and the goals for future work. One algorithm was used with the purpose of testing how well microphysical parameters can be derived if the real part of the complex refractive index is known to at least 0.05 or 0.1. The other algorithm was used to find out how well microphysical parameters can be derived if this constraint for the real part is not applied. The optical data used in our study cover a range of Angstrom exponents and extinction-to-backscatter (lidar) ratios that are found from lidar measurements of various aerosol types. We also tested aerosol scenarios that are considered highly unlikely, e.g. the lidar ratios fall outside the commonly accepted range of values measured with Raman lidar, even though the underlying microphysical particle properties are not uncommon. The goal of this part of the study is to test the robustness of the algorithms towards their ability to identify aerosol types that have not been measured so far, but cannot be ruled out based on our current knowledge of aerosol physics. We computed the optical data from monomodal logarithmic particle size distributions, i.e. we explicitly excluded the more complicated case of bimodal particle size distributions which is a topic of ongoing research work. Another constraint is that we only considered particles of spherical shape in our simulations. We considered particle radii as large as 7-10 mu m in our simulations where the Potsdam algorithm is limited to the lower value. We considered optical-data errors of 15\% in the simulation studies. We target 50\% uncertainty as a reasonable threshold for our data products, though we attempt to obtain data products with less uncertainty in future work.},
  language = {en}
}

@phdthesis{Kappel2015,
  author   = {Kappel, David},
  title    = {Multi-spectrum retrieval of maps of {Venus'} surface emissivity in the infrared},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-85301},
  school   = {Universit{\"a}t Potsdam},
  pages    = {xix, 226},
  year     = {2015},
  abstract = {The main goal of this cumulative thesis is the derivation of surface emissivity data in the infrared from radiance measurements of Venus. Since these data are diagnostic of the chemical composition and grain size of the surface material, they can help to improve knowledge of the planet's geology. Spectrally resolved images of nightside emissions in the range 1.0-5.1 μm were recently acquired by the InfraRed Mapping channel of the Visible and InfraRed Thermal Imaging Spectrometer (VIRTIS-M-IR) aboard ESA's Venus EXpress (VEX). Surface and deep atmospheric thermal emissions in this spectral range are strongly obscured by the extremely opaque atmosphere, but three narrow spectral windows at 1.02, 1.10, and 1.18 μm allow the sounding of the surface. Additional windows between 1.3 and 2.6 μm provide information on atmospheric parameters that is required to interpret the surface signals. Quantitative data on surface and atmosphere can be retrieved from the measured spectra by comparing them to simulated spectra. A numerical radiative transfer model is used in this work to simulate the observable radiation as a function of atmospheric, surface, and instrumental parameters. It is a line-by-line model taking into account thermal emissions by surface and atmosphere as well as absorption and multiple scattering by gases and clouds. The VIRTIS-M-IR measurements are first preprocessed to obtain an optimal data basis for the subsequent steps. In this process, a detailed detector responsivity analysis enables the optimization of the data consistency. The measurement data have a relatively low spectral information content, and different parameter vectors can describe the same measured spectrum equally well. A usual method to regularize the retrieval of the wanted parameters from a measured spectrum is to take into account a priori mean values and standard deviations of the parameters to be retrieved. This decreases the probability to obtain unreasonable parameter values. The multi-spectrum retrieval algorithm MSR is developed to additionally consider physically realistic spatial and temporal a priori correlations between retrieval parameters describing different measurements. Neglecting geologic activity, MSR also allows the retrieval of an emissivity map as a parameter vector that is common to several spectrally resolved images that cover the same surface target. Even applying MSR, it is difficult to obtain reliable emissivity maps in absolute values. A detailed retrieval error analysis based on synthetic spectra reveals that this is mainly due to interferences from parameters that cannot be derived from the spectra themselves, but that have to be set to assumed values to enable the radiative transfer simulations. The MSR retrieval of emissivity maps relative to a fixed emissivity is shown to effectively avoid most emissivity retrieval errors. Relative emissivity maps at 1.02, 1.10, and 1.18 μm are finally derived from many VIRTIS-M-IR measurements that cover a surface target at Themis Regio. They are interpreted as spatial variations relative to an assumed emissivity mean of the target. It is verified that the maps are largely independent of the choice of many interfering parameters as well as the utilized measurement data set. These are the first Venus IR emissivity data maps based on a consistent application of a full radiative transfer simulation and a retrieval algorithm that respects a priori information. The maps are sufficiently reliable for future geologic interpretations.},
  language = {en}
}

@article{HofmeisterVasishth2014,
  author    = {Hofmeister, Philip and Vasishth, Shravan},
  title     = {Distinctiveness and encoding effects in online sentence comprehension},
  series    = {Frontiers in psychology},
  volume    = {5},
  journal   = {Frontiers in psychology},
  publisher = {Frontiers Research Foundation},
  address   = {Lausanne},
  issn      = {1664-1078},
  doi       = {10.3389/fpsyg.2014.01237},
  pages     = {13},
  year      = {2014},
  abstract  = {In explicit memory recall and recognition tasks, elaboration and contextual isolation both facilitate memory performance. Here, we investigate these effects in the context of sentence processing: targets for retrieval during online sentence processing of English object relative clause constructions differ in the amount of elaboration associated with the target noun phrase, or the homogeneity of superficial features (text color). Experiment 1 shows that greater elaboration for targets during the encoding phase reduces reading times at retrieval sites, but elaboration of non-targets has considerably weaker effects. Experiment 2 illustrates that processing isolated superficial features of target noun phrases-here, a green word in a sentence with words colored white-does not lead to enhanced memory performance, despite triggering longer encoding times. These results are interpreted in the light of the memory models of Nairne, 1990, 2001, 2006, which state that encoding remnants contribute to the set of retrieval cues that provide the basis for similarity-based interference effects.},
  language  = {en}
}

@article{FyndanisArcaraCapassoetal.2018,
  author    = {Fyndanis, Valantis and Arcara, Giorgio and Capasso, Rita and Christidou, Paraskevi and De Pellegrin, Serena and Gandolfi, Marialuisa and Messinis, Lambros and Panagea, Evgenia and Papathanasopoulos, Panagiotis and Smania, Nicola and Semenza, Carlo and Miceli, Gabriele},
  title     = {Time reference in nonfluent and fluent aphasia},
  series    = {Clinical linguistics \& phonetics},
  volume    = {32},
  journal   = {Clinical linguistics \& phonetics},
  number    = {9},
  publisher = {Taylor \& Francis Group},
  address   = {Philadelphia},
  issn      = {0269-9206},
  doi       = {10.1080/02699206.2018.1445291},
  pages     = {823--843},
  year      = {2018},
  abstract  = {Recent studies by Bastiaanse and colleagues found that time reference is selectively impaired in people with nonfluent agrammatic aphasia, with reference to the past being more difficult to process than reference to the present or to the future. To account for this dissociation, they formulated the PAst DIscourse LInking Hypothesis (PADILIH), which posits that past reference is more demanding than present/future reference because it involves discourse linking. There is some evidence that this hypothesis can be applied to people with fluent aphasia as well. However, the existing evidence for the PADILIH is contradictory, and most of it has been provided by employing a test that predominantly taps retrieval processes, leaving largely unexplored the underlying ability to encode time reference-related prephonological features. Within a cross-linguistic approach, this study tests the PADILIH by means of a sentence completion task that 'equally' taps encoding and retrieval abilities. This study also investigates if the PADILIH's scope can be extended to fluent aphasia. Greek- and Italian-speaking individuals with aphasia participated in the study. The Greek group consisted of both individuals with nonfluent agrammatic aphasia and individuals with fluent aphasia, who also presented signs of agrammatism. The Italian group consisted of individuals with agrammatic nonfluent aphasia only. The two Greek subgroups performed similarly. Neither language group of participants with aphasia exhibited a pattern of performance consistent with the predictions of the PADILIH. However, a double dissociation observed within the Greek group suggests a hypothesis that may reconcile the present results with the PADILIH.},
  language  = {en}
}

@article{DattaSachsFreitasdaCruzetal.2021,
  author    = {Datta, Suparno and Sachs, Jan Philipp and Freitas da Cruz, Harry and Martensen, Tom and Bode, Philipp and Morassi Sasso, Ariane and Glicksberg, Benjamin S. and B{\"o}ttinger, Erwin},
  title     = {{FIBER}},
  series    = {JAMIA open},
  volume    = {4},
  journal   = {JAMIA open},
  number    = {3},
  publisher = {Oxford Univ. Press},
  address   = {Oxford},
  issn      = {2574-2531},
  doi       = {10.1093/jamiaopen/ooab048},
  pages     = {10},
  year      = {2021},
  abstract  = {Objectives: The development of clinical predictive models hinges upon the availability of comprehensive clinical data. Tapping into such resources requires considerable effort from clinicians, data scientists, and engineers. Specifically, these efforts are focused on data extraction and preprocessing steps required prior to modeling, including complex database queries. A handful of software libraries exist that can reduce this complexity by building upon data standards. However, a gap remains concerning electronic health records (EHRs) stored in star schema clinical data warehouses, an approach often adopted in practice. In this article, we introduce the FlexIBle EHR Retrieval (FIBER) tool: a Python library built on top of a star schema (i2b2) clinical data warehouse that enables flexible generation of modeling-ready cohorts as data frames. Materials and Methods: FIBER was developed on top of a large-scale star schema EHR database which contains data from 8 million patients and over 120 million encounters. To illustrate FIBER's capabilities, we present its application by building a heart surgery patient cohort with subsequent prediction of acute kidney injury (AKI) with various machine learning models. Results: Using FIBER, we were able to build the heart surgery cohort (n = 12 061), identify the patients that developed AKI (n = 1005), and automatically extract relevant features (n = 774). Finally, we trained machine learning models that achieved area under the curve values of up to 0.77 for this exemplary use case. Conclusion: FIBER is an open-source Python library developed for extracting information from star schema clinical data warehouses and reduces time-to-modeling, helping to streamline the clinical modeling process.},
  language  = {en}
}

@article{AvetisyanLagoVasishth2020,
  author    = {Avetisyan, Serine and Lago, Sol and Vasishth, Shravan},
  title     = {Does case marking affect agreement attraction in comprehension?},
  series    = {Journal of memory and language},
  volume    = {112},
  journal   = {Journal of memory and language},
  publisher = {Elsevier},
  address   = {San Diego},
  issn      = {0749-596X},
  doi       = {10.1016/j.jml.2020.104087},
  pages     = {18},
  year      = {2020},
  abstract  = {Previous studies have suggested that distinctive case marking on noun phrases reduces attraction effects in production, i.e., the tendency to produce a verb that agrees with a nonsubject noun. An important open question is whether attraction effects are modulated by case information in sentence comprehension. To address this question, we conducted three attraction experiments in Armenian, a language with a rich and productive case system. The experiments showed clear attraction effects, and they also revealed an overall role of case marking such that participants showed faster response and reading times when the nouns in the sentence had different case. However, we found little indication that distinctive case marking modulated attraction effects. We present a theoretical proposal of how case and number information may be used differentially during agreement licensing in comprehension. More generally, this work sheds light on the nature of the retrieval cues deployed when completing morphosyntactic dependencies.},
  language  = {en}
}