@phdthesis{Foshag2017, author = {Foshag, Silja Kai}, title = {"Es seye eine Forcht, was sie gestohlen ..."}, series = {Historische Zeitbilder}, number = {9}, publisher = {Morstadt}, address = {Kehl}, isbn = {978-3-88571-382-1}, school = {Universit{\"a}t Potsdam}, pages = {596}, year = {2017}, language = {de} } @phdthesis{Voelsch2017, author = {V{\"o}lsch, Juliane}, title = {"Wortabruf im Handumdrehen"?}, school = {Universit{\"a}t Potsdam}, pages = {270}, year = {2017}, language = {de} } @phdthesis{Ehrig2017, author = {Ehrig, Sebastian}, title = {3D curvature and its role on tissue organization}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2017}, abstract = {Shape change is a fundamental process occurring in biological tissues during embryonic development and regeneration of tissues and organs. This process is regulated by cells that are constrained within a complex environment of biochemical and physical cues. The spatial constraint due to geometry has a determining role on tissue mechanics and the spatial distribution of force patterns that, in turn, influences the organization of the tissue structure. An understanding of the underlying principles of tissue organization may have wide consequences for the understanding of healing processes and the development of organs and, as such, is of fundamental interest for the tissue engineering community. This thesis aims to further our understanding of how the collective behaviour of cells is influenced by the 3D geometry of the environment. Previous research studying the role of geometry on tissue growth has mainly focused either on flat surfaces or on substrates where at least one of the principal curvatures is zero. In the present work, tissue growth from MC3T3-E1 pre-osteoblasts was investigated on surfaces of controlled mean curvature. One key aspect of this thesis was the development of substrates of controlled mean curvature and their visualization in 3D. 
It was demonstrated that substrates of controlled mean curvature suitable for cell culture can be fabricated using liquid polymers and surface tension effects. Using these substrates, it was shown that the mean surface curvature has a strong impact on the rate of tissue growth and on the organization of the tissue structure. It was thereby not only demonstrated that the amount of tissue produced (i.e. growth rates) by the cells depends on the mean curvature of the substrate but also that the tissue surface behaves like a viscous fluid with an equilibrium shape governed by the Laplace-Young-law. It was observed that more tissue was formed on highly concave surfaces compared to flat or convex surfaces. Motivated by these observations, an analytical model was developed, where the rate of tissue growth is a function of the mean curvature, which could successfully describe the growth kinetics. This model was also able to reproduce the growth kinetics of previous experiments where tissues have been cultured in straight-sided prismatic pores. A second part of this thesis focuses on the tissue structure, which influences the mechanical properties of the mature bone tissue. Since the extracellular matrix is produced by the cells, the cell orientation has a strong impact on the direction of the tissue fibres. In addition, it was recently shown that some cell types exhibit collective alignment similar to liquid crystals. Based on this observation, a computational model of self-propelled active particles was developed to explore in an abstract manner how the collective behaviour of cells is influenced by 3D curvature. 
It was demonstrated that the 3D curvature has a strong impact on the self-organization of active particles and gives, therefore, first insights into the principles of self-organization of cells on curved surfaces.}, language = {en} } @phdthesis{Hakansson2017, author = {H{\aa}kansson, Nils}, title = {A Dark Matter line search using 3D-modeling of Cherenkov showers below 10 TeV with VERITAS}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397670}, school = {Universit{\"a}t Potsdam}, pages = {107, xxxvi}, year = {2017}, abstract = {Dark matter, DM, has not yet been directly observed, but it has a very solid theoretical basis. There are observations that provide indirect evidence, like galactic rotation curves that show that the galaxies are rotating too fast to keep their constituent parts, and galaxy clusters that bend the light coming from behind-lying galaxies more than expected with respect to the mass that can be calculated from what can be visibly seen. These observations, among many others, can be explained with theories that include DM. The missing piece is to detect something that can exclusively be explained by DM. Direct observation in a particle accelerator is one way and indirect detection using telescopes is another. This thesis is focused on the latter method. The Very Energetic Radiation Imaging Telescope Array System, VERITAS, is a telescope array that detects Cherenkov radiation. Theory predicts that DM particles annihilate into, e.g., a γγ pair and create a distinctive energy spectrum when detected by such telescopes, i.e., a monoenergetic line at the same energy as the particle mass. This so-called "smoking-gun" signature is sought with a sliding window line search within the sub-range ∼ 0.3 - 10 TeV of the VERITAS energy range, ∼ 0.01 - 30 TeV. 
Standard analysis within the VERITAS collaboration uses Hillas analysis and look-up tables, acquired by analysing particle simulations, to calculate the energy of the particle causing the Cherenkov shower. In this thesis, an improved analysis method has been used. Modelling each shower as a 3D Gaussian should increase the energy reconstruction quality. Five dwarf spheroidal galaxies were chosen as targets with a total of ∼ 224 hours of data. The targets were analysed individually and stacked. Particle simulations were based on two simulation packages, CARE and GrISU. Improvements have been made to the energy resolution and bias correction, up to a few percent each, in comparison to standard analysis. Nevertheless, no line with a relevant significance has been detected. The most promising line is at an energy of ∼ 422 GeV with an upper limit cross section of 8.10 · 10^-24 cm^3 s^-1 and a significance of ∼ 2.73 σ, before trials correction and ∼ 1.56 σ after. Upper limit cross sections have also been calculated for the γγ annihilation process and four other outcomes. The limits are in line with current limits using other methods, from ∼ 8.56 · 10^-26 - 6.61 · 10^-23 cm^3s^-1. Future larger telescope arrays, like the upcoming Cherenkov Telescope Array, CTA, will provide better results with the help of this analysis method.}, language = {en} } @phdthesis{Beyhl2017, author = {Beyhl, Thomas}, title = {A framework for incremental view graph maintenance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405929}, school = {Universit{\"a}t Potsdam}, pages = {VII, 293}, year = {2017}, abstract = {Nowadays, graph data models are employed, when relationships between entities have to be stored and are in the scope of queries. For each entity, this graph data model locally stores relationships to adjacent entities. Users employ graph queries to query and modify these entities and relationships. 
These graph queries employ graph patterns to lookup all subgraphs in the graph data that satisfy certain graph structures. These subgraphs are called graph pattern matches. However, this graph pattern matching is NP-complete for subgraph isomorphism. Thus, graph queries can suffer a long response time, when the number of entities and relationships in the graph data or the graph patterns increases. One possibility to improve the graph query performance is to employ graph views that keep ready graph pattern matches for complex graph queries for later retrieval. However, these graph views must be maintained by means of an incremental graph pattern matching to keep them consistent with the graph data from which they are derived, when the graph data changes. This maintenance adds subgraphs that satisfy a graph pattern to the graph views and removes subgraphs that do not satisfy a graph pattern anymore from the graph views. Current approaches for incremental graph pattern matching employ Rete networks. Rete networks are discrimination networks that enumerate and maintain all graph pattern matches of certain graph queries by employing a network of condition tests, which implement partial graph patterns that together constitute the overall graph query. Each condition test stores all subgraphs that satisfy the partial graph pattern. Thus, Rete networks suffer high memory consumptions, because they store a large number of partial graph pattern matches. But, especially these partial graph pattern matches enable Rete networks to update the stored graph pattern matches efficiently, because the network maintenance exploits the already stored partial graph pattern matches to find new graph pattern matches. However, other kinds of discrimination networks exist that can perform better in time and space than Rete networks. Currently, these other kinds of networks are not used for incremental graph pattern matching. 
This thesis employs generalized discrimination networks for incremental graph pattern matching. These discrimination networks permit a generalized network structure of condition tests to enable users to steer the trade-off between memory consumption and execution time for the incremental graph pattern matching. For that purpose, this thesis contributes a modeling language for the effective definition of generalized discrimination networks. Furthermore, this thesis contributes an efficient and scalable incremental maintenance algorithm, which updates the (partial) graph pattern matches that are stored by each condition test. Moreover, this thesis provides a modeling evaluation, which shows that the proposed modeling language enables the effective modeling of generalized discrimination networks. Furthermore, this thesis provides a performance evaluation, which shows that a) the incremental maintenance algorithm scales, when the graph data becomes large, and b) the generalized discrimination network structures can outperform Rete network structures in time and space at the same time for incremental graph pattern matching.}, language = {en} } @phdthesis{Reike2017, author = {Reike, Dennis}, title = {A look behind perceptual performance in numerical cognition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407821}, school = {Universit{\"a}t Potsdam}, pages = {vi, 136}, year = {2017}, abstract = {Recognizing, understanding, and responding to quantities are considerable skills for human beings. We can easily communicate quantities, and we are extremely efficient in adapting our behavior to numerical-related tasks. One usual task is to compare quantities. We also use symbols like digits in numerical-related tasks. To solve tasks including digits, we must rely on our previously learned internal number representations. 
This thesis elaborates on the process of number comparison with the use of noisy mental representations of numbers, the interaction of number and size representations and how we use mental number representations strategically. For this, three studies were carried out. In the first study, participants had to decide which of two presented digits was numerically larger. They had to respond with a saccade in the direction of the anticipated answer. Using only a small set of meaningfully interpretable parameters, a variant of random walk models is described that accounts for response time, error rate, and variance of response time for the full matrix of 72 digit pairs. In addition, the used random walk model predicts a numerical distance effect even for error response times and this effect clearly occurs in the observed data. In relation to corresponding correct answers error responses were systematically faster. However, different from standard assumptions often made in random walk models, this account required that the distributions of step sizes of the induced random walks be asymmetric to account for this asymmetry between correct and incorrect responses. Furthermore, the presented model provides a well-defined framework to investigate the nature and scale (e.g., linear vs. logarithmic) of the mapping of numerical magnitude onto its internal representation. In comparison of the fits of proposed models with linear and logarithmic mapping, the logarithmic mapping is suggested to be prioritized. Finally, we discuss how our findings can help interpret complex findings (e.g., conflicting speed vs. accuracy trends) in applied studies that use number comparison as a well-established diagnostic tool. Furthermore, a novel oculomotoric effect is reported, namely the saccadic overschoot effect. The participants responded by saccadic eye movements and the amplitude of these saccadic responses decreases with numerical distance. 
For the second study, an experimental design was developed that allows us to apply the signal detection theory to a task where participants had to decide whether a presented digit was physically smaller or larger. A remaining question is, whether the benefit in (numerical magnitude - physical size) congruent conditions is related to a better perception than in incongruent conditions. Alternatively, the number-size congruency effect is mediated by response biases due to numbers magnitude. The signal detection theory is a perfect tool to distinguish between these two alternatives. It describes two parameters, namely sensitivity and response bias. Changes in the sensitivity are related to the actual task performance due to real differences in perception processes whereas changes in the response bias simply reflect strategic implications as a stronger preparation (activation) of an anticipated answer. Our results clearly demonstrate that the number-size congruency effect cannot be reduced to mere response bias effects, and that genuine sensitivity gains for congruent number-size pairings contribute to the number-size congruency effect. Third, participants had to perform a SNARC task - deciding whether a presented digit was odd or even. Local transition probability of irrelevant attributes (magnitude) was varied while local transition probability of relevant attributes (parity) and global probability occurrence of each stimulus were kept constantly. Participants were quite sensitive in recognizing the underlying local transition probability of irrelevant attributes. A gain in performance was observed for actual repetitions of the irrelevant attribute in relation to changes of the irrelevant attribute in high repetition conditions compared to low repetition conditions. 
One interpretation of these findings is that information about the irrelevant attribute (magnitude) in the previous trial is used as an informative precue, so that participants can prepare early processing stages in the current trial, with the corresponding benefits and costs typical of standard cueing studies. Finally, the results reported in this thesis are discussed in relation to recent studies in numerical cognition.}, language = {en} } @phdthesis{Horn2017, author = {Horn, Juliane}, title = {A modelling framework for exploration of a multidimensional factor causing decline in honeybee health}, school = {Universit{\"a}t Potsdam}, pages = {221}, year = {2017}, language = {en} } @phdthesis{Lin2017, author = {Lin, Huijuan}, title = {Acceleration and Amplification of Biomimetric Actuation}, school = {Universit{\"a}t Potsdam}, pages = {99}, year = {2017}, language = {en} } @phdthesis{Martin2017, author = {Martin, Thorsten}, title = {Advances in spatial econometrics and the political economy of local housing supply}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406836}, school = {Universit{\"a}t Potsdam}, pages = {207}, year = {2017}, abstract = {This cumulative dissertation consists of five chapters. In terms of research content, my thesis can be divided into two parts. Part one examines local interactions and spillover effects between small regional governments using spatial econometric methods. 
The second part focuses on patterns within municipalities and inspects which institutions of citizen participation, elections and local petitions, influence local housing policies.}, language = {en} } @phdthesis{Graja2017, author = {Graja, Antonia}, title = {Aging-related changes of progenitor cell function and microenvironment impair brown adipose tissue regeneration}, school = {Universit{\"a}t Potsdam}, pages = {152}, year = {2017}, language = {en} } @phdthesis{Radon2017, author = {Radon, Christin}, title = {Analyse der Funktion der dualen Lokalisation der 3-Mercaptopyruvat Sulfurtransferase im Menschen}, school = {Universit{\"a}t Potsdam}, pages = {123}, year = {2017}, language = {de} } @phdthesis{Wambura2017, author = {Wambura, Frank Joseph}, title = {Analysis of anthropogenic impacts on water resources in the Wami River basin, Tanzania}, school = {Universit{\"a}t Potsdam}, pages = {116}, year = {2017}, language = {en} } @phdthesis{DiezCocero2017, author = {Diez Cocero, Mercedes}, title = {Analysis of Rubisco - carbonic anhydrase fusions in tobacco as an approach to reduce photorespiration}, school = {Universit{\"a}t Potsdam}, pages = {116}, year = {2017}, abstract = {Rubisco catalyses the first step of CO2 assimilation into plant biomass. Despite its crucial role, it is notorious for its low catalytic rate and its tendency to fix O2 instead of CO2, giving rise to a toxic product that needs to be recycled in a process known as photorespiration. Since almost all our food supply relies on Rubisco, even small improvements in its specificity for CO2 could lead to an improvement of photosynthesis and ultimately, crop yield. 
In this work, we attempted to improve photosynthesis by decreasing photorespiration with an artificial CCM based on a fusion between Rubisco and a carbonic anhydrase (CA). A preliminary set of plants contained fusions between one of two CAs, bCA1 and CAH3, and the N- or C-terminus of RbcL connected by a small flexible linker of 5 amino acids. Subsequently, further fusion proteins were created between RbcL C-terminus and bCA1/CAH3 with linkers of 14, 23, 32, and 41 amino acids. The transplastomic tobacco plants carrying fusions with bCA1 were able to grow autotrophically even with the shortest linkers, albeit at a low rate, and accumulated very low levels of the fusion protein. On the other hand, plants carrying fusions with CAH3 were autotrophic only with the longer linkers. The longest linker permitted nearly wild-type like growth of the plants carrying fusions with CAH3 and increased the levels of fusion protein, but also of smaller degradation products. The fusion of catalytically inactive CAs to RbcL did not cause a different phenotype from the fusions with catalytically active CAs, suggesting that the selected CAs were not active in the fusion with RbcL or their activity did not have an effect on CO2 assimilation. However, fusions to RbcL did not abolish RbcL catalytic activity, as shown by the autotrophic growth, gas exchange and in vitro activity measurements. Furthermore, Rubisco carboxylation rate and specificity for CO2 was not altered in some of the fusion proteins, suggesting that despite the defect in RbcL folding or assembly caused by the fusions, the addition of 60-150 amino acids to RbcL does not affect its catalytic properties. On the contrary, most growth defects of the plants carrying RbcL-CA fusions are related to their reduced Rubisco content, likely caused by impaired RbcL folding or assembly. 
Finally, we found that fusions with RbcL C-terminus were better tolerated than with the N-terminus, and increasing the length of the linker relieved the growth impairment imposed by the fusion to RbcL. Together, the results of this work constitute considerable relevant findings for future Rubisco engineering.}, language = {en} } @phdthesis{Bajdzienko2017, author = {Bajdzienko, Krzysztof}, title = {Analysis of Target of Rapamycin (Tor) induced changes of the Arabidopsis thaliana proteome using sub-cellular resolution}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2017}, language = {en} } @phdthesis{Ussath2017, author = {Ussath, Martin Georg}, title = {Analytical approaches for advanced attacks}, school = {Universit{\"a}t Potsdam}, pages = {169}, year = {2017}, language = {en} } @phdthesis{Neuber2017, author = {Neuber, Corinna}, title = {Analytik zur Biotransformation des Sphingosin 1-phosphat-abbauproduktes (2E)-Hexadecenal}, school = {Universit{\"a}t Potsdam}, pages = {161}, year = {2017}, language = {de} } @phdthesis{Schmidt2017, author = {Schmidt, Silke Regina}, title = {Analyzing lakes in the time frequency domain}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406955}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 126}, year = {2017}, abstract = {The central aim of this thesis is to demonstrate the benefits of innovative frequency-based methods to better explain the variability observed in lake ecosystems. Freshwater ecosystems may be the most threatened part of the hydrosphere. Lake ecosystems are particularly sensitive to changes in climate and land use because they integrate disturbances across their entire catchment. This makes understanding the dynamics of lake ecosystems an intriguing and important research priority. This thesis adds new findings to the baseline knowledge regarding variability in lake ecosystems. 
It provides a literature-based, data-driven and methodological framework for the investigation of variability and patterns in environmental parameters in the time frequency domain. Observational data often show considerable variability in the environmental parameters of lake ecosystems. This variability is mostly driven by a plethora of periodic and stochastic processes inside and outside the ecosystems. These run in parallel and may operate at vastly different time scales, ranging from seconds to decades. In measured data, all of these signals are superimposed, and dominant processes may obscure the signals of other processes, particularly when analyzing mean values over long time scales. Dominant signals are often caused by phenomena at long time scales like seasonal cycles, and most of these are well understood in the limnological literature. The variability injected by biological, chemical and physical processes operating at smaller time scales is less well understood. However, variability affects the state and health of lake ecosystems at all time scales. Besides measuring time series at sufficiently high temporal resolution, the investigation of the full spectrum of variability requires innovative methods of analysis. Analyzing observational data in the time frequency domain allows to identify variability at different time scales and facilitates their attribution to specific processes. The merit of this approach is subsequently demonstrated in three case studies. The first study uses a conceptual analysis to demonstrate the importance of time scales for the detection of ecosystem responses to climate change. These responses often occur during critical time windows in the year, may exhibit a time lag and can be driven by the exceedance of thresholds in their drivers. This can only be detected if the temporal resolution of the data is high enough. 
The second study applies Fast Fourier Transform spectral analysis to two decades of daily water temperature measurements to show how temporal and spatial scales of water temperature variability can serve as an indicator for mixing in a shallow, polymictic lake. The final study uses wavelet coherence as a diagnostic tool for limnology on a multivariate high-frequency data set recorded between the onset of ice cover and a cyanobacteria summer bloom in the year 2009 in a polymictic lake. Synchronicities among limnological and meteorological time series in narrow frequency bands were used to identify and disentangle prevailing limnological processes. Beyond the novel empirical findings reported in the three case studies, this thesis aims to more generally be of interest to researchers dealing with now increasingly available time series data at high temporal resolution. A set of innovative methods to attribute patterns to processes, their drivers and constraints is provided to help make more efficient use of this kind of data.}, language = {en} } @phdthesis{Buschhueter2017, author = {Buschh{\"u}ter, David}, title = {Anforderungsrelevante mathematik- und physikbezogene Leistungsdispositionen von Physikanf{\"a}ngerinnen und - anf{\"a}ngern}, school = {Universit{\"a}t Potsdam}, year = {2017}, language = {de} } @phdthesis{Paape2017, author = {Paape, Dario L. J. F.}, title = {Antecedent complexity effects on ellipsis processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411689}, school = {Universit{\"a}t Potsdam}, pages = {ix, 134}, year = {2017}, abstract = {This dissertation explores whether the processing of ellipsis is affected by changes in the complexity of the antecedent, either due to added linguistic material or to the presence of a temporary ambiguity. Murphy (1985) hypothesized that ellipsis is resolved via a string copying procedure when the antecedent is within the same sentence, and that copying longer strings takes more time. 
Such an account also implies that the antecedent is copied without its structure, which in turn implies that recomputing its syntax and semantics may be necessary at the ellipsis gap. Alternatively, several accounts predict null effects of antecedent complexity, as well as no reparsing. These either involve a structure copying mechanism that is cost-free and whose finishing time is thus independent of the form of the antecedent (Frazier \& Clifton, 2001), treat ellipsis as a pointer into content-addressable memory with direct access (Martin \& McElree, 2008, 2009), or assume that one structure is 'shared' between antecedent and gap (Frazier \& Clifton, 2005). In a self-paced reading study on German sluicing, temporarily ambiguous garden-path clauses were used as antecedents, but no evidence of reparsing in the form of a slowdown at the ellipsis site was found. Instead, results suggest that antecedents which had been reanalyzed from an initially incorrect structure were easier to retrieve at the gap. This finding can be explained within the framework of cue-based retrieval parsing (Lewis \& Vasishth, 2005), where additional syntactic operations on a structure yield memory reactivation effects. Two further self-paced reading studies on German bare argument ellipsis and English verb phrase ellipsis investigated if adding linguistic content to the antecedent would increase processing times for the ellipsis, and whether insufficiently demanding comprehension tasks may have been responsible for earlier null results (Frazier \& Clifton, 2000; Martin \& McElree, 2008). It has also been suggested that increased antecedent complexity should shorten rather than lengthen retrieval times by providing more unique memory features (Hofmeister, 2011). Both experiments failed to yield reliable evidence that antecedent complexity affects ellipsis processing times in either direction, irrespective of task demands. 
Finally, two eye-tracking studies probed more deeply into the proposed reactivation-induced speedup found in the first experiment. The first study used three different kinds of French garden-path sentences as antecedents, with two of them failing to yield evidence for reactivation. Moreover, the third sentence type showed evidence suggesting that having failed to assign a structure to the antecedent leads to a slowdown at the ellipsis site, as well as regressions towards the ambiguous part of the sentence. The second eye-tracking study used the same materials as the initial self-paced reading study on German, with results showing a pattern similar to the one originally observed, with some notable differences. Overall, the experimental results are compatible with the view that adding linguistic material to the antecedent has no or very little effect on the ease with which ellipsis is resolved, which is consistent with the predictions of cost-free copying, pointer-based approaches and structure sharing. Additionally, effects of the antecedent's parsing history on ellipsis processing may be due to reactivation, the availability of multiple representations in memory, or complete failure to retrieve a matching target.}, language = {en} }