@phdthesis{Kraft2018, author = {Kraft, Frederik}, title = {Be Creative, Now!}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-414009}, school = {Universit{\"a}t Potsdam}, pages = {VII, 230}, year = {2018}, abstract = {Purpose - This thesis set out to explore, describe, and evaluate the reality behind the rhetoric of freedom and control in the context of creativity. The overarching subject is concerned with the relationship between creativity, freedom, and control, considering that freedom is also seen as an element of control used to manage creativity. Design/methodology/approach - In-depth qualitative data were gathered at two innovative start-ups. Two ethnographic studies were conducted. The data are based on participatory observations, interviews, and secondary sources, comprising a three-month field study at each organization and a total of 41 interviews across both organizations. Findings - The thesis provides explanations for the practice of freedom and the control of creativity within organizations and expands the existing theory of neo-normative control. The findings indicate that organizations use complex control systems that allow a high degree of freedom, which paradoxically leads to more control. Freedom serves as a cover for control, which in turn leads to creativity. Covert control even results in the responsibility to be creative outside working hours. Practical implications - Organizations that rely on creativity might use the results of this thesis. Positive workplace control of creativity provides both freedom and structure for creative work. Freedom leads to organizational members being more motivated and committing themselves more strongly to their own and the organization's goals, while a specific structure also helps to provide the requirements for creativity. Originality/value - The thesis provides an insight into an approach to workplace control that has been mostly neglected in creativity research and proposes a modified concept of neo-normative control. It serves to provide a further understanding of freedom for creativity and to challenge the liberal claims of new control forms.}, language = {en} } @phdthesis{VillatoroLeal2018, author = {Villatoro Leal, Jos{\'e} Andr{\'e}s}, title = {A combined approach for the analysis of biomolecules using IR-MALDI ion mobility spectrometry and molecular dynamics simulations of peptide ions in the gas phase}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419723}, school = {Universit{\"a}t Potsdam}, pages = {133}, year = {2018}, abstract = {The aim of this doctoral thesis was to establish a technique for the analysis of biomolecules with infrared matrix-assisted laser dispersion ionization (IR-MALDI) ion mobility (IM) spectrometry. The main components of the work were the characterization of the IR-MALDI process, the development and characterization of different ion mobility spectrometers, the use of IR-MALDI-IM spectrometry as a robust, standalone spectrometer and the development of a collision cross-section estimation approach for peptides based on molecular dynamics and thermodynamic reweighting. First, the IR-MALDI source was studied with atmospheric pressure ion mobility spectrometry and shadowgraphy. It consisted of a metal capillary, at the tip of which a self-renewing droplet of analyte solution was met by an IR laser beam. A relationship between peak shape, ion desolvation, diffusion and extraction pulse delay time (pulse delay) was established.
First-order desolvation kinetics were observed and related to peak broadening by diffusion, both influenced by the pulse delay. The transport mechanisms in IR-MALDI were then studied by relating different laser impact positions on the droplet surface to the corresponding ion mobility spectra. Two different transport mechanisms were determined: phase explosion due to the laser pulse and electrical transport due to delayed ion extraction. The velocity of the ions stemming from the phase explosion was then measured by ion mobility and shadowgraphy at different time scales and distances from the source capillary, showing an initially very high but rapidly decaying velocity. Finally, the anatomy of the dispersion plume was observed in detail with shadowgraphy and general conclusions about the process were drawn. Understanding the IR-MALDI process enabled the optimization of the different IM spectrometers at atmospheric and reduced pressure (AP and RP, respectively). At reduced pressure, both an AP and an RP IR-MALDI source were used. The influence of the pulsed ion extraction parameters (pulse delay, width and amplitude) on peak shape, resolution and area was systematically studied in both AP and RP IM spectrometers and discussed in the context of the IR-MALDI process. Under RP conditions, the influence of the closing field and of the pressure was also examined for both AP and RP sources. For the AP ionization RP IM spectrometer, the influence of the inlet field (IF) in the source region was also examined. All of these studies led to the determination of the optimal analytical parameters as well as to a better understanding of the initial ion cloud anatomy. The analytical performance of the spectrometer was then studied. Limits of detection (LOD) and linear ranges were determined under static and pulsed ion injection conditions and interpreted in the context of the IR-MALDI mechanism. Applications in the separation of simple mixtures were also illustrated, demonstrating good isomer separation capabilities and the advantages of singly charged peaks. The possibility to couple high performance liquid chromatography (HPLC) to IR-MALDI-IM spectrometry was also demonstrated. Finally, the reduced pressure spectrometer was used to study the effect of high reduced field strength on the mobility of polyatomic ions in polyatomic gases. The last focus was the study of peptide ions. A dataset obtained with electrospray IM spectrometry was characterized and used for the calibration of a collision cross-section (CCS) determination method based on molecular dynamics (MD) simulations at high temperature. Instead of producing candidate structures which are evaluated one by one, this semi-automated method uses the simulation as a whole to determine a single average collision cross-section value by reweighting the CCS of a few representative structures. The method was compared to the intrinsic size parameter (ISP) method and to experimental results. Additional MD data obtained from the simulations were also used to further analyze the peptides and understand the experimental results, an advantage with regard to the ISP method. Finally, the CCSs of peptide ions analyzed by IR-MALDI were also evaluated with both ISP and MD methods and the results compared to experiment, resulting in a first validation of the MD method.
Thus, this thesis brings together the soft ionization technique that is IR-MALDI, which produces mostly singly charged peaks, with ion mobility spectrometry, which can distinguish between isomers, and a collision cross-section determination method which also provides structural information on the analyte at hand.}, language = {en} } @phdthesis{Knospe2018, author = {Knospe, Gloria-Mona}, title = {Processing of pronouns and reflexives in Turkish-German bilinguals}, doi = {10.25932/publishup-43644}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-436442}, school = {Universit{\"a}t Potsdam}, pages = {xxii, 410}, year = {2018}, abstract = {Previous studies on native language (L1) anaphor resolution have found that monolingual native speakers are sensitive to syntactic, pragmatic, and semantic constraints on pronoun and reflexive resolution. However, most studies have focused on English and other Germanic languages, and little is currently known about the online (i.e., real-time) processing of anaphors in languages with syntactically less restricted anaphors, such as Turkish. We also know relatively little about how 'non-standard' populations such as non-native (L2) speakers and heritage speakers (HSs) resolve anaphors. This thesis investigates the interpretation and real-time processing of anaphors in German and in a typologically different and as yet understudied language, Turkish. It compares hypotheses about differences between native speakers' (L1ers) and L2 speakers' (L2ers) sentence processing, looking into differences in processing mechanisms as well as the possibility of cross-linguistic influence. To help fill the current research gap regarding HS sentence comprehension, it compares findings for this group with those for L2ers. To investigate the representation and processing of anaphors in these three populations, I carried out a series of offline questionnaires and Visual-World eye-tracking experiments on the resolution of reflexives and pronouns in both German and Turkish. In the German experiments, native German speakers as well as L2ers of German were tested, while in the Turkish experiments, non-bilingual native Turkish speakers as well as HSs of Turkish with L2 German were tested. This allowed me to observe both cross-linguistic differences and population differences between monolinguals' and different types of bilinguals' resolution of anaphors. Regarding the comprehension of Turkish anaphors by L1ers, contrary to what has been previously assumed, I found that Turkish has no reflexive that follows Condition A of Binding theory (Chomsky, 1981). Furthermore, I propose more general cross-linguistic differences between Turkish and German, in the form of a stronger reliance on pragmatic information in anaphor resolution overall in Turkish compared to German. As for the processing differences between L1ers and L2ers of a language, I found evidence in support of hypotheses which propose that L2ers of German rely more strongly on non-syntactic information compared to L1ers (Clahsen \& Felser, 2006, 2017; Cunnings, 2016, 2017), independent of a potential influence of their L1. HSs, on the other hand, showed a tendency to overemphasize interpretational contrasts between different Turkish anaphors compared to monolingual native speakers. However, lower-proficiency HSs were likely to merge different forms for simplified representation and processing.
Overall, L2ers and HSs showed differences from monolingual native speakers both in their final interpretation of anaphors and during online processing. However, these differences were not parallel between the two types of bilinguals and thus do not support a unified model of L2 and HS processing (cf. Montrul, 2012). The findings of this thesis contribute to the field of anaphor resolution by providing data from a previously unexplored language, Turkish, as well as contributing to research on native and non-native processing differences. My results also illustrate the importance of considering individual differences in the acquisition process when studying bilingual language comprehension. Factors such as age of acquisition, language proficiency and the type of input a language learner receives may influence the processing mechanisms they develop and employ, both between and within different bilingual populations.}, language = {en} } @phdthesis{Siegmund2018, author = {Siegmund, Jonatan Frederik}, title = {Quantifying impacts of climate extreme events on vegetation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407095}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2018}, abstract = {Together with the gradual change of mean values, ongoing climate change is projected to increase the frequency and amplitude of temperature and precipitation extremes in many regions of Europe. The impacts of such extraordinary, in most cases short-term, climate situations on terrestrial ecosystems are of central interest in recent climate change research, because it cannot be assumed per se that known dependencies between climate variables and ecosystems are linearly scalable. So far, however, there is a high demand for a method to quantify such impacts in terms of simultaneities of event time series. In the course of this manuscript, the new statistical approach of Event Coincidence Analysis (ECA), as well as its R implementation, is introduced, a methodology that allows assessing whether or not two types of event time series exhibit similar sequences of occurrences. Applications of the method are presented, analyzing climate impacts on different temporal and spatial scales: the impact of extraordinary expressions of various climatic variables on tree stem variations (subdaily and local scale), the impact of extreme temperature and precipitation events on the flowering time of European shrub species (weekly and country scale), the impact of extreme temperature events on ecosystem health in terms of NDVI (weekly and continental scale) and the impact of El Ni{\~n}o and La Ni{\~n}a events on precipitation anomalies (seasonal and global scale). The applications presented in this thesis refine already known relationships based on classical methods and also deliver substantial new findings to the scientific community: the widely known positive correlation between flowering time and temperature, for example, is confirmed to be valid for the tails of the distributions, while the widely assumed positive dependency between stem diameter variation and temperature is shown not to be valid for very warm and very cold days. The larger-scale investigations underline the sensitivity of anthropogenically shaped landscapes to temperature extremes in Europe and provide a comprehensive global ENSO impact map for strong precipitation events.
Finally, by publishing the R implementation of the method, this thesis shall enable other researchers to further investigate similar research questions using Event Coincidence Analysis.}, language = {en} } @phdthesis{Ostrowski2018, author = {Ostrowski, Max}, title = {Modern constraint answer set solving}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407799}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2018}, abstract = {Answer Set Programming (ASP) is a declarative problem solving approach, combining a rich yet simple modeling language with high-performance solving capabilities. Although this has already resulted in various applications, certain aspects of such applications are more naturally modeled using variables over finite domains, for example, for accounting for resources, fine timings, coordinates, or functions. Our goal is thus to extend ASP with constraints over integers while preserving its declarative nature. This allows for fast prototyping and elaboration-tolerant problem descriptions of resource-related applications. The resulting paradigm is called Constraint Answer Set Programming (CASP). We present three different approaches for solving CASP problems. The first one, a lazy, modular approach, combines an ASP solver with an external system for handling constraints. This approach has the advantage that two state-of-the-art technologies work hand in hand to solve the problem, each concentrating on its part of the problem. The drawback is that inter-constraint dependencies cannot be communicated back to the ASP solver, impeding its learning algorithm. The second approach translates all constraints to ASP. Using the appropriate encoding techniques, this results in a very fast, monolithic system. Unfortunately, due to the large, explicit representation of constraints and variables, translation techniques are restricted to small and mid-sized domains. The third approach merges the lazy and the translational approach, combining the strengths of both while removing their weaknesses. To this end, we enhance the dedicated learning techniques of an ASP solver with the inferences of the translating approach in a lazy way. That is, the important knowledge is only made explicit when needed. By using state-of-the-art techniques from neighboring fields, we provide ways to tackle real-world, industrial-size problems. By extending CASP to reactive solving, we open up new application areas such as online planning with continuous domains and durations.}, language = {en} } @phdthesis{Vogel2018, author = {Vogel, Thomas}, title = {Model-driven engineering of self-adaptive software}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409755}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 357}, year = {2018}, abstract = {The development of self-adaptive software requires the engineering of an adaptation engine that controls the underlying adaptable software by a feedback loop. State-of-the-art approaches prescribe the feedback loop in terms of the number of feedback loops, how the activities (e.g., monitor, analyze, plan, and execute (MAPE)) and the knowledge are structured into a feedback loop, and the type of knowledge. Moreover, the feedback loop is usually hidden in the implementation or framework and therefore not visible in the architectural design. Additionally, an adaptation engine often employs runtime models that either represent the adaptable software or capture strategic knowledge such as reconfiguration strategies.
State-of-the-art approaches do not systematically address the interplay of such runtime models, which would otherwise allow developers to freely design the entire feedback loop. This thesis presents ExecUtable RuntimE MegAmodels (EUREMA), an integrated model-driven engineering (MDE) solution that rigorously uses models for engineering feedback loops. EUREMA provides a domain-specific modeling language to specify and an interpreter to execute feedback loops. The language allows developers to freely design a feedback loop concerning the activities and runtime models (knowledge) as well as the number of feedback loops. It further supports structuring the feedback loops in the adaptation engine that follows a layered architectural style. Thus, EUREMA makes the feedback loops explicit in the design and enables developers to reason about design decisions. To address the interplay of runtime models, we propose the concept of a runtime megamodel, which is a runtime model that contains other runtime models as well as activities (e.g., MAPE) working on the contained models. This concept is the underlying principle of EUREMA. The resulting EUREMA (mega)models are kept alive at runtime and they are directly executed by the EUREMA interpreter to run the feedback loops. Interpretation provides the flexibility to dynamically adapt a feedback loop. In this context, EUREMA supports engineering self-adaptive software in which feedback loops run independently or in a coordinated fashion within the same layer as well as on top of each other in different layers of the adaptation engine. Moreover, we consider preliminary means to evolve self-adaptive software by providing a maintenance interface to the adaptation engine. This thesis discusses EUREMA in detail by applying it to different scenarios such as single, multiple, and stacked feedback loops for self-repairing and self-optimizing the mRUBiS application. Moreover, it investigates the design and expressiveness of EUREMA, reports on experiments with a running system (mRUBiS) and with alternative solutions, and assesses EUREMA with respect to quality attributes such as performance and scalability. The conducted evaluation provides evidence that EUREMA as an integrated and open MDE approach for engineering self-adaptive software seamlessly integrates the development and runtime environments using the same formalism to specify and execute feedback loops, supports the dynamic adaptation of feedback loops in layered architectures, and achieves an efficient execution of feedback loops by leveraging incrementality.}, language = {en} } @phdthesis{Westbury2018, author = {Westbury, Michael V.}, title = {Unraveling evolution through Next Generation Sequencing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409981}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2018}, abstract = {The sequencing of the human genome in the early 2000s led to an increased interest in cheap and fast sequencing technologies. This interest culminated in the advent of next generation sequencing (NGS). A number of different NGS platforms have arisen since then, all promising to do the same thing, i.e., to produce large amounts of genetic information at relatively low cost compared to more traditional methods such as Sanger sequencing. The capabilities of NGS meant that researchers were no longer bound to species for which a lot of previous work had already been done (e.g.
model organisms and humans), enabling a shift in research towards more novel and diverse species of interest. This capability has greatly benefitted many fields within the biological sciences, one of which is the field of evolutionary biology. Researchers have begun to move away from the study of laboratory model organisms to wild, natural populations and species, which has greatly expanded our knowledge of evolution. NGS boasts a number of benefits over more traditional sequencing approaches. The main benefit comes from the capability to generate information for drastically more loci for a fraction of the cost. This is hugely beneficial to the study of wild animals as, even when large numbers of individuals are unobtainable, the amount of data produced still allows for accurate, reliable population- and species-level results from a small selection of individuals. The use of NGS to study species on which little to no previous research has been carried out, and the production of novel evolutionary information and reference datasets for the greater scientific community, were the focuses of this thesis. Two studies in this thesis focused on producing novel mitochondrial genomes from shotgun sequencing data through iterative mapping, bypassing the need for a close relative to serve as a reference sequence. These mitochondrial genomes were then used to infer species-level relationships through phylogenetic analyses. The first of these studies involved reconstructing a complete mitochondrial genome of the bat-eared fox (Otocyon megalotis). Phylogenetic analyses of the mitochondrial genome confidently placed the bat-eared fox as sister to the clade consisting of the raccoon dog and true foxes within the family Canidae. The next study also involved reconstructing a mitochondrial genome but in this case from the extinct Macrauchenia of South America. As this study utilised ancient DNA, it involved a lot of parameter testing, quality controls and strict thresholds to obtain a near-complete mitochondrial genome devoid of the contamination known to plague ancient DNA studies. Phylogenetic analyses confidently placed Macrauchenia as sister to all living representatives of Perissodactyla with a divergence time of ~66 million years ago. The third and final study of this thesis involved de novo assemblies of both nuclear and mitochondrial genomes from the brown and striped hyenas and focussed on demographic, genetic diversity and population genomic analyses within the brown hyena. Previous studies of the brown hyena hinted at very low levels of genomic diversity and, perhaps due to this, were unable to find any notable population structure across its range. By incorporating a large number of genetic loci, in the form of complete nuclear genomes, population structure within the brown hyena was uncovered. On top of this, genomic diversity levels were compared to a number of other species. Results showed the brown hyena to have the lowest genomic diversity out of all species included in the study, which was perhaps caused by a continuous and ongoing decline in effective population size that started about one million years ago and dramatically accelerated towards the end of the Pleistocene. The studies within this thesis show the power of NGS and its utility within evolutionary biology.
The most notable capabilities outlined in this thesis involve the study of species for which no reference data are available and the production of large amounts of data, providing evolutionary answers at the species and population level that data produced using more traditional techniques simply could not.}, language = {en} } @phdthesis{Bazhenova2018, author = {Bazhenova, Ekaterina}, title = {Discovery of Decision Models Complementary to Process Models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410020}, school = {Universit{\"a}t Potsdam}, year = {2018}, abstract = {Business process management is an acknowledged asset for running an organization in a productive and sustainable way. One of the most important aspects of business process management, occurring on a daily basis at all levels, is decision making. In recent years, a number of decision management frameworks have appeared in addition to existing business process management systems. More recently, Decision Model and Notation (DMN) was developed by the OMG consortium with the aim of complementing the widely used Business Process Model and Notation (BPMN). One of the reasons for the emergence of DMN is the increasing interest in the evolving paradigm known as the separation of concerns. This paradigm states that modeling decisions complementary to processes reduces process complexity by externalizing decision logic from process models and importing it into a dedicated decision model. Such an approach increases the agility of model design and execution. This provides organizations with the flexibility to adapt to the increasingly rapid and dynamic changes in the business ecosystem. The research gap identified by us is that the separation of concerns, recommended by DMN, prescribes the externalization of the decision logic of process models in one or more separate decision models, but it does not specify how this can be achieved. The goal of this thesis is to overcome the presented gap by developing a framework for discovering decision models in a semi-automated way from information about existing process decision making. Thus, in this thesis we develop methodologies to extract decision models from: (1) the control flow and data of process models that exist in enterprises; and (2) event logs recorded by enterprise information systems, encapsulating day-to-day operations. Furthermore, we provide an extension of the methodologies to discover decision models from event logs enriched with fuzziness, a tool for dealing with partial knowledge of the process execution information. All the proposed techniques are implemented and evaluated in case studies using real-life and synthetic process models and event logs. The evaluation of these case studies shows that the proposed methodologies provide valid and accurate output decision models that can serve as blueprints for executing decisions complementary to process models.
Thus, these methodologies are applicable in the real world and can be used, for example, for compliance checks, which could improve an organization's decision making and hence its overall performance.}, language = {en} } @phdthesis{Ramos2018, author = {Ramos, Catalina}, title = {Structure and petrophysical properties of the Southern Chile subduction zone along 38.25°S from seismic data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409183}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 111}, year = {2018}, abstract = {Active and passive source data from two seismic experiments within the interdisciplinary project TIPTEQ (from The Incoming Plate to mega Thrust EarthQuake processes) were used to image and identify the structural and petrophysical properties (such as P- and S-velocities, Poisson's ratios, pore pressure, density and amount of fluids) within the Chilean seismogenic coupling zone at 38.25°S, where in 1960 the largest earthquake ever recorded (Mw 9.5) occurred. Two S-wave velocity models calculated using traveltime and noise tomography techniques were merged with an existing velocity model to obtain a 2D S-wave velocity model, which combined the advantages of each individual model. In a following step, P- and S-reflectivity images of the subduction zone were obtained using different pre-stack and post-stack depth migration techniques. Among them, the recent pre-stack line-drawing depth migration scheme yielded revealing results. Next, synthetic seismograms modelled using the reflectivity method allowed, through their input 1D synthetic P- and S-velocities, the composition and rocks within the subduction zone to be inferred. Finally, an image of the subduction zone is given, jointly interpreting the results from this work with results from other studies. The Chilean seismogenic coupling zone at 38.25°S shows a continental crust with highly reflective horizontal, as well as (steep) dipping events. Among them, the Lanalhue Fault Zone (LFZ), which is interpreted to be east-dipping, is imaged to very shallow depths. Some steep reflectors are observed for the first time, for example one near the coast, related to high seismicity, and another one near the LFZ. Steep shallow reflectivity towards the volcanic arc could be related to a steep west-dipping reflector interpreted as fluids and/or melts, migrating upwards due to material recycling in the continental mantle wedge. The high resolution of the S-velocity model in the first kilometres allowed the identification of several sedimentary basins, characterized by very low P- and S-velocities, high Poisson's ratios and possible steep reflectivity. Such high Poisson's ratios are also observed within the oceanic crust, which reaches the seismogenic zone hydrated due to bending-related faulting. It is interpreted to release water until reaching the coast and under the continental mantle wedge. In terms of seismic velocities, the inferred composition and rocks in the continental crust are in agreement with field geology observations at the surface along the profile. Furthermore, there is no requirement to call on the existence of measurable amounts of present-day fluids above the plate interface in the continental crust of the Coastal Cordillera and the Central Valley in this part of the Chilean convergent margin. A large-scale anisotropy in the continental crust and upper mantle, previously proposed from magnetotelluric studies, is also proposed here on the basis of seismic velocities.
However, quantitative studies on this topic in the continental crust of the Chilean seismogenic zone at 38.25°S do not exist to date.}, language = {en} } @phdthesis{Muench2018, author = {M{\"u}nch, Thomas}, title = {Interpretation of temperature signals from ice cores}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-414963}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 197}, year = {2018}, abstract = {Earth's climate varies continuously across space and time, but humankind has witnessed only a small snapshot of its entire history, and instrumentally documented it for a mere 200 years. Our knowledge of past climate changes is therefore almost exclusively based on indirect proxy data, i.e. on indicators which are sensitive to changes in climatic variables and stored in environmental archives. Extracting the data from these archives allows retrieval of the information from earlier times. Obtaining accurate proxy information is a key means to test model predictions of the past climate, and only after such validation can the models be used to reliably forecast future changes in our warming world. The polar ice sheets of Greenland and Antarctica are one major climate archive, which record information about local air temperatures by means of the isotopic composition of the water molecules embedded in the ice. However, this temperature proxy is, like any indirect climate data, not a perfect recorder of past climatic variations. Apart from local air temperatures, a multitude of other processes affect the mean and variability of the isotopic data, which hinders their direct interpretation in terms of climate variations. This applies especially to regions with little annual accumulation of snow, such as the Antarctic Plateau. While these areas in principle allow for the extraction of isotope records reaching far back in time, a strong corruption of the temperature signal originally encoded in the isotopic data of the snow is expected. This dissertation uses observational isotope data from Antarctica, focussing especially on the East Antarctic low-accumulation area around the Kohnen Station ice-core drilling site, together with statistical and physical methods, to improve our understanding of the spatial and temporal isotope variability across different scales, and thus to enhance the applicability of the proxy for estimating past temperature variability. The presented results lead to a quantitative explanation of the local-scale (1-500 m) spatial variability in the form of a statistical noise model, and reveal the main source of the temporal variability to be the mixture of a climatic seasonal cycle in temperature and the effect of diffusional smoothing acting on temporally uncorrelated noise. These findings put significant limits on the representativity of single isotope records in terms of local air temperature, and impact the interpretation of apparent cyclicities in the records. Furthermore, to extend the analyses to larger scales, the timescale-dependency of observed Holocene isotope variability is studied. This offers a deeper understanding of the nature of the variations, and is crucial for unravelling the embedded true temperature variability over a wide range of timescales.}, language = {en} }