@phdthesis{Stock2010, author = {Stock, Maria}, title = {Charakterisierung der troposph{\"a}rischen Aerosolvariabilit{\"a}t in der europ{\"a}ischen Arktis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49203}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Based on sun photometer measurements at three stations (AWIPEV/Koldewey in Ny-{\AA}lesund (78.923 °N, 11.923 °E) 1995-2008, the 35th North Pole drifting station NP-35 (84.3-85.5 °N, 41.7-56.6 °E) March/April 2008, and Sodankyl{\"a} (67.37 °N, 26.65 °E) 2004-2007), this thesis investigates the aerosol variability in the European Arctic and its causes. The focus is on the relationship between the aerosol parameters measured at the stations (aerosol optical depth, {\AA}ngstr{\"o}m coefficient, etc.) and aerosol transport, on short time scales (days) as well as on long time scales (months, years). To establish this relationship, 5-day backward trajectories are computed for the short time scales with the trajectory model PEP-Tracer, for three starting altitudes (850 hPa, 700 hPa, 500 hPa) and the starting times 00, 06, 12 and 18 h. Using the non-hierarchical cluster method k-means, the computed backward trajectories are then grouped and assigned to specific source regions and to the measured aerosol optical depths. This assignment of aerosol optical depth to source region yields no unambiguous connection between the transport of polluted air masses from Europe or Russia/Asia and increased aerosol optical depth. Nevertheless, for one specific case (March 2008) a direct connection between aerosol transport and high aerosol optical depths can be demonstrated: forest fire aerosol from southwestern Russia reached the Arctic and was observed both at NP-35 and in Ny-{\AA}lesund. In a further step, EOF analysis is used to investigate to what extent large-scale atmospheric circulation patterns are responsible for the aerosol variability in the European Arctic. As with the trajectory analysis, the link between the atmospheric circulation and the photometer measurements at the stations is generally weak. An exception emerges when the annual cycles of surface pressure and aerosol optical depth are considered. High aerosol optical depths occur in spring, on the one hand, when the Icelandic Low and the Siberian High steer air masses from Europe or Russia/Asia into the Arctic and, on the other hand, when a strong high-pressure system sits over Greenland and large parts of the Arctic. It also emerges that the transition from spring to summer is at least partly caused by the change from the stable polar high in winter and spring to an Arctic atmosphere dominated more by low-pressure systems in summer. The lower aerosol concentration in summer can partly be explained by an increase in wet deposition as an aerosol sink. For Ny-{\AA}lesund, in addition to the transport patterns, the chemical composition of the aerosol is derived from impactor measurements at the Zeppelin station on the Zeppelinberg (474 m a.s.l.) near Ny-{\AA}lesund. Here, the positive correlation of aerosol optical depth with the concentrations of sulfate ions and soot is very clear.
Both substances enter the atmosphere largely through anthropogenic emissions. The demonstrably anthropogenic composition of the Arctic aerosol thus stands in contrast to the fact that no unambiguous connection to aerosol transport from industrial regions could be established. This can only be explained by one or more transformation processes (e.g., nucleation of sulfuric acid particles) taking place during transport from the source regions (Europe, Russia).}, language = {de} } @phdthesis{Werth2010, author = {Werth, Susanna}, title = {Calibration of the global hydrological model WGHM with water mass variations from GRACE gravity data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41738}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Since the start of the GRACE (Gravity Recovery And Climate Experiment) mission in 2002, time-dependent global maps of the Earth's gravity field have been available to study geophysical and climatologically driven mass redistributions on the Earth's surface. In particular, GRACE observations of total water storage variations (TWSV) provide a comprehensive data set for analysing the water cycle on large scales. They are therefore invaluable for the validation and calibration of large-scale hydrological models such as the WaterGAP Global Hydrology Model (WGHM), which simulates the continental water cycle including its most important components: soil, snow, canopy, surface water and groundwater. To date, however, WGHM exhibits significant differences from GRACE, especially in the seasonal amplitude of TWSV. The need to validate hydrological models is further highlighted by the large differences between several global models, e.g. WGHM, the Global Land Data Assimilation System (GLDAS) and the Land Dynamics model (LaD). Here, GRACE links geodetic and hydrological research; this link demands the development of adequate data integration methods on both sides, which form the main objectives of this work. They include the derivation of accurate GRACE-based water storage changes, the development of strategies to integrate GRACE data into a global hydrological model together with a calibration method, and the re-calibration of WGHM in order to analyse process and model responses. To achieve these aims, GRACE filter tools for the derivation of regionally averaged TWSV were evaluated for specific river basins. A decorrelation filter that uses GRACE orbits in its design proved the most efficient of the tested methods. Consistency in data content and equal spatial resolution between observed and simulated TWSV were realised by including all of the most important hydrological processes and by filtering both data sets identically. Appropriate calibration parameters were derived from a sensitivity analysis of WGHM with respect to TWSV. Finally, a multi-objective calibration framework was developed to constrain model predictions by both river discharge and GRACE TWSV, implemented with an evolutionary method, the ε-Non-dominated Sorting Genetic Algorithm II (ε-NSGAII). The model was calibrated for the 28 largest river basins worldwide, and for most of them improved simulation results were achieved with regard to both objectives. The multi-objective approach yielded more reliable and consistent simulations of TWSV within the continental water cycle and revealed possible model structure errors or mis-modelled processes for specific river basins.
For tropical regions in particular, the simulated seasonal amplitude of water mass variations increased. The findings lead to an improved understanding of hydrological processes and their representation in the global model. Finally, the robustness of the results is analysed with respect to GRACE and runoff measurement errors. A main conclusion from the results is that not only soil water and snow storage but also groundwater and surface water storage have to be included when comparing modelled and GRACE-derived total water budget data. Regarding model calibration, the regionally varying distribution of parameter sensitivity suggests tuning only the parameters of important processes within each region. Furthermore, observations of individual storage components besides runoff are necessary to improve the signal amplitudes and timing of simulated TWSV and to evaluate them with higher accuracy. The results of this work highlight the value of GRACE data when merged into large-scale hydrological modelling and present methods to improve large-scale hydrological models.}, language = {en} } @phdthesis{Plodeck2010, author = {Plodeck, Judith}, title = {Bruce Nauman und Olafur Eliasson : Strategien performativer Installationen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-032-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-40754}, school = {Universit{\"a}t Potsdam}, pages = {342}, year = {2010}, abstract = {The analysis compares installations by Bruce Nauman and Olafur Eliasson, starting from the question of how the artistic strategies of performativity of the 1960s/70s and those of contemporary art differ in their impact and effects. The positions of the two artists are regarded as paradigmatic of an aesthetics of the performative. In addition to the comparison of the artists, the study foregrounds the theoretical engagement with the discursive figure of performativity and its methodological applicability in art history. While installations of the 1960s/70s are characterized above all by their psycho-physical impact on the viewer's sensory perception, at times provoking outright shock, contemporary artistic practice is concerned primarily with visual and poetic effects that demand a contemplative mode of reception from the viewer. Bruce Nauman was concerned with questioning the traditional status of the artwork as an object of contemplation, graspable through concepts such as form, origin and originality, and instead with making a real bodily experience accessible to the viewer. Artists such as Olafur Eliasson are concerned in their productions above all with the perception of perception and with the generation of effects of presence. With the emergence of such practices it became clear that performative installations call for other forms of description, and that these can be captured by an aesthetics of the performative. How exactly does the shift from the performative strategies of the 1960s/70s to those of contemporary installation artists take place?
Verl{\"a}uft dieser vom Schock zur Poesie?}, language = {de} } @phdthesis{Childs2010, author = {Childs, Liam H.}, title = {Bioinformatics approaches to analysing RNA mediated regulation of gene expression}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41284}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The genome can be considered the blueprint for an organism. Composed of DNA, it harbours all organism-specific instructions for the synthesis of all structural components and their associated functions. The role of carriers of actual molecular structure and functions was believed to be exclusively assumed by proteins encoded in particular segments of the genome, the genes. In the process of converting the information stored genes into functional proteins, RNA - a third major molecule class - was discovered early on to act a messenger by copying the genomic information and relaying it to the protein-synthesizing machinery. Furthermore, RNA molecules were identified to assist in the assembly of amino acids into native proteins. For a long time, these - rather passive - roles were thought to be the sole purpose of RNA. However, in recent years, new discoveries have led to a radical revision of this view. First, RNA molecules with catalytic functions - thought to be the exclusive domain of proteins - were discovered. Then, scientists realized that much more of the genomic sequence is transcribed into RNA molecules than there are proteins in cells begging the question what the function of all these molecules are. Furthermore, very short and altogether new types of RNA molecules seemingly playing a critical role in orchestrating cellular processes were discovered. Thus, RNA has become a central research topic in molecular biology, even to the extent that some researcher dub cells as "RNA machines". This thesis aims to contribute towards our understanding of RNA-related phenomena by applying Bioinformatics means. First, we performed a genome-wide screen to identify sites at which the chemical composition of DNA (the genotype) critically influences phenotypic traits (the phenotype) of the model plant Arabidopsis thaliana. Whole genome hybridisation arrays were used and an informatics strategy developed, to identify polymorphic sites from hybridisation to genomic DNA. Following this approach, not only were genotype-phenotype associations discovered across the entire Arabidopsis genome, but also regions not currently known to encode proteins, thus representing candidate sites for novel RNA functional molecules. By statistically associating them with phenotypic traits, clues as to their particular functions were obtained. Furthermore, these candidate regions were subjected to a novel RNA-function classification prediction method developed as part of this thesis. While determining the chemical structure (the sequence) of candidate RNA molecules is relatively straightforward, the elucidation of its structure-function relationship is much more challenging. Towards this end, we devised and implemented a novel algorithmic approach to predict the structural and, thereby, functional class of RNA molecules. In this algorithm, the concept of treating RNA molecule structures as graphs was introduced. We demonstrate that this abstraction of the actual structure leads to meaningful results that may greatly assist in the characterization of novel RNA molecules. 
Furthermore, by using graph-theoretic properties as descriptors of structure, we identified particular structural features of RNA molecules that may determine their function, thus providing new insights into the structure-function relationships of RNA. The method (termed Grapple) has been made available to the scientific community as a web-based service. RNA has taken centre stage in molecular biology research, and novel discoveries can be expected to further solidify the central role of RNA in the origin and support of life on Earth. As illustrated by this thesis, Bioinformatics methods will continue to play an essential role in these discoveries.}, language = {en} } @phdthesis{Fucik2010, author = {Fucik, Markus}, title = {Bayesian risk management : "Frequency does not make you smarter"}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53089}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Within our research group Bayesian Risk Solutions we have coined the idea of Bayesian Risk Management (BRM). It calls for (1) a more transparent and diligent data analysis as well as (2) an open-minded incorporation of human expertise into risk management. In this dissertation we formulate a framework for BRM based on the two pillars Hardcore-Bayesianism (HCB) and Softcore-Bayesianism (SCB), providing solutions for the claims above. For data analysis we favor Bayesian statistics with its Markov Chain Monte Carlo (MCMC) simulation algorithm. It provides a full illustration of data-induced uncertainty beyond classical point estimates. We calibrate twelve different stochastic processes to four years of CO2 price data. In addition, we calculate derived risk measures (ex-ante/ex-post value-at-risk, capital charges, option prices) and compare them to their classical counterparts. Where statistics fails because of a lack of reliable data, we propose our integrated Bayesian Risk Analysis (iBRA) concept, a basic guideline for an expertise-driven quantification of critical risks. We additionally review elicitation techniques and tools that support experts in expressing their uncertainty. Unfortunately, Bayesian thinking is often blamed for its arbitrariness. We therefore introduce the idea of a Bayesian due diligence that judges expert assessments according to their information content and their inter-subjectivity.}, language = {en} } @phdthesis{Boeniger2010, author = {B{\"o}niger, Urs}, title = {Attributes and their potential to analyze and interpret 3D GPR data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50124}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Based on technological advances made within the past decades, ground-penetrating radar (GPR) has become a well-established, non-destructive subsurface imaging technique. Catalyzed by recent demands for high-resolution near-surface imaging (e.g., the detection of unexploded ordnance and subsurface utilities, or hydrological investigations), the quality of today's GPR-based near-surface images has matured significantly. At the same time, the analysis of oil- and gas-related reflection seismic data sets has seen significant advances, notably in attribute analysis. Given the sensitivity of attribute analysis to data positioning in general, and of multi-trace attributes in particular, trace-positioning accuracy is of major importance for the success of attribute-based analysis flows.
Therefore, to study the feasibility of GPR-based attribute analyses, I first developed and evaluated a real-time GPR surveying setup based on a modern tracking total station (TTS). Combining current GPR systems' capability of fusing global positioning system (GPS) and geophysical data in real time, the ability of modern TTS systems to generate a GPS-like positional output, and wireless data transmission using radio modems results in a flexible and robust surveying setup. To evaluate the feasibility of this setup, I studied the major limitations of such an approach: system cross-talk and data delays, known as latencies. Experimental studies showed that when a minimum distance of ~5 m between the GPR and the TTS system is maintained, the signal-to-noise ratio of GPR data acquired with radio communication equals that acquired without radio communication. To address the limitations imposed by system latencies, inherent to all real-time data-fusion approaches, I developed a novel correction (calibration) strategy to assess the gross system latency and to correct for it. This resulted in the centimeter-level trace accuracy required by high-frequency and/or three-dimensional (3D) GPR surveys. Having introduced this flexible high-precision surveying setup, I successfully demonstrated the application of attribute-based processing to GPR-specific problems, which may differ significantly from the geological ones typically addressed by the oil and gas industry using seismic data. In this thesis, I concentrated on archaeological and subsurface-utility problems, as they represent typical near-surface geophysical targets. Enhancing 3D archaeological GPR data sets using a dip-steered filtering approach, followed by the calculation of coherency and similarity, allowed me to conduct subsurface interpretations far beyond those obtained by classical time-slice analyses. I could show that the incorporation of additional data sets (magnetic and topographic) and of attributes derived from these data sets can further improve the interpretation. In a case study, such an approach revealed the complementary nature of the individual data sets and allowed, for example, conclusions to be drawn about the source location of magnetic anomalies by concurrently analyzing GPR time/depth slices. In addition to archaeological targets, subsurface utility detection and characterization is a steadily growing field of application for GPR. I developed a novel attribute called depolarization. Incorporating geometrical and physical feature characteristics into the depolarization attribute allowed me to display the observed polarization phenomena efficiently. The geometrical enhancement makes use of an improved symmetry-extraction algorithm based on Laplacian high-boosting, followed by a phase-based symmetry calculation using a two-dimensional (2D) log-Gabor filter-bank decomposition of the data volume. To extract the physical information from the dual-component data set, I employed a sliding-window principal component analysis. The combination of the geometrically derived feature angle and the physically derived polarization angle allowed me to enhance the polarization characteristics of subsurface features. Ground-truth information obtained by excavations confirmed this interpretation. In the future, the inclusion of cross-polarized antenna configurations into the processing scheme may further improve the quality of the depolarization attribute.
In addition to polarization phenomena, the time-dependent frequency evolution of GPR signals may hold further information on the subsurface architecture and/or material properties. High-resolution, sparsity-promoting decomposition approaches have recently had a significant impact on the image- and signal-processing community. In this thesis, I introduced a modified tree-based matching pursuit approach. Using different synthetic examples, I showed that this approach clearly outperforms other commonly used time-frequency decomposition methods with respect to both time and frequency resolution. Apart from investigating tuning effects in GPR data, I also demonstrated the potential of high-resolution sparse decompositions for advanced data processing: frequency modulation of the individual atoms themselves makes it possible to efficiently correct frequency-attenuation effects and to improve resolution by shifting the average frequency level. GPR-based attribute analysis is still in its infancy. Considering the increasingly widespread realization of 3D GPR studies, there will certainly be a growing demand for improved subsurface interpretations in the future. Similar to the assessment of quantitative reservoir properties through the combination of 3D seismic attribute volumes with sparse well-log information, such combined parameter estimation represents a further step in realizing the potential of attribute-driven GPR data analyses.}, language = {en} } @phdthesis{Frohwerk2010, author = {Frohwerk, Sascha}, title = {Asymmetrien in der Neuen {\"O}konomischen Geographie}, series = {Potsdamer Schriften zur Raumwirtschaft}, journal = {Potsdamer Schriften zur Raumwirtschaft}, number = {3}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-089-2}, issn = {2190-8702}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49158}, school = {Universit{\"a}t Potsdam}, pages = {xii, 213}, year = {2010}, abstract = {New Economic Geography (NEG) explains agglomerations from a microeconomic general equilibrium model. For simplification, various symmetry assumptions are made: the regions under consideration are assumed to be of equal size, the expenditure shares for different groups of goods to be identical, and the transport costs for all manufactured goods to be the same. One consequence of these assumptions is that the models can explain under which conditions agglomeration arises, but not where it does so. In this work, three standard NEG models are extended by various asymmetries, and the changes in the results relative to each base model are presented. Besides the theory, the simulation methods, which can in principle be transferred to other models, are discussed. Building on this, an asymmetric model variant is applied to the economic development of Germany.
So l{\"a}sst sich das Ausbleiben eines fl{\"a}chendeckenden Aufschwungs in den neuen L{\"a}ndern, die starken Wanderungsbewegungen in die alten L{\"a}nder und das dauerhafte Lohnsatzgef{\"a}lle in einem Totalmodell erkl{\"a}ren.}, language = {de} } @phdthesis{Ishebabi2010, author = {Ishebabi, Harold}, title = {Architecture synthesis for adaptive multiprocessor systems on chip}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41316}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis presents methods for automated synthesis of flexible chip multiprocessor systems from parallel programs targeted at FPGAs to exploit both task-level parallelism and architecture customization. Automated synthesis is necessitated by the complexity of the design space. A detailed description of the design space is provided in order to determine which parameters should be modeled to facilitate automated synthesis by optimizing a cost function, the emphasis being placed on inclusive modeling of parameters from application, architectural and physical subspaces, as well as their joint coverage in order to avoid pre-constraining the design space. Given a parallel program and a set of an IP library, the automated synthesis problem is to simultaneously (i) select processors (ii) map and schedule tasks to them, and (iii) select one or several networks for inter-task communications such that design constraints and optimization objectives are met. The research objective in this thesis is to find a suitable model for automated synthesis, and to evaluate methods of using the model for architectural optimizations. Our contributions are a holistic approach for the design of such systems, corresponding models to facilitate automated synthesis, evaluation of optimization methods using state of the art integer linear and answer set programming, as well as the development of synthesis heuristics to solve runtime challenges.}, language = {en} } @phdthesis{Abed2010, author = {Abed, Jamil}, title = {An iterative approach to operators on manifolds with singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-44757}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {We establish elements of a new approach to ellipticity and parametrices within operator algebras on manifolds with higher singularities, only based on some general axiomatic requirements on parameter-dependent operators in suitable scales of spaes. The idea is to model an iterative process with new generations of parameter-dependent operator theories, together with new scales of spaces that satisfy analogous requirements as the original ones, now on a corresponding higher level. The "full" calculus involves two separate theories, one near the tip of the corner and another one at the conical exit to infinity. However, concerning the conical exit to infinity, we establish here a new concrete calculus of edge-degenerate operators which can be iterated to higher singularities.}, language = {en} } @phdthesis{Poltrock2010, author = {Poltrock, Silvana}, title = {About the relation between implicit Theory of Mind \& the comprehension of complement sentences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52293}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Previous studies on the relation between language and social cognition have shown that children's mastery of embedded sentential complements plays a causal role for the development of a Theory of Mind (ToM). 
In the second half of their fourth year, children start to succeed on complementation tasks in which they are required to report the content of an embedded clause. Traditional ToM tasks test the child's ability to predict that a person who holds a false belief (FB) about a situation will act "falsely". In these tasks, children do not represent FBs until the age of 4 years. According to the linguistic determinism hypothesis, only the unique syntax of complement sentences provides the format for representing FBs. However, experiments measuring children's looking behavior instead of their explicit predictions have provided evidence that 2-year-olds already possess an implicit ToM. This dissertation examined the question of whether there is an interrelation also between implicit ToM and the comprehension of complement sentences in typically developing German preschoolers. Two studies were conducted. In a correlational study (Study 1), 3-year-old children's performance on a traditional (explicit) FB task, on an implicit FB task and on language tasks measuring the comprehension of tensed sentential complements was collected and tested for interdependence. Eye-tracking methodology was used to assess implicit ToM by measuring participants' spontaneous anticipatory eye movements while they were watching FB movies. Two central findings emerged. First, predictive looking (implicit ToM) was not correlated with complement mastery, although both measures were associated with explicit FB task performance. This pattern of results suggests that explicit, but not implicit, ToM is language dependent. Second, as a group, 3-year-olds did not display implicit FB understanding; that is, previous findings of a precocious reasoning ability could not be replicated. This indicates that the characteristics of predictive-looking tasks play a role in eliciting implicit FB understanding, as the current task was completely nonverbal and as complex as traditional FB tasks. Study 2 took a methodological approach by investigating whether children display an earlier comprehension of sentential complements when the same means of measurement is used as in experimental tasks tapping implicit ToM, namely anticipatory looking. Two experiments were conducted. 3-year-olds were confronted either with a complement sentence expressing the protagonist's FB (Exp. 1) or with a complex sentence expressing the protagonist's belief without giving any information about the truth/falsity of the belief (Exp. 2). Afterwards, their expectations about the protagonist's future behavior were measured. Overall, the implicit measures revealed no considerably earlier understanding of sentential complementation. Whereas 3-year-olds did not display comprehension of complex sentences embedding a false proposition, children from 3;9 years on were proficient in processing complement sentences whose embedded proposition's truth value could not be evaluated. This pattern of results suggests that (1) the linguistic expression of a person's FB does not elicit implicit FB understanding and that (2) the assessment of the purely syntactic understanding of complement sentences is affected by competing reality information. In conclusion, this dissertation found no evidence that implicit ToM is related to the comprehension of sentential complementation. The findings suggest that implicit ToM might be based on nonlinguistic processes.
Results are discussed in the light of recently proposed dual-process models, which assume two distinct cognitive mechanisms accounting for different levels of ToM task performance.}, language = {en} }