@phdthesis{Mueller2010, author = {M{\"u}ller, Anja}, title = {Wie interpretieren Kinder nur? : Experimentelle Untersuchungen zum Erwerb von Informationsstruktur}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57767}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {At the centre of this thesis is the question of how six-year-old monolingual German children interpret sentences with the focus particle nur ('only'). Five experiments investigated what influence the surface position of the focus particle has on sentence comprehension and whether the contextual embedding of nur-sentences leads to a target-like interpretation. In contrast to the results of previous studies (e.g., Crain et al. 1994; Paterson et al. 2003), the data of this thesis show that the children tested interpreted the presented nur-sentences in a target-like manner when these were embedded in an adequate context. It further emerged that the children made more errors when interpreting sentences with nur before the subject (Nur die Maus hat einen Ball.) than with nur before the object (Die Maus hat nur einen Ball.). Contrary to the syntactically based account of Crain et al. (1994) and the semantic-pragmatic account of Paterson et al. (2003), the thesis holds information-structural properties and differences of the nur-sentences responsible for the observed performance. The topic-default account postulated here assumes that children always analyse the subject of a sentence as its topic, which, in the case of sentences with nur before the subject, leads to an incorrect information-structural representation of the sentence. Based on these results and the postulated topic-default account, the thesis concludes by designing and discussing an acquisition model for the comprehension of sentences with the focus particle nur.}, language = {de} } @phdthesis{Fucik2010, author = {Fucik, Markus}, title = {Bayesian risk management : "Frequency does not make you smarter"}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53089}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Within our research group Bayesian Risk Solutions we have coined the idea of Bayesian Risk Management (BRM). It calls for (1) a more transparent and diligent data analysis as well as (2) an open-minded incorporation of human expertise into risk management. In this dissertation we formalize a framework for BRM based on the two pillars Hardcore-Bayesianism (HCB) and Softcore-Bayesianism (SCB), providing solutions for the claims above. For data analysis we favor Bayesian statistics with its Markov Chain Monte Carlo (MCMC) simulation algorithm, which provides a full picture of data-induced uncertainty beyond classical point estimates. We calibrate twelve different stochastic processes to four years of CO2 price data. In addition, we calculate derived risk measures (ex-ante/ex-post value-at-risk, capital charges, option prices) and compare them to their classical counterparts. Where statistics fails because of a lack of reliable data, we propose our integrated Bayesian Risk Analysis (iBRA) concept, a basic guideline for the expertise-driven quantification of critical risks. We additionally review elicitation techniques and tools that support experts in expressing their uncertainty. Unfortunately, Bayesian thinking is often blamed for its arbitrariness.
Therefore, we introduce the idea of a Bayesian due diligence that judges expert assessments according to their information content and their inter-subjectivity.}, language = {en} } @phdthesis{Strollo2010, author = {Strollo, Angelo}, title = {Development of techniques for earthquake microzonation studies in different urban environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53807}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The proliferation of megacities in many developing countries, their location in areas exposed to a high risk from large earthquakes, and a widespread lack of preparation demonstrate the need for improved capabilities in hazard assessment, as well as for the rapid adjustment and development of land-use planning. In particular, within the context of seismic hazard assessment, the evaluation of local site effects and their influence on the spatial distribution of the ground shaking generated by an earthquake plays an important role. It follows that earthquake microzonation studies, which aim to identify areas within the urban environment that are expected to respond in a similar way to a seismic event, are essential for the reliable risk assessment of large urban areas. Considering the rate at which many large towns in developing countries that are prone to large earthquakes are growing, their seismic microzonation has become mandatory. Such activities are challenging, and techniques suitable for identifying site effects within such contexts are needed. In this dissertation, I develop techniques for investigating large-scale urban environments that aim to be non-invasive, cost-effective and quickly deployable. These characteristics allow one to investigate large areas over a relatively short time frame, with a spatial sampling resolution sufficient to provide a reliable microzonation. Although there is a trade-off between the completeness of the available information and the extent of the investigated area, I attempt to mitigate this limitation by combining what I term two layers of information: in the first layer, the site effects at a few calibration points are well constrained by analyzing earthquake data or using other geophysical information (e.g., shear-wave velocity profiles); in the second layer, the site effects over a larger areal coverage are estimated by means of single-station noise measurements. The microzonation is performed in terms of problem-dependent quantities, using a proxy suitable for linking the information from the first layer to the second. In order to define the microzonation approach proposed in this work, different methods for estimating site effects have been combined and tested in Potenza (Italy), where a considerable amount of data was available. In particular, the horizontal-to-vertical spectral ratio computed from seismic noise recorded at different sites has been used as a proxy to combine the two levels of information and to create a microzonation map in terms of spectral intensity ratio (SIR). In the next step, I applied this two-layer approach to Istanbul (Turkey) and Bishkek (Kyrgyzstan). A similar hybrid approach, i.e., combining earthquake and noise data, has been used for the microzonation of these two different urban environments.
For both cities, after the fundamental frequencies of resonance estimated from seismic noise had been calibrated against those obtained by analysing earthquakes (first layer), a fundamental-frequency map was computed using the noise measurements carried out within each town (second layer). By applying this new approach, maps of the fundamental frequency of resonance for Istanbul and Bishkek have been published for the first time. In parallel, a microzonation map in terms of SIR has been incorporated into a risk scenario for the Potenza test site by means of a dedicated regression between spectral intensity (SI) and macroseismic intensity (EMS). The scenario study confirms the importance of site effects within the risk chain: their introduction into the scenario led to an increase of about 50\% in the estimated number of buildings that would partially or totally collapse. Last, but not least, considering that the approach developed and applied in this work is based on measurements of seismic noise, the reliability of such measurements has been assessed. A theoretical model describing the self-noise curves of different instruments usually adopted in microzonation studies (e.g., those used in Potenza, Istanbul and Bishkek) has been considered and compared with empirical data recorded in Cologne (Germany) and Gubbio (Italy). The results show that, depending on the geological and environmental conditions, the instrumental noise can severely bias the results obtained by recording and analysing ambient noise. Therefore, in this work I also provide some guidelines for measuring seismic noise.}, language = {en} } @phdthesis{Hoechner2010, author = {H{\"o}chner, Andreas}, title = {GPS based analysis of earthquake induced phenomena at the Sunda Arc}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-53166}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Indonesia is one of the countries most prone to natural hazards. The complex interaction of several tectonic plates with high relative velocities leads to approximately two earthquakes with magnitude Mw>7 every year, more than 15\% of such events worldwide. Earthquakes with magnitude above 9 happen far less frequently, but with catastrophic effects. The most severe consequences arise from the tsunamis triggered by these subduction-related earthquakes, as the Sumatra-Andaman event of 2004 showed. In order to enable efficient tsunami early warning, which includes the estimation of wave heights and arrival times, it is necessary to combine different types of real-time sensor data with numerical models of earthquake sources and tsunami propagation. This thesis was created as a result of the GITEWS project (German Indonesian Tsunami Early Warning System). It is based on five research papers and manuscripts. The main project-related task was the development of a database containing realistic earthquake scenarios for the Sunda Arc. This database provides initial conditions for the tsunami propagation modeling used by the simulation system at the early warning center. An accurate discretization of the subduction geometry, consisting of 25x150 subfaults, was constructed based on seismic data. Green's functions, representing the deformational response to unit dip- and strike-slip at the subfaults, were computed using a layered half-space approach. Different scaling relations for earthquake dimensions and slip distribution were implemented. Another project-related task was the further development of the 'GPS-shield' concept.
It consists of a constellation of near-field GPS receivers, which are shown to be very valuable for tsunami early warning. The major part of this thesis is devoted to the geophysical interpretation of GPS data. Coseismic surface displacements caused by the 2004 Sumatra earthquake are inverted for slip at the fault. The effect of different Earth layer models is tested, favoring a continental structure. The possibility of splay faulting is considered and shown to be a second-order effect with respect to tsunami generation for this event. Tsunami models based on the source inversions are compared to satellite radar altimetry observations. Postseismic GPS time series are used to test a wide parameter range of uni- and biviscous rheological models of the asthenosphere. Steady-state Maxwell rheology is shown to be incompatible with near-field GPS data unless large afterslip, amounting to more than 10\% of the coseismic moment, is assumed. In contrast, transient Burgers rheology is in agreement with the data without the need for large aseismic afterslip. Comparison to postseismic geoid observations by the GRACE satellites reveals that, even with afterslip, the model implementing Maxwell rheology yields amplitudes that are too small, and thus supports a biviscous asthenosphere. A simple approach based on the assumption of quasi-static deformation propagation is introduced and proposed for the inversion of coseismic near-field GPS time series. Application of this approach to observations from the 2004 Sumatra event fails to quantitatively reconstruct the rupture propagation, since the a priori conditions are not fulfilled in this case. However, synthetic tests reveal the feasibility of such an approach for the fast estimation of rupture properties.}, language = {en} } @phdthesis{Pilz2010, author = {Pilz, Marco}, title = {A comparison of proxies for seismic site conditions and amplification for the large urban area of Santiago de Chile}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52961}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Situated in an active tectonic region, Santiago de Chile, the country's capital with more than six million inhabitants, faces a tremendous earthquake hazard. Macroseismic data for the 1985 Valparaiso and the 2010 Maule events show large variations in the distribution of damage to buildings within short distances, indicating a strong influence of the local sediments and of the shape of the sediment-bedrock interface on ground motion. Therefore, a temporary seismic network was installed in the urban area to record earthquake activity, and a study was carried out to estimate site amplification derived from earthquake data and ambient noise. The analysis of earthquake data shows a significant dependence of amplitude and duration on the local geological structure. Moreover, the analysis of noise spectral ratios shows that they can provide a lower bound in amplitude for site amplification and, since no variability in terms of time and amplitude is observed, that it is possible to map the fundamental resonance frequency of the soil for a 26 km x 12 km area in the northern part of the Santiago de Chile basin. By inverting the noise spectral ratios, local shear-wave velocity profiles could be derived under the constraint of the thickness of the sedimentary cover, which had previously been determined by gravimetric measurements.
The resulting 3D model was derived by interpolation between the individual shear-wave velocity profiles; it shows locally good agreement with the few existing velocity profiles and allows the entire area, as well as deeper parts of the basin, to be represented in greater detail. The wealth of available data further allowed checking whether any correlation between the shear-wave velocity in the uppermost 30 m (vs30) and the slope of topography, a technique recently proposed by Wald and Allen (2007), exists on a local scale. While a single lithology may show considerable scatter in its velocity values for the investigated area, almost no correlation between topographic gradient and calculated vs30 exists, whereas a better link is found between vs30 and the local geology. A comparison of the vs30 distribution with the MSK intensities for the 1985 Valparaiso event makes clear that high intensities are found where the expected vs30 values are low and the sedimentary cover is thick. Although this evidence cannot be generalized to all possible earthquakes, it indicates the influence of site effects in modifying the ground motion even when earthquakes occur well outside the Santiago basin. Using the knowledge gained about the basin characteristics, simulations of strong ground motion within the Santiago Metropolitan area were carried out by means of the spectral element technique. The simulation of a regional event, which was also recorded by a dense network installed in the city of Santiago to record aftershock activity following the 27 February 2010 Maule earthquake, shows that the model is capable of realistically calculating ground motion in terms of amplitude, duration and frequency and, moreover, that the surface topography and the shape of the sediment-bedrock interface strongly modify ground motion in the Santiago basin. An examination of the dependence of ground motion on hypocenter location for a hypothetical event occurring along the active San Ram{\'o}n fault, which crosses the eastern outskirts of the city, shows that the unfavorable interaction between fault rupture, radiation mechanism and complex geological conditions in the near field may give rise to large values of peak ground velocity and therefore considerably increase the level of seismic risk for Santiago de Chile.}, language = {en} } @phdthesis{Borchers2010, author = {Borchers, Andreas}, title = {Glaciomarine sedimentation at the continental margin of Prydz Bay, East Antarctica : implications on palaeoenvironmental changes during the Quaternary}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52620}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The Antarctic plays an important role in the global climate system. On the one hand, the Antarctic Ice Sheet is the largest freshwater reservoir on Earth. On the other hand, a major proportion of global bottom-water formation takes place in Antarctic shelf regions, driving the global thermohaline circulation. The main goal of this dissertation is to provide new insights into the dynamics and stability of the East Antarctic Ice Sheet (EAIS) during the Quaternary. Additionally, variations in the activity of bottom-water formation and their causes are investigated. The dissertation is a German contribution to the International Polar Year 2007/2008 and was funded by the Deutsche Forschungsgemeinschaft (DFG) within the scope of priority program 1158, 'Antarctic research with comparative studies in Arctic ice regions'.
During RV Polarstern expedition ANT-XXIII/9, glaciomarine sediments were recovered from the Prydz Bay-Kerguelen region. Prydz Bay is a key region for the study of EAIS dynamics, as 16\% of the EAIS is drained through the Lambert Glacier into the bay. The glacier thereby transports sediment into Prydz Bay, which is then further distributed by calving icebergs or by current transport. The scientific approach of this dissertation is the reconstruction of past glaciomarine environments in order to infer the response of the Lambert Glacier-Amery Ice Shelf system to climate shifts during the Quaternary. To characterize the depositional setting, sedimentological methods are used and statistical analyses are applied. Mineralogical and (bio)geochemical methods provide a means to reconstruct sediment provenances and to provide evidence of changes in primary production in the surface water column. Age-depth models were constructed based on palaeomagnetic and palaeointensity measurements, diatom stratigraphy and radiocarbon dating. Sea-bed surface sediments in the investigation area show distinct variations in their clay-mineral and heavy-mineral assemblages, and considerable differences in the mineralogical composition of surface sediments are found on the continental shelf. Clay minerals as well as heavy minerals provide useful parameters to differentiate between sediments originating from the erosion of crystalline rocks and sediments originating from Permo-Triassic deposits. Consequently, mineralogical parameters can be used to reconstruct the provenance of current-transported and ice-rafted material. The investigated sediment cores cover the time intervals of the last 1.4 Ma (continental slope) and the last 12.8 cal. ka BP (MacRobertson shelf). The sediment deposits were mainly influenced by glacial and oceanographic processes, and further by biological activity (continental shelf), meltwater input and possibly gravitational transport. Sediments from the continental slope document two major deglacial events. The first deglaciation is associated with the mid-Pleistocene warming recognized around the Antarctic: in Prydz Bay, the Lambert Glacier-Amery Ice Shelf retreated far to the south, and high biogenic productivity commenced, or biogenic remains were better preserved due to increased sedimentation rates. Thereafter, stable glacial conditions persisted until 400-500 ka BP. Calving of icebergs was restricted to the western part of the Lambert Glacier, where the deeper bathymetry allows for a floating ice shelf even during times of lowered sea level. Between 400-500 ka BP and the last interglacial (marine isotope stage 5) the glacier was more dynamic. During or shortly after the last interglacial, the Lambert Glacier-Amery Ice Shelf retreated again due to a sea-level rise of 6-9 m. Both deglacial events correlate with a reduction in the thickness of ice masses in the Prince Charles Mountains, indicating that a disintegration of the Amery Ice Shelf possibly led to increased drainage of ice masses from the Prydz Bay hinterland. A new end-member modelling algorithm was successfully applied to sediments from the MacRobertson shelf in order to unmix the sand grain-size fractions sorted by current activity and by ice transport, respectively. Ice retreat on the MacRobertson shelf commenced 12.8 cal. ka BP and ended around 5.5 cal. ka BP. During the Holocene, strong fluctuations of bottom-water activity were observed, probably related to variations in sea-ice formation in the Cape Darnley polynya.
Increased bottom-water flow was reconstructed at transitions from warm to cool conditions, whereas bottom-water activity receded during the mid-Holocene climate optimum. It can be concluded that the Lambert Glacier-Amery Ice Shelf system was relatively stable with respect to climate variations during the Quaternary. In contrast, bottom-water formation due to polynya activity was very sensitive to changes in atmospheric forcing and should receive more attention in future research.}, language = {en} } @phdthesis{Zaupa2010, author = {Zaupa, Alessandro}, title = {Physical crosslinking of gelatin : a supramolecular approach to biomaterials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52888}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This work describes the realization of physically crosslinked networks based on gelatin through the introduction of functional groups that enable specific supramolecular interactions. Molecular models were developed in order to predict the material properties and to establish a knowledge-based approach to material design. The effect of additional supramolecular interactions with hydroxyapatite was then studied in composite materials. The calculated properties are compared to experimental results to validate the models, which are then further used for the study of the physically crosslinked networks. Gelatin was functionalized with desaminotyrosine (DAT) and desaminotyrosyl-tyrosine (DATT) side groups, derived from the natural amino acid tyrosine. These groups can potentially undergo π-π and hydrogen-bonding interactions, also under physiological conditions. Molecular dynamics (MD) simulations were performed on models with 0.8 wt.-\% or 25 wt.-\% water content, using the second-generation force field CFF91. The models were validated by comparison with specific experimental data such as density, peptide conformational angles and X-ray scattering spectra. They were then used to predict the supramolecular organization of the polymer chains, analyze the formation of physical netpoints and calculate the mechanical properties. An important finding of the simulations was that the number of observed physical netpoints increased with the number of aromatic groups: the number of relatively stable physical netpoints, on average 0 for natural gelatin, increased to 1 and 6 for the DAT- and DATT-functionalized gelatins, respectively. A comparison with the Flory-Rehner model suggested a reduction of the equilibrium swelling in water by a factor of 6 for the DATT-functionalized materials. The functionalized gelatins could be synthesized by chemoselective coupling of the free carboxylic acid groups of DAT and DATT to the free amino groups of gelatin. At 25 wt.-\% water content, the simulated and experimentally determined elastic mechanical properties (e.g., Young's modulus) were both on the order of GPa and were not influenced by the degree of aromatic modification. The experimental equilibrium degree of swelling in water decreased with an increasing number of inserted aromatic functions (from 2800 vol.-\% for pure gelatin to 300 vol.-\% for the DATT-modified gelatin); at the same time, Young's modulus, elongation at break and maximum tensile strength increased. It could be shown that functionalization with DAT and DATT, together with controlled drying conditions, influences the chain organization of gelatin-based materials.
Functionalization with DAT and DATT led to a drastic reduction of helical renaturation, which could be controlled more finely by the applied drying conditions. The properties of the materials could thus be influenced by two independent methods. Composite materials of DAT- and DATT-functionalized gelatins with hydroxyapatite (HAp) show a drastic reduction of the degree of swelling. In tensile tests and rheological measurements, the composites equilibrated in water had increased Young's moduli (from 200 kPa up to 2 MPa) and tensile strengths (from 57 kPa up to 1.1 MPa) compared to the natural polymer matrix, without the elongation at break being affected. Furthermore, an increase in the thermal stability of the networks from 40 °C to 85 °C could be demonstrated. The differences in behaviour between the functionalized gelatins and pure gelatin as matrix suggest an additional stabilizing bond between the incorporated aromatic groups and the hydroxyapatite.}, language = {en} } @phdthesis{Schattauer2010, author = {Schattauer, Sylvia}, title = {Hybride D{\"u}nnschicht-Solarzellen aus mesopor{\"o}sem Titandioxid und konjugierten Polymeren}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52619}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The aim of this work is the investigation of the active components and their interactions in partially organic hybrid solar cells, which consist of a thin titanium dioxide layer combined with a thin polymer layer. The efficiency of such hybrid solar cells is determined by the light absorption in the polymer, the dissociation of the generated excitons at the active interface between TiO2 and polymer, and the generation and extraction of free charge carriers. To optimize the solar cells, the fundamental physical interactions between the materials used as well as the influence of various fabrication parameters were investigated. Among other things, questions concerning the optimal choice of materials and preparation conditions were answered, and fundamental influences such as layer morphology and polymer infiltration were examined more closely. First, a selection for use in hybrid solar cells was made from differently prepared titanium dioxide (acceptor layer); the criteria were the differing morphology resulting from surface texture, film structure and crystallinity, and the solar-cell properties resulting from these. For the subsequent investigations, mesoporous TiO2 films from a new nanoparticle synthesis, which allows crystalline particles to be produced already during synthesis, were used as the electron acceptor, and conjugated polymers based on poly(p-phenylene vinylene) (PPV) or on thiophene were used as the donor material. Thermal treatment of the TiO2 layers causes a temperature-dependent change in morphology, but not in crystal structure; the effects on the solar-cell properties were documented and discussed. In order to exploit the advantage of the nanoparticle synthesis, namely the formation of crystalline TiO2 particles at low temperatures, first experiments on UV crosslinking were carried out. Besides the texture of the oxide layer, the influence of the polymer morphology, governed by solvent variation and annealing temperature, was also investigated. It could be shown that, among other factors,
die Viskosit{\"a}t der Polymerl{\"o}sung die Infiltration in die TiO2-Schicht und dadurch die Effizienz der Solarzelle beeinflusst. Ein weiterer Ansatz zur Erh{\"o}hung der Effizienz ist die Entwicklung neuer lochleitender Polymere, welche m{\"o}glichst {\"u}ber einen weiten spektralen Bereich Licht absorbieren und an die Bandl{\"u}cke des TiO2 angepasst sind. Hierzu wurden einige neuartige Konzepte, z.B. die Kombination von Thiophen- und Phenyl-Einheiten n{\"a}her untersucht. Auch wurde die Sensibilisierung der Titandioxidschicht in Anlehnung an die h{\"o}heren Effizienzen der Farbstoffzellen in Betracht gezogen. Zusammenfassend konnten im Rahmen dieser Arbeit wichtige Einflussparameter auf die Funktion hybrider Solarzellen identifiziert und z.T. n{\"a}her diskutiert werden. F{\"u}r einige limitierende Faktoren wurden Konzepte zur Verbesserung bzw. Vermeidung vorgestellt.}, language = {de} } @phdthesis{Poltrock2010, author = {Poltrock, Silvana}, title = {About the relation between implicit Theory of Mind \& the comprehension of complement sentences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52293}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Previous studies on the relation between language and social cognition have shown that children's mastery of embedded sentential complements plays a causal role for the development of a Theory of Mind (ToM). Children start to succeed on complementation tasks in which they are required to report the content of an embedded clause in the second half of the fourth year. Traditional ToM tasks test the child's ability to predict that a person who is holding a false belief (FB) about a situation will act "falsely". In these task, children do not represent FBs until the age of 4 years. According the linguistic determinism hypothesis, only the unique syntax of complement sentences provides the format for representing FBs. However, experiments measuring children's looking behavior instead of their explicit predictions provided evidence that already 2-year olds possess an implicit ToM. This dissertation examined the question of whether there is an interrelation also between implicit ToM and the comprehension of complement sentences in typically developing German preschoolers. Two studies were conducted. In a correlational study (Study 1 ), 3-year-old children's performance on a traditional (explicit) FB task, on an implicit FB task and on language tasks measuring children's comprehension of tensed sentential complements were collected and tested for their interdependence. Eye-tracking methodology was used to assess implicit ToM by measuring participants' spontaneous anticipatory eye movements while they were watching FB movies. Two central findings emerged. First, predictive looking (implicit ToM) was not correlated with complement mastery, although both measures were associated with explicit FB task performance. This pattern of results suggests that explicit, but not implicit ToM is language dependent. Second, as a group, 3-year-olds did not display implicit FB understanding. That is, previous findings on a precocious reasoning ability could not be replicated. This indicates that the characteristics of predictive looking tasks play a role for the elicitation of implicit FB understanding as the current task was completely nonverbal and as complex as traditional FB tasks. 
Study 2 took a methodological approach, investigating whether children display an earlier comprehension of sentential complements when the same means of measurement is used as in experimental tasks tapping implicit ToM, namely anticipatory looking. Two experiments were conducted. 3-year-olds were confronted either with a complement sentence expressing the protagonist's FB (Exp. 1) or with a complex sentence expressing the protagonist's belief without giving any information about the truth or falsity of that belief (Exp. 2). Afterwards, their expectations about the protagonist's future behavior were measured. Overall, the implicit measures revealed no considerably earlier understanding of sentential complementation. Whereas 3-year-olds did not display a comprehension of complex sentences if these embedded a false proposition, children from 3;9 years on were proficient in processing complement sentences if the truth value of the embedded proposition could not be evaluated. This pattern of results suggests that (1) the linguistic expression of a person's FB does not elicit implicit FB understanding and that (2) the assessment of the purely syntactic understanding of complement sentences is affected by competing reality information. In conclusion, this dissertation found no evidence that implicit ToM is related to the comprehension of sentential complementation. The findings suggest that implicit ToM might be based on nonlinguistic processes. Results are discussed in the light of recently proposed dual-process models that assume two cognitive mechanisms accounting for different levels of ToM task performance.}, language = {en} } @phdthesis{Paffhausen2010, author = {Paffhausen, Peter}, title = {Entscheidung {\"u}ber eine {\"O}ffentlich Private Partnerschaft : Empfehlungen f{\"u}r kommunale Entscheidungstr{\"a}ger beim Eingehen einer institutionellen {\"O}ffentlich Privaten Partnerschaft}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52435}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Public-private partnerships (in German: {\"O}ffentlich Private Partnerschaften, {\"O}PPs) have attained considerable importance in Germany over the last ten to fifteen years, and a further increase in their importance is to be expected in view of the financial crisis of the municipalities. For PPPs to deliver the benefits desired by the public sector, such as relief of the public budget or efficiency gains, they should be accompanied actively and prudently both in advance and in day-to-day business. This is the starting point for the topic of this dissertation, together with the recognition that so far no comprehensively grounded and systematic studies exist that connect the practical experience gained with PPPs to applicable theories and derive decision aids for public actors from them. Owing to the various possible forms, the topic was narrowed down to institutional PPPs at the municipal level. The study begins with a treatment of the basics of PPPs in order to create a general understanding of the subject. After the term is explained and the characteristics of PPPs are worked out, contractual and institutional PPPs are distinguished from one another.
Daraufhin werden m{\"o}gliche Motive der {\"o}ffentlichen und privaten Seite beim Eingehen einer solchen Partnerschaft aufgef{\"u}hrt sowie erste m{\"o}gliche Chancen und Risiken skizziert. Im Anschluss erfolgt mit Hilfe der wissenschaftlichen Theorie des Neuen Institutionalismus eine vertiefende Analyse zu institutionellen {\"O}PPs. Dabei schließt sich die Dissertation an die von Mayntz und Scharpf vorgenommene Einteilung in einen {\"o}konomischen, (organisations-) soziologischen und politikwissenschaftlichen Neo-Institutionalismus an. Der Neue {\"O}konomische Institutionalismus wurde anhand der drei Teillehren Transaktionskostentheorie, Prinzipal-Agent-Theorie und Theorie der Verf{\"u}gungsrechte untersucht. Zun{\"a}chst werden theoretische Erkenntnisse zu den einzelnen Theorien herausgearbeitet und erste Schlussfolgerungen f{\"u}r institutionelle {\"O}PPs gezogen. Daraus werden nachfolgend Untersuchungskriterien in Form von Fragestellungen f{\"u}r den sp{\"a}teren Fallstudienvergleich entwickelt. Nach Abschluss des Theorieteils erfolgt eine Betrachtung institutioneller {\"O}PPs aus realer empirischer Sicht. Hierzu werden Fallstudien vorgestellt und an den Untersuchungskriterien, welche aus den einzelnen Theorien abgeleitet wurden, gespiegelt. Zuerst werden recherchierte Fallstudien analysiert, beginnend mit den Teilprivatisierungen der Stadtentw{\"a}sserung Dresden GmbH und der Stadtwerke G{\"o}rlitz AG, bei denen sich die Zusammenarbeit wohl positiv entwickelt. Als Negativbeispiel wird dann auf die Privatisierung der Wasserversorgung von Grenoble und ihre sp{\"a}tere Rekommunalisierung eingegangen. Im folgenden Schritt werden Fallstudien aus den realen Erfahrungen des Verfassers diskutiert. Hierbei bildet die Teilprivatisierung und anschließende Rekommunalisierung des Wasserbetriebes in Potsdam den Schwerpunkt. Erg{\"a}nzt wird dies durch die Darstellung der positiven Zusammenarbeit mit dem privaten Gesellschafter bei der Energie und Wasser Potsdam GmbH. Abschließend werden die anf{\"a}nglichen Probleme zwischen Kommune und Privat bei der teilprivatisierten STEP Stadtentsorgung Potsdam untersucht und aufgezeigt, wie die Partnerschaft zum Vorteil der {\"o}ffentlichen Seite ver{\"a}ndert wurde. Aus dem Vergleich von Theorie und Praxis konnten wissenschaftlich fundierte Schlussfolgerungen f{\"u}r institutionelle {\"O}PPs gezogen und Erfolgsfaktoren f{\"u}r das Gelingen einer solchen Kooperation abgeleitet werden. Die gewonnenen Erkenntnisse werden in Form von Thesen zusammengefasst und dienen als Basis f{\"u}r die Ableitung von Handlungsempfehlungen f{\"u}r kommunale Akteure beim Eingehen einer institutionellen {\"O}PP. Zu Beginn erfolgt eine Darstellung der Empfehlungen, die sich aus den Untersuchungskriterien der jeweiligen Theorien ergeben haben. Nachfolgend wird diese Betrachtung erweitert, indem n{\"a}her auf die wesentlichen Phasen des Entscheidungsfindungsprozesses eingegangen und eine Untersetzung dieser Phasen mit den erarbeiteten Handlungsempfehlungen vorgenommen wird. Auf diese Weise kann den kommunalen Entscheidungstr{\"a}gern eine sehr praxisnahe Hilfestellung gegeben werden. Insgesamt betrachtet, geht aus der Dissertation ein umfangreicher, fundierter und sehr praxisrelevanter Leitfaden hervor, der wichtige Anhaltspunkte f{\"u}r das Eingehen einer institutionellen {\"O}PP im kommunalen Bereich gibt. 
From the mirroring of theory and practice, valuable indications are derived, making clear in particular at which points the public side should safeguard its interests. Moreover, the insights gained can sensitize municipal decision-makers and sharpen their eye for the individual case. Ultimately, this creates important preconditions for leading such an undertaking to success.}, language = {de} }