@phdthesis{Winkler2013, author = {Winkler, Henning}, title = {Synthese von thermoplastisch verarbeitbaren Fetts{\"a}ure-Acylderivaten der St{\"a}rke und Proteine}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71089}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In den vergangenen Jahren wurden stetig wachsende Produktionskapazit{\"a}ten von Biokunststoffen aus nachwachsenden Rohstoffen verzeichnet. Trotz großer Produktionskapazit{\"a}ten und einem geeigneten Eigenschaftsprofil findet St{\"a}rke nur als hydrophile, mit Weichmachern verarbeitete thermoplastische St{\"a}rke (TPS) in Form von Blends mit z. B. Polyestern Anwendung. Gleiches gilt f{\"u}r Kunststoffe auf Proteinbasis. Die vorliegende Arbeit hat die Entwicklung von Biokunststoffen auf St{\"a}rkebasis zum Ziel, welche ohne externe Weichmacher thermoplastisch verarbeitbar und hydrophob sind sowie ein mechanisches Eigenschaftsprofil aufweisen, welches ein Potenzial zur Herstellung von Materialien f{\"u}r eine Anwendung als Verpackungsmittel bietet. Um die Rohstoffbasis f{\"u}r Biokunststoffe zu erweitern, soll das erarbeitete Konzept auf zwei industriell verf{\"u}gbare Proteintypen, Zein und Molkenproteinisolat (WPI), {\"u}bertragen werden. Als geeignete Materialklasse wurden Fetts{\"a}ureester der St{\"a}rke herausgearbeitet. Zun{\"a}chst fand ein Vergleich der S{\"a}urechlorid-Veresterung und der Umesterung von Fetts{\"a}urevinylestern statt, woraus letztere als geeignetere Methode hervorging. Durch Variation der Reaktionsparameter konnte diese optimiert und auf eine Serie der Fetts{\"a}urevinylester von Butanoat bis Stearat f{\"u}r DS-Werte bis zu 2,2-2,6 angewandt werden. M{\"o}glich war somit eine systematische Studie unter Variation der veresterten Fetts{\"a}ure sowie des Substitutionsgrades (DS). 
S{\"a}mtliche Produkte mit einem DS ab 1,5 wiesen eine ausgepr{\"a}gte L{\"o}slichkeit in organischen L{\"o}sungsmitteln auf, wodurch sowohl die Aufnahme von NMR-Spektren als auch Molmassenbestimmung mittels Gr{\"o}ßenausschlusschromatographie mit gekoppelter Mehrwinkel-Laserlichtstreuung (GPC-MALLS) m{\"o}glich waren. Durch dynamische Lichtstreuung (DLS) wurde das L{\"o}slichkeitsverhalten veranschaulicht. S{\"a}mtliche Produkte konnten zu Filmen verarbeitet werden, wobei Materialien mit DS 1,5-1,7 hohe Zugfestigkeiten (bis zu 42 MPa) und Elastizit{\"a}tsmodule (bis 1390 MPa) aufwiesen. Insbesondere St{\"a}rkehexanoat mit DS <2 sowie St{\"a}rkebutanoat mit DS >2 hatten ein mechanisches Eigenschaftsprofil, welches insbesondere in Bezug auf die Festigkeit/Steifigkeit vergleichbar mit Verpackungsmaterialien wie Polyethylen war (Zugfestigkeit: 15-32 MPa, E-Modul: 300-1300 MPa). Zugfestigkeit und Elastizit{\"a}tsmodul nahmen mit steigender Kettenl{\"a}nge der veresterten Fetts{\"a}ure ab. Ester l{\"a}ngerkettiger Fetts{\"a}uren (C16-C18) waren spr{\"o}de. {\"U}ber Weitwinkel-R{\"o}ntgenstreuung (WAXS) und Infrarotspektroskopie (ATR-FTIR) konnte der Verlauf der Festigkeiten mit einer zunehmenden Distanz der St{\"a}rke im Material begr{\"u}ndet werden. Es konnten von DS und Kettenl{\"a}nge abh{\"a}ngige Glas{\"u}berg{\"a}nge detektiert werden, die kristallinen Strukturen der langkettigen Fetts{\"a}uren zeigten einen Schmelzpeak. Die Hydrophobie der Filme wurde anhand von Kontaktwinkeln >95° gegen Wasser dargestellt. Blends mit biobasierten Polyterpenen sowie den in der Arbeit hergestellten Zein-Acylderivaten erm{\"o}glichten eine weitere Verbesserung der Zugfestigkeit bzw. des Elastizit{\"a}tsmoduls hochsubstituierter Produkte. Eine thermoplastische Verarbeitung mittels Spritzgießen war sowohl f{\"u}r Produkte mit hohem als auch mittlerem DS-Wert ohne jeglichen Zusatz von Weichmachern m{\"o}glich. Es entstanden homogene, transparente Pr{\"u}fst{\"a}be. 
Untersuchungen der H{\"a}rte ergaben auch hier f{\"u}r St{\"a}rkehexanoat und -butanoat mit Polyethylen vergleichbare Werte. Ausgew{\"a}hlte Produkte wurden zu Fasern nach dem Schmelzspinnverfahren verarbeitet. Hierbei wurden insbesondere f{\"u}r hochsubstituierte Derivate homogene Fasern erstellt, welche im Vergleich zur Gießfolie signifikant h{\"o}here Zugfestigkeiten aufwiesen. St{\"a}rkeester mit mittlerem DS ließen sich ebenfalls verarbeiten. Zun{\"a}chst wurden f{\"u}r eine {\"U}bertragung des Konzeptes auf die Proteine Zein und WPI verschiedene Synthesemethoden verglichen. Die Veresterung mit S{\"a}urechloriden ergab hierbei die h{\"o}chsten Werte. Im Hinblick auf eine gute L{\"o}slichkeit in organischen L{\"o}sungsmitteln wurde f{\"u}r WPI die Veresterung mit Carbonyldiimidazol (CDI)-aktivierten Fetts{\"a}uren in DMSO und f{\"u}r Zein die Veresterung mit S{\"a}urechloriden in Pyridin bevorzugt. Es stellte sich heraus, dass acyliertes WPI zwar hydrophob, jedoch ohne Weichmacher nicht thermoplastisch verarbeitet werden konnte. Die Erstellung von Gießfolien f{\"u}hrte zu Spr{\"o}dbruchverhalten. Unter Zugabe der biobasierten {\"O}ls{\"a}ure wurde die Anwendung von acyliertem WPI als thermoplastischer Filler z. B. in Blends mit St{\"a}rkeestern dargestellt. Im Gegensatz hierzu zeigte acyliertes Zein Glas{\"u}berg{\"a}nge <100 °C bei ausreichender Stabilit{\"a}t (150-200 °C). Zeinoleat konnte ohne Weichmacher zu einer transparenten Gießfolie verarbeitet werden. S{\"a}mtliche Derivate erwiesen sich als ausgepr{\"a}gt hydrophob. 
Zeinoleat konnte {\"u}ber das Schmelzspinnverfahren zu thermoplastischen Fasern verarbeitet werden.}, language = {de} } @phdthesis{Wechakama2013, author = {Wechakama, Maneenate}, title = {Multi-messenger constraints and pressure from dark matter annihilation into electron-positron pairs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67401}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Despite striking evidence for the existence of dark matter from astrophysical observations, dark matter has still escaped any direct or indirect detection until today. Therefore a proof for its existence and the revelation of its nature belongs to one of the most intriguing challenges of nowadays cosmology and particle physics. The present work tries to investigate the nature of dark matter through indirect signatures from dark matter annihilation into electron-positron pairs in two different ways, pressure from dark matter annihilation and multi-messenger constraints on the dark matter annihilation cross-section. We focus on dark matter annihilation into electron-positron pairs and adopt a model-independent approach, where all the electrons and positrons are injected with the same initial energy E_0 ~ m_dm*c^2. The propagation of these particles is determined by solving the diffusion-loss equation, considering inverse Compton scattering, synchrotron radiation, Coulomb collisions, bremsstrahlung, and ionization. The first part of this work, focusing on pressure from dark matter annihilation, demonstrates that dark matter annihilation into electron-positron pairs may affect the observed rotation curve by a significant amount. The injection rate of this calculation is constrained by INTEGRAL, Fermi, and H.E.S.S. data. The pressure of the relativistic electron-positron gas is computed from the energy spectrum predicted by the diffusion-loss equation. 
For values of the gas density and magnetic field that are representative of the Milky Way, it is estimated that the pressure gradients are strong enough to balance gravity in the central parts if E_0 < 1 GeV. The exact value depends somewhat on the astrophysical parameters, and it changes dramatically with the slope of the dark matter density profile. For very steep slopes, as those expected from adiabatic contraction, the rotation curves of spiral galaxies would be affected on kiloparsec scales for most values of E_0. By comparing the predicted rotation curves with observations of dwarf and low surface brightness galaxies, we show that the pressure from dark matter annihilation may improve the agreement between theory and observations in some cases, but it also imposes severe constraints on the model parameters (most notably, the inner slope of the halo density profile, as well as the mass and the annihilation cross-section of dark matter particles into electron-positron pairs). In the second part, upper limits on the dark matter annihilation cross-section into electron-positron pairs are obtained by combining observed data at different wavelengths (from Haslam, WMAP, and Fermi all-sky intensity maps) with recent measurements of the electron and positron spectra in the solar neighbourhood by PAMELA, Fermi, and H.E.S.S.. We consider synchrotron emission in the radio and microwave bands, as well as inverse Compton scattering and final-state radiation at gamma-ray energies. For most values of the model parameters, the tightest constraints are imposed by the local positron spectrum and synchrotron emission from the central regions of the Galaxy. According to our results, the annihilation cross-section should not be higher than the canonical value for a thermal relic if the mass of the dark matter candidate is smaller than a few GeV. 
In addition, we also derive a stringent upper limit on the inner logarithmic slope $\alpha$ of the density profile of the Milky Way dark matter halo ($\alpha < 1$ if $m_{dm} < 5$ GeV, $\alpha < 1.3$ if $m_{dm} < 100$ GeV and $\alpha < 1.5$ if $m_{dm} < 2$ TeV) assuming a dark matter annihilation cross-section into electron-positron pairs ($\sigma v$) = $3 \times 10^{-26}$ cm$^3$ s$^{-1}$, as predicted for thermal relics from the big bang.}, language = {en} } @phdthesis{Vogel2013, author = {Vogel, Kristin}, title = {Applications of {Bayesian} networks in natural hazard assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69777}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Even though quite different in occurrence and consequences, from a modeling perspective many natural hazards share similar properties and challenges. Their complex nature as well as lacking knowledge about their driving forces and potential effects make their analysis demanding: uncertainty about the modeling framework, inaccurate or incomplete event observations and the intrinsic randomness of the natural phenomenon add up to different interacting layers of uncertainty, which require a careful handling. Nevertheless deterministic approaches are still widely used in natural hazard assessments, holding the risk of underestimating the hazard with disastrous effects. The all-round probabilistic framework of Bayesian networks constitutes an attractive alternative. In contrast to deterministic proceedings, it treats response variables as well as explanatory variables as random variables making no difference between input and output variables. Using a graphical representation Bayesian networks encode the dependency relations between the variables in a directed acyclic graph: variables are represented as nodes and (in-)dependencies between variables as (missing) edges between the nodes. 
The joint distribution of all variables can thus be described by decomposing it, according to the depicted independences, into a product of local conditional probability distributions, which are defined by the parameters of the Bayesian network. In the framework of this thesis the Bayesian network approach is applied to different natural hazard domains (i.e. seismic hazard, flood damage and landslide assessments). Learning the network structure and parameters from data, Bayesian networks reveal relevant dependency relations between the included variables and help to gain knowledge about the underlying processes. The problem of Bayesian network learning is cast in a Bayesian framework, considering the network structure and parameters as random variables itself and searching for the most likely combination of both, which corresponds to the maximum a posteriori (MAP score) of their joint distribution given the observed data. Although well studied in theory the learning of Bayesian networks based on real-world data is usually not straight forward and requires an adoption of existing algorithms. Typically arising problems are the handling of continuous variables, incomplete observations and the interaction of both. Working with continuous distributions requires assumptions about the allowed families of distributions. To "let the data speak" and avoid wrong assumptions, continuous variables are instead discretized here, thus allowing for a completely data-driven and distribution-free learning. An extension of the MAP score, considering the discretization as random variable as well, is developed for an automatic multivariate discretization, that takes interactions between the variables into account. The discretization process is nested into the network learning and requires several iterations. Having to face incomplete observations on top, this may pose a computational burden. Iterative proceedings for missing value estimation become quickly infeasible. 
A more efficient albeit approximate method is used instead, estimating the missing values based only on the observations of variables directly interacting with the missing variable. Moreover natural hazard assessments often have a primary interest in a certain target variable. The discretization learned for this variable does not always have the required resolution for a good prediction performance. Finer resolutions for (conditional) continuous distributions are achieved with continuous approximations subsequent to the Bayesian network learning, using kernel density estimations or mixtures of truncated exponential functions. All our proceedings are completely data-driven. We thus avoid assumptions that require expert knowledge and instead provide domain independent solutions, that are applicable not only in other natural hazard assessments, but in a variety of domains struggling with uncertainties.}, language = {en} } @phdthesis{Uhlemann2013, author = {Uhlemann, Steffi}, title = {Understanding trans-basin floods in Germany : data, information and knowledge}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68868}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Large Central European flood events of the past have demonstrated that flooding can affect several river basins at the same time leading to catastrophic economic and humanitarian losses that can stretch emergency resources beyond planned levels of service. For Germany, the spatial coherence of flooding, the contributing processes and the role of trans-basin floods for a national risk assessment is largely unknown and analysis is limited by a lack of systematic data, information and knowledge on past events. This study investigates the frequency and intensity of trans-basin flood events in Germany. It evaluates the data and information basis on which knowledge about trans-basin floods can be generated in order to improve any future flood risk assessment. 
In particular, the study assesses whether flood documentations and related reports can provide a valuable data source for understanding trans-basin floods. An adaptive algorithm was developed that systematically captures trans-basin floods using series of mean daily discharge at a large number of sites of even time series length (1952-2002). It identifies the simultaneous occurrence of flood peaks based on the exceedance of an initial threshold of a 10 year flood at one location and consecutively pools all causally related, spatially and temporally lagged peak recordings at the other locations. A weighted cumulative index was developed that accounts for the spatial extent and the individual flood magnitudes within an event and allows quantifying the overall event severity. The parameters of the method were tested in a sensitivity analysis. An intensive study on sources and ways of information dissemination of flood-relevant publications in Germany was conducted. Based on the method of systematic reviews a strategic search approach was developed to identify relevant documentations for each of the 40 strongest trans-basin flood events. A novel framework for assessing the quality of event specific flood reports from a user's perspective was developed and validated by independent peers. The framework was designed to be generally applicable for any natural hazard type and assesses the quality of a document addressing accessibility as well as representational, contextual, and intrinsic dimensions of quality. The analysis of time-series of mean daily discharge resulted in the identification of 80 trans-basin flood events within the period 1952-2002 in Germany. The set is dominated by events that were recorded in the hydrological winter (64\%); 36\% occurred during the summer months. The occurrence of floods is characterised by a distinct clustering in time. 
Dividing the study period into two sub-periods, we find an increase in the percentage of winter events from 58\% in the first to 70.5\% in the second sub-period. Accordingly, we find a significant increase in the number of extreme trans-basin floods in the second sub-period. A large body of 186 flood relevant documentations was identified. For 87.5\% of the 40 strongest trans-basin floods in Germany at least one report has been found and for the most severe floods a substantial amount of documentation could be obtained. 80\% of the material can be considered grey literature (i.e. literature not controlled by commercial publishers). The results of the quality assessment show that the majority of flood event specific reports are of a good quality, i.e. they are well enough drafted, largely accurate and objective, and contain a substantial amount of information on the sources, pathways and receptors/consequences of the floods. The inclusion of this information in the process of knowledge building for flood risk assessment is recommended. Both the results as well as the data produced in this study are openly accessible and can be used for further research. The results of this study contribute to an improved spatial risk assessment in Germany. The identified set of trans-basin floods provides the basis for an assessment of the chance that flooding occurs simultaneously at a number of sites. 
The information obtained from flood event documentation can usefully supplement the analysis of the processes that govern flood risk.}, language = {en} } @phdthesis{Toele2013, author = {T{\"o}le, Jonas Claudius}, title = {{\"U}ber die Arc-catFISH-Methode als neues Werkzeug zur Charakterisierung der Geschmacksverarbeitung im Hirnstamm der Maus}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70491}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Intensive Forschung hat in den vergangenen Jahrzehnten zu einer sehr detaillierten Charakterisierung des Geschmackssystems der S{\"a}ugetiere gef{\"u}hrt. Dennoch sind mit den bislang eingesetzten Methoden wichtige Fragestellungen unbeantwortet geblieben. Eine dieser Fragen gilt der Unterscheidung von Bitterstoffen. Die Zahl der Substanzen, die f{\"u}r den Menschen bitter schmecken und in Tieren angeborenes Aversionsverhalten ausl{\"o}sen, geht in die Tausende. Diese Substanzen sind sowohl von der chemischen Struktur als auch von ihrer Wirkung auf den Organismus sehr verschieden. W{\"a}hrend viele Bitterstoffe potente Gifte darstellen, sind andere in den Mengen, die mit der Nahrung aufgenommen werden, harmlos oder haben sogar positive Effekte auf den K{\"o}rper. Zwischen diesen Gruppen unterscheiden zu k{\"o}nnen, w{\"a}re f{\"u}r ein Tier von Vorteil. Ein solcher Mechanismus ist jedoch bei S{\"a}ugetieren nicht bekannt. Das Ziel dieser Arbeit war die Untersuchung der Verarbeitung von Geschmacksinformation in der ersten Station der Geschmacksbahn im Mausgehirn, dem Nucleus tractus solitarii (NTS), mit besonderem Augenmerk auf der Frage nach der Diskriminierung verschiedener Bitterstoffe. Zu diesem Zweck wurde eine neue Untersuchungsmethode f{\"u}r das Geschmackssystem etabliert, die die Nachteile bereits verf{\"u}gbarer Methoden umgeht und ihre Vorteile kombiniert. 
Die Arc-catFISH-Methode (cellular compartment analysis of temporal activity by fluorescent in situ hybridization), die die Charakterisierung der Antwort großer Neuronengruppen auf zwei Stimuli erlaubt, wurde zur Untersuchung geschmacksverarbeitender Zellen im NTS angewandt. Im Zuge dieses Projekts wurde erstmals eine stimulusinduzierte Arc-Expression im NTS gezeigt. Die ersten Ergebnisse offenbarten, dass die Arc-Expression im NTS spezifisch nach Stimulation mit Bitterstoffen auftritt und sich die Arc exprimierenden Neurone vornehmlich im gustatorischen Teil des NTS befinden. Dies weist darauf hin, dass Arc-Expression ein Marker f{\"u}r bitterverarbeitende gustatorische Neurone im NTS ist. Nach zweimaliger Stimulation mit Bittersubstanzen konnten {\"u}berlappende, aber verschiedene Populationen von Neuronen beobachtet werden, die unterschiedlich auf die drei verwendeten Bittersubstanzen Cycloheximid, Chininhydrochlorid und Cucurbitacin I reagierten. Diese Neurone sind vermutlich an der Steuerung von Abwehrreflexen beteiligt und k{\"o}nnten so die Grundlage f{\"u}r divergentes Verhalten gegen{\"u}ber verschiedenen Bitterstoffen bilden.}, language = {de} } @phdthesis{Tyrallova2013, author = {Tyrallov{\'a}, Lucia}, title = {Automatisierte Objektidentifikation und Visualisierung terrestrischer Oberfl{\"a}chenformen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69268}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Die automatisierte Objektidentifikation stellt ein modernes Werkzeug in den Geoinformationswissenschaften dar (BLASCHKE et al., 2012). Um bei thematischen Kartierungen untereinander vergleichbare Ergebnisse zu erzielen, sollen aus Sicht der Geoinformatik Mittel f{\"u}r die Objektidentifikation eingesetzt werden. Anstelle von Feldarbeit werden deshalb in der vorliegenden Arbeit multispektrale Fernerkundungsdaten als Prim{\"a}rdaten verwendet. 
Konkrete nat{\"u}rliche Objekte werden GIS-gest{\"u}tzt und automatisiert {\"u}ber große Fl{\"a}chen und Objektdichten aus Prim{\"a}rdaten identifiziert und charakterisiert. Im Rahmen der vorliegenden Arbeit wird eine automatisierte Prozesskette zur Objektidentifikation konzipiert. Es werden neue Ans{\"a}tze und Konzepte der objektbasierten Identifikation von nat{\"u}rlichen isolierten terrestrischen Oberfl{\"a}chenformen entwickelt und implementiert. Die Prozesskette basiert auf einem Konzept, das auf einem generischen Ansatz f{\"u}r automatisierte Objektidentifikation aufgebaut ist. Die Prozesskette kann anhand charakteristischer quantitativer Parameter angepasst und so umgesetzt werden, womit das Konzept der Objektidentifikation modular und skalierbar wird. Die modulbasierte Architektur erm{\"o}glicht den Einsatz sowohl einzelner Module als auch ihrer Kombination und m{\"o}glicher Erweiterungen. Die eingesetzte Methodik der Objektidentifikation und die daran anschließende Charakteristik der (geo)morphometrischen und morphologischen Parameter wird durch statistische Verfahren gest{\"u}tzt. Diese erm{\"o}glichen die Vergleichbarkeit von Objektparametern aus unterschiedlichen Stichproben. Mit Hilfe der Regressions- und Varianzanalyse werden Verh{\"a}ltnisse zwischen Objektparametern untersucht. Es werden funktionale Abh{\"a}ngigkeiten der Parameter analysiert, um die Objekte qualitativ zu beschreiben. Damit ist es m{\"o}glich, automatisiert berechnete Maße und Indizes der Objekte als quantitative Daten und Informationen zu erfassen und unterschiedliche Stichproben anzuwenden. Im Rahmen dieser Arbeit bilden Thermokarstseen die Grundlage f{\"u}r die Entwicklungen und als Beispiel sowie Datengrundlage f{\"u}r den Aufbau des Algorithmus und die Analyse. Die Geovisualisierung der multivariaten nat{\"u}rlichen Objekte wird f{\"u}r die Entwicklung eines besseren Verst{\"a}ndnisses der r{\"a}umlichen Relationen der Objekte eingesetzt. 
Kern der Geovisualisierung ist das Verkn{\"u}pfen von Visualisierungsmethoden mit karten{\"a}hnlichen Darstellungen.}, language = {de} } @phdthesis{Trapp2013, author = {Trapp, Matthias}, title = {Interactive rendering techniques for focus+context visualization of 3D geovirtual environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66824}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge. Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments (see Figure). In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: • The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in a single image in real time. 
The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. • The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. • The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. • The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage the available screen space, and reduces the overload of image contents. • The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception. 
The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for design and development of new applications and systems in the field of 3D geovirtual environments.}, language = {en} } @phdthesis{Thomas2013, author = {Thomas, Bj{\"o}rn Daniel}, title = {Analysis and management of low flows in small catchments of Brandenburg, Germany}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69247}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Water management and environmental protection is vulnerable to extreme low flows during streamflow droughts. During the last decades, in most rivers of Central Europe summer runoff and low flows have decreased. Discharge projections agree that future decrease in runoff is likely for catchments in Brandenburg, Germany. Depending on the first-order controls on low flows, different adaption measures are expected to be appropriate. Small catchments were analyzed because they are expected to be more vulnerable to a changing climate than larger rivers. They are mainly headwater catchments with smaller ground water storage. Local characteristics are more important at this scale and can increase vulnerability. This thesis mutually evaluates potential adaption measures to sustain minimum runoff in small catchments of Brandenburg, Germany, and similarities of these catchments regarding low flows. The following guiding questions are addressed: (i) Which first-order controls on low flows and related time scales exist? (ii) Which are the differences between small catchments regarding low flow vulnerability? (iii) Which adaption measures to sustain minimum runoff in small catchments of Brandenburg are appropriate considering regional low flow patterns? 
Potential adaption measures to sustain minimum runoff during periods of low flows can be classified into three categories: (i) increase of groundwater recharge and subsequent baseflow by land use change, land management and artificial ground water recharge, (ii) increase of water storage with regulated outflow by reservoirs, lakes and wetland water management and (iii) regional low flow patterns have to be considered during planning of measures with multiple purposes (urban water management, waste water recycling and inter-basin water transfer). The question remained whether water management of areas with shallow groundwater tables can efficiently sustain minimum runoff. Exemplary, water management scenarios of a ditch irrigated area were evaluated using the model Hydrus-2D. Increasing antecedent water levels and stopping ditch irrigation during periods of low flows increased fluxes from the pasture to the stream, but storage was depleted faster during the summer months due to higher evapotranspiration. Fluxes from this approx. 1 km long pasture with an area of approx. 13 ha ranged from 0.3 to 0.7 l\s depending on scenario. This demonstrates that numerous of such small decentralized measures are necessary to sustain minimum runoff in meso-scale catchments. Differences in the low flow risk of catchments and meteorological low flow predictors were analyzed. A principal component analysis was applied on daily discharge of 37 catchments between 1991 and 2006. Flows decreased more in Southeast Brandenburg according to meteorological forcing. Low flow risk was highest in a region east of Berlin because of intersection of a more continental climate and the specific geohydrology. In these catchments, flows decreased faster during summer and the low flow period was prolonged. A non-linear support vector machine regression was applied to iteratively select meteorological predictors for annual 30-day minimum runoff in 16 catchments between 1965 and 2006. 
The potential evapotranspiration sum of the previous 48 months was the most important predictor (r²=0.28). The potential evapotranspiration of the previous 3 months and the precipitation of the previous 3 months and last year increased model performance (r²=0.49, including all four predictors). Model performance was higher for catchments with low yield and more damped runoff. In catchments with high low flow risk, explanatory power of long term potential evapotranspiration was high. Catchments with a high low flow risk as well as catchments with a considerable decrease in flows in southeast Brandenburg have the highest demand for adaption. Measures increasing groundwater recharge are to be preferred. Catchments with high low flow risk showed relatively deep and decreasing groundwater heads allowing increased groundwater recharge at recharge areas with higher altitude away from the streams. Low flows are expected to stay low or decrease even further because long term potential evapotranspiration was the most important low flow predictor and is projected to increase during climate change. Differences in low flow risk and runoff dynamics between catchments have to be considered for management and planning of measures which do not only have the task to sustain minimum runoff.}, language = {en} } @phdthesis{Theves2013, author = {Theves, Matthias}, title = {Bacterial motility and growth in open and confined environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70313}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In the presence of a solid-liquid or liquid-air interface, bacteria can choose between a planktonic and a sessile lifestyle. Depending on environmental conditions, cells swimming in close proximity to the interface can irreversibly attach to the surface and grow into three-dimensional aggregates where the majority of cells is sessile and embedded in an extracellular polymer matrix (biofilm). 
We used microfluidic tools and time lapse microscopy to perform experiments with the polarly flagellated soil bacterium Pseudomonas putida (P. putida), a bacterial species that is able to form biofilms. We analyzed individual trajectories of swimming cells, both in the bulk fluid and in close proximity to a glass-liquid interface. Additionally, surface related growth during the early phase of biofilm formation was investigated. In the bulk fluid, P.putida shows a typical bacterial swimming pattern of alternating periods of persistent displacement along a line (runs) and fast reorientation events (turns) and cells swim with an average speed around 24 micrometer per second. We found that the distribution of turning angles is bimodal with a dominating peak around 180 degrees. In approximately six out of ten turning events, the cell reverses its swimming direction. In addition, our analysis revealed that upon a reversal, the cell systematically changes its swimming speed by a factor of two on average. Based on the experimentally observed values of mean runtime and rotational diffusion, we presented a model to describe the spreading of a population of cells by a run-reverse random walker with alternating speeds. We successfully recover the mean square displacement and, by an extended version of the model, also the negative dip in the directional autocorrelation function as observed in the experiments. The analytical solution of the model demonstrates that alternating speeds enhance a cells ability to explore its environment as compared to a bacterium moving at a constant intermediate speed. As compared to the bulk fluid, for cells swimming near a solid boundary we observed an increase in swimming speed at distances below d= 5 micrometer and an increase in average angular velocity at distances below d= 4 micrometer. 
While the average speed was maximal with an increase around 15\% at a distance of d= 3 micrometer, the angular velocity was highest in closest proximity to the boundary at d=1 micrometer with an increase around 90\% as compared to the bulk fluid. To investigate the swimming behavior in a confinement between two solid boundaries, we developed an experimental setup to acquire three-dimensional trajectories using a piezo driven objective mount coupled to a high speed camera. Results on speed and angular velocity were consistent with motility statistics in the presence of a single boundary. Additionally, an analysis of the probability density revealed that a majority of cells accumulated near the upper and lower boundaries of the microchannel. The increase in angular velocity is consistent with previous studies, where bacteria near a solid boundary were shown to swim on circular trajectories, an effect which can be attributed to a wall induced torque. The increase in speed at a distance of several times the size of the cell body, however, cannot be explained by existing theories which either consider the drag increase on cell body and flagellum near a boundary (resistive force theory) or model the swimming microorganism by a multipole expansion to account for the flow field interaction between cell and boundary. An accumulation of swimming bacteria near solid boundaries has been observed in similar experiments. Our results confirm that collisions with the surface play an important role and hydrodynamic interactions alone cannot explain the steady-state accumulation of cells near the channel walls. Furthermore, we monitored the number growth of cells in the microchannel under medium rich conditions. We observed that, after a lag time, initially isolated cells at the surface started to grow by division into colonies of increasing size, while coexisting with a comparable smaller number of swimming cells. 
After 5:50 hours, we observed a sudden jump in the number of swimming cells, which was accompanied by a breakup of bigger clusters on the surface. After approximately 30 minutes where planktonic cells dominated in the microchannel, individual swimming cells reattached to the surface. We interpret this process as an emigration and recolonization event. A number of complementary experiments were performed to investigate the influence of collective effects or a depletion of the growth medium on the transition. Similar to earlier observations on another bacterium from the same family we found that the release of cells to the swimming phase is most likely the result of an individual adaption process, where syntheses of proteins for flagellar motility are upregulated after a number of division cycles at the surface.}, language = {en} } @phdthesis{Thalmann2013, author = {Thalmann, Sophie}, title = {Korrelation zwischen der genetischen und der funktionellen Diversit{\"a}t humaner Bitterrezeptoren}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66845}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Der Mensch besitzt ~25 funktionelle Bitterrezeptoren (TAS2R), die f{\"u}r die Wahrnehmung potenziell toxischer Substanzen in der Nahrung verantwortlich sind. Aufgrund der großen genetischen Variabilit{\"a}t der TAS2R-Gene k{\"o}nnte es eine Vielzahl funktionell unterschiedlicher TAS2R-Haplotypen geben, die zu Unterschieden der Bitterwahrnehmung f{\"u}hren. Dies konnte bereits in funktionellen Analysen und sensorischen Studien f{\"u}r einzelne Bitterrezeptoren gezeigt werden. In dieser Arbeit wurden die h{\"a}ufigsten Haplotypen aller 25 Bitterrezeptoren verschiedener Ethnien funktionell charakterisiert. Das Ziel war eine umfassende Aussage {\"u}ber die funktionelle Diversit{\"a}t der TAS2Rs, die die molekulare Grundlage f{\"u}r individuelle Bitterwahrnehmung bildet, treffen zu k{\"o}nnen. 
Fehlende Varianten wurden aus genomischer DNA kloniert oder durch gezielte Mutagenese bereits vorhandener TAS2R-Konstrukte generiert. Die funktionelle Analyse erfolgte mittels Expression der TAS2R-Haplotypen in HEK293TG16gust44 Zellen und anschließenden Calcium-Imaging-Experimenten mit zwei bekannten Agonisten. Die Haplotypen der f{\"u}nf orphanen TAS2Rs wurden mit {\"u}ber hundert Bitterstoffen stimuliert. Durch die gelungene Deorphanisierung des TAS2R41 in dieser Arbeit, wurden f{\"u}r die 21 aktivierbaren TAS2Rs 36 funktionell-unterschiedliche Haplotypen identifiziert. Die tats{\"a}chliche funktionelle Vielfalt blieb jedoch deutlich hinter der genetischen Variabilit{\"a}t der TAS2Rs zur{\"u}ck. Neun Bitterrezeptoren wiesen funktionell homogene Haplotypen auf oder besaßen nur eine weltweit vorherrschende Variante. Funktionell heterogene Haplotypen wurden f{\"u}r zw{\"o}lf TAS2Rs identifiziert. Inaktive Varianten der Rezeptoren TAS2R9, TAS2R38 und TAS2R46 sollten die Wahrnehmung von Bitterstoffen wie Ofloxacin, Cnicin, Hydrocortison, Limonin, Parthenolid oder Strychnin beeinflussen. Unterschiedlich sensitive Varianten, besonders der Rezeptoren TAS2R47 und TAS2R49, sollten f{\"u}r Agonisten wie Absinthin, Amarogentin oder Cromolyn ebenfalls zu ph{\"a}notypischen Unterschieden f{\"u}hren. Wie f{\"u}r den TAS2R16 bereits gezeigt, traten Haplotypen des funktionell heterogenen TAS2R7 und TAS2R41 ethnien-spezifisch auf, was auf lokale Anpassung und verschiedene Ph{\"a}notypen hinweisen k{\"o}nnte. Weiterf{\"u}hrend muss nun eine Analyse der funktionell-variablen TAS2Rs in sensorischen Tests erfolgen, um ihre ph{\"a}notypische Relevanz zu pr{\"u}fen. Die Analyse der funktionsmodulierenden Aminos{\"a}urepositionen, z.Bsp. 
des TAS2R44, TAS2R47 oder TAS2R49, k{\"o}nnte weiterf{\"u}hrend zum besseren Verst{\"a}ndnis der Rezeptor-Ligand- und Rezeptor-G-Protein-Interaktion beitragen.}, language = {de} } @phdthesis{Takey2013, author = {Takey, Ali Said Ahmed}, title = {The XMM-Newton/SDSS galaxy cluster survey}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71229}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Galaxy clusters are the largest known gravitationally bound objects, their study is important for both an intrinsic understanding of their systems and an investigation of the large scale structure of the universe. The multi- component nature of galaxy clusters offers multiple observable signals across the electromagnetic spectrum. At X-ray wavelengths, galaxy clusters are simply identified as X-ray luminous, spatially extended, and extragalactic sources. X-ray observations offer the most powerful technique for constructing cluster catalogues. The main advantages of the X-ray cluster surveys are their excellent purity and completeness and the X-ray observables are tightly correlated with mass, which is indeed the most fundamental parameter of clusters. In my thesis I have conducted the 2XMMi/SDSS galaxy cluster survey, which is a serendipitous search for galaxy clusters based on the X-ray extended sources in the XMM-Newton Serendipitous Source Catalogue (2XMMi-DR3). The main aims of the survey are to identify new X-ray galaxy clusters, investigate their X-ray scaling relations, identify distant cluster candidates, and study the correlation of the X-ray and optical properties. The survey is constrained to those extended sources that are in the footprint of the Sloan Digital Sky Survey (SDSS) in order to be able to identify the optical counterparts as well as to measure their redshifts that are mandatory to measure their physical properties. 
The overlap area between the XMM-Newton fields and the SDSS-DR7 imaging, the latest SDSS data release at the start of the survey, is 210 deg^2. The survey comprises 1180 X-ray cluster candidates with at least 80 background-subtracted photon counts, which passed the quality control process. To measure the optical redshifts of the X-ray cluster candidates, I used three procedures: (i) cross-matching these candidates with the recent and largest optically selected cluster catalogues in the literature, which yielded the photometric redshifts of about a quarter of the X-ray cluster candidates. (ii) I developed a finding algorithm to search for overdensities of galaxies at the positions of the X-ray cluster candidates in the photometric redshift space and to measure their redshifts from the SDSS-DR8 data, which provided the photometric redshifts of 530 groups/clusters. (iii) I developed an algorithm to identify the cluster candidates associated with spectroscopically targeted Luminous Red Galaxies (LRGs) in the SDSS-DR9 and to measure the cluster spectroscopic redshift, which provided 324 groups and clusters with spectroscopic confirmation based on spectroscopic redshift of at least one LRG. In total, the optically confirmed cluster sample comprises 574 groups and clusters with redshifts (0.03 ≤ z ≤ 0.77), which is the largest X-ray selected cluster catalogue to date based on observations from the current X-ray observatories (XMM-Newton, Chandra, Suzaku, and Swift/XRT). Among the cluster sample, about 75 percent are newly X-ray discovered groups/clusters and 40 percent are new systems to the literature. To determine the X-ray properties of the optically confirmed cluster sample, I reduced and analysed their X-ray data in an automated way following the standard pipelines of processing the XMM-Newton data. In this analysis, I extracted the cluster spectra from EPIC(PN, MOS1, MOS2) images within an optimal aperture chosen to maximise the signal-to-noise ratio. 
The spectral fitting procedure provided the X-ray temperatures kT (0.5 - 7.5 keV) for 345 systems that have good quality X-ray data. For all the optically confirmed cluster sample, I measured the physical properties L500 (0.5 x 10^42 - 1.2 x 10^45 erg s-1 ) and M500 (1.1 x 10^13 - 4.9 x 10^14 M⊙) from an iterative procedure using published scaling relations. The present X-ray detected groups and clusters are in the low and intermediate luminosity regimes apart from a few luminous systems, thanks to the XMM-Newton sensitivity and the available XMM-Newton deep fields. The optically confirmed cluster sample with measurements of redshift and X-ray properties can be used for various astrophysical applications. As a first application, I investigated the LX - T relation for the first time based on a large cluster sample of 345 systems with X-ray spectroscopic parameters drawn from a single survey. The current sample includes groups and clusters with wide ranges of redshifts, temperatures, and luminosities. The slope of the relation is consistent with the published ones of nearby clusters with higher temperatures and luminosities. The derived relation is still much steeper than that predicted by self-similar evolution. I also investigated the evolution of the slope and the scatter of the LX - T relation with the cluster redshift. After excluding the low luminosity groups, I found no significant changes of the slope and the intrinsic scatter of the relation with redshift when dividing the sample into three redshift bins. When including the low luminosity groups in the low redshift subsample, I found its LX - T relation becomes flatter than the relation of the intermediate and high redshift subsamples. As a second application of the optically confirmed cluster sample from our ongoing survey, I investigated the correlation between the cluster X-ray and the optical parameters that have been determined in a homogeneous way. 
Firstly, I investigated the correlations between the BCG properties (absolute magnitude and optical luminosity) and the cluster global properties (redshift and mass). Secondly, I computed the richness and the optical luminosity within R500 of a nearby subsample (z ≤ 0.42, with a complete membership detection from the SDSS data) with measured X-ray temperatures from our survey. The relation between the estimated optical luminosity and richness is also presented. Finally, the correlation between the cluster optical properties (richness and luminosity) and the cluster global properties (X-ray luminosity, temperature, mass) is investigated.}, language = {en} } @phdthesis{Suetterlin2013, author = {S{\"u}tterlin, Martin}, title = {New inverse hydrogel opals as protein responsive sensors}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70179}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In this work, the development of temperature- and protein-responsive sensor materials based on biocompatible, inverse hydrogel opals (IHOs) is presented. With these materials, large biomolecules can be specifically recognised and the binding event visualised. The preparation of the IHOs was performed with a template process, for which monodisperse silica particles were vertically deposited onto glass slides as the first step. The obtained colloidal crystals with a thickness of 5 μm displayed opalescent reflections because of the uniform alignment of the colloids. As a second step, the template was embedded in a matrix consisting of biocompatible, thermoresponsive hydrogels. The comonomers were selected from the family of oligo(ethylene glycol)methacrylates. The monomer solution was injected into a polymerisation mould, which contained the colloidal crystals as a template. The space in-between the template particles was filled with the monomer solution and the hydrogel was cured via UV-polymerisation. 
The particles were chemically etched, which resulted in a porous inner structure. The uniform alignment of the pores and therefore the opalescent reflection were maintained, so these systems were denoted as inverse hydrogel opals. A pore diameter of several hundred nanometres as well as interconnections between the pores should facilitate a diffusion of bigger (bio)molecules, which was always a challenge in the presented systems until now. The copolymer composition was chosen to result in a hydrogel collapse over 35 °C. All hydrogels showed pronounced swelling in water below the critical temperature. The incorporation of a reactive monomer with hydroxyl groups ensured a potential coupling group for the introduction of recognition units for analytes, e.g. proteins. As a test system, biotin as a recognition unit for avidin was coupled to the IHO via polymer-analogous Steglich esterification. The amount of accessible biotin was quantified with a colorimetric binding assay. When avidin was added to the biotinylated IHO, the wavelength of the opalescent reflection was significantly shifted and therefore the binding event was visualised. This effect is based on the change in swelling behaviour of the hydrogel after binding of the hydrophilic avidin, which is amplified by the thermoresponsive nature of the hydrogel. A swelling or shrinking of the pores induces a change in distance of the crystal planes, which are responsible for the colour of the reflection. 
With these findings, the possibility of creating sensor materials or additional biomolecules in the size range of avidin is given.}, language = {en} } @phdthesis{Steinmetz2013, author = {Steinmetz, Nadine}, title = {Context-aware semantic analysis of video metadata}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70551}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Im Vergleich zu einer stichwortbasierten Suche erm{\"o}glicht die semantische Suche ein pr{\"a}ziseres und anspruchsvolleres Durchsuchen von (Web)-Dokumenten, weil durch die explizite Semantik Mehrdeutigkeiten von nat{\"u}rlicher Sprache vermieden und semantische Beziehungen in das Suchergebnis einbezogen werden k{\"o}nnen. Eine semantische, Entit{\"a}ten-basierte Suche geht von einer Anfrage mit festgelegter Bedeutung aus und liefert nur Dokumente, die mit dieser Entit{\"a}t annotiert sind als Suchergebnis. Die wichtigste Voraussetzung f{\"u}r eine Entit{\"a}ten-zentrierte Suche stellt die Annotation der Dokumente im Archiv mit Entit{\"a}ten und Kategorien dar. Textuelle Informationen werden analysiert und mit den entsprechenden Entit{\"a}ten und Kategorien versehen, um den Inhalt semantisch erschließen zu k{\"o}nnen. Eine manuelle Annotation erfordert Dom{\"a}nenwissen und ist sehr zeitaufwendig. Die semantische Annotation von Videodokumenten erfordert besondere Aufmerksamkeit, da inhaltsbasierte Metadaten von Videos aus verschiedenen Quellen stammen, verschiedene Eigenschaften und Zuverl{\"a}ssigkeiten besitzen und daher nicht wie Fließtext behandelt werden k{\"o}nnen. Die vorliegende Arbeit stellt einen semantischen Analyseprozess f{\"u}r Video-Metadaten vor. Die Eigenschaften der verschiedenen Metadatentypen werden analysiert und ein Konfidenzwert ermittelt. Dieser Wert spiegelt die Korrektheit und die wahrscheinliche Mehrdeutigkeit eines Metadatums wieder. 
Beginnend mit dem Metadatum mit dem h{\"o}chsten Konfidenzwert wird der Analyseprozess innerhalb eines Kontexts in absteigender Reihenfolge des Konfidenzwerts durchgef{\"u}hrt. Die bereits analysierten Metadaten dienen als Referenzpunkt f{\"u}r die weiteren Analysen. So kann eine m{\"o}glichst korrekte Analyse der heterogen strukturierten Daten eines Kontexts sichergestellt werden. Am Ende der Analyse eines Metadatums wird die f{\"u}r den Kontext relevanteste Entit{\"a}t aus einer Liste von Kandidaten identifiziert - das Metadatum wird disambiguiert. Hierf{\"u}r wurden verschiedene Disambiguierungsalgorithmen entwickelt, die Beschreibungstexte und semantische Beziehungen der Entit{\"a}tenkandidaten zum gegebenen Kontext in Betracht ziehen. Der Kontext f{\"u}r die Disambiguierung wird f{\"u}r jedes Metadatum anhand der Eigenschaften und Konfidenzwerte zusammengestellt. Der vorgestellte Analyseprozess ist an zwei Hypothesen angelehnt: Um die Analyseergebnisse verbessern zu k{\"o}nnen, sollten die Metadaten eines Kontexts in absteigender Reihenfolge ihres Konfidenzwertes verarbeitet werden und die Kontextgrenzen von Videometadaten sollten durch Segmentgrenzen definiert werden, um m{\"o}glichst Kontexte mit koh{\"a}rentem Inhalt zu erhalten. Durch ausf{\"u}hrliche Evaluationen konnten die gestellten Hypothesen best{\"a}tigt werden. Der Analyseprozess wurde gegen mehrere State-of-the-Art Methoden verglichen und erzielt verbesserte Ergebnisse in Bezug auf Recall und Precision, besonders f{\"u}r Metadaten, die aus weniger zuverl{\"a}ssigen Quellen stammen. 
Der Analyseprozess ist Teil eines Videoanalyse-Frameworks und wurde bereits erfolgreich in verschiedenen Projekten eingesetzt.}, language = {en} } @phdthesis{Slezak2013, author = {Slezak, Kathleen}, title = {Impact of intestinal bacteria on the anatomy and physiology of the intestinal tract in the PRM/Alf mouse model}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68946}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Introduction: Intestinal bacteria influence gut morphology by affecting epithelial cell proliferation, development of the lamina propria, villus length and crypt depth [1]. Gut microbiota-derived factors have been proposed to also play a role in the development of a 30 \% longer intestine, that is characteristic of PRM/Alf mice compared to other mouse strains [2, 3]. Polyamines and SCFAs produced by gut bacteria are important growth factors, which possibly influence mucosal morphology, in particular villus length and crypt depth and play a role in gut lengthening in the PRM/Alf mouse. However, experimental evidence is lacking. Aim: The objective of this work was to clarify the role of bacterially-produced polyamines on crypt depth, mucosa thickness and epithelial cell proliferation. For this purpose, C3H mice associated with a simplified human microbiota (SIHUMI) were compared with mice colonized with SIHUMI complemented by the polyamine-producing Fusobacterium varium (SIHUMI + Fv). In addition, the microbial impact on gut lengthening in PRM/Alf mice was characterized and the contribution of SCFAs and polyamines to this phenotype was examined. Results: SIHUMI + Fv mice exhibited an up to 1.7 fold higher intestinal polyamine concentration compared to SIHUMI mice, which was mainly due to increased putrescine concentrations. However, no differences were observed in crypt depth, mucosa thickness and epithelial proliferation. In PRM/Alf mice, the intestine of conventional mice was 8.5 \% longer compared to germfree mice. 
In contrast, intestinal lengths of C3H mice were similar, independent of the colonization status. The comparison of PRM/Alf and C3H mice, both associated with SIHUMI + Fv, demonstrated that PRM/Alf mice had a 35.9 \% longer intestine than C3H mice. However, intestinal SCFA and polyamine concentrations of PRM/Alf mice were similar or even lower, except N acetylcadaverine, which was 3.1-fold higher in PRM/Alf mice. When germfree PRM/Alf mice were associated with a complex PRM/Alf microbiota, the intestine was one quarter longer compared to PRM/Alf mice colonized with a C3H microbiota. This gut elongation correlated with levels of the polyamine N acetylspermine. Conclusion: The intestinal microbiota is able to influence intestinal length dependent on microbial composition and on the mouse genotype. Although SCFAs do not contribute to gut elongation, an influence of the polyamines N acetylcadaverine and N acetylspermine is conceivable. In addition, the study clearly demonstrated that bacterial putrescine does not influence gut morphology in C3H mice.}, language = {en} } @phdthesis{Serag2013, author = {Serag, Rabe Al Sayed Al Sayed}, title = {Motivationale und Volitionale Aspekte im Leistungssport im interkulturellen Vergleich zwischen Deutschland und {\"A}gypten : eine explorative Analyse am Beispiel Ringen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64199}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Kultur gibt den Menschen eine Orientierung. Sie machen darin ganz spezifische Erfahrungen. Hieraus entwickeln sich auch motivationale Orientierungen. Dadurch werden andere Erfahrungen gemacht, die Sportler k{\"o}nnen andere Motivation und Volition entwickeln. Dabei sind mehr kollektivistische Kulturen eher vermeidungs-motiviert und mehr individualistische Kulturen mehr erfolgsorientiert. 
Beim Kollektivismus erscheint die Leistungsmotivation eher unter einem sozialen Aspekt, n{\"a}mlich die Auseinandersetzung mit einem G{\"u}temaßstab, der eher von außen vorgegeben wird und weniger einem ausschließlich eigenen Maßstab. {\"A}gypten erweist sich im Vergleich zu Deutschland als eine eher kollektivistisch gepr{\"a}gte Kultur. Daraus ergeben sich folgende Unterschiede: Einen signifikanten Unterschied zwischen deutschen und {\"a}gyptischen Ringern gibt es in der Wettkampforientierung und bei der Sieg- bzw. Gewinn-Orientierung. Die {\"a}gyptischen Ringer haben eine h{\"o}here Auspr{\"a}gung als die Deutschen. Sie weisen auch eine etwas h{\"o}here Zielorientierung auf als die Deutschen. Entgegen den Erwartungen zeigte sich, dass es keine signifikanten Unterschiede zwischen den {\"a}gyptischen und deutschen Ringern gibt in der Variable: Sieg- bzw. Gewinn-Orientierung. Die Furcht vor Misserfolg sowie auch die Hoffnung auf Erfolg liegen h{\"o}her bei den {\"A}gyptern als bei den Deutschen. Bezogen auf die Modi der Handlungskontrolle verf{\"u}gen die deutschen Ringer {\"u}ber eine h{\"o}here Auspr{\"a}gung auf allen drei Komponenten. Sie haben eine h{\"o}here Handlungsorientierung nach Misserfolg, eine h{\"o}here Handlungsplanung sowie eine h{\"o}here Handlungst{\"a}tigkeitsausf{\"u}hrung. Diese kulturell kontrastive Studie {\"u}ber die psychologischen Aspekte, im Bereich der Leistungsmotivation und der Handlungskontrolle, kann f{\"u}r die Sportart Ringen sehr n{\"u}tzlich werden, da sie sehr wichtig ist beim Erkennen der sportlichen {\"U}berlegenheits- und Schw{\"a}chemerkmale. Sie widerspiegelt auch die Hochstimmung in den entwickelten Staaten oder die Misere in den anderen Staaten. Aus den interkulturellen Unterschieden in der Motivation und Volition k{\"o}nnen somit verschiedene Maßnahmen zu sportpsychologischen Interventionen entwickelt werden. 
Es sollte unbedingt darauf wert gelegt werden, dass die kulturell bedingten Unterschiede im Trainingsalltag beachtet werden, bei Teams, die aus Personen aus unterschiedlichen Kulturkreisen stammen.}, language = {de} } @phdthesis{Schuetz2013, author = {Sch{\"u}tz, Felina}, title = {Surface heat flow and lithospheric thermal structure of the northwestern Arabian Plate}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69622}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The surface heat flow (qs) is paramount for modeling the thermal structure of the lithosphere. Changes in the qs over a distinct lithospheric unit are normally directly reflecting changes in the crustal composition and therewith the radiogenic heat budget (e.g., Rudnick et al., 1998; F{\"o}rster and F{\"o}rster, 2000; Mareschal and Jaupart, 2004; Perry et al., 2006; Hasterok and Chapman, 2011, and references therein) or, less usual, changes in the mantle heat flow (e.g., Pollack and Chapman, 1977). Knowledge of this physical property is therefore of great interest for both academic research and the energy industry. The present study focuses on the qs of central and southern Israel as part of the Sinai Microplate (SM). Having formed during Oligocene to Miocene rifting and break-up of the African and Arabian plates, the SM is characterized by a young and complex tectonic history. Resulting from the time thermal diffusion needs to pass through the lithosphere, on the order of several tens-of-millions of years (e.g., Fowler, 1990); qs-values of the area reflect conditions of pre-Oligocene times. The thermal structure of the lithosphere beneath the SM in general, and south-central Israel in particular, has remained poorly understood. To address this problem, the two parameters needed for the qs determination were investigated. 
Temperature measurements were made at ten pre-existing oil and water exploration wells, and the thermal conductivity of 240 drill core and outcrop samples was measured in the lab. The thermal conductivity is the sensitive parameter in this determination. Lab measurements were performed on both, dry and water-saturated samples, which is labor- and time-consuming. Another possibility is the measurement of thermal conductivity in dry state and the conversion to a saturated value by using mean model approaches. The availability of a voluminous and diverse dataset of thermal conductivity values in this study allowed (1) in connection with the temperature gradient to calculate new reliable qs values and to use them to model the thermal pattern of the crust in south-central Israel, prior to young tectonic events, and (2) in connection with comparable datasets, controlling the quality of different mean model approaches for indirect determination of bulk thermal conductivity (BTC) of rocks. The reliability of numerically derived BTC values appears to vary between different mean models, and is also strongly dependent upon sample lithology. Yet, correction algorithms may significantly reduce the mismatch between measured and calculated conductivity values based on the different mean models. Furthermore, the dataset allowed the derivation of lithotype-specific conversion equations to calculate the water-saturated BTC directly from data of dry-measured BTC and porosity (e.g., well log derived porosity) with no use of any mean model and thus provide a suitable tool for fast analysis of large datasets. The results of the study indicate that the qs in the study area is significantly higher than previously assumed. The new presented qs values range between 50 and 62 mW m⁻². A weak trend of decreasing heat flow can be identified from the east to the west (55-50 mW m⁻²), and an increase from the Dead Sea Basin to the south (55-62 mW m⁻²). 
The observed range can be explained by variation in the composition (heat production) of the upper crust, accompanied by more systematic spatial changes in its thickness. The new qs data then can be used, in conjunction with petrophysical data and information on the structure and composition of the lithosphere, to adjust a model of the pre-Oligocene thermal state of the crust in south-central Israel. The 2-D steady-state temperature model was calculated along an E-W traverse based on the DESIRE seismic profile (Mechie et al., 2009). The model comprises the entire lithosphere down to the lithosphere-asthenosphere boundary (LAB) involving the most recent knowledge of the lithosphere in pre-Oligocene time, i.e., prior to the onset of rifting and plume-related lithospheric thermal perturbations. The adjustment of modeled and measured qs allows conclusions about the pre-Oligocene LAB-depth. After the best fitting the most likely depth is 150 km which is consistent with estimations made in comparable regions of the Arabian Shield. It therefore comprises the first ever modelled pre-Oligocene LAB depth, and provides important clues on the thermal state of lithosphere before rifting. This, in turn, is vital for a better understanding of the (thermo)-dynamic processes associated with lithosphere extension and continental break-up.}, language = {en} } @phdthesis{Schumann2013, author = {Schumann, Sara}, title = {Influence of intestinal inflammation on bacterial protein expression in monoassociated mice}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67757}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Background: Increased numbers of intestinal E. coli are observed in inflammatory bowel disease, but the reasons for this proliferation and it exact role in intestinal inflammation are unknown. Aim of this PhD-project was to identify E. coli proteins involved in E. 
coli's adaptation to the inflammatory conditions in the gut and to investigate whether these factors affect the host. Furthermore, the molecular basis for strain-specific differences between probiotic and harmful E. coli in their response to intestinal inflammation was investigated. Methods: Using mice monoassociated either with the adherent-invasive E. coli (AIEC) strain UNC or the probiotic E. coli Nissle, two different mouse models of intestinal inflammation were analysed: On the one hand, severe inflammation was induced by treating mice with 3.5\% dextran sodium sulphate (DSS). On the other hand, a very mild intestinal inflammation was generated by associating interleukin 10-deficient (IL-10-/-) mice with E. coli. Differentially expressed proteins in the E. coli strains collected from caecal contents of these mice were identified by two-dimensional fluorescence difference gel electrophoresis. Results DSS-experiment: All DSS-treated mice revealed signs of a moderate caecal and a severe colonic inflammation. However, mice monoassociated with E. coli Nissle were less affected. In both E. coli strains, acute inflammation led to a downregulation of pathways involved in carbohydrate breakdown and energy generation. Accordingly, DSS-treated mice had lower caecal concentrations of bacterial fermentation products than the control mice. Differentially expressed proteins also included the Fe-S cluster repair protein NfuA, the tryptophanase TnaA, and the uncharacterised protein YggE. NfuA was upregulated nearly 3-fold in both E. coli strains after DSS administration. Reactive oxygen species produced during intestinal inflammation damage Fe-S clusters and thereby lead to an inactivation of Fe-S proteins. In vitro data indicated that the repair of Fe-S proteins by NfuA is a central mechanism in E. coli to survive oxidative stress. Expression of YggE, which has been reported to reduce the intracellular level of reactive oxygen species, was 4- to 8-fold higher in E. 
coli Nissle than in E. coli UNC under control and inflammatory conditions. In vitro growth experiments confirmed these results, indicating that E. coli Nissle is better equipped to cope with oxidative stress than E. coli UNC. Additionally, E. coli Nissle isolated from DSS-treated and control mice had TnaA levels 4- to 7-fold higher than E. coli UNC. In turn, caecal indole concentrations resulting from cleavage of tryptophan by TnaA were higher in E. coli Nissle- associated control mice than in the respective mice associated with E. coli UNC. Because of its anti-inflammatory effect, indole is hypothesised to be involved in the extension of the remission phase in ulcerative colitis described for E. coli Nissle. Results IL-10-/--experiment: Only IL-10-/- mice monoassociated with E. coli UNC for 8 weeks exhibited signs of a very mild caecal inflammation. In agreement with this weak inflammation, the variations in the bacterial proteome were small. Similar to the DSS-experiment, proteins downregulated by inflammation belong mainly to the central energy metabolism. In contrast to the DSS-experiment, no upregulation of chaperone proteins and NfuA were observed, indicating that these are strategies to overcome adverse effects of strong intestinal inflammation. The inhibitor of vertebrate C-type lysozyme, Ivy, was 2- to 3-fold upregulated on mRNA and protein level in E. coli Nissle in comparison to E. coli UNC isolated from IL-10-/- mice. By overexpressing ivy, it was demonstrated in vitro that Ivy contributes to a higher lysozyme resistance observed for E. coli Nissle, supporting the role of Ivy as a potential fitness factor in this E. coli strain. Conclusions: The results of this PhD-study demonstrate that intestinal bacteria sense even minimal changes in the health status of the host. While some bacterial adaptations to the inflammatory conditions are equal in response to strong and mild intestinal inflammation, other reactions are unique to a specific disease state. 
In addition, probiotic and colitogenic E. coli differ in their response to the intestinal inflammation and thereby may influence the host in different ways.}, language = {en} } @phdthesis{Schlolaut2013, author = {Schlolaut, Gordon}, title = {Varve and event layer chronology of Lake Suigetsu (Japan) back to 40 kyr BP and contribution to the international consensus atmospheric radiocarbon calibration curve}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69096}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The main intention of the PhD project was to create a varve chronology for the 'Suigetsu Varves 2006' (SG06) composite profile from Lake Suigetsu (Japan) by thin section microscopy. The chronology was not only to provide an age-scale for the various palaeo-environmental proxies analysed within the SG06 project, but also and foremost to contribute, in combination with the SG06 14C chronology, to the international atmospheric radiocarbon calibration curve (IntCal). The SG06 14C data are based on terrestrial leaf fossils and therefore record atmospheric 14C values directly, avoiding the corrections necessary for the reservoir ages of the marine datasets, which are currently used beyond the tree-ring limit in the IntCal09 dataset (Reimer et al., 2009). The SG06 project is a follow up of the SG93 project (Kitagawa \& van der Plicht, 2000), which aimed to produce an atmospheric calibration dataset, too, but suffered from incomplete core recovery and varve count uncertainties. For the SG06 project the complete Lake Suigetsu sediment sequence was recovered continuously, leaving the task to produce an improved varve count. Varve counting was carried out using a dual method approach utilizing thin section microscopy and micro X-Ray Fluorescence (µXRF). The latter was carried out by Dr. Michael Marshall in cooperation with the PhD candidate. The varve count covers 19 m of composite core, which corresponds to the time frame from ≈10 to ≈40 kyr BP. 
The count result showed that seasonal layers did not form in every year. Hence, the varve counts from either method were incomplete. This rather common problem in varve counting is usually solved by manual varve interpolation. But manual interpolation often suffers from subjectivity. Furthermore, sedimentation rate estimates (which are the basis for interpolation) are generally derived from neighbouring, well varved intervals. This assumes that the sedimentation rates in neighbouring intervals are identical to those in the incompletely varved section, which is not necessarily true. To overcome these problems a novel interpolation method was devised. It is computer based and automated (i.e. avoids subjectivity and ensures reproducibility) and derives the sedimentation rate estimate directly from the incompletely varved interval by statistically analysing distances between successive seasonal layers. Therefore, the interpolation approach is also suitable for sediments which do not contain well varved intervals. Another benefit of the novel method is that it provides objective interpolation error estimates. Interpolation results from the two counting methods were combined and the resulting chronology compared to the 14C chronology from Lake Suigetsu, calibrated with the tree-ring derived section of IntCal09 (which is considered accurate). The varve and 14C chronology showed a high degree of similarity, demonstrating that the novel interpolation method produces reliable results. In order to constrain the uncertainties of the varve chronology, especially the cumulative error estimates, U-Th dated speleothem data were used by linking the low frequency 14C signal of Lake Suigetsu and the speleothems, increasing the accuracy and precision of the Suigetsu calibration dataset. The resulting chronology also represents the age-scale for the various palaeo-environmental proxies analysed in the SG06 project. 
One proxy analysed within the PhD project was the distribution of event layers, which are often representatives of past floods or earthquakes. A detailed microfacies analysis revealed three different types of event layers, two of which are described here for the first time for the Suigetsu sediment. The types are: matrix supported layers produced as result of subaqueous slope failures, turbidites produced as result of landslides and turbidites produced as result of flood events. The former two are likely to have been triggered by earthquakes. The vast majority of event layers was related to floods (362 out of 369), which allowed the construction of a respective chronology for the last 40 kyr. Flood frequencies were highly variable, reaching their greatest values during the global sea level low-stand of the Glacial, their lowest values during Heinrich Event 1. Typhoons affecting the region represent the most likely control on the flood frequency, especially during the Glacial. However, also local, non-climatic controls are suggested by the data. In summary, the work presented here expands and revises knowledge on the Lake Suigetsu sediment and enables the construction of a far more precise varve chronology. The 14C calibration dataset is the first such derived from lacustrine sediments to be included into the (next) IntCal dataset. References: Kitagawa \& van der Plicht, 2000, Radiocarbon, Vol 42(3), 370-381 Reimer et al., 2009, Radiocarbon, Vol 51(4), 1111-1150}, language = {en} } @phdthesis{Schick2013, author = {Schick, Daniel}, title = {Ultrafast lattice dynamics in photoexcited nanostructures : femtosecond X-ray diffraction with optimized evaluation schemes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68827}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Within the course of this thesis, I have investigated the complex interplay between electron and lattice dynamics in nanostructures of perovskite oxides. 
Femtosecond hard X-ray pulses were utilized to probe the evolution of atomic rearrangement directly, which is driven by ultrafast optical excitation of electrons. The physics of complex materials with a large number of degrees of freedom can be interpreted once the exact fingerprint of ultrafast lattice dynamics in time-resolved X-ray diffraction experiments for a simple model system is well known. The motion of atoms in a crystal can be probed directly and in real-time by femtosecond pulses of hard X-ray radiation in a pump-probe scheme. In order to provide such ultrashort X-ray pulses, I have built up a laser-driven plasma X-ray source. The setup was extended by a stable goniometer, a two-dimensional X-ray detector and a cryogen-free cryostat. The data acquisition routines of the diffractometer for these ultrafast X-ray diffraction experiments were further improved in terms of signal-to-noise ratio and angular resolution. The implementation of a high-speed reciprocal-space mapping technique allowed for a two-dimensional structural analysis with femtosecond temporal resolution. I have studied the ultrafast lattice dynamics, namely the excitation and propagation of coherent phonons, in photoexcited thin films and superlattice structures of the metallic perovskite SrRuO3. Due to the quasi-instantaneous coupling of the lattice to the optically excited electrons in this material a spatially and temporally well-defined thermal stress profile is generated in SrRuO3. This enables understanding the effect of the resulting coherent lattice dynamics in time-resolved X-ray diffraction data in great detail, e.g. the appearance of a transient Bragg peak splitting in both thin films and superlattice structures of SrRuO3. In addition, a comprehensive simulation toolbox to calculate the ultrafast lattice dynamics and the resulting X-ray diffraction response in photoexcited one-dimensional crystalline structures was developed in this thesis work. 
With the powerful experimental and theoretical framework at hand, I have studied the excitation and propagation of coherent phonons in more complex material systems. In particular, I have revealed strongly localized charge carriers after above-bandgap femtosecond photoexcitation of the prototypical multiferroic BiFeO3, which are the origin of a quasi-instantaneous and spatially inhomogeneous stress that drives coherent phonons in a thin film of the multiferroic. In a structurally imperfect thin film of the ferroelectric Pb(Zr0.2Ti0.8)O3, the ultrafast reciprocal-space mapping technique was applied to follow a purely strain-induced change of mosaicity on a picosecond time scale. These results point to a strong coupling of in- and out-of-plane atomic motion exclusively mediated by structural defects.}, language = {en} } @phdthesis{Scheffler2013, author = {Scheffler, Thomas}, title = {Privacy enforcement with data owner-defined policies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67939}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis proposes a privacy protection framework for the controlled distribution and use of personal private data. The framework is based on the idea that privacy policies can be set directly by the data owner and can be automatically enforced against the data user. Data privacy continues to be a very important topic, as our dependency on electronic communication maintains its current growth, and private data is shared between multiple devices, users and locations. The growing amount and the ubiquitous availability of personal private data increases the likelihood of data misuse. Early privacy protection techniques, such as anonymous email and payment systems have focused on data avoidance and anonymous use of services. They did not take into account that data sharing cannot be avoided when people participate in electronic communication scenarios that involve social interactions. 
This leads to a situation where data is shared widely and uncontrollably and in most cases the data owner has no control over further distribution and use of personal private data. Previous efforts to integrate privacy awareness into data processing workflows have focused on the extension of existing access control frameworks with privacy aware functions or have analysed specific individual problems such as the expressiveness of policy languages. So far, very few implementations of integrated privacy protection mechanisms exist and can be studied to prove their effectiveness for privacy protection. Second level issues that stem from practical application of the implemented mechanisms, such as usability, life-time data management and changes in trustworthiness have received very little attention so far, mainly because they require actual implementations to be studied. Most existing privacy protection schemes silently assume that it is the privilege of the data user to define the contract under which personal private data is released. Such an approach simplifies policy management and policy enforcement for the data user, but leaves the data owner with a binary decision to submit or withhold his or her personal data based on the provided policy. We wanted to empower the data owner to express his or her privacy preferences through privacy policies that follow the so-called Owner-Retained Access Control (ORAC) model. ORAC has been proposed by McCollum, et al. as an alternate access control mechanism that leaves the authority over access decisions by the originator of the data. The data owner is given control over the release policy for his or her personal data, and he or she can set permissions or restrictions according to individually perceived trust values. Such a policy needs to be expressed in a coherent way and must allow the deterministic policy evaluation by different entities. 
The privacy policy also needs to be communicated from the data owner to the data user, so that it can be enforced. Data and policy are stored together as a Protected Data Object that follows the Sticky Policy paradigm as defined by Mont, et al. and others. We developed a unique policy combination approach that takes usability aspects for the creation and maintenance of policies into consideration. Our privacy policy consists of three parts: A Default Policy provides basic privacy protection if no specific rules have been entered by the data owner. An Owner Policy part allows the customisation of the default policy by the data owner. And a so-called Safety Policy guarantees that the data owner cannot specify disadvantageous policies, which, for example, exclude him or her from further access to the private data. The combined evaluation of these three policy-parts yields the necessary access decision. The automatic enforcement of privacy policies in our protection framework is supported by a reference monitor implementation. We started our work with the development of a client-side protection mechanism that allows the enforcement of data-use restrictions after private data has been released to the data user. The client-side enforcement component for data-use policies is based on a modified Java Security Framework. Privacy policies are translated into corresponding Java permissions that can be automatically enforced by the Java Security Manager. When we later extended our work to implement server-side protection mechanisms, we found several drawbacks for the privacy enforcement through the Java Security Framework. 
We solved this problem by extending our reference monitor design to use Aspect-Oriented Programming (AOP) and the Java Reflection API to intercept data accesses in existing applications and provide a way to enforce data owner-defined privacy policies for business applications.}, language = {en} } @phdthesis{Sauer2013, author = {Sauer, Patrick}, title = {Liberation of low molecular weight organic acids from sedimentary organic matter and their role on microbial activity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68830}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Low molecular weight organic acids (LMWOAs) are important nutrients for microbes. However, most LMWOAs do not exist freely in the environment but are bound to macromolecular organic matter, e.g. kerogen, lignite and coal. During burial and geological maturation of sedimentary macromolecular organic matter biological and abiological processes promote the liberation of LMWOAs into the surrounding sediment. Through this process, microbes in sedimentary subsurface environments are supplied with essential nutrients. To estimate the feedstock potential of buried macromolecular organic matter to many environments it is important to determine the amount of LMWOAs that are bound to such a matrix. However, high-pressure and high temperature are a key feature of deep subsurface environments, and these physical parameters have a profound influence on chemical reaction kinetics. Therefore it is essential for the estimation of the feedstock potential to generate high-pressure and high temperature for the liberation of LMWOAs to recreate true in-situ conditions. This work presents a newly developed, inexpensive incubation system for biological and geological samples. It allows the application of high-pressure and high temperature as well as a subsampling of the liquid phase without loss of pressure, thereby not disturbing the on-going processes. 
When simulating the liberation of LMWOAs from sedimentary organic matter, the newly developed incubation system produces more realistic results than other extraction systems like Soxhlet. The extraction products remain in the extraction medium throughout the extraction, influencing the chemical conditions of the extraction medium. Sub-bituminous coal samples from New Zealand as well as lignite samples from Germany were extracted at elevated temperature (90˚C) and pressure (5 MPa). The main LMWOAs released from these low rank coals were formate, acetate and oxalate. Extraction efficiency was increased by two to four times for formate, acetate and oxalate in comparison to existing extraction methods without pressurisation and with demineralised water. This shows the importance of pressure for the simulation of true in-situ conditions and suggests that the amount of bioavailable LMWOAs is higher than previously thought. With the increase in carbon capture and storage (CCS) and the enhanced recovery of oil and gas (EOR/EGR), more and more CO2 becomes injected into the underground. However, the effects of elevated concentrations of carbon dioxide on sedimentary organic matter are rarely investigated. As the incubation system allows the manipulation of the composition and partial pressure of dissolved gasses, the effect of highly gas-enriched (CO2, CO2/SO2, CO2/NO2; to simulate flue gas conditions) waters on the extraction yield of LMWOAs from macromolecular organic matter was evaluated. For sub-bituminous coal the concentrations of all LMWOAs decreased upon the addition of gas, irrespective of its composition, whereas for lignite formate always and acetate mostly increased, while oxalate decreased. This suggests a positive effect on the nutrient supply for the subsurface microbiota of lignite layers, as formate and acetate are the most common LMWOAs used for microbial metabolism. 
In terrestrial mud volcanoes (TMVs), sedimentary material is rapidly ascending from great depth to the surface. Therefore LMWOAs that were produced from buried macromolecular organic matter at depth are also brought up to the surface, and fuel heterotrophic microbial ecosystems at the surface. TMVs represent geochemically and microbiologically diverse habitats, which are supplied with organic substrates and electron acceptors from deep-seated hydrocarbon-generating systems and intersected shallow aquifers, respectively. The main electron acceptor in TMVs in Azerbaijan is sulphate, and microbial sulphate reduction leads to the production of a wide range of reduced sulphur species that are key players in several biological processes. In our study we estimated the effect of LMWOAs on the sulphur metabolising activity of microorganisms in TMVs from Azerbaijan. The addition of a mixture of volatile fatty acids containing acetate and other LMWOAs showed significant positive response to the sulphate reduction rate (SRR) of samples of several mud volcanoes. Further investigations on the temperature dependency of the SRR and the characterisation of thermophilic sulphate-reducing bacteria (SRB) showed a connection between the deep hot subsurface and the surface.}, language = {en} } @phdthesis{Santan2013, author = {Santan, Harshal Diliprao}, title = {Synthesis and characterization of thermosensitive hydrogels derived from polysaccharides}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69793}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In this work, thermosensitive hydrogels having tunable thermo-mechanical properties were synthesized. Generally the thermal transition of thermosensitive hydrogels is based on either a lower critical solution temperature (LCST) or critical micelle concentration/ temperature (CMC/ CMT). 
The temperature dependent transition from sol to gel with large volume change may be seen in the former type of thermosensitive hydrogels and is negligible in CMC/ CMT dependent systems. The change in volume leads to exclusion of water molecules, resulting in shrinking and stiffening of system above the transition temperature. The volume change can be undesired when cells are to be incorporated in the system. The gelation in the latter case is mainly driven by micelle formation above the transition temperature and further colloidal packing of micelles around the gelation temperature. As the gelation mainly depends on concentration of polymer, such a system could undergo fast dissolution upon addition of solvent. Here, it was envisioned to realize a thermosensitive gel based on two components, one responsible for a change in mechanical properties by formation of reversible netpoints upon heating without volume change, and second component conferring degradability on demand. As first component, an ABA triblockcopolymer (here: Poly(ethylene glycol)-b-poly(propylene glycol)-b-poly(ethylene glycol) (PEPE) with thermosensitive properties, whose sol-gel transition on the molecular level is based on micellization and colloidal jamming of the formed micelles was chosen, while for the additional macromolecular component crosslinking the formed micelles biopolymers were employed. The synthesis of the hydrogels was performed in two ways, either by physical mixing of compounds showing electrostatic interactions, or by covalent coupling of the components. Biopolymers (here: the polysaccharides hyaluronic acid, chondroitin sulphate, or pectin, as well as the protein gelatin) were employed as additional macromolecular crosslinker to simultaneously incorporate an enzyme responsiveness into the systems. In order to have strong ionic/electrostatic interactions between PEPE and polysaccharides, PEPE was aminated to yield predominantly mono- or di-substituted PEPEs. 
The systems based on aminated PEPE physically mixed with HA showed an enhancement in the mechanical properties such as elastic modulus (G′) and viscous modulus (G′′) and a decrease of the gelation temperature (Tgel) compared to the PEPE at same concentration. Furthermore, by varying the amount of aminated PEPE in the composition, the Tgel of the system could be tailored to 27-36 °C. The physical mixtures of HA with di-amino PEPE (HA·di-PEPE) showed higher elastic moduli G′ and stability towards dissolution compared to the physical mixtures of HA with mono-amino PEPE (HA·mono-PEPE). This indicates a strong influence of electrostatic interaction between -COOH groups of HA and -NH2 groups of PEPE. The physical properties of HA with di-amino PEPE (HA·di-PEPE) compare beneficially with the physical properties of the human vitreous body, the systems are highly transparent, and have a comparable refractive index and viscosity. Therefore, this material was tested for a potential biological application and was shown to be non-cytotoxic in eluate and direct contact tests. The materials will in the future be investigated in further studies as vitreous body substitutes. In addition, enzymatic degradation of these hydrogels was performed using hyaluronidase to specifically degrade the HA. During the degradation of these hydrogels, increase in the Tgel was observed along with decrease in the mechanical properties. The aminated PEPE were further utilised in the covalent coupling to Pectin and chondroitin sulphate by using EDC as a coupling agent. Here, it was possible to adjust the Tgel (28-33 °C) by varying the grafting density of PEPE to the biopolymer. The grafting of PEPE to Pectin enhanced the thermal stability of the hydrogel. The Pec-g-PEPE hydrogels were degradable by enzymes with slight increase in Tgel and decrease in G′ during the degradation time. The covalent coupling of aminated PEPE to HA was performed by DMTMM as a coupling agent. 
This method of coupling was observed to be more efficient compared to EDC mediated coupling. Moreover, the purification of the final product was performed by ultrafiltration technique, which efficiently removed the unreacted PEPE from the final product, which was not sufficiently achieved by dialysis. Interestingly, the final products of these reactions were in a gel state and showed enhancement in the mechanical properties at very low concentrations (2.5 wt\%) near body temperature. In these hydrogels the resulting increase in mechanical properties was due to the combined effect of micelle packing (physical interactions) by PEPE and covalent netpoints between PEPE and HA. PEPE alone or the physical mixtures of the same components were not able to show thermosensitive behavior at concentrations below 16 wt\%. These thermosensitive hydrogels also showed on demand solubilisation by enzymatic degradation. The concept of thermosensitivity was introduced to 3D architectured porous hydrogels, by covalently grafting the PEPE to gelatin and crosslinking with LDI as a crosslinker. Here, the grafted PEPE resulted in a decrease in the helix formation in gelatin chains and after fixing the gelatin chains by crosslinking, the system showed an enhancement in the mechanical properties upon heating (34-42 °C) which was reversible upon cooling. A possible explanation of the reversible changes in mechanical properties is the strong physical interactions between micelles formed by PEPE being covalently linked to gelatin. Above the transition temperature, the local properties were evaluated by AFM indentation of pore walls in which an increase in elastic modulus (E) at higher temperature (37 °C) was observed. The water uptake of these thermosensitive architectured porous hydrogels was also influenced by PEPE and temperature (25 °C and 37 °C), showing lower water uptake at higher temperature and vice versa. 
In addition, due to the lower water uptake at high temperature, the rate of hydrolytic degradation of these systems was found to be decreased when compared to pure gelatin architectured porous hydrogels. Such temperature sensitive architectured porous hydrogels could be important for e.g. stem cell culturing, cell differentiation and guided cell migration, etc. Altogether, it was possible to demonstrate that the crosslinking of micelles by a macromolecular crosslinker increased the shear moduli, viscosity, and stability towards dissolution of CMC-based gels. This effect could likewise be realized by covalent or non-covalent mechanisms such as micelle interactions, physical interactions of gelatin chains and physical interactions between gelatin chains and micelles. Moreover, the covalent grafting of PEPE will create additional net-points which also influence the mechanical properties of thermosensitive architectured porous hydrogels. Overall, the physical and chemical interactions and reversible physical interactions in such thermosensitive architectured porous hydrogels gave a control over the mechanical properties of such complex system. The hydrogels showing change of mechanical properties without a sol-gel transition or volume change are especially interesting for further study with cell proliferation and differentiation.}, language = {en} } @phdthesis{Salffner2013, author = {Salffner, Katharina}, title = {Entwicklung eines breitbandigen Cavity-Ring-Down-Spektrometers unter Verwendung nahinfraroter, inkoh{\"a}renter Strahlung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68952}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In der vorliegenden Arbeit werden verschiedene Spektrometer f{\"u}r die Analyse von Gasen bzw. Gasgemischen vorgestellt und deren Design, Aufbau, Charakterisierung und Optimierung beschrieben. 
Das Resultat der Optimierung und Weiterentwicklungen ist ein spektral breitbandiges Cavity-Ring-Down-Spektrometer (CRD-Spektrometer). Ausgangspunkt der hier vorgestellten Arbeit ist ein Spektrometer auf Basis klassischer Absorptionsspektroskopie in einer Multireflexionszelle. F{\"u}r dieses Spektrometer wurde als Strahlquelle ein Superkontinuumlaser verwendet. Der Vorteil dieses Spektrometers liegt in seiner Kompaktheit. Mit diesem Spektrometer wurden Absorptionsspektren von mehreren Reingasen und einem Gasgemisch {\"u}ber einen Wellenl{\"a}ngenbereich von 1500 nm - 1700 nm aufgenommen. Der qualitative Vergleich mit zu erwartenden Spektren, welche auf der HITRAN-Datenbank basieren, zeigte eine gute {\"U}bereinstimmung. Die quantitative Interpretierbarkeit der Daten war jedoch stark eingeschr{\"a}nkt aufgrund des hohen zuf{\"a}lligen und systematischen Fehlers der Messungen. Als Konsequenz aus der als nicht zufriedenstellend bewerteten quantitativen Interpretierbarkeit der Daten wurde eine alternative Messmethode gesucht, welche eine h{\"o}here Sensitivit{\"a}t und Genauigkeit erm{\"o}glicht. Die Wahl fiel auf die Cavity-Ring-Down-Spektroskopie, eine resonatorgest{\"u}tzte Variante der Absorptionsspektroskopie. Wesentliche Vorteile dieser Technik sind a) die Unabh{\"a}ngigkeit von Leistungsschwankungen der Strahlquelle, b) ein effektiver Absorptionsweg von bis zu mehreren Kilometern, welcher sich unmittelbar auf die Sensitivit{\"a}t der Messungen auswirkt, c) die Ermittlung absoluter Absorberkonzentrationen, ohne die Notwendigkeit einer Kalibrierung oder den Vergleich mit einer Referenzzelle und d) die Vernachl{\"a}ssigbarkeit von Absorptionen außerhalb des Resonators. Als notwendiger Zwischenschritt auf dem Weg zu einem breitbandigen CRD-Spektrometer wurde zun{\"a}chst ein monochromatisches CRD-Spektrometer designt, aufgebaut und charakterisiert. 
F{\"u}r die effektive Einkopplung von Strahlungsenergie in einen Resonator ist die Anpassung der Strahlparameter an die Mode des Resonators notwendig. Voraussetzung dieser Anpassung ist die Kenntnis der Strahlparameter, welche experimentell ermittelt wurden. Im Laufe des Aufbaus des Spektrometers ergab sich, dass trotz der Modenanpassung die Einkopplung der Strahlungsenergie in den Resonator gest{\"o}rt wurde. Daraufhin wurden systematisch m{\"o}gliche Ursachen dieser St{\"o}rung untersucht und das Spektrometer optimiert. Mit diesem optimierten Spektrometer wurden Spektren gemessen, welche sowohl qualitativ als auch quantitativ gut mit den zu erwartenden Spektren {\"u}bereinstimmen. Als Nachweisgrenze dieses Spektrometers wurde ein Wert f{\"u}r den Absorptionskoeffizienten alpha von 10^-8 cm-1 bestimmt. Mit dem monochromatischen CRD-Spektrometer war es zudem m{\"o}glich, isotopenspezifische Messungen durchzuf{\"u}hren. F{\"u}r das breitbandige Spektrometer wurde als Strahlquelle eine ASE-Diode (amplified spontaneous emission) verwendet. Dabei handelt es sich um eine inkoh{\"a}rente Strahlquelle. Mittels Messungen nach dem Prinzip der Cavity-Enhanced-Absorptionsspektroskopie wurde die generelle Funktionalit{\"a}t des resonatorgest{\"u}tzten Spektrometers {\"u}berpr{\"u}ft. Anschließend wurden die wellenl{\"a}ngenabh{\"a}ngigen Abklingsignale des leeren und des mit einem CO2-Luft-Gemisch gef{\"u}llten Resonators gemessen und ebenfalls mit den zu erwartenden Spektren verglichen. Qualitativ stimmen die experimentellen Spektren gut mit den zu erwartenden Spektren {\"u}berein. F{\"u}r die quantitative Interpretation der Daten wurde ein spezieller Algorithmus entwickelt, der die spektrale Aufl{\"o}sung des Systems ber{\"u}cksichtigt. Mit dem vorgestellten Spektrometer ist so die qualitative und quantitative Interpretation der Spektren m{\"o}glich. Die Nachweisgrenze des breitbandigen Cavity-Ring-Down-Spektrometers wurde zu einem Wert von alpha = 8x10^-7 cm-1 bestimmt. 
Der systematische und der zuf{\"a}llige Fehler der Messungen lagen bei Werten von ca. 1\%. Bei diesem Spektrometer handelt es sich um einen Prototyp. Mittels Optimierung des Systems lassen sich sowohl der Wert der Nachweisgrenze als auch die Fehler der Messungen verbessern.}, language = {de} } @phdthesis{Raetzel2013, author = {R{\"a}tzel, Dennis}, title = {Tensorial spacetime geometries and background-independent quantum field theory}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65731}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Famously, Einstein read off the geometry of spacetime from Maxwell's equations. Today, we take this geometry so seriously that our fundamental theory of matter, the standard model of particle physics, is based on it. However, it seems that there is a gap in our understanding when it comes to the physics outside of the solar system. Independent surveys show that we need concepts like dark matter and dark energy to make our models fit with the observations. But these concepts do not fit in the standard model of particle physics. To overcome this problem, at least, we have to be open to matter fields with kinematics and dynamics beyond the standard model. But these matter fields might then very well correspond to different spacetime geometries. This is the basis of this thesis: it studies the underlying spacetime geometries and ventures into the quantization of those matter fields independently of any background geometry. In the first part of this thesis, conditions are identified that a general tensorial geometry must fulfill to serve as a viable spacetime structure. Kinematics of massless and massive point particles on such geometries are introduced and the physical implications are investigated. Additionally, field equations for massive matter fields are constructed like for example a modified Dirac equation. 
In the second part, a background independent formulation of quantum field theory, the general boundary formulation, is reviewed. The general boundary formulation is then applied to the Unruh effect as a testing ground and first attempts are made to quantize massive matter fields on tensorial spacetimes.}, language = {en} } @phdthesis{RudolphMohr2013, author = {Rudolph-Mohr, Nicole}, title = {A novel non-invasive optical method for quantitative visualization of pH and oxygen dynamics in soils}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66993}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In soils and sediments there is a strong coupling between local biogeochemical processes and the distribution of water, electron acceptors, acids and nutrients. Both sides are closely related and affect each other from small scale to larger scales. Soil structures such as aggregates, roots, layers or macropores enhance the patchiness of these distributions. At the same time it is difficult to access the spatial distribution and temporal dynamics of these parameters. Noninvasive imaging techniques with high spatial and temporal resolution overcome these limitations. And new non-invasive techniques are needed to study the dynamic interaction of plant roots with the surrounding soil, but also the complex physical and chemical processes in structured soils. In this study we developed an efficient non-destructive in-situ method to determine biogeochemical parameters relevant to plant roots growing in soil. This is a quantitative fluorescence imaging method suitable for visualizing the spatial and temporal pH changes around roots. We adapted the fluorescence imaging set-up and coupled it with neutron radiography to study simultaneously root growth, oxygen depletion by respiration activity and root water uptake. 
The combined set up was subsequently applied to a structured soil system to map the patchy structure of oxic and anoxic zones induced by a chemical oxygen consumption reaction for spatially varying water contents. Moreover, results from a similar fluorescence imaging technique for nitrate detection were complemented by a numerical modeling study where we used imaging data, aiming to simulate biodegradation under anaerobic, nitrate reducing conditions.}, language = {en} } @phdthesis{Rothe2013, author = {Rothe, Monique}, title = {Response of intestinal Escherichia coli to dietary factors in the mouse intestine}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66387}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Diet is a major force influencing the intestinal microbiota. This is obvious from drastic changes in microbiota composition after a dietary alteration. Due to the complexity of the commensal microbiota and the high inter-individual variability, little is known about the bacterial response at the cellular level. The objective of this work was to identify mechanisms that enable gut bacteria to adapt to dietary factors. For this purpose, germ-free mice monoassociated with the commensal Escherichia coli K-12 strain MG1655 were fed three different diets over three weeks: a diet rich in starch, a diet rich in non-digestible lactose and a diet rich in casein. Two dimensional gel electrophoresis and electrospray tandem mass spectrometry were applied to identify differentially expressed proteins of E. coli recovered from small intestine and caecum of mice fed the lactose or casein diets in comparison with those of mice fed the starch diet. Selected differentially expressed bacterial proteins were characterised in vitro for their possible roles in bacterial adaptation to the various diets. 
Proteins belonging to the oxidative stress regulon oxyR such as alkyl hydroperoxide reductase subunit F (AhpF), DNA protection during starvation protein (Dps) and ferric uptake regulatory protein (Fur), which are required for E. coli's oxidative stress response, were upregulated in E. coli of mice fed the lactose-rich diet. Reporter gene analysis revealed that not only oxidative stress but also carbohydrate-induced osmotic stress led to the OxyR-dependent expression of ahpCF and dps. Moreover, the growth of E. coli mutants lacking the ahpCF or oxyR genes was impaired in the presence of non-digestible sucrose. This indicates that some OxyR-dependent proteins are crucial for the adaptation of E. coli to osmotic stress conditions. In addition, the function of two so far poorly characterised E. coli proteins was analysed: 2 deoxy-D gluconate 3 dehydrogenase (KduD) was upregulated in intestinal E. coli of mice fed the lactose-rich diet and this enzyme and 5 keto 4 deoxyuronate isomerase (KduI) were downregulated on the casein-rich diet. Reporter gene analysis identified galacturonate and glucuronate as inducers of the kduD and kduI gene expression. Moreover, KduI was shown to facilitate the breakdown of these hexuronates, which are normally degraded by uronate isomerase (UxaC), altronate oxidoreductase (UxaB), altronate dehydratase (UxaA), mannonate oxidoreductase (UxuB) and mannonate dehydratase (UxuA), whose expression was repressed by osmotic stress. The growth of kduID-deficient E. coli on galacturonate or glucuronate was impaired in the presence of osmotic stress, suggesting KduI and KduD to compensate for the function of the regular hexuronate degrading enzymes under such conditions. This indicates a novel function of KduI and KduD in E. coli's hexuronate metabolism. Promotion of the intracellular formation of hexuronates by lactose connects these in vitro observations with the induction of KduD on the lactose-rich diet. 
Taken together, this study demonstrates the crucial influence of osmotic stress on the gene expression of E. coli enzymes involved in stress response and metabolic processes. Therefore, the adaptation to diet-induced osmotic stress is a possible key factor for bacterial colonisation of the intestinal environment.}, language = {en} } @phdthesis{RoncagliaDenissen2013, author = {Roncaglia-Denissen, Maria Paula}, title = {The role of first and second language speech rhythm in syntactic ambiguity processing and musical rhythmic aptitude}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78256}, school = {Universit{\"a}t Potsdam}, pages = {vi, 157}, year = {2013}, abstract = {Rhythm is a temporal and systematic organization of acoustic events in terms of prominence, timing and grouping, helping to structure our most basic experiences, such as body movement, music and speech. In speech, rhythm groups auditory events, e.g., sounds and pauses, together into words, making their boundaries acoustically prominent and aiding word segmentation and recognition by the hearer. After word recognition, the hearer is able to retrieve word meaning from his mental lexicon, integrating it with information from other linguistic domains, such as semantics, syntax and pragmatics, until comprehension is achieved. The importance of speech rhythm, however, is not restricted to word segmentation and recognition only. Beyond the word level rhythm continues to operate as an organization device, interacting with different linguistic domains, such as syntax and semantics, and grouping words into larger prosodic constituents, organized in a prosodic hierarchy. 
This dissertation investigates the function of speech rhythm as a sentence segmentation device during syntactic ambiguity processing, possible limitations on its use, i.e., in the context of second language processing, and its transferability as cognitive skill to the music domain.}, language = {en} } @phdthesis{RodriguezVillagra2013, author = {Rodr{\´i}guez-Villagra, Odir Antonio}, title = {Inhibition, attentional control, and causes of forgetting in working memory: a formal approach}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-76434}, school = {Universit{\"a}t Potsdam}, pages = {133}, year = {2013}, abstract = {In many cognitive activities, the temporary maintenance and manipulation of mental objects is a necessary step in order to reach a cognitive goal. Working memory has been regarded as the process responsible for those cognitive activities. This thesis addresses the question: what limits working-memory capacity (WMC)? A question that still remains controversial (Barrouillet \& Camos, 2009; Lewandowsky, Oberauer, \& Brown, 2009). This study attempted to answer this question by proposing that the dynamics between the causes of forgetting and the processes helping the maintenance, and the manipulation of the memoranda are the key aspects in understanding the limits of WMC. Chapter 1 introduced key constructs and the strategy to examine the dynamics between inhibition, attentional control, and the causes of forgetting in working memory. The study in Chapter 2 tested the performance of children, young adults, and old adults in a working-memory updating-task with two conditions: one condition included go steps and the other condition included go, and no-go steps. The interference model (IM; Oberauer \& Kliegl, 2006), a model proposing interference-related mechanisms as the main cause of forgetting was used to simultaneously fit the data of these age groups. 
In addition to the interference-related parameters reflecting interference by feature overwriting and interference by confusion, and in addition to the parameters reflecting the speed of processing, the study included a new parameter that captured the time for switching between go steps and no-go steps. The study indicated that children and young adults were less susceptible than old adults to interference by feature overwriting; children were the most susceptible to interference by confusion, followed by old adults and then by young adults; young adults presented the highest rate of processing, followed by children and then by old adults; and young adults were the fastest group switching from go steps to no-go steps. Chapter 3 examined the dynamics between causes of forgetting and the inhibition of a prepotent response in the context of three formal models of the limits of WMC: A resources model, a decay-based model, and three versions of the IM. The resources model was built on the assumption that a limited and shared source of activation for the maintenance and manipulation of the objects underlies the limits of WMC. The decay model assumes that memory traces of the working-memory objects decay over time if they are not reactivated via different mechanisms of maintenance. The IM, already described, proposes that interference-related mechanisms explain the limits of WMC. In two experiments and in a reanalysis of data of the second experiment, one version of the IM received more statistical support from the data. This version of the IM proposes that interference by feature overwriting and interference by confusion are the main factors underlying the limits of WMC. In addition, the model suggests that experimental conditions involving the inhibition of a prepotent response reduce the speed of processing and promote the involuntary activation of irrelevant information in working memory. 
Chapter 4 summed up Chapter 2 and 3 and discussed their findings and presented how this thesis has provided evidence of interference-related mechanisms as the main cause of forgetting, and it has attempted to clarify the role of inhibition and attentional control in working memory. With the implementation of formal models and experimental manipulations in the framework of nonlinear mixed models the data offered explanations of causes of forgetting and the role of inhibition in WMC at different levels: developmental effects, aging effects, effects related to experimental manipulations and individual differences in these effects. Thus, the present approach afforded a comprehensive view of a large number of factors limiting WMC.}, language = {en} } @phdthesis{Robinson2013, author = {Robinson, Joshua Wayne}, title = {Novel Poly(N-substituted glycine)s : synthesis, post-modification, and physical properties}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-64789}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Various synthetic approaches were explored towards the preparation of poly(N-substituted glycine) homo/co-polymers (a.k.a. polypeptoids). In particular, monomers that would facilitate in the preparation of bio-relevant polymers via either chain- or step-growth polymerization were targeted. A 3-step synthetic approach towards N-substituted glycine N-carboxyanhydrides (NNCA) was implemented, or developed, and optimized allowing for an efficient gram scale preparation of the aforementioned monomer (chain-growth). After exploring several solvents and various conditions, a reproducible and efficient ring-opening polymerization (ROP) of NNCAs was developed in benzonitrile (PhCN). However, achieving molecular weights greater than 7 kDa required longer reaction times (>4 weeks) and subsequently allowed for undesirable competing side reactions to occur (e.g. zwitterion monomer mechanisms). 
A bulk-polymerization strategy provided molecular weights up to 11 kDa within 24 hours but suffered from low monomer conversions (ca. 25\%). Likewise, a preliminary study towards alcohol promoted ROP of NNCAs suffered from impurities and a suspected alternative activated monomer mechanism (AAMM) providing poor inclusion of the initiator and leading to multi-modal dispersed polymeric systems. The post-modification of poly(N-allyl glycine) via thiol-ene photo-addition was observed to be quantitative, with the utilization of photo-initiators, and facilitated in the first glyco-peptoid prepared under environmentally benign conditions. Furthermore, poly(N-allyl glycine) demonstrated thermo-responsive behavior and could be prepared as a semi-crystalline bio-relevant polymer from solution (ie. annealing). Initial efforts in preparing these polymers via standard poly-condensation protocols were insufficient (step-growth). However, a thermally induced side-product, diallyl diketopiperazine (DKP), afforded the opportunity to explore photo-induced thiol-ene and acyclic diene metathesis (ADMET) polymerizations. Thiol-ene polymerization readily led to low molecular weight polymers (<2.5 kDa), that were insoluble in most solvents except heated amide solvents (ie. DMF), whereas ADMET polymerization, with diallyl DKP, was unsuccessful due to a suspected 6 member complexation/deactivation state of the catalyst. This understanding prompted the preparation of elongated DKPs most notably dibutenyl DKP. SEC data supports the aforementioned understanding but requires further optimization studies in both the preparation of the DKP monomers and following ADMET polymerization. This work was supported by NMR, GC-MS, FT-IR, SEC-IR, and MALDI-Tof MS characterization. 
Polymer properties were measured by UV-Vis, TGA, and DSC.}, language = {en} } @phdthesis{RiveraVillarreyes2013, author = {Rivera Villarreyes, Carlos Andres}, title = {Cosmic-ray neutron sensing for soil moisture measurements in cropped fields}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69748}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This cumulative dissertation explored the use of the detection of natural background of fast neutrons, the so-called cosmic-ray neutron sensing (CRS) approach to measure field-scale soil moisture in cropped fields. Primary cosmic rays penetrate the top atmosphere and interact with atmospheric particles. Such interaction results on a cascade of high-energy neutrons, which continue traveling through the atmospheric column. Finally, neutrons penetrate the soil surface and a second cascade is produced with the so-called secondary cosmic-ray neutrons (fast neutrons). Partly, fast neutrons are absorbed by hydrogen (soil moisture). Remaining neutrons scatter back to the atmosphere, where its flux is inversely correlated to the soil moisture content, therefore allowing a non-invasive indirect measurement of soil moisture. The CRS methodology is mainly evaluated based on a field study carried out on a farmland in Potsdam (Brandenburg, Germany) along three crop seasons with corn, sunflower and winter rye; a bare soil period; and two winter periods. Also, field monitoring was carried out in the Schaefertal catchment (Harz, Germany) for long-term testing of CRS against ancillary data. In the first experimental site, the CRS method was calibrated and validated using different approaches of soil moisture measurements. In a period with corn, soil moisture measurement at the local scale was performed at near-surface only, and in subsequent periods (sunflower and winter rye) sensors were placed in three depths (5 cm, 20 cm and 40 cm). 
The direct transfer of CRS calibration parameters between two vegetation periods led to a large overestimation of soil moisture by the CRS. Part of this soil moisture overestimation was attributed to an underestimation of the CRS observation depth during the corn period ( 5-10 cm), which was later recalculated to values between 20-40 cm in other crop periods (sunflower and winter rye). According to results from these monitoring periods with different crops, vegetation played an important role on the CRS measurements. Water contained also in crop biomass, above and below ground, produces important neutron moderation. This effect was accounted for by a simple model for neutron corrections due to vegetation. It followed crop development and reduced overall CRS soil moisture error for periods of sunflower and winter rye. In Potsdam farmland also inversely-estimated soil hydraulic parameters were determined at the field scale, using CRS soil moisture from the sunflower period. A modelling framework coupling HYDRUS-1D and PEST was applied. Subsequently, field-scale soil hydraulic properties were compared against local scale soil properties (modelling and measurements). Successful results were obtained here, despite large difference in support volume. Simple modelling framework emphasizes future research directions with CRS soil moisture to parameterize field scale models. In Schaefertal catchment, CRS measurements were verified using precipitation and evapotranspiration data. At the monthly resolution, CRS soil water storage was well correlated to these two weather variables. Also clearly, water balance could not be closed due to missing information from other compartments such as groundwater, catchment discharge, etc. In the catchment, the snow influence to natural neutrons was also evaluated. As also observed in Potsdam farmland, CRS signal was strongly influenced by snow fall and snow accumulation. A simple strategy to measure snow was presented for Schaefertal case. 
Concluding remarks of this dissertation showed that (a) the cosmic-ray neutron sensing (CRS) has a strong potential to provide feasible measurement of mean soil moisture at the field scale in cropped fields; (b) CRS soil moisture is strongly influenced by other environmental water pools such as vegetation and snow, therefore these should be considered in analysis; (c) CRS water storage can be used for soil hydrology modelling for determination of soil hydraulic parameters; and (d) CRS approach has strong potential for long term monitoring of soil moisture and for addressing studies of water balance.}, language = {en} } @phdthesis{Ritz2013, author = {Ritz, Julia}, title = {Discourse-givenness of noun phrases : theoretical and computational models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70818}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis gives formal definitions of discourse-givenness, coreference and reference, and reports on experiments with computational models of discourse-givenness of noun phrases for English and German. Definitions are based on Bach's (1987) work on reference, Kibble and van Deemter's (2000) work on coreference, and Kamp and Reyle's Discourse Representation Theory (1993). For the experiments, the following corpora with coreference annotation were used: MUC-7, OntoNotes and ARRAU for English, and TueBa-D/Z for German. As for classification algorithms, they cover J48 decision trees, the rule based learner Ripper, and linear support vector machines. 
New features are suggested, representing the noun phrase's specificity as well as its context, which lead to a significant improvement of classification quality.}, language = {en} } @phdthesis{Rietsch2013, author = {Rietsch, Katrin}, title = {Body composition especially external skeletal robustness in association with physical activity and recreation in pre-pubertal children : a national and international investigation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66913}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In children the way of life, nutrition and recreation changed in recent years and as a consequence body composition shifted as well. It is established that overweight belongs to a global problem. In addition, German children exhibit a less robust skeleton than ten years ago. These developments may elevate the risk of cardiovascular diseases and skeletal modifications. Heredity and environmental factors as nutrition, socioeconomic status, physical activity and inactivity influence fat accumulation and the skeletal system. Based on these negative developments associations between type of body shape, skeletal measures and physical activity; relations between external skeletal robustness, physical activity and inactivity, BMI and body fat and also the progress of body composition especially external skeletal robustness in comparison in Russian and German children were investigated. In a cross-sectional study 691 German boys and girls aged 6 to 10 years were examined. Anthropometric measurements were taken and questionnaires about physical activity and inactivity were answered by parents. Additionally, pedometers were worn to determinate the physical activity in children. To compare the body composition in Russian and German children data from the years 2000 and 2010 were used. The study has shown that pyknomorphic individuals exhibit the highest external skeletal robustness and leptomorphic ones the lowest. 
Leptomorphic children may have a higher risk for bone diseases in adulthood. Pyknomorphic boys are more physically active by tendency. This is assessed as positive because pyknomorphic types display the highest BMI and body fat. Results showed that physical activity may reduce BMI and body fat. In contrast physical inactivity may lead to an increase of BMI and body fat and may rise with increasing age. Physical activity encourages additionally a robust skeleton. Furthermore external skeletal robustness is associated with BMI in order that BMI as a measure of overweight should be consider critically. The international 10-year comparison has shown an increase of BMI in Russian children and German boys. Currently, Russian children exhibit a higher external skeletal robustness than the Germans. However, in Russian boys skeleton is less robust than ten years ago. This trend should be observed in the future as well in other countries. All in all, several measures should be used to describe health situation in children and adults. Furthermore, in children it is essential to support physical activity in order to reduce the risk of obesity and to maintain a robust skeleton. In this way diseases are able to prevent in adulthood.}, language = {en} } @phdthesis{Reisswig2013, author = {Reisswig, Katja}, title = {Die „unternehmerische Mission" von Universit{\"a}ten : eine neoinstitutionalistische Betrachtung des Aufgabenbereichs Wissens- und Technologietransfer (WTT) an deutschen Hochschulen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70574}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Die Arbeit thematisiert die Ver{\"a}nderungen im deutschen Wissenschafts- und Hochschulsystem. Im Mittelpunkt steht die "unternehmerische Mission" von Universit{\"a}ten. Der Blick wird auf das Aufgabenfeld Wissens- und Technologietransfer (WTT) gerichtet. 
Anhand dessen werden die Ver{\"a}nderungen, die innerhalb des deutschen Universit{\"a}tssystems in den vergangenen Jahren erfolgten, nachgezeichnet. Die Erwartungshaltungen an Universit{\"a}ten haben sich ver{\"a}ndert. {\"O}konomische Sichtweisen nehmen einen immer gr{\"o}ßeren Stellenwert ein. Die Arbeit baut auf den Pr{\"a}missen der neoinstitutionalistischen Organisationstheorie auf. Anhand dieser wird gezeigt, wie Erwartungen externer Stakeholder Eingang in Hochschulen finden und sich auf ihre organisatorische Ausgestaltung auswirken. Der Arbeit liegt ein exploratives, qualitatives Untersuchungsdesign zugrunde. In einer Fallstudie werden zwei Universit{\"a}ten als Fallbeispiele untersucht. Die Untersuchung liefert Antworten auf die Fragen, wie der WTT als Aufgabenbereich an deutschen Universit{\"a}ten umgesetzt wird, welche Strukturen sich herausgebildet haben und inwieweit eine Institutionalisierung des WTTs an Universit{\"a}ten erfolgt ist. In der Arbeit werden verschiedene Erhebungsinstrumente im Rahmen einer Triangulation genutzt. Experteninterviews bilden das Hauptanalyseinstrument. Ziel der Untersuchung ist neben der Beantwortung der Forschungsfragen, Hypothesen zu bilden, die f{\"u}r weiterf{\"u}hrende Untersuchungen genutzt werden k{\"o}nnen. Dar{\"u}ber hinaus werden Handlungsempfehlungen f{\"u}r die Umsetzung des WTTs an deutschen Hochschulen gegeben. 
Die Arbeit richtet sich sowohl an Wissenschaftler als auch Praktiker aus dem Bereich Wissens- und Technologietransfer.}, language = {de} } @phdthesis{Reilich2013, author = {Reilich, Julia}, title = {Bildungsrenditen in Deutschland}, series = {Potsdamer Schriften zur Raumwirtschaft}, journal = {Potsdamer Schriften zur Raumwirtschaft}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-219-3}, issn = {2190-8702}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62658}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 244}, year = {2013}, abstract = {Der Einfluss von Bildung gewinnt gesellschaftlich und politisch an Bedeutung. Auch im wissenschaftlichen Bereich zeigt sich dies {\"u}ber eine vielseitige Diskussion zum Einfluss von Bildung auf das Einkommen. In dieser Arbeit werden nationale und regionale Disparit{\"a}ten in der monet{\"a}ren Wertsch{\"a}tzung von allgemeinem Humankapital aufgedeckt und diskutiert. Daf{\"u}r werden verschiedene Verfahren diskutiert und basierend darauf Intervalle f{\"u}r die mittleren Bildungsrenditen bestimmt. Im ersten Abschnitt wird die Thematik theoretisch {\"u}ber zwei verschiedene Modellans{\"a}tze fundiert und kritisch diskutiert. Anschließend folgt die Darstellung des aktuellen empirischen Forschungsbestands. Der Hauptteil der Arbeit beginnt mit der Darstellung des verwendeten Datensatzes und seiner kritischen Repr{\"a}sentativit{\"a}tspr{\"u}fung. Eine n{\"a}here Variablenbeschreibung mit deskriptiver Analyse dient zur Erkl{\"a}rung der verwendeten Gr{\"o}ßen. Darauffolgend werden bestehende Verfahren zur Sch{\"a}tzung von Bildungsrenditen diskutiert. Unter ausschließlicher Ber{\"u}cksichtigung der Erwerbst{\"a}tigen zeigt das 3SLS-Verfahren die besten Eigenschaften. Bezieht man jedoch alle Erwerbspersonen in die Analyse mit ein, so erweist sich das Heckman-Verfahren als sehr geeignet. 
Die Analyse - zun{\"a}chst auf nationaler Ebene - best{\"a}tigt weitestgehend die bestehenden Erkenntnisse der Literatur. Eine Separierung des Datensatzes auf verschiedene Alterscluster, Voll- und Teilerwerbst{\"a}tige sowie Erwerbst{\"a}tige in der Privatwirtschaft und im {\"o}ffentlichen Dienst zeigen keine signifikanten Unterschiede in der H{\"o}he der gezahlten durchschnittlichen Bildungsrenditen. Anders verh{\"a}lt es sich bei der regionalen Analyse. Zun{\"a}chst werden Ost- und Westdeutschland separat betrachtet. F{\"u}r diese erste Analyse lassen sich {\"u}ber 95 \%-Konfidenzintervalle deutliche Unterschiede in der H{\"o}he der Bildungsrenditen ermitteln. Aufbauend auf diese Ergebnisse wird die Analyse vertieft. Eine Separierung auf Bundesl{\"a}nderebene und ein weiterer Vergleich der Konfidenzintervalle folgen. Zur besseren statistischen Vergleichbarkeit der Ergebnisse wird neben dem 3SLS-Verfahren, angewendet auf die separierten Datens{\"a}tze, auch ein Modell ohne die Notwendigkeit der Separierung gew{\"a}hlt. Hierbei ist die Variation der Regionen {\"u}ber Interaktionsterme ber{\"u}cksichtigt. Dieses Regressionsmodell wird auf das OLS- und das Heckman-Verfahren angewendet. Der Vorteil hierbei ist, dass die Koeffizienten auf Gleichheit getestet werden k{\"o}nnen. Dabei kristallisieren sich deutlich unterschiedliche Bildungsrenditen f{\"u}r Mecklenburg-Vorpommern, aber auch f{\"u}r Sachsen-Anhalt und Th{\"u}ringen im Vergleich zu den restlichen Bundesl{\"a}ndern Deutschlands heraus. Diese L{\"a}nder zeichnen sich durch eine besonders hohe j{\"a}hrliche Verzinsung von allgemeinem Humankapital aus. Es folgt eine Diskussion {\"u}ber m{\"o}gliche Ursachen f{\"u}r die regional verschiedenen Bildungsrenditen. Dabei zeigt sich, dass in den Bundesl{\"a}ndern mit hoher Rendite das mittlere Einkommensniveau und auch das durchschnittliche Preisniveau tendenziell geringer sind. 
Weiterhin wird deutlich, dass bei h{\"o}heren relativen Abweichungen der durchschnittlichen Einkommen h{\"o}here Renditen zu verzeichnen sind. Auch die Wanderungsbewegungen je nach Qualifikation unterscheiden sich. Unter zus{\"a}tzlicher Ber{\"u}cksichtigung der Arbeitslosenquoten zeigt sich in den L{\"a}ndern mit hoher Rendite eine tendenziell h{\"o}here Arbeitslosigkeit. Im zusammenfassenden Fazit der Arbeit werden abschließend die Erkenntnisse gew{\"u}rdigt. Dabei ist zu bemerken, dass der Beitrag einen Start in die bundesl{\"a}nderweite Analyse liefert, die eine Fortf{\"u}hrung auf beispielsweise eine mehrperiodische Betrachtung anregt.}, language = {de} } @phdthesis{Preiss2013, author = {Preiß, Andreas}, title = {Determinanten postsekund{\"a}rer Bildungsaspirationen im Kontext jugendlicher Lebensstile : eine empirische Analyse entlang eigener Erhebungsdaten}, isbn = {978-3-86956-218-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62626}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Nur langsam scheinen jene Schockwellen abzuebben, die ausgel{\"o}st durch die Ergebnisse der PISA-Erhebungen seit mehr als einem Jahrzehnt die Bildungsrepublik Deutschland durchqueren und weite Teile der Gesellschaft in den Zustand regelrechter Bildungspanik versetzten. An der Schwelle zum 21. Jahrhundert belegte eine Reihe von Studien f{\"u}r das wiedervereinte Deutschland eine im OECD-Vergleich besonders ausgepr{\"a}gte Abh{\"a}ngigkeit des Bildungserfolges von der sozialen Herkunft. Als eine Konsequenz ist der Zugang zu terti{\"a}rer Bildung bis dato deutlich durch soziale Ungleichheit gekennzeichnet. Vor diesem Hintergrund leistet die vorliegende Dissertationsschrift einen wesentlichen Beitrag zur urs{\"a}chlichen Erkl{\"a}rung von Mustern sozialer Selektivit{\"a}t, die an den Gelenkstellen zwischen sekund{\"a}ren und postsekund{\"a}ren Bildungsangeboten sichtbar werden. 
Auf innovative Weise verbindet die Arbeit ein zeitgem{\"a}ßes handlungstheoretisches Modell mit einer komplexen Lebensstilanalyse. Die Analyse st{\"u}tzt sich auf Erhebungsdaten, die zwischen Januar und April 2010 an mehr als 30 weiterf{\"u}hrenden Schulen des Bundeslandes Brandenburg erhoben wurden. Im Mittelpunkt des Forschungsinteresses steht einerseits die Identifikation von sozial-kognitiven Determinanten, die das Niveau und die Richtung postsekund{\"a}rer Bildungsaspirationen maßgeblich vorstrukturieren sowie andererseits deren Verortung im Kontext jugendlicher Lebensstile. Das komplexe Analysedesign erweist sich als empirisch fruchtbar: So erbringt die Arbeit den empirischen Nachweis, dass die spezifischen Konfigurationen der best{\"a}tigten psychosozialen Pr{\"a}diktoren nicht nur statistisch bedeutsam zwischen jugendlichen Stilmustern variieren, sondern sich diesbez{\"u}glich erfolgreiche von weniger erfolgreichen Typen unterscheiden lassen.}, language = {de} } @phdthesis{Pingel2013, author = {Pingel, Patrick}, title = {Morphology, charge transport properties, and molecular doping of thiophene-based organic semiconducting thin films}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69805}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Organic semiconductors combine the benefits of organic materials, i.e., low-cost production, mechanical flexibility, lightweight, and robustness, with the fundamental semiconductor properties light absorption, emission, and electrical conductivity. This class of material has several advantages over conventional inorganic semiconductors that have led, for instance, to the commercialization of organic light-emitting diodes which can nowadays be found in the displays of TVs and smartphones. Moreover, organic semiconductors will possibly lead to new electronic applications which rely on the unique mechanical and electrical properties of these materials. 
In order to push the development and the success of organic semiconductors forward, it is essential to understand the fundamental processes in these materials. This thesis concentrates on understanding how the charge transport in thiophene-based semiconductor layers depends on the layer morphology and how the charge transport properties can be intentionally modified by doping these layers with a strong electron acceptor. By means of optical spectroscopy, the layer morphologies of poly(3-hexylthiophene), P3HT, P3HT-fullerene bulk heterojunction blends, and oligomeric polyquaterthiophene, oligo-PQT-12, are studied as a function of temperature, molecular weight, and processing conditions. The analyses rely on the decomposition of the absorption contributions from the ordered and the disordered parts of the layers. The ordered-phase spectra are analyzed using Spano's model. It is figured out that the fraction of aggregated chains and the interconnectivity of these domains is fundamental to a high charge carrier mobility. In P3HT layers, such structures can be grown with high-molecular weight, long P3HT chains. Low and medium molecular weight P3HT layers do also contain a significant amount of chain aggregates with high intragrain mobility; however, intergranular connectivity and, therefore, efficient macroscopic charge transport are absent. In P3HT-fullerene blend layers, a highly crystalline morphology that favors the hole transport and the solar cell efficiency can be induced by annealing procedures and the choice of a high-boiling point processing solvent. Based on scanning near-field and polarization optical microscopy, the morphology of oligo-PQT-12 layers is found to be highly crystalline which explains the rather high field-effect mobility in this material as compared to low molecular weight polythiophene fractions. 
On the other hand, crystalline dislocations and grain boundaries are identified which clearly limit the charge carrier mobility in oligo-PQT-12 layers. The charge transport properties of organic semiconductors can be widely tuned by molecular doping. Indeed, molecular doping is a key to highly efficient organic light-emitting diodes and solar cells. Despite this vital role, it is still not understood how mobile charge carriers are induced into the bulk semiconductor upon the doping process. This thesis contains a detailed study of the doping mechanism and the electrical properties of P3HT layers which have been p-doped by the strong molecular acceptor tetrafluorotetracyanoquinodimethane, F4TCNQ. The density of doping-induced mobile holes, their mobility, and the electrical conductivity are characterized in a broad range of acceptor concentrations. A long-standing debate on the nature of the charge transfer between P3HT and F4TCNQ is resolved by showing that almost every F4TCNQ acceptor undergoes a full-electron charge transfer with a P3HT site. However, only 5\% of these charge transfer pairs can dissociate and induce a mobile hole into P3HT which contributes to electrical conduction. Moreover, it is shown that the left-behind F4TCNQ ions broaden the density-of-states distribution for the doping-induced mobile holes, which is due to the long-range Coulomb attraction in the low-permittivity organic semiconductors.}, language = {en} } @phdthesis{Piffl2013, author = {Piffl, Tilmann}, title = {Models of the Galaxy and the massive spectroscopic stellar survey RAVE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70371}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Numerical simulations of galaxy formation and observational Galactic Astronomy are two fields of research that study the same objects from different perspectives. 
Simulations try to understand galaxies like our Milky Way from an evolutionary point of view while observers try to disentangle the current structure and the building blocks of our Galaxy. Due to great advances in computational power as well as in massive stellar surveys we are now able to compare resolved stellar populations in simulations and in observations. In this thesis we use a number of approaches to relate the results of the two fields to each other. The major observational data set we refer to for this work comes from the Radial Velocity Experiment (RAVE), a massive spectroscopic stellar survey that observed almost half a million stars in the Galaxy. In a first study we use three different models of the Galaxy to generate synthetic stellar surveys that can be directly compared to the RAVE data. To do this we evaluate the RAVE selection function to great detail. Among the Galaxy models is the widely used Besan{\c{c}}on model that performs well when individual parameter distributions are considered, but fails when we study chemodynamic correlations. The other two models are based on distributions of mass particles instead of analytical distribution functions. This is the first time that such models are converted to the space of observables and are compared to a stellar survey. We show that these models can be competitive and in some aspects superior to analytic models, because of their self-consistent dynamic history. In the case of a full cosmological simulation of disk galaxy formation we can recover features in the synthetic survey that relate to the known issues of the model and hence prove that our technique is sensitive to the global structure of the model. We argue that the next generation of cosmological galaxy formation simulations will deliver valuable models for our Galaxy. Testing these models with our approach will provide a direct connection between stellar Galactic astronomy and physical cosmology. 
In the second part of the thesis we use a sample of high-velocity halo stars from the RAVE data to estimate the Galactic escape speed and the virial mass of the Milky Way. In the course of this study cosmological simulations of galaxy formation also play a crucial role. Here we use them to calibrate and extensively test our analysis technique. We find the local Galactic escape speed to be 533 (+54/-41) km/s (90\% confidence). With this result in combination with a simple mass model of the Galaxy we then construct an estimate of the virial mass of the Galaxy. For the mass profile of the dark matter halo we use two extreme models, a pure Navarro, Frenk \& White (NFW) profile and an adiabatically contracted NFW profile. When we use statistics on the concentration parameter of these profiles taken from large dissipationless cosmological simulations we obtain an estimate of the virial mass that is almost independent of the choice of the halo profile. For the mass M_340 enclosed within R_340 = 180 kpc we find 1.3 (+0.4/-0.3) x 10^12 M_sun. This value is in very good agreement with a number of other mass estimates in the literature that are based on independent data sets and analysis techniques. In the last part of this thesis we investigate a new possible channel to generate a population of Hypervelocity stars (HVSs) that is observed in the stellar halo. Commonly, it is assumed that the velocities of these stars originate from an interaction with the super-massive black hole in the Galactic center. It was suggested recently that stars stripped off a disrupted satellite galaxy could reach similar velocities and leave the Galaxy. Here we study in detail the kinematics of tidal debris stars to investigate the probability that the observed sample of HVSs could partly originate from such a galaxy collision. We use a suite of \$N\$-body simulations following the encounter of a satellite galaxy with its Milky Way-type host galaxy. 
We quantify the typical pattern in angular and phase space formed by the debris stars and develop a simple model that predicts the kinematics of stripped-off stars. We show that the distribution of orbital energies in the tidal debris has a typical form that can be described quite accurately by a simple function. The main parameters determining the maximum energy kick a tidal debris star can get are the initial mass of the satellite and only to a lower extent its orbit. Main contributors to an unbound stellar population created in this way are massive satellites (M_sat > 10^9 M_sun). The probability that the observed HVS population is significantly contaminated by tidal debris stars appears small in the light of our results.}, language = {en} } @phdthesis{Perscheid2013, author = {Perscheid, Michael}, title = {Test-driven fault navigation for debugging reproducible failures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68155}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The correction of software failures tends to be very cost-intensive because their debugging is an often time-consuming development activity. During this activity, developers largely attempt to understand what causes failures: Starting with a test case that reproduces the observable failure they have to follow failure causes on the infection chain back to the root cause (defect). This idealized procedure requires deep knowledge of the system and its behavior because failures and defects can be far apart from each other. Unfortunately, common debugging tools are inadequate for systematically investigating such infection chains in detail. Thus, developers have to rely primarily on their intuition and the localization of failure causes is not time-efficient. To prevent debugging by disorganized trial and error, experienced developers apply the scientific method and its systematic hypothesis-testing. 
However, even when using the scientific method, the search for failure causes can still be a laborious task. First, lacking expertise about the system makes it hard to understand incorrect behavior and to create reasonable hypotheses. Second, contemporary debugging approaches provide no or only partial support for the scientific method. In this dissertation, we present test-driven fault navigation as a debugging guide for localizing reproducible failures with the scientific method. Based on the analysis of passing and failing test cases, we reveal anomalies and integrate them into a breadth-first search that leads developers to defects. This systematic search consists of four specific navigation techniques that together support the creation, evaluation, and refinement of failure cause hypotheses for the scientific method. First, structure navigation localizes suspicious system parts and restricts the initial search space. Second, team navigation recommends experienced developers for helping with failures. Third, behavior navigation allows developers to follow emphasized infection chains back to root causes. Fourth, state navigation identifies corrupted state and reveals parts of the infection chain automatically. We implement test-driven fault navigation in our Path Tools framework for the Squeak/Smalltalk development environment and limit its computation cost with the help of our incremental dynamic analysis. This lightweight dynamic analysis ensures an immediate debugging experience with our tools by splitting the run-time overhead over multiple test runs depending on developers' needs. 
Hence, our test-driven fault navigation in combination with our incremental dynamic analysis answers important questions in a short time: where to start debugging, who understands failure causes best, what happened before failures, and which state properties are infected.}, language = {en} } @phdthesis{Pauly2013, author = {Pauly, Dennis}, title = {Grenzf{\"a}lle der Subordination : Merkmale, Empirie und Theorie abh{\"a}ngiger Nebens{\"a}tze}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70275}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Gegenstand dieser Arbeit sind sog. nicht-kanonische bzw. unintegrierte Nebens{\"a}tze. Diese Nebens{\"a}tze zeichnen sich dadurch aus, dass sie sich mittels g{\"a}ngiger Kriterien (Satzgliedstatus, Verbletztstellung) nicht klar als koordiniert oder subordiniert beschreiben lassen. Das Ph{\"a}nomen nicht-kanonischer Nebens{\"a}tze ist ein Thema, welches in der Sprachwissenschaft generell seit den sp{\"a}ten Siebzigern (Davison 1979) diskutiert wird und sp{\"a}testens mit Fabricius-Hansen (1992) auch innerhalb der germanistischen Linguistik angekommen ist. Ein viel beachteter Komplex ist hierbei - neben der reinen Identifizierung nicht-kanonischer Satzgef{\"u}ge - meist auch die Erstellung einer Klassifikation zur Erfassung zumindest einiger nicht-kanonischer Gef{\"u}ge, wie dies etwa bei Fabricius-Hansen (1992) und Reis (1997) zu sehen ist. Das Ziel dieser Studie ist es, eine exhaustive Klassifikation der angesprochenen Nebensatztypen vorzunehmen. Dazu werden zun{\"a}chst - unter Zuhilfenahme von Korpusdaten - alle potentiellen Subordinationsmerkmale genauer untersucht, da die meisten bisherigen Studien zu diesem Thema die stets gleichen Merkmale als gegeben voraussetzen. Dabei wird sich herausstellen, dass nur eine kleine Anzahl von Merkmalen sich wirklich zweifelsfrei dazu eignet, Aufschluss {\"u}ber die Satzverkn{\"u}pfungsqualit{\"a}t zu geben. 
Die anschließend aufgestellte Taxonomie deutscher Nebens{\"a}tze wird schließlich einzig mit der Postulierung einer nicht-kanonischen Nebensatzklasse auskommen. Sie ist dar{\"u}ber hinaus auch in der Lage, die zahlreich vorkommenden Ausnahmef{\"a}lle zu erfassen. Dies heißt konkret, dass auch etwaige Nebens{\"a}tze, die sich aufgrund bestimmter Eigenschaften teilweise idiosynkratisch verhalten, einfach in die vorgeschlagene Klassifikation {\"u}bernommen werden k{\"o}nnen. In diesem Zuge werde ich weiterhin zeigen, wie eine Nebensatzklassifikation auch sog. sekund{\"a}ren Subordinationsmerkmalen gerecht werden kann, obwohl diese sich hinsichtlich der einzelnen Nebensatzklassen nicht einheitlich verhalten. Schließlich werde ich eine theoretische Modellierung der zuvor postulierten Taxonomie vornehmen, die auf Basis der HPSG mittels Merkmalsvererbung alle m{\"o}glichen Nebensatztypen zu erfassen imstande ist.}, language = {de} } @phdthesis{Patz2013, author = {Patz, Ronny}, title = {Information flows in the context of EU policy-making : affiliation networks and the post-2012 reform of the EU's Common Fisheries Policy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70732}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Information flows in EU policy-making are heavily dependent on personal networks, both within the Brussels sphere but also reaching outside the narrow limits of the Belgian capital. These networks develop for example in the course of formal and informal meetings or at the sidelines of such meetings. A plethora of committees at European, transnational and regional level provides the basis for the establishment of pan-European networks. By studying affiliation to those committees, basic network structures can be uncovered. 
These affiliation network structures can then be used to predict EU information flows, assuming that certain positions within the network are advantageous for tapping into streams of information while others are too remote and peripheral to provide access to information early enough. This study has tested those assumptions for the case of the reform of the Common Fisheries Policy for the time after 2012. Through the analysis of an affiliation network based on participation in 10 different fisheries policy committees over two years (2009 and 2010), network data for an EU-wide network of about 1300 fisheries interest group representatives and more than 200 events was collected. The structure of this network showed a number of interesting patterns, such as - not surprisingly - a rather central role of Brussels-based committees but also close relations of very specific interests to the Brussels-cluster and stronger relations between geographically closer maritime regions. The analysis of information flows then focused on access to draft EU Commission documents containing the upcoming proposal for a new basic regulation of the Common Fisheries Policy. It was first documented that it would have been impossible to officially obtain this document and that personal networks were thus the most likely sources for fisheries policy actors to obtain access to these "leaks" in early 2011. A survey of a sample of 65 actors from the initial network supported these findings: Only a very small group had accessed the draft directly from the Commission. Most respondents who obtained access to the draft had received it from other actors, highlighting the networked flow of informal information in EU politics. Furthermore, the testing of the hypotheses connecting network positions and the level of informedness indicated that presence in or connections to the Brussels sphere had both advantages for overall access to the draft document and with regard to timing. 
Methodologically, challenges of both the network analysis and the analysis of information flows but also their relevance for the study of EU politics have been documented. In summary, this study has laid the foundation for a different way to study EU policy-making by connecting topical and methodological elements - such as affiliation network analysis and EU committee governance - which so far have not been considered together, thereby contributing in various ways to political science and EU studies.}, language = {en} } @phdthesis{Patterson2013, author = {Patterson, Clare}, title = {The role of structural and discourse-level cues during pronoun resolution}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71280}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Pronoun resolution normally takes place without conscious effort or awareness, yet the processes behind it are far from straightforward. A large number of cues and constraints have previously been recognised as playing a role in the identification and integration of potential antecedents, yet there is considerable debate over how these operate within the resolution process. The aim of this thesis is to investigate how the parser handles multiple antecedents in order to understand more about how certain information sources play a role during pronoun resolution. I consider how both structural information and information provided by the prior discourse is used during online processing. This is investigated through several eye tracking during reading experiments that are complemented by a number of offline questionnaire experiments. I begin by considering how condition B of the Binding Theory (Chomsky 1981; 1986) has been captured in pronoun processing models; some researchers have claimed that processing is faithful to syntactic constraints from the beginning of the search (e.g. 
Nicol and Swinney 1989), while others have claimed that potential antecedents which are ruled out on structural grounds nonetheless affect processing, because the parser must also pay attention to a potential antecedent's features (e.g. Badecker and Straub 2002). My experimental findings demonstrate that the parser is sensitive to the subtle changes in syntactic configuration which either allow or disallow pronoun reference to a local antecedent, and indicate that the parser is normally faithful to condition B at all stages of processing. Secondly, I test the Primitives of Binding hypothesis proposed by Koornneef (2008) based on work by Reuland (2001), which is a modular approach to pronoun resolution in which variable binding (a semantic relationship between pronoun and antecedent) takes place before coreference. I demonstrate that a variable-binding (VB) antecedent is not systematically considered earlier than a coreference (CR) antecedent online. I then go on to explore whether these findings could be attributed to the linear order of the antecedents, and uncover a robust recency preference both online and offline. I consider what role the factor of recency plays in pronoun resolution and how it can be reconciled with the first-mention advantage (Gernsbacher and Hargreaves 1988; Arnold 2001; Arnold et al., 2007). Finally, I investigate how aspects of the prior discourse affect pronoun resolution. Prior discourse status clearly had an effect on pronoun resolution, but an antecedent's appearance in the previous context was not always facilitative; I propose that this is due to the number of topic switches that a reader must make, leading to a lack of discourse coherence which has a detrimental effect on pronoun resolution. The sensitivity of the parser to structural cues does not entail that cue types can be easily separated into distinct sequential stages, and I therefore propose that the parser is structurally sensitive but not modular. 
Aspects of pronoun resolution can be captured within a parallel constraints model of pronoun resolution, however, such a model should be sensitive to the activation of potential antecedents based on discourse factors, and structural cues should be strongly weighted.}, language = {en} } @phdthesis{Patra2013, author = {Patra, Pintu}, title = {Population dynamics of bacterial persistence}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69253}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The life of microorganisms is characterized by two main tasks, rapid growth under conditions permitting growth and survival under stressful conditions. The environments, in which microorganisms dwell, vary in space and time. The microorganisms innovate diverse strategies to readily adapt to the regularly fluctuating environments. Phenotypic heterogeneity is one such strategy, where an isogenic population splits into subpopulations that respond differently under identical environments. Bacterial persistence is a prime example of such phenotypic heterogeneity, whereby a population survives under an antibiotic attack, by keeping a fraction of population in a drug tolerant state, the persister state. Specifically, persister cells grow more slowly than normal cells under growth conditions, but survive longer under stress conditions such as the antibiotic administrations. Bacterial persistence is identified experimentally by examining the population survival upon an antibiotic treatment and the population resuscitation in a growth medium. The underlying population dynamics is explained with a two state model for reversible phenotype switching in a cell within the population. We study this existing model with a new theoretical approach and present analytical expressions for the time scale observed in population growth and resuscitation, that can be easily used to extract underlying model parameters of bacterial persistence. 
In addition, we recapitulate previously known results on the evolution of such structured population under periodically fluctuating environment using our simple approximation method. Using our analysis, we determine model parameters for Staphylococcus aureus population under several antibiotics and interpret the outcome of cross-drug treatment. Next, we consider the expansion of a population exhibiting phenotype switching in a spatially structured environment consisting of two growth permitting patches separated by an antibiotic patch. The dynamic interplay of growth, death and migration of cells in different patches leads to distinct regimes in population propagation speed as a function of migration rate. We map out the region in parameter space of phenotype switching and migration rate to observe the condition under which persistence is beneficial. Furthermore, we present an extended model that allows mutation from the two phenotypic states to a resistant state. We find that the presence of persister cells may enhance the probability of resistant mutation in a population. Using this model, we explain the experimental results showing the emergence of antibiotic resistance in a Staphylococcus aureus population upon tobramycin treatment. In summary, we identify several roles of bacterial persistence, such as help in spatial expansion, development of multidrug tolerance and emergence of antibiotic resistance. Our study provides a theoretical perspective on the dynamics of bacterial persistence in different environmental conditions. 
These results can be utilized to design further experiments, and to develop novel strategies to eradicate persistent infections.}, language = {en} } @phdthesis{Ohl2013, author = {Ohl, Sven}, title = {Small eye movements during fixation : the case of postsaccadic fixation and preparatory influences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69862}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Describing human eye movement behavior as an alternating sequence of saccades and fixations turns out to be an oversimplification because the eyes continue to move during fixation. Small-amplitude saccades (e.g., microsaccades) are typically observed 1-2 times per second during fixation. Research on microsaccades came in two waves. Early studies on microsaccades were dominated by the question whether microsaccades affect visual perception, and by studies on the role of microsaccades in the process of fixation control. The lack of evidence for a unique role of microsaccades led to a very critical view on the importance of microsaccades. Over the last years, microsaccades moved into focus again, revealing many interactions with perception, oculomotor control and cognition, as well as intriguing new insights into the neurophysiological implementation of microsaccades. In contrast to early studies on microsaccades, recent findings on microsaccades were accompanied by the development of models of microsaccade generation. While the exact generating mechanisms vary between the models, they still share the assumption that microsaccades are generated in a topographically organized saccade motor map that includes a representation for small-amplitude saccades in the center of the map (with its neurophysiological implementation in the rostral pole of the superior colliculus). In the present thesis I criticize that models of microsaccade generation are exclusively based on results obtained during prolonged presaccadic fixation. 
I argue that microsaccades should also be studied in a more natural situation, namely the fixation following large saccadic eye movements. Studying postsaccadic fixation offers a new window to falsify models that aim to account for the generation of small eye movements. I demonstrate that error signals (visual and extra-retinal), as well as non-error signals like target eccentricity influence the characteristics of small-amplitude eye movements. These findings require a modification of a model introduced by Rolfs, Kliegl and Engbert (2008) in order to account for the generation of small-amplitude saccades during postsaccadic fixation. Moreover, I present a promising type of survival analysis that allowed me to examine time-dependent influences on postsaccadic eye movements. In addition, I examined the interplay of postsaccadic eye movements and postsaccadic location judgments, highlighting the need to include postsaccadic eye movements as covariate in the analyses of location judgments in the presented paradigm. In a second goal, I tested model predictions concerning preparatory influences on microsaccade generation during presaccadic fixation. The observation, that the preparatory set significantly influenced microsaccade rate, supports the critical model assumption that increased fixation-related activity results in a larger number of microsaccades. In the present thesis I present important influences on the generation of small-amplitude saccades during fixation. These eye movements constitute a rich oculomotor behavior which still poses many research questions. 
Certainly, small-amplitude saccades represent an interesting source of information and will continue to influence future studies on perception and cognition.}, language = {en} } @phdthesis{Oehme2013, author = {Oehme, Astrid}, title = {{\"A}sthetisches Verst{\"a}ndnis und {\"a}sthetische Wertsch{\"a}tzung von Automobildesign : eine Frage der Expertise}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-210-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-62013}, school = {Universit{\"a}t Potsdam}, pages = {xxx, 357}, year = {2013}, abstract = {Automobildesigner haben als Gestaltungsexperten die Aufgabe, die Identit{\"a}t und damit die Werte einer Marke in Formen zu {\"u}bersetzen, welche eine Vielzahl von Kunden ansprechen (Giannini \& Monti, 2003; Karjalainen, 2002). F{\"u}r diesen {\"U}bersetzungsprozess ist es zielf{\"u}hrend, {\"a}sthetische Kundenbed{\"u}rfnisse zu kennen, denn die Qualit{\"a}t einer Designl{\"o}sung h{\"a}ngt auch davon ab, inwieweit der Designer Kundenbe-d{\"u}rfnisse und damit das Designproblem richtig erfasst hat (Ulrich, 2006). Eine Grundlage hierf{\"u}r entsteht durch eine erfolgreiche Designer-Nutzer-Interaktion und den Aufbau eines gemeinsamen Kontextwissens (Lee, Popovich, Blackler \& Lee, 2009). Zwischen Designern und Kunden findet jedoch h{\"a}ufig kein direkter Austausch statt (Zeisel, 2006). Zudem belegen Befunde der Kunst- und Produkt{\"a}sthetikforschung, dass der Erwerb von gestalterischem Wissen und damit die Entwicklung {\"a}sthetischer Expertise mit Ver{\"a}nderungen der kognitiven Verarbeitung {\"a}sthetischer Objekte einhergeht, die sich in Wahrnehmung, Bewertung und Verhalten manifestieren. Damit ist auch zu erwarten, dass die Pr{\"a}ferenzurteile von Designern und Kunden bei der {\"a}sthetischen Bewertung von Design nicht immer konvergieren. 
Ziel der vorliegenden Arbeit war daher die systematische Untersuchung dieser expertisebedingten Wahrnehmungs- und Bewertungsunterschiede zwischen designgeschulten und ungeschulten Personen bei der Betrachtung von Automobildesign. Damit sollten Perzeption, Verarbeitung und Bewertung von Automobildesign durch designungeschulte Personen transparenter gemacht und mit der Verarbeitung designgeschulter Personen verglichen werden, um einen Beitrag zur gemeinsamen Wissensbasis und damit einer erfolgreichen Designer-Nutzer-Interaktion zu leisten. Die theoretische Einbettung der Arbeit basierte auf dem Modell {\"a}sthetischer Erfahrung und {\"a}sthetischen Urteilens von Leder, Belke, Oeberst und Augustin (2004), welches konkrete Annahmen zu Verarbeitungsunterschieden von {\"a}sthetischen Objekten zwischen Experten und Laien bietet, die bisher allerdings noch nicht umfassend gepr{\"u}ft wurden. Den ersten Schwerpunkt dieser Arbeit bildete die Untersuchung von Unterschieden zwischen Designern und designungeschulten Rezipienten bei der Beschreibung und Bewertung auf dem Markt vorhandenen Fahrzeugdesigns. Dabei sollte auch gepr{\"u}ft werden, ob eine lexikalische Verbindung zwischen Beschreibungsattributen von Fahrzeugrezipienten und den postulierten Markenwerten von Automobilmarken hergestellt werden kann. Diesem ersten Untersuchungsanliegen wurde in zwei Studien nachgegangen: Studie I diente der Erhebung von Beschreibungsattributen mittels Triadenvergleich in Anlehnung an Kelly (1955). Es wurde gepr{\"u}ft, ob designgeschulte Teilnehmer produktiver verbalisieren, dabei anteilig mehr symbolbezogene als formbezogene Attribute generieren und innerhalb ihrer Gruppe h{\"a}ufiger gleiche Attribute nutzen als designungeschulte Teilnehmer. Hierf{\"u}r beschrieben 20 designgeschulte Probanden und 20 designungeschulte Probanden mit selbst gew{\"a}hlten Adjektiven die Unterschiede zwischen vier pr{\"a}sentierten Fahrzeugen. 
Die Gruppen nutzten dabei entgegen der Annahmen sehr {\"a}hnliche Attribute und unterschieden sich somit auch nicht in ihrer Verwendung symbolbezogener und formbezogener Attribute. Die generierten Attribute wurden mittels Prototypenansatz (Amelang \& Zielinski, 2002) den ermittelten und nachfolgend kategorisierten Markenwerten von 10 Automobilherstellern zugeordnet, so dass sechs Skalen zur Erfassung der {\"a}sthetischen Wirkung von Fahrzeugen entstanden. In Studie II wurde ein diese sechs Skalen umfassender Fragebogen an einer Stichprobe von 83 Designern und Designstudierenden sowie 98 Probanden ohne Designausbildung in einer Onlinebefragung hinsichtlich Skalenkonsistenz gepr{\"u}ft. Außerdem wurden erste Annahmen aus dem Modell von Leder et al. (2004) abgeleitet und durch einen Vergleich der beiden Teilnehmergruppen hinsichtlich der Bewertung der vier pr{\"a}sentierten Fahrzeugmodelle f{\"u}r die Skalen mit guter interner Konsistenz (Attraktivit{\"a}t, Dynamik, Fortschritt, Qualit{\"a}t), sowie eines {\"a}sthetischen Gesamturteils, der ben{\"o}tigten Bewertungszeit und der Automobilaffinit{\"a}t {\"u}berpr{\"u}ft. Hierbei vergaben Designstudierende und insbesondere ausgebildete Designer radikalere Bewertungen als Designlaien, ben{\"o}tigten mehr Zeit bei der Bewertung und waren automobilaffiner als die ungeschulten Befragungsteilnehmer. Den zweiten Schwerpunkt der Arbeit bildete eine konzeptionelle Zusammenf{\"u}hrung der Annahmen des Modells von Leder et al. (2004) und der Postulate zur Wirkung von Objekteigenschaften auf {\"a}sthetische Urteile (Berlyne, 1971; Martindale, 1988; Silvia, 2005b). Konkret sollte gepr{\"u}ft werden, welchen Einfluss marktrelevante Objekteigenschaften, wie z.B. das Ausmaß an Innovativit{\"a}t, auf die durch Expertise moderierte Bewertung von Design haben. In den Studien III und IV wurden hierf{\"u}r systematisch bez{\"u}glich Innovativit{\"a}t und Balance gestufte Linienmodelle von Fahrzeugen pr{\"a}sentiert. 
In Studie III wurden die Modelle in einer Onlinebefragung durch 18 Designstudierende und 20 Studenten der Fahrzeugtechnik hinsichtlich Attraktivit{\"a}t, Innovativit{\"a}t und Balance bewertet. Im Einklang mit den Annahmen konnte gezeigt werden, dass sehr neuartiges Design von den designungeschulten Probanden als weniger attraktiv bewertet wird als von Betrachtern eines Designstudienganges. In Studie IV wurden neben den {\"A}sthetikbewertungen zus{\"a}tzlich das Blickverhalten und der affektive Zustand der Versuchsteilnehmer in einem Messwiederholungsdesign mit einer zwischengelagerten Phase elaborierter Designbewertung, in welcher der in Studie II gepr{\"u}fte Fragebogen eingesetzt wurde, erhoben. An der Laborstudie nahmen je 11 Designer, Ingenieure und Geisteswissenschaftler teil. Wiederum wurde innovatives Design von den designungeschulten Gruppen als weniger attraktiv bewertet. Dieser Unterschied reduzierte sich jedoch nach wiederholter Bewertung der Modelle. Die Manifestation expertisebedingten Blickverhaltens konnte nicht beobachtet werden, wie auch die durch eine angenommene bessere Bew{\"a}ltigung einhergehende positivere Stimmung oder h{\"o}here Zufriedenheit in der Expertengruppe. Gemeinsam mit den Befunden aus den Studien II und III wurde deutlich, dass Designausbildung und, noch ausgepr{\"a}gter, Designexpertise neben einer h{\"o}heren Attraktivit{\"a}tsbewertung innovativen Designs auch zu einer differenzierteren Beurteilung von Innovativit{\"a}t f{\"u}hrt. Dies wurde mit der Erweiterung des mentalen Schemas f{\"u}r Fahrzeuge durch die Besch{\"a}ftigung mit vielf{\"a}ltigen Modellvarianten bereits w{\"a}hrend des Studiums interpretiert. Es wurden Hinweise auf eine stilbezogene, elaboriertere Verarbeitung von Fahrzeugdesign durch designgeschulte Betrachter beobachtet sowie eine mit Expertise einhergehende Autonomit{\"a}t {\"a}sthetischer Urteile als Ausdruck einer hohen {\"a}sthetischen Entwicklungsstufe (Parsons, 1987). 
Mit diesen bei unterschiedlichen Stichproben beobachteten, stabilen expertisebedingten Bewertungsunterschieden wurde eine begr{\"u}ndete Basis f{\"u}r die geforderte Sensibilisierung f{\"u}r {\"a}sthetische Kundenbed{\"u}rfnisse im Gestaltungsprozess geschaffen. Der in dieser Arbeit entwickelte Fragebogen kann hierbei f{\"u}r eine elaborierte Messung von Fahrzeugdesignpr{\"a}ferenzen, zum Vergleich der {\"a}sthetischen Wirkung mit den intendierten Markenwerten sowie f{\"u}r die Diskussion von Nutzereindr{\"u}cken eingesetzt werden. Die Ergebnisse der vorliegenden Arbeiten tragen somit zur Erweiterung und Pr{\"a}zisierung des theoretischen Verst{\"a}ndnisses von {\"A}sthetikbewertungen bei und lassen sich gleichzeitig in die Praxis der Designausbildung und des Designprozesses {\"u}bertragen.}, language = {de} } @phdthesis{Nitschke2013, author = {Nitschke, Felix}, title = {Phosphorylation of polyglycans, especially glycogen and starch}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67396}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Functional metabolism of storage carbohydrates is vital to plants and animals. The water-soluble glycogen in animal cells and the amylopectin which is the major component of water-insoluble starch granules residing in plant plastids are chemically similar as they consist of α-1,6 branched α-1,4 glucan chains. Synthesis and degradation of transitory starch and of glycogen are accomplished by a set of enzymatic activities that to some extent are also similar in plants and animals. Chain elongation, branching, and debranching are achieved by synthases, branching enzymes, and debranching enzymes, respectively. Similarly, both types of polyglucans contain low amounts of phosphate esters whose abundance varies depending on species and organs. 
Starch is selectively phosphorylated by at least two dikinases (GWD and PWD) at the glucosyl carbons C6 and C3 and dephosphorylated by the phosphatase SEX4 and SEX4-like enzymes. In Arabidopsis insufficiency in starch phosphorylation or dephosphorylation results in largely impaired starch turnover, starch accumulation, and often in retardation of growth. In humans the progressive neurodegenerative epilepsy, Lafora disease, is the result of a defective enzyme (laforin) that is functionally equivalent to the starch phosphatase SEX4 and capable of glycogen dephosphorylation. Patients lacking laforin progressively accumulate unphysiologically structured insoluble glycogen-derived particles (Lafora bodies) in many tissues including brain. Previous results concerning the carbon position of glycogen phosphate are contradictory. Currently it is believed that glycogen is esterified exclusively at the carbon positions C2 and C3 and that the monophosphate esters, being incorporated via a side reaction of glycogen synthase (GS), lack any specific function but are rather an enzymatic error that needs to be corrected. In this study a versatile and highly sensitive enzymatic cycling assay was established that enables quantification of very small G6P amounts in the presence of high concentrations of non-target compounds as present in hydrolysates of polysaccharides, such as starch, glycogen, or cytosolic heteroglycans in plants. Following validation of the G6P determination by analyzing previously characterized starches, G6P was quantified in hydrolysates of various glycogen samples and in plant heteroglycans. Interestingly, glucosyl C6 phosphate is present in all glycogen preparations examined, the abundance varying between glycogens of different sources. Additionally, it was shown that carbon C6 is severely hyperphosphorylated in glycogen of a Lafora disease mouse model and that laforin is capable of removing C6 phosphate from glycogen. 
After enrichment of phosphoglucans from amylolytically degraded glycogen, several techniques of two-dimensional NMR were applied that independently proved the existence of 6-phosphoglucosyl residues in glycogen and confirmed the recently described phosphorylation sites C2 and C3. C6 phosphate is neither Lafora disease- nor species-, or organ-specific as it was demonstrated in liver glycogen from laforin-deficient mice and in that of wild type rabbit skeletal muscle. The distribution of 6-phosphoglucosyl residues was analyzed in glycogen molecules and has been found to be uneven. Gradual degradation experiments revealed that C6 phosphate is more abundant in central parts of the glycogen molecules and in molecules possessing longer glucan chains. Glycogen of Lafora disease mice consistently contains a higher proportion of longer chains while most short chains were reduced as compared to wild type. Together with results recently published (Nitschke et al., 2013) the findings of this work completely unhinge the hypothesis of GS-mediated phosphate incorporation as the respective reaction mechanism excludes phosphorylation of this glucosyl carbon, and as it is difficult to explain an uneven distribution of C6 phosphate by a stochastic event. Indeed the results rather point to a specific function of 6-phosphoglucosyl residues in the metabolism of polysaccharides as they are present in starch, glycogen, and, as described in this study, in heteroglycans of Arabidopsis. In the latter the function of phosphate remains unclear but this study provides evidence that in starch and glycogen it is related to branching. Moreover a role of C6 phosphate in the early stages of glycogen synthesis is suggested. 
By rejecting the current view on glycogen phosphate to be a stochastic biochemical error the results permit a wider view on putative roles of glycogen phosphate and on alternative biochemical ways of glycogen phosphorylation which for many reasons are likely to be mediated by distinct phosphorylating enzymes as it is realized in starch metabolism of plants. Better understanding of the enzymology underlying glycogen phosphorylation implies new possibilities of Lafora disease treatment.}, language = {en} } @phdthesis{Nikolaeva2013, author = {Nikolaeva, Elena}, title = {Landslide kinematics and interactions studied in central Georgia by using synthetic aperture radar interferometry, optical imagery and inverse modeling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70406}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Landslides are one of the biggest natural hazards in Georgia, a mountainous country in the Caucasus. So far, no systematic monitoring and analysis of the dynamics of landslides in Georgia has been made. Especially as landslides are triggered by extrinsic processes, the analysis of landslides together with precipitation and earthquakes is challenging. In this thesis I describe the advantages and limits of remote sensing to detect and better understand the nature of landslides in Georgia. The thesis is written in a cumulative form, comprising a general introduction, three manuscripts and a summary and outlook chapter. In the present work, I measure the surface displacement due to active landslides with different interferometric synthetic aperture radar (InSAR) methods. The slow landslides (several cm per year) are well detectable with two-pass interferometry. At the same time, the extremely slow landslides (several mm per year) could be detected only with time series InSAR techniques. I exemplify the success of InSAR techniques by showing hitherto unknown landslides, located in the central part of Georgia. 
Both the landslide extent and the displacement rate are quantified. Further, to determine a possible depth and position of potential sliding planes, inverse models were developed. Inverse modeling searches for parameters of the source which can create the observed displacement distribution. I also empirically estimate the volume of the investigated landslide using displacement distributions as derived from InSAR combined with morphology from aerial photography. I adapted a volume formula for our case, and also combined available seismicity and precipitation data to analyze potential triggering factors. A governing question was: What causes landslide acceleration as observed in the InSAR data? The investigated area (central Georgia) is seismically highly active. As an additional product of the InSAR data analysis, a deformation area associated with the 7th September Mw=6.0 earthquake was found. Evidence of surface ruptures directly associated with the earthquake could not be found in the field, however, during and after the earthquake new landslides were observed. The thesis highlights that deformation from InSAR may help to map areas prone to landslides triggered by earthquakes, potentially providing a technique that is of relevance for country-wide landslide monitoring, especially as new satellite sensors will emerge in the coming years.}, language = {en} } @phdthesis{Neymeyer2013, author = {Neymeyer, Hanna}, title = {Annexin A1 im chronischen Nierenversagen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69670}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Die Expansion des renalen Tubulointerstitiums aufgrund einer Akkumulation zellul{\"a}rer Bestandteile und extrazellul{\"a}rer Matrix ist eine charakteristische Eigenschaft der chronischen Nierenerkrankung (CKD) und f{\"u}hrt zu einer Progression der Erkrankung in Richtung eines terminalen Nierenversagens. 
Die Fibroblasten Proliferation und ihre Transformation hin zum sekretorischen Myofibroblasten-Ph{\"a}notyp stellen hierbei Schl{\"u}sselereignisse dar. Signalprozesse, die zur Induktion der Myofibroblasten f{\"u}hren, werden aktiv beforscht, um anti-fibrotische Therapieans{\"a}tze zu identifizieren. Das anti-inflammatorische Protein Annexin A1 und sein Rezeptor Formyl-Peptid Rezeptor 2 (FPR2) wurden in verschiedenen Organsystemen mit der Regulation von Fibroblastenaktivit{\"a}t in Verbindung gebracht, jedoch wurden ihre Expression und Funktion bei renalen fibrotischen Erkrankungen bisher nicht untersucht. Ziel der aktuellen Studie war daher die Untersuchung der renalen Annexin A1- und FPR2-Expression in einem Tiermodell des chronischen Nierenversagens, sowie die Charakterisierung der funktionellen Rolle von Annexin A1 in der Regulation des Fibroblasten Ph{\"a}notyps und ihrer Syntheseleistung. Dazu wurden neugeborene Sprague-Dawley Ratten in den ersten zwei Wochen ihres Lebens entweder mit Vehikel oder mit einem Angiotensin II Typ I Rezeptor Antagonisten behandelt und ohne weitere Intervention bis zu einem Alter von 11 Monaten (CKD Ratten) gehalten. Die Regulation und Lokalisation von Annexin A1 und FPR2 wurden mit Hilfe von Real-Time PCR und Immunhistochemie erfasst. Annexin A1- und FPR2-exprimierende Zellen wurden weiter durch Doppelimmunfluoreszenzf{\"a}rbungen charakterisiert. Gef{\"a}rbt wurde mit Antik{\"o}rpern gegen endotheliale Zellen (rat endothelial cell antigen), Makrophagen (CD 68), Fibroblasten (CD73) und Myofibroblasten (alpha-smooth muscle actin (α-sma)). Zellkulturstudien wurden an immortalisierten renalen kortikalen Fibroblasten aus Wildtyp- und Annexin A1-defizienten M{\"a}usen, sowie an etablierten humanen und murinen renalen Fibroblasten durchgef{\"u}hrt. Eine {\"U}berexpression von Annexin A1 wurde durch eine stabile Transfektion erreicht. 
Die Expression von Annexin A1, α-sma und Kollagen 1α1 wurde durch Real-Time PCR, Western Blot und Immunhistochemie erfasst. Die Sekretion des Annexin A1 Proteins wurde nach TCA-F{\"a}llung des Zellkultur{\"u}berstandes im Western Blot untersucht. Wie zu erwarten zeigten die CKD Ratten eine geringere Anzahl an Nephronen mit deutlicher glomerul{\"a}rer Hypertrophie. Der tubulointerstitielle Raum war durch fibrill{\"a}res Kollagen, aktivierte Fibroblasten und inflammatorische Zellen expandiert. Parallel dazu war die mRNA Expression von Annexin A1 und Transforming growth factor beta (TGF-β) signifikant erh{\"o}ht. Die Annexin A1-Lokalisation mittels Doppelimmunfluoreszenz identifizierte eine große Anzahl von CD73-positiven kortikalen Fibroblasten und eine Subpopulation von Makrophagen als Annexin A1-positiv. Die Annexin A1-Menge in Myofibroblasten und renalen Endothelien war gering. FPR2 konnte in der Mehrzahl der renalen Fibroblasten, in Myofibroblasten, in einer Subpopulation von Makrophagen und in renalen Epithelzellen nachgewiesen werden. Eine Behandlung der murinen Fibroblasten mit dem pro-fibrotischen Zytokin TGF-β f{\"u}hrte zu einem parallelen Anstieg der α-sma-, Kollagen 1α1- und Annexin A1-Biosynthese und zu einer gesteigerten Sekretion von Annexin A1. Eine {\"U}berexpression von Annexin A1 in murinen Fibroblasten reduzierte das Ausmaß der TGF-β induzierten α-sma- und Kollagen 1α1-Biosynthese. Fibroblasten aus Annexin A1-defizienten M{\"a}usen zeigten einen starken Myofibroblasten-Ph{\"a}notyp mit einer gesteigerten Expression an α-sma und Kollagen 1α1. Der Einsatz eines Peptidantagonisten des FPR2 (WRW4) resultierte in einer Stimulation der α-sma-Biosynthese, was die Vermutung nahe legte, dass Annexin A1 FPR2-vermittelt anti-fibrotische Effekte hat. Zusammenfassend zeigen diese Ergebnisse, dass renale kortikale Fibroblasten eine Hauptquelle des Annexin A1 im renalen Interstitium und einen Ansatzpunkt f{\"u}r Annexin A1-Signalwege in der Niere darstellen. 
Das Annexin A1/FPR2-System k{\"o}nnte daher eine wichtige Rolle in der Kontrolle des Fibroblasten Ph{\"a}notyp und der Fibroblasten Aktivit{\"a}t spielen und daher einen neuen Ansatz f{\"u}r die anti-fibrotischen pharmakologischen Strategien in der Behandlung des CKD darstellen.}, language = {de} } @phdthesis{Nass2013, author = {Naß, Andrea}, title = {Konzeption und Implementierung eines GIS-basierten Kartierungssystems f{\"u}r die geowissenschaftliche Planetenforschung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65298}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Die Kartierung planetarer K{\"o}rper stellt ein wesentliches Mittel der raumfahrtgest{\"u}tzten Exploration der Himmelsk{\"o}rper dar. Aktuell kommen zur Erstellung der planetaren Karten Geo-Informationssysteme (GIS) zum Einsatz. Ziel dieser Arbeit ist es, eine GIS-orientierte Prozesskette (Planetary Mapping System (PMS)) zu konzipieren, mit dem Schwerpunkt geologische und geomorphologische Karten planetarer Oberfl{\"a}chen einheitlich durchf{\"u}hren zu k{\"o}nnen und nachhaltig zug{\"a}nglich zu machen.}, language = {de} } @phdthesis{Mueller2013, author = {M{\"u}ller, Mike-Freya}, title = {Die Glutathionperoxidase 2 : physiologische Funktion und Rolle in der Azoxymethan-induzierten Colonkanzerogenese}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66955}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Das Selenoprotein Glutathionperoxidase 2 (GPx2) ist ein epithelzellspezifisches, Hydroperoxide-reduzierendes Enzym, welches im Darmepithel, vor allem in den proliferierenden Zellen des Kryptengrundes, exprimiert wird. Die Aufrechterhaltung der GPx2-Expression im Kryptengrund auch bei subad{\"a}quatem Selenstatus k{\"o}nnte darauf hinweisen, dass sie hier besonders wichtige Funktionen wahrnimmt. Tats{\"a}chlich weisen GPx2 knockout (KO)-M{\"a}use eine erh{\"o}hte Apoptoserate im Kryptengrund auf. 
Ein Ziel dieser Arbeit war es deshalb, die physiologische Funktion der GPx2 n{\"a}her zu untersuchen. In Kryptengrundepithelzellen aus dem Colon selenarmer GPx2 KO-M{\"a}use wurde eine erh{\"o}hte Caspase 3/7-Aktivit{\"a}t im Vergleich zum Wildtyp (WT) festgestellt. Zudem wiesen diese Zellen eine erh{\"o}hte Suszeptibilit{\"a}t f{\"u}r oxidativen Stress auf. Die GPx2 gew{\"a}hrleistet also den Schutz der proliferierenden Zellen des Kryptengrundes auch bei subad{\"a}quater Selenversorgung. Des Weiteren wurde im Colon selenarmer (-Se) und -ad{\"a}quater (+Se) GPx2 KO-M{\"a}use im Vergleich zum WT eine erh{\"o}hte Tumornekrosefaktor α-Expression und eine erh{\"o}hte Infiltration von Makrophagen festgestellt. Durch F{\"u}tterung einer selensupplementierten Di{\"a}t (++Se) konnte dies verhindert werden. In GPx2 KO-M{\"a}usen liegt demnach bereits basal eine niedriggradige Entz{\"u}ndung vor. Dies unterstreicht, dass GPx2 vor allem eine wichtige antiinflammatorische Funktion im Darmepithel besitzt. Dem Mikron{\"a}hrstoff Selen werden protektive Funktionen in der Colonkanzerogenese zugeschrieben. In einem Mausmodell der Colitis-assoziierten Colonkanzerogenese wirkte GPx2 antiinflammatorisch und hemmte so die Tumorentstehung. Auf der anderen Seite wurden jedoch auch prokanzerogene Eigenschaften der GPx2 aufgedeckt. Deshalb sollte in dieser Arbeit untersucht werden, welchen Effekt ein GPx2 knockout in einem Modell der sporadischen, durch Azoxymethan (AOM) induzierten, Colonkanzerogenese hat. Im WT kam es in pr{\"a}neoplastischen L{\"a}sionen h{\"a}ufig zu einer erh{\"o}hten GPx2-Expression im Vergleich zur normalen Darmmucosa. Eine derartige Steigerung der GPx2-Expression wurde auch in der humanen Colonkanzerogenese beschrieben. Das Fehlen der GPx2 resultierte in einer verminderten Entstehung von Tumoren (-Se und ++Se) und pr{\"a}neoplastischen L{\"a}sionen (-Se und +Se). Somit f{\"o}rderte GPx2 die Tumorentstehung im AOM-Modell. 
Acht Stunden nach AOM-Gabe war im GPx2 KO-Colon im Vergleich zum WT eine erh{\"o}hte Apoptoserate in der Kryptenmitte (-Se, +Se), nicht jedoch im Kryptengrund oder in der ++Se-Gruppe zu beobachten. M{\"o}glicherweise wirkte GPx2 prokanzerogen, indem sie die effiziente Elimination gesch{\"a}digter Zellen in der Tumorinitiationsphase verhinderte. Eine {\"a}hnliche Wirkung w{\"a}re auch durch die erh{\"o}hte GPx2-Expression in der Promotionsphase denkbar. So k{\"o}nnte GPx2 proliferierende pr{\"a}neoplastische Zellen vor oxidativem Stress, Apoptosen, oder auch der Antitumorimmunit{\"a}t sch{\"u}tzen. Dies k{\"o}nnte durch ein Zusammenwirken mit anderen Selenoproteinen wie GPx1 und Thioredoxinreduktasen, f{\"u}r die ebenfalls auch prokanzerogene Funktionen beschrieben wurden, verst{\"a}rkt werden. Eine wichtige Rolle k{\"o}nnte hier die Modulation des Redoxstatus in Tumorzellen spielen. Die Variation des Selengehalts der Di{\"a}t hatte im WT einen eher U-f{\"o}rmigen Effekt. So traten in der -Se und ++Se-Gruppe tendenziell mehr und gr{\"o}ßere Tumore auf, als in der +Se Gruppe. Zusammenfassend sch{\"u}tzt GPx2 also die proliferierenden Zellen des Kryptengrundes. Sie k{\"o}nnte jedoch auch proliferierende transformierte Zellen sch{\"u}tzen und so die sporadische, AOM-induzierte Colonkanzerogenese f{\"o}rdern. In einem Modell der Colitis-assoziierten Colonkanzerogenese hatte GPx2 auf Grund ihrer antiinflammatorischen Wirkung einen gegenteiligen Effekt und hemmte die Tumorentstehung. 
Die Rolle der GPx2 in der Colonkanzerogenese ist also abh{\"a}ngig vom zugrunde liegenden Mechanismus und wird maßgeblich von der Beteiligung einer Entz{\"u}ndung bestimmt.}, language = {de} } @phdthesis{MostafaKamelAbdelfatah2013, author = {Mostafa Kamel Abdelfatah, Ali}, title = {Interactions of food proteins with plant phenolics - modulation of structural, techno- and bio-functional properties of proteins}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69033}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The phenolic compounds as food components represent the largest group of secondary metabolites in plant foods. The phenolic compounds, e.g. chlorogenic acid (CQA), are susceptible to oxidation by enzymes, especially polyphenol oxidase (PPO), and at alkaline conditions. Both enzymatic and non-enzymatic oxidations occur in the presence of oxygen and produce quinone, which normally further react with other quinones to produce colored compounds (dimers), as well as is capable of undergoing a nucleophilic addition to proteins. The interactions of proteins with the phenolic compounds have received considerable attention in recent years, where plant phenolic compounds have drawn increasing attention due to their antioxidant properties and their noticeable effects in the prevention of various oxidative stress associated diseases. Green coffee beans are one of the richest sources of chlorogenic acids. Therefore, a green coffee extract would provide an eligible food relevant source for phenolic compounds for modification of proteins. The interaction between 5-CQA and amino acid lysine showed decrease in both free CQA and amino acid groups and only a slight effect on the antioxidative capacity depending on the reaction time was found. Furthermore, this interaction showed a large number of intermediary substances of low intensities. 
The reaction of lysine with 5-CQA in a model system initially leads to formation of 3-CQA and 4-CQA (both are isomers of 5-CQA), oxidation giving rise to the formation of a dimer which subsequently forms an adduct with lysine to finally result in a benzacridine derivative as reported and confirmed with the aid of HPLC coupled with ESI-MSn. The benzacridine derivative containing a trihydroxy structural element, was found to be yellow, being very reactive with oxygen yielding semiquinone and quinone type of products with characteristic green colors. Finally, the optimal conditions for this interaction as assessed by both the loss of CQA and free amino groups of lysine can be given at pH 7 and 25°C, the interaction increasing with incubation time and depending also on the amount of tyrosinase present. Green coffee bean has a higher diversity and content of phenolics, where besides the CQA isomers and their esters, other conjugates like feruloylquinic acids were also identified, thus documenting differences in phenolic profiles for the two coffee types (Coffea arabica and Coffea robusta). Coffee proteins are modified by interactions with phenolic compounds during the extraction, where those from C. arabica are more susceptible to these interactions compared to C. robusta, and the polyphenol oxidase activity seems to be a crucial factor for the formation of these addition products. Moreover, In-gel digestion combined with MALDI-TOF-MS revealed that the most reactive and susceptible protein fractions to covalent reactions are the α-chains of the 11S storage protein. Thus, based on these results and those supplied by other research groups, a tentative list of possible adduct structures was derived. The diversity of the different CQA derivatives present in green coffee beans complicates the series of reactions occurring, providing a broad palette of reaction products. 
These interactions influence the properties of protein, where they exposed changes in the solubility and hydrophobicity of proteins compared to faba bean proteins (as control). Modification of milk whey protein products (primarily β-lactoglobulin) with coffee specific phenolics and commercial CQA under enzymatic and alkaline conditions seems to be affecting their chemical, structural and functional properties, where both modifications led to reduced free amino- and thiol groups and tryptophan content. We propose that the disulfide-thiol exchange in the C-terminus of β-lactoglobulin may be initiated by the redox conditions provided in the presence of CQA. The protein structure β-lactoglobulin thereupon becomes more disordered as simulated by molecular dynamic calculation. This unfolding process may additionally be supported by the reaction of the CQA at the proposed sites of modification of ε-amino groups of lysine (K77, K91, K138, K47) and the thiol group of cysteine (C121). These covalent modifications also decreased the solubility and hydrophobicity of β-lactoglobulin, moreover they provide modified protein samples with a high antioxidative power, thermally more stable as reflected by a higher Td, require less amount of energy to unfold and when emulsified with lutein esters, exhibit their higher stability against UV light. The MALDI-TOF and SDS-PAGE results revealed that proteins treated at alkaline conditions were more strongly modified than those treated under enzymatic conditions. Finally, the results showed a slight change in emulsifying properties of modified proteins.}, language = {en} }