@phdthesis{Giesewetter2011, author = {Giesewetter, Stefan}, title = {Resolute readings of later Wittgenstein and the challenge of avoiding hierarchies in philosophy}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57021}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {This dissertation addresses the question: How did later Wittgenstein aim to achieve his goal of putting forward a way of dissolving philosophical problems which centered on asking ourselves what we mean by our words - yet which did not entail any claims about the essence of language and meaning? This question is discussed with reference to "resolute" readings of Wittgenstein. I discuss the readings of James Conant, Oskari Kuusela, and Martin Gustafsson. I follow Oskari Kuusela's claim that in order to fully appreciate how later Wittgenstein meant to achieve his goal, we need to clearly see how he aimed to do away with hierarchies in philosophy: Not only is the dissolution of philosophical problems via the method of clarifying the grammar of expressions to be taken as independent from any theses about what meaning must be - but furthermore, it is to be taken as independent from the dissolution of any particular problem via this method. As Kuusela stresses, this also holds for the problems involving rule-following and meaning: the clarification of the grammar of "rule" and "meaning" has no foundational status - it is nothing on which the method of clarifying the grammar of expressions as such is meant to rely. The lead question of this dissertation then is: What does it mean to come to see that the method of dissolving philosophical problems by asking "How is this word actually used?" does not in any way rely on the results of our having investigated the grammar of the particular concepts "rule" and "meaning"? What is the relation of such results - results such as "To follow a rule, [...], to obey an order, [...] are customs (uses, institutions)" or "The meaning of a word is its use in the language" - to this method? From this vantage point, I concern myself with two aspects of the readings of Gustafsson and Kuusela. In Gustafsson, I concern myself with his idea that the dissolution of philosophical problems in general "relies on" the very agreement which - during the dissolution of the rule-following problem - comes out as a presupposition for our talk of "meaning" in terms of rules. In Kuusela, I concern myself with his idea that Wittgenstein, in adopting a way of philosophical clarification which investigates the actual use of expressions, is following the model of "meaning as use" - a model he had previously introduced in order to perspicuously present an aspect of the actual use of the word "meaning". This dissertation aims to show how these two aspects of Gustafsson's and Kuusela's readings still fail to live up to the vision of Wittgenstein as a philosopher who aimed to do away with any hierarchies in philosophy. I base this conclusion on a detailed analysis of which of the occasions where Wittgenstein invokes the notions of "use" and "application" (as well as "agreement") have to do with the dissolution of a specific problem only, and which have to do with the dissolution of philosophical problems in general. I discuss Wittgenstein's remarks on rule-following, showing how in the dissolution of the rule-following paradox, notions such as "use", "application", and "practice" figure on two distinct logical levels.
I then discuss an example of what happens when this distinction is not duly heeded: Gordon Baker and Peter Hacker's idea that the rule-following remarks have a special significance for Wittgenstein's project of dissolving philosophical problems as such. I furnish an argument to the effect that their idea that the clarification of the rules of grammar of the particular expression "following a rule" could answer a question about rules of grammar in general rests on a conflation of the two logical levels on which "use" occurs in the rule-following remarks, and that it leads into a regress. I then show that Gustafsson's view - despite its decisive advance over Baker and Hacker - contains a version of that same idea, and that it likewise leads into a regress. Finally, I show that Kuusela's idea of a special significance of the model "meaning as use" for the whole of the method of stating rules for the use of words is open to a regress argument of a kind similar to the one he himself advances against Baker and Hacker. I conclude that in order to avoid such a regress, we need to reject the idea that the grammatical remark "The meaning of a word is its use in the language" - because of the occurrence of "use" in it - stands in any special relation to the method of dissolving philosophical problems by describing the use of words. Rather, we need to take this method as independent of this outcome of the investigation of the use of the particular word "meaning".}, language = {en} } @phdthesis{Schatz2011, author = {Schatz, Daniela}, title = {LNA-clamp-PCR zum sensitiven Nachweis von Punktmutationen im Rahmen der Entwicklung eines Darmkrebsfr{\"u}herkennungstests}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52308}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Colorectal cancer is the second most frequent cause of malignancy-related death in the Western industrialized countries. With an early diagnosis, however, there is a high chance of cure. The current gold standard for colorectal cancer screening is colonoscopy. A colonoscopy, however, is invasive and involves discomfort for the patient, so its acceptance among the population is low. The aim of the BMBF project "Entwicklung eines nichtinvasiven Nachweissystems zur Fr{\"u}herkennung von humanem Darmkrebs" (development of a non-invasive detection system for the early diagnosis of human colorectal cancer), within which this thesis was prepared, is to provide a non-invasive detection method for colorectal cancer screening. Detection is to be based on DNA originating from neoplastic cells in stool. The malignant transformation of these cells rests on changes in the genome, which include mutations. In the first part of the BMBF project, a set of mutations was compiled that shows a high sensitivity for precursors of colorectal cancer. The aim of this thesis was to develop a detection method for the previously identified point mutations. The detection method had to be insensitive to a high background of non-mutated DNA, because stool contains small amounts of DNA from neoplastic cells against a high background of DNA from healthy cells. To this end, plasmid model systems were constructed for the gene fragments from the marker set: BRAF and its mutant V600E, CTNNB1 and its mutants T41I, T41A and S45P, and K-ras G12C. These plasmid model systems were then used to develop the detection system.
The decisive step for detecting point mutations against a large excess of wild-type DNA is a preceding enrichment. In the present work, LNA-clamp-PCR (locked nucleic acid) was established for this purpose. The enrichment achieved was assessed via the relative detection limit. To determine the detection limit, melting curve analysis with hybridization probes was used; this analysis was developed within this work for the three gene fragments mentioned above and their mutants. LNA-clamp-PCR is carried out in the presence of an LNA blocker. Compared with DNA, the nucleotide analogue LNA has an increased affinity for complementary DNA strands; at the same time, a base mismatch destabilizes the binding more strongly. Short LNA-DNA hybrid oligonucleotides that span the mutated sequence region and themselves match the wild-type sequence are used as blockers. By binding to the wild-type sequence, the blocker prevents its amplification during the PCR (hence "clamp"); the blocker itself is not extended. Under optimal conditions, however, the blocker does not bind to the mutated sequence, so the mutant is amplified unhindered and thus enriched relative to the wild-type fragment. The blocker can be positioned within the binding region of one of the primers, where it prevents primer hybridization to the wild-type fragment, or between the two primers, where it inhibits synthesis by the polymerase. The applicability of both systems was demonstrated in this work. LNA-clamp-PCR with a primer blocker was established for BRAF, achieving a detection limit of at least 1:100. LNA-clamp-PCR with an amplification blocker was successfully developed for BRAF, K-ras, and CTNNB1 (T41I, T41A), with detection limits of 1:1,000 to 1:10,000. According to the literature, DNA from neoplastic cells accounts for 1\% to 0.1\% of the DNA in stool samples. With amplification blockers, LNA-clamp-PCR therefore has a detection limit sufficient for the analysis of stool samples. The successful establishment of the method for three different gene fragments and four different point mutations demonstrates its universal applicability. Guidelines were drawn up for extending LNA-clamp-PCR to the remaining mutations of the marker set, and the blocker efficiency was introduced as a characteristic figure. LNA-clamp-PCR is a fast, low-cost method that requires little effort and is not very error-prone. It is thus a suitable enrichment method for point mutations in a diagnostic system for colorectal cancer screening.
Dar{\"u}ber hinaus kann die LNA-clamp-PCR auch in anderen Bereichen, in denen die Detektion von Punktmutationen in einem hohen Wildtyphintergrund erforderlich ist, eingesetzt werden.}, language = {de} } @phdthesis{Grochowska2011, author = {Grochowska, Marta}, title = {{\"O}konomische, soziale und r{\"a}umliche Folgen der saisonalen Arbeitsmigration im Herkunftsgebiet : am Beispiel der Region Konin (Polen)}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-137-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49649}, school = {Universit{\"a}t Potsdam}, pages = {390}, year = {2011}, abstract = {Die vorliegende Arbeit basiert auf Forschungen in den Jahren 2007-2009. Sie betrachtet die saisonale Arbeitsmigration aus der polnischen Region Konin, wo die Arbeitsmigration aus {\"o}konomischen Gr{\"u}nden, wie auch in {\"a}hnlich strukturierten Gebieten Polens, eine lange Tradition hat, die bis ins 19. Jahrhundert zur{\"u}ckgeht. Sie wird die saisonale Migration ins Ausland mit den {\"o}konomischen, sozialen und r{\"a}umlichen Auswirkungen aus der Perspektive des Einzelnen und seiner unmittelbaren Umgebung, aber auch der Gesellschaft und Herkunftsgebiet der Migranten betrachtet.}, language = {de} } @phdthesis{Schulze2011, author = {Schulze, Andreas}, title = {Demographics of supermassive black holes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54464}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Supermassive black holes are a fundamental component of the universe in general and of galaxies in particular. Almost every massive galaxy harbours a supermassive black hole (SMBH) in its center. Furthermore, there is a close connection between the growth of the SMBH and the evolution of its host galaxy, manifested in the relationship between the mass of the black hole and various properties of the galaxy's spheroid component, like its stellar velocity dispersion, luminosity or mass. Understanding this relationship and the growth of SMBHs is essential for our picture of galaxy formation and evolution. In this thesis, I make several contributions to improve our knowledge on the census of SMBHs and on the coevolution of black holes and galaxies. The first route I follow on this road is to obtain a complete census of the black hole population and its properties. Here, I focus particularly on active black holes, observable as Active Galactic Nuclei (AGN) or quasars. These are found in large surveys of the sky. In this thesis, I use one of these surveys, the Hamburg/ESO survey (HES), to study the AGN population in the local volume (z~0). The demographics of AGN are traditionally represented by the AGN luminosity function, the distribution function of AGN at a given luminosity. I determined the local (z<0.3) optical luminosity function of so-called type 1 AGN, based on the broad band B_J magnitudes and AGN broad Halpha emission line luminosities, free of contamination from the host galaxy. I combined this result with fainter data from the Sloan Digital Sky Survey (SDSS) and constructed the best current optical AGN luminosity function at z~0. The comparison of the luminosity function with higher redshifts supports the current notion of 'AGN downsizing', i.e. the space density of the most luminous AGN peaks at higher redshifts and the space density of less luminous AGN peaks at lower redshifts. However, the AGN luminosity function does not reveal the full picture of active black hole demographics. 
This requires knowledge of the physical quantities, foremost the black hole mass and the accretion rate of the black hole, and the respective distribution functions, the active black hole mass function and the Eddington ratio distribution function. I developed a method for an unbiased estimate of these two distribution functions, employing a maximum likelihood technique and fully accounting for the selection function. I used this method to determine the active black hole mass function and the Eddington ratio distribution function for the local universe from the HES. I found a wide intrinsic distribution of black hole accretion rates and black hole masses. The comparison of the local active black hole mass function with the local total black hole mass function reveals evidence for 'AGN downsizing', in the sense that in the local universe the most massive black holes are in a less active stage than lower mass black holes. The second route I follow is a study of redshift evolution in the black hole-galaxy relations. While theoretical models can in general explain the existence of these relations, their redshift evolution puts strong constraints on these models. Observational studies on the black hole-galaxy relations naturally suffer from selection effects. These can potentially bias the conclusions inferred from the observations, if they are not taken into account. I investigated the issue of selection effects on type 1 AGN samples in detail and discussed various sources of bias, e.g. an AGN luminosity bias, an active fraction bias and an AGN evolution bias. If the selection function of the observational sample and the underlying distribution functions are known, it is possible to correct for this bias. I present a fitting method to obtain an unbiased estimate of the intrinsic black hole-galaxy relations from samples that are affected by selection effects. Third, I try to improve our census of dormant black holes and the determination of their masses. One of the most important techniques to determine the black hole mass in quiescent galaxies is stellar dynamical modeling. This method employs photometric and kinematic observations of the galaxy and infers the gravitational potential from the stellar orbits. This method can reveal the presence of the black hole and give its mass, if the sphere of the black hole's gravitational influence is spatially resolved. However, usually the presence of a dark matter halo is ignored in the dynamical modeling, potentially causing a bias in the determined black hole mass. I ran dynamical models for a sample of 12 galaxies, including a dark matter halo. For galaxies for which the black hole's sphere of influence is not well resolved, I found that the black hole mass is systematically underestimated when the dark matter halo is ignored, while there is almost no effect for galaxies with a well resolved sphere of influence.}, language = {en} } @phdthesis{Avila2011, author = {Avila, Gast{\´o}n}, title = {Asymptotic staticity and tensor decompositions with fast decay conditions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-54046}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Corvino, Corvino and Schoen, and Chruściel and Delay have shown the existence of a large class of asymptotically flat vacuum initial data for Einstein's field equations which are static or stationary in a neighborhood of space-like infinity, yet quite general in the interior.
The proof relies on some abstract, non-constructive arguments which make it difficult to calculate such data numerically using similar arguments. A quasilinear elliptic system of equations is presented which we expect can be used to construct vacuum initial data which are asymptotically flat, time-reflection symmetric, and asymptotic to static data up to a prescribed order at space-like infinity. A perturbation argument is used to show the existence of solutions. It is valid when the order at which the solutions approach staticity is restricted to a certain range. Difficulties appear when trying to improve this result to show the existence of solutions that are asymptotically static at higher order. The problems arise from the lack of surjectivity of a certain operator. Some tensor decompositions in asymptotically flat manifolds exhibit some of the difficulties encountered above. The Helmholtz decomposition, which plays a role in the preparation of initial data for the Maxwell equations, is discussed as a model problem. A method to circumvent the difficulties that arise when fast decay rates are required is discussed. This is done in a way that opens the possibility to perform numerical computations. The insights from the analysis of the Helmholtz decomposition are applied to the York decomposition, which is related to that part of the quasilinear system which gives rise to the difficulties. For this decomposition analogous results are obtained. It turns out, however, that in this case the presence of symmetries of the underlying metric leads to certain complications. The question whether the results obtained so far can be used again to show by a perturbation argument the existence of vacuum initial data which approach static solutions at infinity at any given order thus remains open. The answer requires further analysis and perhaps new methods.}, language = {en} } @phdthesis{Kluth2011, author = {Kluth, Stephan}, title = {Quantitative modeling and analysis with FMC-QE}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52987}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The modeling and evaluation calculus FMC-QE, the Fundamental Modeling Concepts for Quantitative Evaluation [1], extends the Fundamental Modeling Concepts (FMC) for performance modeling and prediction. In this new methodology, hierarchical service requests are the main focus, because they are the origin of every service provisioning process. As in physics, these service requests are tuples of value and unit, which enables hierarchical service request transformations at the hierarchical borders and therefore hierarchical modeling. By reducing the complexity of the models through decomposing the system into different hierarchical views, distinguishing between operational and control states, and calculating the performance values under the steady-state assumption, FMC-QE is scalably applicable to complex systems. According to FMC, the system is modeled in a 3-dimensional hierarchical representation space, where system performance parameters are described in three arbitrarily fine-grained hierarchical bipartite diagrams. The hierarchical service request structures are modeled in Entity Relationship Diagrams. The static server structures, divided into logical and real servers, are described as Block Diagrams. The dynamic behavior and the control structures are specified as Petri Nets, more precisely Colored Time Augmented Petri Nets.
From the structures and parameters of the performance model, a hierarchical set of equations is derived. The calculation of the performance values is done on the assumption of stationary processes and is based on fundamental laws of performance analysis: Little's Law and the Forced Traffic Flow Law. Little's Law is used within the different hierarchical levels (horizontal), and the Forced Traffic Flow Law is the key to the dependencies among the hierarchical levels (vertical). This calculation is suitable for complex models and allows a fast (re-)calculation of different performance scenarios in order to support development and configuration decisions. Within the Research Group Zorn at the Hasso Plattner Institute, the work is embedded in broader research on the development of FMC-QE. While this work concentrates on the theoretical background, description, and definition of the methodology as well as the extension and validation of its applicability, other topics concern the development of an FMC-QE modeling and evaluation tool and the use of FMC-QE in the design of an adaptive transport layer in order to fulfill Quality of Service and Service Level Agreements in volatile service-based environments. This thesis contains a state-of-the-art survey, the description of FMC-QE, as well as extensions of FMC-QE in representative general models and case studies. In the state-of-the-art part of the thesis in chapter 2, an overview of existing Queueing Theory and Time Augmented Petri Net models and other quantitative modeling and evaluation languages and methodologies is given. Other hierarchical quantitative modeling frameworks are also considered. The description of FMC-QE in chapter 3 consists of a summary of the foundations of FMC-QE, basic definitions, the graphical notations, the FMC-QE Calculus, and the modeling of open queueing networks as an introductory example. The extensions of FMC-QE in chapter 4 consist of the integration of the summation method in order to support the handling of closed networks and the modeling of multiclass and semaphore scenarios. Furthermore, FMC-QE is compared to other performance modeling and evaluation approaches. In the case study part in chapter 5, proof-of-concept examples, such as the modeling of a service-based search portal, a service-based SAP NetWeaver application, and the Axis2 Web service framework, are provided. Finally, chapter 6 concludes with a summary of contributions and an outlook on future work. [1] Werner Zorn. FMC-QE - A New Approach in Quantitative Modeling. In Hamid R. Arabnia, editor, Proceedings of the International Conference on Modeling, Simulation and Visualization Methods (MSV 2007) within WorldComp '07, pages 280 - 287, Las Vegas, NV, USA, June 2007. CSREA Press. ISBN 1-60132-029-9.}, language = {en} } @phdthesis{Off2011, author = {Off, Thomas}, title = {Durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Softwareentwicklung von E-Government-Anwendungen : ein ontologiebasierter und modellgetriebener Ansatz am Beispiel von B{\"u}rgerdiensten}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57478}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {For several years, public administration has been using e-government application systems in order to support its administrative processes more intensively with modern information technology.
Da die {\"o}ffentliche Verwaltung in ihrem Handeln in besonderem Maße an Recht und Gesetz gebunden ist verst{\"a}rkt und verbreitet sich der Zusammenhang zwischen den Gesetzen und Rechtsvorschriften einerseits und der zur Aufgabenunterst{\"u}tzung eingesetzten Informationstechnik andererseits. Aus Sicht der Softwaretechnik handelt es sich bei diesem Zusammenhang um eine spezielle Form der Verfolgbarkeit von Anforderungen (engl. Traceability), die so genannte Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (Pre-Requirements Specification Traceability, kurz Pre-RS Traceability), da sie Aspekte betrifft, die relevant sind, bevor die Anforderungen in eine Spezifikation eingeflossen sind (Urspr{\"u}nge von Anforderungen). Der Ansatz dieser Arbeit leistet einen Beitrag zur Verfolgbarkeit im Vorfeld der Anforderungsspezifikation von E-Government-Anwendungssystemen. Er kombiniert dazu aktuelle Entwicklungen und Standards (insbesondere des World Wide Web Consortium und der Object Management Group) aus den Bereichen Verfolgbarkeit von Anforderungen, Semantic Web, Ontologiesprachen und modellgetriebener Softwareentwicklung. Der L{\"o}sungsansatz umfasst eine spezielle Ontologie des Verwaltungshandeln, die mit den Techniken, Methoden und Werkzeugen des Semantic Web eingesetzt wird, um in Texten von Rechtsvorschriften relevante Urspr{\"u}nge von Anforderungen durch Annotationen mit einer definierten Semantik zu versehen. Darauf aufbauend wird das Ontology Definition Metamodel (ODM) verwendet, um die Annotationen als spezielle Individuen einer Ontologie auf Elemente der Unified Modeling Language (UML) abzubilden. Dadurch entsteht ein neuer Modelltyp Pre-Requirements Model (PRM), der das Vorfeld der Anforderungsspezifikation formalisiert. Modelle diesen Typs k{\"o}nnen auch verwendet werden, um Aspekte zu formalisieren die sich nicht oder nicht vollst{\"a}ndig aus dem Text der Rechtsvorschrift ergeben. Weiterhin bietet das Modell die M{\"o}glichkeit zum Anschluss an die modellgetriebene Softwareentwicklung. In der Arbeit wird deshalb eine Erweiterung der Model Driven Architecture (MDA) vorgeschlagen. Zus{\"a}tzlich zu den etablierten Modelltypen Computation Independent Model (CIM), Platform Independent Model (PIM) und Platform Specific Model (PSM) k{\"o}nnte der Einsatz des PRM Vorteile f{\"u}r die Verfolgbarkeit bringen. Wird die MDA mit dem PRM auf das Vorfeld der Anforderungsspezifikation ausgeweitet, kann eine Transformation des PRM in ein CIM als initiale Anforderungsspezifikation erfolgen, indem der MOF Query View Transformation Standard (QVT) eingesetzt wird. Als Teil des QVT-Standards ist die Aufzeichnung von Verfolgbarkeitsinformationen bei Modelltransformationen verbindlich. Um die semantische L{\"u}cke zwischen PRM und CIM zu {\"u}berbr{\"u}cken, erfolgt analog zum Einsatz des Plattformmodells (PM) in der PIM nach PSM Transformation der Einsatz spezieller Hilfsmodelle. Es kommen daf{\"u}r die im Projekt "E-LoGo" an der Universit{\"a}t Potsdam entwickelten Referenzmodelle zum Einsatz. Durch die Aufzeichnung der Abbildung annotierter Textelemente auf Elemente im PRM und der Transformation der Elemente des PRM in Elemente des CIM kann durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Anforderungsspezifikation erreicht werden. Der Ansatz basiert auf einer so genannten Verfolgbarkeitsdokumentation in Form verlinkter Hypertextdokumente, die mittels XSL-Stylesheet erzeugt wurden und eine Verbindung zur graphischen Darstellung des Diagramms (z. B. Anwendungsfall-, Klassendiagramm der UML) haben. 
Der Ansatz unterst{\"u}tzt die horizontale Verfolgbarkeit zwischen Elementen unterschiedlicher Modelle vorw{\"a}rts- und r{\"u}ckw{\"a}rtsgerichtet umfassend. Er bietet außerdem vertikale Verfolgbarkeit, die Elemente des gleichen Modells und verschiedener Modellversionen in Beziehung setzt. {\"U}ber den offensichtlichen Nutzen einer durchg{\"a}ngigen Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (z. B. Analyse der Auswirkungen einer Gesetzes{\"a}nderung, Ber{\"u}cksichtigung des vollst{\"a}ndigen Kontextes einer Anforderung bei ihrer Priorisierung) hinausgehend, bietet diese Arbeit eine erste Ansatzm{\"o}glichkeit f{\"u}r eine Feedback-Schleife im Prozess der Gesetzgebung. Stehen beispielsweise mehrere gleichwertige Gestaltungsoptionen eines Gesetzes zur Auswahl, k{\"o}nnen die Auswirkungen jeder Option analysiert und der Aufwand ihrer Umsetzung in E-Government-Anwendungen als Auswahlkriterium ber{\"u}cksichtigt werden. Die am 16. M{\"a}rz 2011 in Kraft getretene {\"A}nderung des NKRG schreibt eine solche Analyse des so genannten „Erf{\"u}llungsaufwands" f{\"u}r Teilbereiche des Verwaltungshandelns bereits heute verbindlich vor. F{\"u}r diese Analyse kann die vorliegende Arbeit einen Ansatz bieten, um zu fundierten Aussagen {\"u}ber den {\"A}nderungsaufwand eingesetzter E-Government-Anwendungssysteme zu kommen.}, language = {de} } @phdthesis{Mallien2011, author = {Mallien, Grit}, title = {Explorative multizentrische Querschnittsstudie zur Diagnostik der Dysarthrie bei Progressiver Supranukle{\"a}rer Blickparese - PSP}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58045}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die Progressive Supranukle{\"a}re Blickparese (PSP) ist eine sporadisch auftretende neurodegenerative Erkrankung im Rahmen der atypischen Parkinson-Syndrome (APS), die im fr{\"u}hen Verlauf h{\"a}ufig mit dem Idiopathischen Parkinson-Syndrom (IPS) verwechselt wird. Dabei ist die Dysarthrie als eine erworbene, zentral bedingte sprechmotorische St{\"o}rung ein h{\"a}ufiges und fr{\"u}h auftretendes Symptom bei PSP. Bislang spricht man von einer eher unspezifischen „gemischten" Dysarthrie aus hypokinetischen, spastischen und auch ataktischen Komponenten. Im Rahmen einer explorativen Querschnittsstudie am „Fachkrankenhaus f{\"u}r Bewegungsst{\"o}rungen und Parkinson" Beelitz-Heilst{\"a}tten in Kooperation mit der „Entwicklungsgruppe Klinische Neuropsychologie" M{\"u}nchen (EKN) sowie der „Interdisziplin{\"a}ren Ambulanz f{\"u}r Bewegungsst{\"o}rungen" am Klinikum M{\"u}nchen-Großhadern wurden 50 Patienten dahingehend untersucht, ob sich f{\"u}r die Progressive Supranukle{\"a}re Blickparese (PSP) eine spezielle, fr{\"u}hzeitig zu diagnostizierende und differentialdiagnostisch relevante Dysarthrie beschreiben ließe. In diesem Zusammenhang soll gekl{\"a}rt werden, ob es sich um ph{\"a}notypische Auspr{\"a}gungen im Rahmen eines St{\"o}rungsspektrums handelt oder ob sich differenzierbare Subtypen der Krankheit, insbesondere ein „klassischer" PSP-Typ (PSP-RS) und ein „atypischer" PSP-Typ (PSP-P), auch im Bereich der Dysarthrie zeigen. Im Rahmen der Untersuchungen wurde der Schweregrad der Erkrankung mittels der „PSP-sensitiven Ratingskala (PSPRS)" gemessen. Die Dysarthriediagnostik erfolgte anhand der „Bogenhausener Dysarthrieskalen (BoDyS)" zur Beschreibung der Art und Auspr{\"a}gung der Dysarthrie bei PSP. 
Die Verst{\"a}ndlichkeit wurde mithilfe des „M{\"u}nchner Verst{\"a}ndlichkeits-Profils (MVP)" sowie eines weiteren Transkriptionsverfahrens ermittelt, wobei Ausschnitte aus den Tests zum Lesen und Nachsprechen der BoDyS zugrunde lagen. Weiterhin erfolgte eine Einsch{\"a}tzung der Nat{\"u}rlichkeit des Sprechens. Die Ergebnisse hinsichtlich des Einflusses von Nat{\"u}rlichkeit und Verst{\"a}ndlichkeit des Sprechens auf den Schweregrad der Dysarthrie zeigten, dass dieser modalit{\"a}ten{\"u}bergreifend mit beiden Schweregradaspekten korreliert, wenngleich es offenbar die Nat{\"u}rlichkeit des Sprechens ist, die bei PSP bereits fr{\"u}hzeitig beeintr{\"a}chtigt ist und somit als das entscheidende differentialdiagnostische Kriterium zur Differenzierung zwischen beiden PSP-Subtypen zu beurteilen ist, m{\"o}glicherweise auch gegen{\"u}ber anderen Parkinson-Syndromen. Anhand statistisch valider Ergebnisse konnten spezifische St{\"o}rungsmerkmale der Dysarthrie extrahiert werden, die eine signifikante Trennung von PSP-RS und PSP-P erm{\"o}glichen: eine leise und behaucht-heisere Stimme sowie ein verlangsamtes Sprechtempo und Hypernasalit{\"a}t. Damit k{\"o}nnen f{\"u}r die hier fokussierten Subtypen der PSP zwei unterschiedliche Dysarthrietypen postuliert werden. Danach wird dem Subtyp PSP-RS eine spastisch betonte Dysarthrie mit ausgepr{\"a}gter Verlangsamung des Sprechtempos zugeordnet, dem Subtyp PSP-P hingegen eine hypokinetische Dysarthrie mit behaucht-heiserer Hypophonie. Desweiteren konnte ein „Dysarthrie-Schwellenwert" als Zusatzkriterium f{\"u}r eine zeitliche Differenzierung beider PSP-Subtypen ermittelt werden. Anhand der Daten zeigte sich die Dysarthrie bei dem Subtyp PSP-RS gleich zu Beginn der Erkrankung, jedoch sp{\"a}testens 24 Monate danach. Hingegen konnte die Dysarthrie beim Subtyp PSP-P fr{\"u}hestens 24 Monate nach Erkrankungsbeginn festgestellt werden. Die Daten dieser Studie verdeutlichen, dass der Frage nach einer subtypenspezifischen Auspr{\"a}gung der Dysarthrie bei PSP eine L{\"a}ngsschnittsstudie folgen sollte, um die ermittelten Ergebnisse zu konsolidieren.}, language = {de} } @phdthesis{Lossow2011, author = {Loßow, Kristina}, title = {Erzeugung und Charakterisierung von Mausmodellen mit lichtsensitivem Geschmackssystem zur Aufkl{\"a}rung der neuronalen Geschmackskodierung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58059}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die Wahrnehmung von Geschmacksempfindungen beruht auf dem Zusammenspiel verschiedener Sinneseindr{\"u}cke wie Schmecken, Riechen und Tasten. Diese Komplexit{\"a}t der gustatorischen Wahrnehmung erschwert die Beantwortung der Frage wie Geschmacksinformationen vom Mund ins Gehirn weitergeleitet, prozessiert und kodiert werden. Die Analysen zur neuronalen Prozessierung von Geschmacksinformationen erfolgten zumeist mit Bitterstimuli am Mausmodell. Zwar ist bekannt, dass das Genom der Maus f{\"u}r 35 funktionelle Bitterrezeptoren kodiert, jedoch war nur f{\"u}r zwei unter ihnen ein Ligand ermittelt worden. Um eine bessere Grundlage f{\"u}r tierexperimentelle Arbeiten zu schaffen, wurden 16 der 35 Bitterrezeptoren der Maus heterolog in HEK293T-Zellen exprimiert und in Calcium-Imaging-Experimenten funktionell charakterisiert. Die Daten belegen, dass das Funktionsspektrum der Bitterrezeptoren der Maus im Vergleich zum Menschen enger ist und widerlegen damit die Aussage, dass humane und murine orthologe Rezeptoren durch das gleiche Ligandenspektrum angesprochen werden. 
The interpretation of animal data and their transferability to humans are consequently complicated not only by the complexity of taste but also by species differences. The complexity of taste rests, among other things, on the fact that tastants rarely occur in isolation, so a multitude of information has to be encoded. To exclude such tastant-associated stimuli in the analysis of gustatory communication pathways, opsins, which can be activated by light of specific wavelengths, were to be used to selectively replace taste receptors. To evaluate the functionality of the envisaged knockout-knockin models, which presupposes a coupling of the opsins to the taste-specific G protein gustducin, oocytes of the clawed frog Xenopus laevis were analyzed for this interaction with the two-electrode voltage-clamp technique. After this coupling had been assessed positively, three mouse lines were generated that express photoreceptors in the coding region of a specific taste receptor (Tas1r1, Tas1r2, Tas2r114). RT-PCR, in situ hybridization, and immunohistochemical experiments demonstrated the successful knockout of the receptor genes and the knockin of the opsins. Demonstrating the functionality of the opsins in the gustatory system will be the subject of future analyses. If the light sensitivity of the taste receptor cells of these mouse models is successfully demonstrated, a system would be available that makes it possible to identify gustatory neuronal networks and brain areas that can be traced back to a purely taste- and quality-specific stimulus.}, language = {de} } @phdthesis{Spreyer2011, author = {Spreyer, Kathrin}, title = {Does it have to be trees? : Data-driven dependency parsing with incomplete and noisy training data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57498}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {We present a novel approach to training data-driven dependency parsers on incomplete annotations. Our parsers are simple modifications of two well-known dependency parsers, the transition-based Malt parser and the graph-based MST parser. While previous work on parsing with incomplete data has typically couched the task in frameworks of unsupervised or semi-supervised machine learning, we essentially treat it as a supervised problem. In particular, we propose what we call agnostic parsers which hide all fragmentation in the training data from their supervised components. We present experimental results with training data that was obtained by means of annotation projection. Annotation projection is a resource-lean technique which allows us to transfer annotations from one language to another within a parallel corpus. However, the output tends to be noisy and incomplete due to cross-lingual non-parallelism and error-prone word alignments. This makes the projected annotations a suitable test bed for our fragment parsers. Our results show that (i) dependency parsers trained on large amounts of projected annotations achieve higher accuracy than the direct projections, and that (ii) our agnostic fragment parsers perform roughly on a par with the original parsers which are trained only on strictly filtered, complete trees.
Finally, (iii) when our fragment parsers are trained on artificially fragmented but otherwise gold standard dependencies, the performance loss is moderate even with up to 50\% of all edges removed.}, language = {en} } @phdthesis{Wulf2011, author = {Wulf, Hendrik}, title = {Seasonal precipitation, river discharge, and sediment flux in the western Himalaya}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57905}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Rainfall, snowmelt, and glacial melt throughout the Himalaya control river discharge, which is vital for maintaining agriculture, drinking water supply, and hydropower generation. However, the spatiotemporal contribution of these discharge components to Himalayan rivers is not well understood, mainly because of the scarcity of ground-based observations. Consequently, little is known about the triggers and sources of peak sediment flux events, which account for extensive hydropower reservoir filling and turbine abrasion. We therefore lack basic information on the distribution of water resources and on the controls of erosion processes. In this thesis, I employ various methods to assess and quantify general characteristics of and links between precipitation, river discharge, and sediment flux in the Sutlej Valley. First, I analyze daily precipitation data (1998-2007) from 80 weather stations in the western Himalaya to decipher the distribution of rainfall and snowfall. Rainfall magnitude frequency analyses indicate that 40\% of the summer rainfall budget is attributed to monsoonal rainstorms, which show higher variability in the orogenic interior than in frontal regions. Combined analysis of rainstorm and sediment flux data of a major Sutlej River tributary indicates that monsoonal rainfall exerts a first-order control on erosion processes in the orogenic interior, despite the dominance of snowfall in this region. Second, I examine the contribution of rainfall, snow, and glacial melt to river discharge in the Sutlej Valley (~55,000 km2), based on a distributed hydrological model covering the period 2000-2008. To achieve high spatial and daily resolution despite limited ground-based observations, the hydrological model is forced by daily remote sensing data, which I adjusted and calibrated with ground station data. The calibration shows that the Tropical Rainfall Measuring Mission (TRMM) 3B42 rainfall product systematically overestimates rainfall in semi-arid and arid regions, with the overestimate increasing with aridity. The model results indicate that snowmelt-derived discharge (74\%) is most important during the pre-monsoon season (April to June), whereas rainfall (56\%) and glacial melt (17\%) dominate the monsoon season (July-September). Therefore, climate change most likely causes a reduction in river discharge during the pre-monsoon season, which especially affects the orogenic interior. Third, I investigate the controls on suspended sediment flux in different parts of the Sutlej catchments, based on daily gauging data from the past decade. In conjunction with meteorological data, earthquake records, and rock strength measurements, I find that rainstorms are the most frequent trigger of high-discharge events with peaks in suspended sediment concentrations (SSC) that account for the bulk of the suspended sediment flux. The suspended sediment flux increases downstream, mainly due to increases in runoff.
Pronounced erosion along the Himalayan Front occurs throughout the monsoon season, whereas efficient erosion of the orogenic interior is confined to single extreme events. The results of this thesis highlight the importance of snow- and glacially-derived melt waters in the western Himalaya, where extensive regions receive only limited amounts of monsoonal rainfall. These regions are therefore particularly susceptible to global warming, with major implications for the hydrological cycle. However, the sediment discharge data show that infrequent monsoonal rainstorms that pass the orographic barrier of the Higher Himalaya are still the primary trigger of the highest-impact erosion events, despite being subordinate to snow- and glacially-derived discharge. These findings may help to predict peak sediment flux events and could underpin the strategic development of preventative measures for hydropower infrastructure.}, language = {en} } @phdthesis{Malik2011, author = {Malik, Nishant}, title = {Extremes in events and dynamics : a nonlinear data analysis perspective on the past and present dynamics of the Indian summer monsoon}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58016}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {To identify extreme changes in the dynamics of the Indian Summer Monsoon (ISM) in the past, I propose a new approach based on the quantification of fluctuations of a nonlinear similarity measure, which identifies regimes of distinct dynamical complexity in short time series. I provide an analytical derivation of the relationship of the new measure to dynamical invariants such as the dimension and the Lyapunov exponents of the underlying system. A statistical test is also developed to estimate the significance of the identified transitions. The method is validated by uncovering bifurcation structures in several paradigmatic models, where it identifies more complex transitions than traditional Lyapunov exponents. In a real-world application, I use the method to identify millennial-scale dynamical transitions in Pleistocene proxy records of the south Asian summer monsoon system. I infer that many of these transitions are induced by the external forcing of solar insolation and are also affected by internal forcing on monsoonal dynamics, i.e., the glaciation cycles of the Northern Hemisphere and the onset of the tropical Walker circulation. Although this new method has general applicability, it is particularly useful in analysing short palaeo-climate records. Rainfall during the ISM over the Indian subcontinent occurs in the form of enormously complex spatiotemporal patterns due to the underlying dynamics of atmospheric circulation and varying topography. I present a detailed analysis of summer monsoon rainfall over the Indian peninsula using Event Synchronization (ES), a measure of nonlinear correlation for point processes such as rainfall. First, using hierarchical clustering, I identify principal regions where the dynamics of monsoonal rainfall is more coherent or homogeneous. I also provide a method to reconstruct the time-delay patterns of rain events. Moreover, further analysis is carried out employing the tools of complex network theory. This study provides valuable insights into the spatial organization, scales, and structure of the 90th and 94th percentile rainfall events during the ISM (June to September).
I furthermore analyse the influence of different critical synoptic atmospheric systems and the impact of the steep Himalayan topography on rainfall patterns. The presented method not only helps in visualising the structure of the extreme-event rainfall fields, but also identifies the water vapor pathways and decadal-scale moisture sinks over the region. Furthermore, a simple scheme based on complex networks is presented to decipher the spatial intricacies and temporal evolution of monsoonal rainfall patterns over the last six decades. Some supplementary results on the evolution of monsoonal rainfall extremes over the last sixty years are also presented.}, language = {de} } @phdthesis{Partskhaladze2011, author = {Partskhaladze, Vakhtang}, title = {Development perspectives of small and medium businesses in Georgia}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66397}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {After the collapse of the Soviet Union, the former member states started the transformation process. The transformation from a planned to a market economy includes not only the adaptation of the economy to new market rules but also a profound social and political transformation. For this reason, such processes present huge challenges to the affected societies. The transformational recession in Georgia was significantly aggravated by the civil war and by the ethnic conflicts in Abkhazia and South Ossetia. During the ethnic conflicts and the civil war, business and technical infrastructure was damaged, and much of it was completely destroyed. Poverty and political instability predominated. Trade relations with the member countries of the Council for Mutual Economic Assistance (Comecon) broke off. Moreover, the conflict in South Ossetia hampered the power supply from Russia, and the conflict in Abkhazia the production of and trade in tea and citrus fruits, which were major trade commodities at that time. At the beginning of the 1990s, the Georgian government, with the assistance of international organizations such as the International Monetary Fund and the World Bank, started to elaborate political and economic reforms. The reforms included several aspects, such as the transfer of public assets into private hands through privatization, the liberalization of the domestic market and of trade, and the creation of market-oriented institutions. Because of shortcomings in implementation, neither the economic nor the political transformation was achieved. For instance, at the beginning of the market-oriented reforms, policy makers' awareness of the importance of entrepreneurship, in particular of small and medium-sized enterprises, for the economy was low. The absence of previous experience prevented the elaboration of appropriate policy instruments and methods for the development of a competitive market economy. The stimulation of the private sector was generally neglected. This severely aggravated the political, social, and economic problems which still hamper the development of a middle class in Georgia. The presented research indicates that productive entrepreneurship is a driving force of an economy. Entrepreneurial activities facilitate resource allocation on the one hand and, on the other, spur competition through the development of new products and services. Furthermore, they contribute to technological improvements through innovation, create jobs, and thus boost the economic and social development of a particular region or country.
However, it is important that the legal and institutional framework is appropriately established. Unlike mature market economies, Georgia is not characterized by a well-developed sector of small and medium-sized businesses. Most existing SMEs operate in local markets, and predominantly in the shadow economy. It is also noteworthy that small businesses in Georgia are mostly so-called "mom and pop" businesses rather than innovative, growth-oriented enterprises; they are mainly engaged in trade and crafts. In addition to their poor performance, the business activity of SMEs is highly centralized: the vast majority of them operate in the capital, Tbilisi. The poor performance of small and medium businesses in Georgia and their neglect by market forces are due, among other things, to the armed conflicts and to state failure. As at the beginning of the transformation process, the state to this day fails to provide the necessary conditions, such as the rule of law, the protection of property rights and of competition, and a transparent and uncorrupted public administration. The result is a weak middle class, which in turn has a negative impact on economic development and on the democratization process in Georgia.}, language = {de} } @phdthesis{Krehl2011, author = {Krehl, Susanne}, title = {Das Selenoprotein Glutathionperoxidase-2 : physiologische Funktion und Einfluss auf die entz{\"u}ndungsassoziierte Colonkarzinogenese}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50220}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {When glutathione peroxidase-2 (GPx2) was discovered, it was initially assumed that the function of this enzyme at the crypt base of the colon consists solely in the reduction of H2O2. In the course of further research it became apparent that GPx2 is also increasingly expressed in various tumor tissues, and it is under discussion whether the effect of GPx2 in tumors should be classified as rather pro- or anticarcinogenic. Several experiments in vitro and in vivo showed anti-inflammatory properties of GPx2. On the basis of these findings, further functions of GPx2 are currently being considered. In this work, the physiological function of GPx2 was investigated in more detail; for this purpose, wild-type and GPx2-knockout mice were examined with regard to changes in enzyme expression and colon morphology. Three different selenium diets were fed: selenium-deficient, selenium-adequate, and selenium-supplemented chow. Under physiological conditions, the mitotic rate is highest at the crypt base of the colon, within the proliferative zone, whereas the majority of apoptotic cells are found at the crypt tip. The knockout of GPx2 led to a significant increase in the apoptotic rate at the crypt base, with the largest effect observed on the selenium-deficient diet. Here, even a change in colon morphology was documented, since the shift of the proliferative zone towards the crypt tip entailed an elongation of the crypts. In the wild type, no apoptoses were detected at the crypt base. In contrast to GPx2, GPx1 is expressed at the crypt tip under physiological conditions and is no longer detectable under selenium deficiency. The knockout of GPx2 increased GPx1 expression at the crypt base on all three selenium diets. This overexpression of GPx1 at the crypt base is presumably meant to compensate for the loss of GPx2 at this site.
However, since the massive apoptotic rate was detected precisely there, GPx1 cannot compensate for the complete function of GPx2. These results indicate that the function of GPx2 does not lie solely in the reduction of H2O2; rather, a role in maintaining cell homeostasis can be postulated. A further part of this work addressed the question of what influence GPx2 exerts on inflammation-associated colon carcinogenesis. In the AOM/DSS model used for this purpose, the carcinogenic process is driven by inflammation. In both the wild type and the GPx2 knockout, the inflammatory status of the colon was assessed, and the numbers of ACF and tumors were compared. The colon was considerably more inflamed in the GPx2 knockout than in the wild type. These results confirm the anti-inflammatory function postulated for GPx2. Normally, an increase in the number of mitoses leads to regeneration of the inflamed tissue. The loss of GPx2, however, presumably affects the course of inflammation, for example by slowing tissue regeneration through the enormously high apoptotic rate at the crypt base. Furthermore, the GPx2 knockout tended to develop more tumors; colonic inflammation thus correlated with tumor development. The loss of GPx2 presumably favored both tumor initiation and tumor progression. However, the expression of GPx2 likewise stimulated tumor growth. It can be concluded that adequate GPx2 expression protects against inflammation and thus lowers the risk of colon cancer. Whether GPx2 acts pro- or anticarcinogenically overall, however, presumably depends on the stage of colon carcinogenesis.}, language = {de} } @phdthesis{Wichura2011, author = {Wichura, Henry}, title = {Topographic evolution of the East African Plateau : a combined study on lava-flow modeling and paleo-topography}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52363}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The East African Plateau provides a spectacular example of geodynamic plateau uplift, active continental rifting, and associated climatic forcing. It is an integral part of the East African Rift System and has an average elevation of approximately 1,000 m. Its location coincides with a negative Bouguer gravity anomaly with a semi-circular shape, closely related to a mantle plume, which has influenced Cenozoic crustal development since its impingement in Eocene-Oligocene time. The uplift of the East African Plateau, preceding volcanism, and rifting formed an important orographic barrier and tectonically controlled environment, which is profoundly influenced by climate-driven processes. Its location within the equatorial realm supports recently proposed hypotheses that topographic changes in this region must be considered the dominant forcing factor influencing atmospheric circulation patterns and rainfall distribution. The uplift of this region has therefore often been associated with fundamental climatic and environmental changes in East Africa and adjacent regions. While the far-reaching influence of the plateau uplift is widely accepted, the timing and the magnitude of the uplift are ambiguous and still subject to ongoing discussion.
This dilemma stems from the lack of datable, geomorphically meaningful reference horizons that could record surface uplift. In order to quantify the amount of plateau uplift and to find evidence for the existence of significant relief along the East African Plateau prior to rifting, I analyzed and modeled one of the longest terrestrial lava flows: the 300-km-long Yatta phonolite flow in Kenya. This lava flow is 13.5 Ma old and originated in the region that now corresponds to the eastern rift shoulders. The phonolitic flow utilized an old riverbed that once drained the eastern flank of the plateau. Due to differential erosion, this lava flow now forms a positive relief above the parallel-flowing Athi River, which mimics the course of the paleo-river. My approach is lava-flow modeling, based on an improved composition- and temperature-dependent method of parameterizing the flow of an arbitrary lava in a rectangular channel. The essential growth pattern is described by a one-dimensional model, in which Newtonian rheological flow advance is governed by the development of viscosity and/or velocity in the internal parts of the lava-flow front. Comparative assessments of different magma compositions reveal that length-dominated, channelized lava flows are characterized by high effusion rates, rapid emplacement under approximately isothermal conditions, and laminar flow. By integrating the Yatta lava flow dimensions and the covered paleo-topography (slope angle) into the model, I was able to determine the pre-rift topography of the East African Plateau. The modeling results yield a pre-rift slope of at least 0.2°, suggesting that the lava flow must have originated at a minimum elevation of 1,400 m. Hence, high topography in the region of the present-day Kenya Rift must have existed by at least 13.5 Ma. This inferred mid-Miocene uplift coincides with the two-step expansion of grasslands, as well as with important radiation and speciation events in tropical Africa. Accordingly, the combination of my results regarding the Yatta lava flow emplacement history, its location, and its morphologic character validates it as a suitable "paleo-tiltmeter" and an important topographic and volcanic marker for the topographic evolution of East Africa.}, language = {en} } @phdthesis{Kellermann2011, author = {Kellermann, Thorsten}, title = {Accurate numerical relativity simulations of non-vacuum space-times in two dimensions and applications to critical collapse}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59578}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {This thesis focuses on the physics of neutron stars and its description with methods of numerical relativity. In a first step, a new numerical framework, the Whisky2D code, is developed, which solves the relativistic equations of hydrodynamics in axisymmetry. To this end, we consider an improved formulation of the conserved form of these equations. The second part uses the new code to investigate the critical behaviour of two colliding neutron stars. In analogy to phase transitions in statistical physics, we investigate the evolution of the entropy of the neutron stars during the whole process. A better understanding of the evolution of thermodynamical quantities, like the entropy in critical processes, should provide a deeper understanding of thermodynamics in relativity.
More specifically, we have written the Whisky2D code, which solves the general-relativistic hydrodynamics equations in a flux-conservative form and in cylindrical coordinates. This of course brings in 1/r singular terms, where r is the radial cylindrical coordinate, which must be dealt with appropriately. In the above-referenced works, the flux operator is expanded and the 1/r terms, not containing derivatives, are moved to the right-hand side of the equation (the source term), so that the left-hand side assumes a form identical to that of the three-dimensional (3D) Cartesian formulation. We call this the standard formulation. Another possibility is not to split the flux operator but to redefine the conserved variables via a multiplication by r. We call this the new formulation. The new equations are solved with the same methods as in the Cartesian case. From a mathematical point of view, one would not expect differences between the two ways of writing the differential operator, but, of course, a difference is present at the numerical level. Our tests show that the new formulation yields results with a global truncation error which is one or more orders of magnitude smaller than those of alternative and commonly used formulations. The second part of the Thesis uses the new code for investigations of critical phenomena in general relativity. In particular, we consider the head-on collision of two neutron stars in a region of the parameter space where the two final states, a new stable neutron star or a black hole, lie close to each other. In 1993, Choptuik considered one-parameter families of solutions, S[P], of the Einstein-Klein-Gordon equations for a massless scalar field in spherical symmetry, such that for every P > P⋆, S[P] contains a black hole and for every P < P⋆, S[P] is a solution not containing singularities. He studied numerically the behavior of S[P] as P → P⋆ and found that the critical solution, S[P⋆], is universal, in the sense that it is approached by all nearly-critical solutions regardless of the particular family of initial data considered. All these phenomena have the common property that, as P approaches P⋆, S[P] approaches a universal solution S[P⋆] and that all the physical quantities of S[P] depend only on |P - P⋆|. The first study of critical phenomena concerning the head-on collision of NSs was carried out by Jin and Suen in 2007. In particular, they considered a series of families of equal-mass NSs, modeled with an ideal-gas EOS, boosted towards each other, and varied the mass of the stars, their separation, their velocity, and the polytropic index in the EOS. In this way they could observe a type-I critical phenomenon near the threshold of black-hole formation, with the putative critical solution being a nonlinearly oscillating star. In a subsequent work, they performed similar simulations, but considering the head-on collision of Gaussian distributions of matter. In this case, too, they found type-I critical behaviour; they also performed a perturbative analysis of the initial distributions of matter and of the merged object. Because of the considerable difference found between the eigenfrequencies in the two cases, they concluded that the critical solution does not represent a system near equilibrium and, in particular, is not a perturbed Tolman-Oppenheimer-Volkoff (TOV) solution. In this Thesis we study the dynamics of the head-on collision of two equal-mass NSs using a setup which is as similar as possible to the one considered above.
While we confirm that the merged object exhibits type-I critical behaviour, we also argue against the conclusion that the critical solution cannot be described in terms of an equilibrium solution. Indeed, we show that, in analogy with what was found in previous works, the critical solution is effectively a perturbed unstable solution of the TOV equations. Our analysis also considers the fine structure of the scaling relation of type-I critical phenomena, and we show that it exhibits oscillations similar to those studied in the context of scalar-field critical collapse.}, language = {en} } @phdthesis{Federico2011, author = {Federico, Stefania}, title = {Synthetic peptides derived from decorin as building blocks for biomaterials based on supramolecular interactions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59661}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this work, the development of a new molecular building block, based on synthetic peptides derived from decorin, is presented. These peptides represent a promising basis for the design of polymer-based biomaterials that mimic the ECM on a molecular level and exploit specific biological recognition for technical applications. Multiple sequence alignments of the internal repeats of decorin that form the inner and outer surfaces of the arch-shaped protein were used to develop consensus sequences. These sequences contain conserved sequence motifs that are likely to be related to structural and functional features of the protein. Peptides representative of the consensus sequences were synthesized by microwave-assisted solid-phase peptide synthesis and purified by RP-HPLC, with purities higher than 95 mol\%. After the desired masses were confirmed by MALDI-TOF-MS, the primary structure of each peptide was investigated by 1H and 2D NMR, from which a full assignment of the chemical shifts was obtained. The conformation of the peptides in solution was characterized by CD spectroscopy, which demonstrated that, in TFE, the peptides from the outer surface of decorin show a high propensity to fold into helical structures, as observed in the original protein. In contrast, the peptides from the inner surface showed no propensity to form stable secondary structures. The binding capability of the peptides to collagen I was investigated by surface plasmon resonance analyses, which showed that all but one of the peptides representing the inner surface of decorin bind to collagen, with dissociation constants between 2•10^{-7} M and 2.3•10^{-4} M. The peptides representative of the outer surface of decorin, on the other hand, did not show any significant interaction with collagen. This information was then used to demonstrate experimentally that the peptides from the inner surface of decorin retain their binding capability to collagen even in more complex situations closer to possible applications. To this end, the peptide (LRELHLNNN) which showed the highest binding affinity to collagen (2•10^{-7} M) was functionalized with an N-terminal triple bond in order to obtain a peptide dimer via a copper(I)-catalyzed cycloaddition reaction with 4,4'-diazidostilbene-2,2'-disulfonic acid. Rheological measurements showed that the presence of the peptide dimer enhanced the elastic modulus (G') of a collagen gel from ~ 600 Pa (collagen alone) to ~ 2700 Pa (collagen and peptide dimer).
Moreover, it was shown that the mechanical properties of a collagen gel can be tailored by using different molar ratios of peptide dimer with respect to collagen. The same peptide, functionalized with the triple bond, was used to obtain a peptide-dye conjugate by coupling it with N-(5'-azidopentanoyl)-5-aminofluorescein. An aqueous solution (5 vol\% methanol) of the peptide-dye conjugate was injected into a collagen gel and a hyaluronic acid (HA) gel, and fluorescence images showed that the diffusion of the peptide was slower in the collagen gel than in the HA gel. The third experimental demonstration was obtained using the peptide (LSELRLHNN) which showed the lowest binding affinity to collagen (2.3•10^{-4} M). This peptide was grafted to hyaluronic acid via EDC chemistry, with a degree of functionalization of 7 ± 2 mol\% as calculated by 1H-NMR. The grafting was further confirmed by FTIR and TGA measurements, which showed that the onset of decomposition for the HA-g-peptide decreased by 10 °C compared to native HA. Rheological measurements showed that the elastic modulus of a system based on collagen and HA-g-peptide increased by almost two orders of magnitude (G' = 200 Pa) compared to a system based on collagen and HA (G' = 0.9 Pa). Overall, this study showed that the synthetic peptides identified from decorin can be applied as potential building blocks for biomimetic materials that function via biological recognition.}, language = {en} } @phdthesis{Seewald2011, author = {Seewald, Gunter}, title = {Lineare und nichtlineare optische Untersuchungen am synthetischen Eumelanin und Entwicklung eines Kaskadenmodells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59967}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Eumelanin is a fluorophore with in part quite unusual spectral properties. Among other things, earlier publications observed differences between the one- and two-photon-excited fluorescence spectra, which is why a stepwise excitation process was suspected in the case of nonlinear excitation. To better understand these and further optical properties of eumelanin, the present work pursued a variety of measurement approaches from linear and nonlinear optics on synthetic eumelanin in 0.1 M NaOH. From the results, a model was derived which consistently describes the observed photonic properties. In this cascaded state model (cascade model), the absorbed photon energy is transferred stepwise from excited states with high transition energies to excited states with lower transition energies. Measurements of the transient absorption revealed dominant components with short lifetimes in the ps range, indicating a high relaxation rate along the cascade. By investigating the nonlinearly excited fluorescence of eumelanin aggregates of different sizes, it could be shown that differences between the linearly and nonlinearly excited fluorescence spectra may be caused not only by a stepwise excitation process under nonlinear excitation, but also by differences in the ratios of the quantum yields of small and large aggregates on changing from linear to nonlinear excitation.
However, by determining the excitation cross-section and the excitation-pulse-duration dependence of the nonlinearly excited fluorescence of eumelanin, a stepwise two-photon excitation process via an intermediate state with lifetimes in the ps range could be demonstrated.}, language = {de} } @phdthesis{Moesta2011, author = {M{\"o}sta, Philipp}, title = {Novel aspects of the dynamics of binary black-hole mergers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59820}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The inspiral and merger of two black holes is among the most exciting and extreme events in our universe. Being among the loudest sources of gravitational waves, such mergers provide a unique dynamical probe of strong-field general relativity and a fertile ground for the observation of fundamental physics. While the detection of gravitational waves alone will allow us to observe our universe through an entirely new window, combining the information obtained from both gravitational-wave and electro-magnetic observations will allow us to gain even greater insight into some of the most exciting astrophysical phenomena. In addition, binary black-hole mergers serve as an intriguing tool to study the geometry of space-time itself. In this dissertation we study the merger process of binary black holes under a variety of conditions. Our results show that asymmetries in the curvature distribution on the common apparent horizon are correlated with the linear momentum acquired by the merger remnant. We propose useful tools for the analysis of black holes in the dynamical and isolated horizon frameworks and shed light on how the final merger of apparent horizons proceeds after a common horizon has already formed. We connect mathematical theorems with data obtained from numerical simulations and provide a first glimpse of the behavior of these surfaces in situations not accessible to analytical tools. We study electro-magnetic counterparts of super-massive binary black-hole mergers with fully 3D general-relativistic simulations of binary black holes immersed both in a uniform magnetic field in vacuum and in a tenuous plasma. We find that, while a direct detection of merger signatures with current electro-magnetic telescopes is unlikely, secondary emission, either by altering the accretion rate of the circumbinary disk or by synchrotron radiation from accelerated charges, may be detectable. We propose a novel approach to measure the electro-magnetic radiation in these simulations and find a non-collimated emission that dominates over the collimated one, which appears in the form of dual jets associated with each of the black holes. Finally, we provide an optimized gravitational-wave detection pipeline using phenomenological waveforms for signals from compact binary coalescences and show that, by including spin effects in the waveform templates, the detection efficiency is drastically improved and the bias on the recovered source parameters is reduced.
On the whole, this dissertation provides evidence that a multi-messenger approach to binary black-hole merger observations offers an exciting prospect for understanding these sources and, ultimately, our universe.}, language = {en} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life. The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms. The advancements in genomic, proteomic, and metabolomic technologies allow for the construction of complex models of biological systems. An increasing number of biological repositories are available on the web, incorporating thousands of biochemical reactions and genetic regulations. Systems Biology is a recent research trend in the life sciences which fosters a systemic view of biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emergent properties of the whole system, such as robustness. However, both measurements and biological networks are prone to considerable incompleteness, heterogeneity, and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem-solving paradigm in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language, and provides highly efficient solving technology. We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes of inconsistency. We extend this approach to compute minimal repairs of model and data that restore consistency. This method allows for predicting unobserved data even in the case of inconsistency. Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and to generate hypotheses for extending the network. Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for an easy integration of ASP solutions into the rich software environments that exist in Systems Biology.}, language = {en} }