@phdthesis{Abed2010, author = {Abed, Jamil}, title = {An iterative approach to operators on manifolds with singularities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-44757}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {We establish elements of a new approach to ellipticity and parametrices within operator algebras on manifolds with higher singularities, only based on some general axiomatic requirements on parameter-dependent operators in suitable scales of spaes. The idea is to model an iterative process with new generations of parameter-dependent operator theories, together with new scales of spaces that satisfy analogous requirements as the original ones, now on a corresponding higher level. The "full" calculus involves two separate theories, one near the tip of the corner and another one at the conical exit to infinity. However, concerning the conical exit to infinity, we establish here a new concrete calculus of edge-degenerate operators which can be iterated to higher singularities.}, language = {en} } @phdthesis{Ahnert2010, author = {Ahnert, Karsten}, title = {Compactons in strongly nonlinear lattices}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48539}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {In the present work, we study wave phenomena in strongly nonlinear lattices. Such lattices are characterized by the absence of classical linear waves. We demonstrate that compactons - strongly localized solitary waves with tails decaying faster than exponential - exist and that they play a major role in the dynamics of the system under consideration. We investigate compactons in different physical setups. One part deals with lattices of dispersively coupled limit cycle oscillators which find various applications in natural sciences such as Josephson junction arrays or coupled Ginzburg-Landau equations. Another part deals with Hamiltonian lattices. 
Here, a prominent example in which compactons can be found is the granular chain. In the third part, we study systems which are related to the discrete nonlinear Schr{\"o}dinger equation describing, for example, coupled optical wave-guides or the dynamics of Bose-Einstein condensates in optical lattices. Our investigations are based on a numerical method to solve the traveling wave equation. This results in a quasi-exact solution (up to numerical errors) which is the compacton. Another ansatz which is employed throughout this work is the quasi-continuous approximation where the lattice is described by a continuous medium. Here, compactons are found analytically, but they are defined on a truly compact support. Remarkably, both ways give similar qualitative and quantitative results. Additionally, we study the dynamical properties of compactons by means of numerical simulation of the lattice equations. Especially, we concentrate on their emergence from physically realizable initial conditions as well as on their stability due to collisions. We show that the collisions are not exactly elastic but that a small part of the energy remains at the location of the collision. In finite lattices, this remaining part will then trigger a multiple scattering process resulting in a chaotic state.}, language = {en} } @phdthesis{Alff2010, author = {Alff, Henryk}, title = {Zwischen Geburtsort und Land der Vorv{\"a}ter : die sozialen Netzwerke von Kasachen aus der Mongolei und ihre Rolle im postsowjetischen Migrations- und Inkorporationsprozess}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49886}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Seit dem Zusammenbruch der Sowjetunion kamen in diesem Raum neue Migrationsprozesse wie die Arbeitsmigration zwischen den s{\"u}dlichen GUS-Republiken und Russland, aber auch grenz{\"u}berschreitende Bev{\"o}lkerungsbewegungen ethnischer Gruppen in ihre „historischen Herkunftsgebiete" auf. 
Die in der vorliegenden Arbeit untersuchten, dynamischen Wanderungsprozesse von Kasachen zwischen der Mongolei und Kasachstan weisen Kennzeichen dieses Migrationstypus, aber auch einige Besonderheiten auf. Die vorliegende Arbeit hat l{\"a}ngere Forschungsaufenthalte in Kasachstan und der Mongolei von 2006 bis 2009 zur Grundlage. Aus der Mongolei stammende kasachische Migranten im Umland von Almaty und Kasachen im westlichsten aymag der Mongolei, Bayan-{\"O}lgiy, wurden mittels quantitativer und qualitativer Methoden empirischer Sozialforschung befragt. Erg{\"a}nzend wurden in beiden Staaten Befragungen von Experten aus gesellschaftlichen, wissenschaftlichen und politischen Institutionen durchgef{\"u}hrt, um eine m{\"o}glichst ausgeglichene Sicht auf die postsowjetischen Migrations- und Inkorporationsprozesse zwischen beiden Staaten sicherzustellen. Zwischen den Migranten in Kasachstan und ihren - noch bzw. wieder - in der Mongolei lebenden Verwandten haben sich in den letzten Jahrzehnten enge soziale Netzwerke entwickelt. Die Aufrechterhaltung der Bindungen wird durch eine Verbesserung der Transport- und Kommunikationsm{\"o}glichkeiten zwischen beiden Staaten gef{\"o}rdert. Zirkul{\"a}re Migrationsmuster, regelm{\"a}ßige Besuche und Telefongespr{\"a}che sowie grenz{\"u}berschreitende sozio{\"o}konomische Unterst{\"u}tzungsmechanismen haben sich insbesondere in den vergangenen Jahren intensiviert. Diese Interaktionen sind im Kontext der rechtlichen, politischen und wirtschaftlichen Bedingungen im Migrationssystem Mongolei-Kasachstan - und insbesondere in Wechselwirkung mit der staat¬lichen Migrations- und Inkorpora-tionspolitik - einzuordnen. Die Erkenntnisse der vorliegenden Untersuchung lassen sich in aller K{\"u}rze so zusammenfassen: (I) Die in sozialen Netzwerken organisierten Interaktionen der Kasachen aus der Mongolei weisen Merkmale von, aber auch Unterschiede zu Konzepten des Transnationalismus-Ansatzes auf. 
(II) Die sozialen Bindungen zwischen Verwandten generieren Sozialkapital und tragen zur allt{\"a}glichen Unterst{\"u}tzung bei. (III) Die lokalen und grenz{\"u}berschreitenden Aktivit{\"a}ten der Migranten sind als Strategien der sozio{\"o}konomischen Eingliederung zu deuten. (IV) Ein wesentlicher Teil der aus der Mongolei stammenden Kasachen artikuliert von der Mehrheitsbev{\"o}lkerung abweichende, hybride Identifikationsmuster, die die politischen Eliten in Kasachstan bisher zu wenig wahrnehmen.}, language = {de} } @phdthesis{Arvidsson2010, author = {Arvidsson, Samuel Janne}, title = {Identification of growth-related tonoplast proteins in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52408}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {In a very simplified view, the plant leaf growth can be reduced to two processes, cell division and cell expansion, accompanied by expansion of their surrounding cell walls. The vacuole, as being the largest compartment of the plant cell, plays a major role in controlling the water balance of the plant. This is achieved by regulating the osmotic pressure, through import and export of solutes over the vacuolar membrane (the tonoplast) and by controlling the water channels, the aquaporins. Together with the control of cell wall relaxation, vacuolar osmotic pressure regulation is thought to play an important role in cell expansion, directly by providing cell volume and indirectly by providing ion and pH homestasis for the cytosoplasm. In this thesis the role of tonoplast protein coding genes in cell expansion in the model plant Arabidopsis thaliana is studied and genes which play a putative role in growth are identified. Since there is, to date, no clearly identified protein localization signal for the tonoplast, there is no possibility to perform genome-wide prediction of proteins localized to this compartment. 
Thus, a series of recent proteomic studies of the tonoplast were used to compile a list of cross-membrane tonoplast protein coding genes (117 genes), and other growth-related genes from notably the growth regulating factor (GRF) and expansin families were included (26 genes). For these genes a platform for high-throughput reverse transcription quantitative real time polymerase chain reaction (RT-qPCR) was developed by selecting specific primer pairs. To this end, a software tool (called QuantPrime, see http://www.quantprime.de) was developed that automatically designs such primers and tests their specificity in silico against whole transcriptomes and genomes, to avoid cross-hybridizations causing unspecific amplification. The RT-qPCR platform was used in an expression study in order to identify candidate growth related genes. Here, a growth-associative spatio-temporal leaf sampling strategy was used, targeting growing regions at high expansion developmental stages and comparing them to samples taken from non-expanding regions or stages of low expansion. Candidate growth related genes were identified after applying a template-based scoring analysis on the expression data, ranking the genes according to their association with leaf expansion. To analyze the functional involvement of these genes in leaf growth on a macroscopic scale, knockout mutants of the candidate growth related genes were screened for growth phenotypes. To this end, a system for non-invasive automated leaf growth phenotyping was established, based on a commercially available image capture and analysis system. A software package was developed for detailed developmental stage annotation of the images captured with the system, and an analysis pipeline was constructed for automated data pre-processing and statistical testing, including modeling and graph generation, for various growth-related phenotypes. 
Using this system, 24 knockout mutant lines were analyzed, and significant growth phenotypes were found for five different genes.}, language = {en} } @phdthesis{Audretsch2010, author = {Audretsch, Andreas}, title = {Zur Entstehung von Good Governance : Gr{\"u}nde, Muster und Bedingungen einer afrikanischen Entwicklung ; das Beispiel Ghana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42310}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Ghana ist ein Musterbeispiel daf{\"u}r, dass ein Entwicklungsland den Weg zu Good Governance schaffen kann. In vielen Studien wird dem Land im afrikanischen Vergleich heute bescheinigt, hier ein Vorreiter zu sein. Dies ist Ausgangslage der vorliegenden Studie, die der Frage nachgeht „Welche Gr{\"u}nde, Muster und Bedingungen f{\"u}hren zur Entstehung von Good Governance?". Im Zentrum der vorliegenden Studie steht, wie aus der erkenntnisleitenden Fragestellung hervorgeht, eine empirische Untersuchung zur Entstehung von Good Governance und damit ein Transformationsprozess. Dieser wird bewusst {\"u}ber einen sehr langen Zeitraum ({\"u}ber ein halbes Jahrhundert) untersucht, um auch langfristige Entwicklungen einbeziehen zu k{\"o}nnen. Die Studie wird mit Hilfe eines „Mixed-Methods-Ansatzes" sowohl unter R{\"u}ckgriff auf quantitative als auch auf qualitative Methoden durchgef{\"u}hrt, was sich im R{\"u}ckblick als sehr ertragreich erwiesen hat. Zun{\"a}chst wird die Qualit{\"a}t der Governance {\"u}ber den gesamten Zeitraum anhand von sechs Indikatoren gemessen. Danach werden qualitativ die Gr{\"u}nde f{\"u}r die Fort- und R{\"u}ckschritte analysiert. Dabei lassen sich immer wieder Systematiken herausarbeiten, wie zum Beispiel zirkul{\"a}re Entwicklungen, die {\"u}ber viele Jahre den Weg hin zu Good Governance verhinderten, bis jeweils Ausbr{\"u}che aus den Kreisl{\"a}ufen geschafft werden konnten. 
Sowohl in der demokratischen und rechtsstaatlichen Entwicklung als auch bezogen auf die Versorgung der Bev{\"o}lkerung mit {\"o}ffentlichen G{\"u}tern und die wirtschaftliche Entwicklung. Auch wenn die verschiedenen Bereiche von Good Governance zun{\"a}chst einzeln untersucht werden, so zeigen sich gleichzeitig deutlich die Wechselwirkungen der Komponenten. Zum Beispiel kristallisiert sich klar heraus, dass Rechtsstaatlichkeit sowohl auf die Stabilit{\"a}t politischer Systeme wirkt, als auch auf die wirtschaftliche Entwicklung. Ebenso beeinflussen diese wiederum die Korruption. {\"A}hnliche Verkn{\"u}pfungen lassen sich auch bei allen anderen Bereichen nachvollziehen. Die Entwicklung eines Landes kann also nur unter Ber{\"u}cksichtigung eines komplexen Governance-Systems verstanden und erkl{\"a}rt werden. Dabei k{\"o}nnen die Wechselwirkungen entweder konstruktiv oder destruktiv sein. Die Verflechtungen der einzelnen Bereiche werden in einem Negativ- und dann in einem Positiv-Szenario festgehalten. Diese Idealtypen-Bildung spitzt die Erkenntnisse der vorliegenden Arbeit zu und dient dem analytischen Verst{\"a}ndnis der untersuchten Prozesse. Die Untersuchung zeigt, wie Good Governance {\"u}ber das Zusammenspiel verschiedener Faktoren entstehen kann und dass es wissenschaftlich sehr ertragreich ist, Transformationsforschung auf ein komplexes Governance-System auszuweiten. Hierbei werden die vielen empirisch erarbeiteten Ergebnisse zu den einzelnen Transformationen zu komplexen, in sich greifenden Gesamtszenarien zusammengef{\"u}hrt. Da es bisher keine explizite Good Governance-Transformationsforschung gab, wurde hiermit ein erster Schritt in diese Richtung getan. Es wird dar{\"u}ber hinaus deutlich, dass eine Transformation zu Good Governance nicht durch eine kurzfristige Ver{\"a}nderung der Rahmenbedingungen zu erreichen ist. 
Es geht um kulturelle Ver{\"a}nderungen, um Lernprozesse, um langfristige Entwicklungen, die in der Studie am Beispiel Ghana analysiert werden. In vielen vorangegangenen Transformationsstudien wurde diese zeitliche Komponente vernachl{\"a}ssigt. Ghana hat bereits viele Schritte getan, um einen Weg in die Zukunft und zu Good Governance zu finden. Die Untersuchung dieser Schritte ist Kern der vorliegenden Arbeit. Der Weg Ghanas ist jedoch noch nicht abgeschlossen.}, language = {de} } @phdthesis{Awad2010, author = {Awad, Ahmed Mahmoud Hany Aly}, title = {A compliance management framework for business process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49222}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Companies develop process models to explicitly describe their business operations. In the same time, business operations, business processes, must adhere to various types of compliance requirements. Regulations, e.g., Sarbanes Oxley Act of 2002, internal policies, best practices are just a few sources of compliance requirements. In some cases, non-adherence to compliance requirements makes the organization subject to legal punishment. In other cases, non-adherence to compliance leads to loss of competitive advantage and thus loss of market share. Unlike the classical domain-independent behavioral correctness of business processes, compliance requirements are domain-specific. Moreover, compliance requirements change over time. New requirements might appear due to change in laws and adoption of new policies. Compliance requirements are offered or enforced by different entities that have different objectives behind these requirements. Finally, compliance requirements might affect different aspects of business processes, e.g., control flow and data flow. As a result, it is infeasible to hard-code compliance checks in tools. 
Rather, a repeatable process of modeling compliance rules and checking them against business processes automatically is needed. This thesis provides a formal approach to support process design-time compliance checking. Using visual patterns, it is possible to model compliance requirements concerning control flow, data flow and conditional flow rules. Each pattern is mapped into a temporal logic formula. The thesis addresses the problem of consistency checking among various compliance requirements, as they might stem from divergent sources. Also, the thesis contributes to automatically check compliance requirements against process models using model checking. We show that extra domain knowledge, other than expressed in compliance rules, is needed to reach correct decisions. In case of violations, we are able to provide a useful feedback to the user. The feedback is in the form of parts of the process model whose execution causes the violation. In some cases, our approach is capable of providing automated remedy of the violation.}, language = {en} } @phdthesis{Baas2010, author = {Baas, Timo}, title = {Unter welchen Bedingungen ist ein Beitritt zu einer W{\"a}hrungsunion optimal? : Eine Analyse stabilit{\"a}tspolitischer Konsequenzen, statischer Effekte und wachstumstheoretischer Implikationen einer Osterweiterung der Europ{\"a}ischen W{\"a}hrungsunion}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-43446}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Die Europ{\"a}ische W{\"a}hrungsunion (EWU) umfasst heute 16 Staaten mit insgesamt 321 Millionen Einwohnern, sie ist mit einem Bruttoinlandsprodukt von 22,9 Billionen Euro einer der gr{\"o}ßten Wirtschaftsr{\"a}ume der Erde. In den n{\"a}chsten Jahren wird die EWU durch die Aufnahme der 2004 und 2007 beigetretenen neuen EU-L{\"a}nder weiter wachsen. Da der Beitritt von der Erf{\"u}llung der Kriterien von Maastricht abh{\"a}ngt, erfolgt die Erweiterung im Gegensatz zur 5. 
Erweiterungsrunde der EU nicht als Block, sondern sequentiell. Nach den Beitritten von Slowenien am 1.1.2007 und der Slowakei zum 1.1.2009 steht der Beitritt eines ersten großen Landes in den n{\"a}chsten Jahren bevor. Daher st{\"o}ßt die Frage der Effekte eines solchen Beitritts seit geraumer Zeit auf breites Interesse in der {\"o}konomischen Literatur. Das Forschungsziel der Dissertation ist es, die theoretischen Wirkungsmechanismen eines Beitritts der neuen Mitgliedsl{\"a}nder zur Europ{\"a}ischen W{\"a}hrungsunion abzubilden. Hierzu werden m{\"o}gliche stabilit{\"a}tspolitische Konsequenzen sowie die Auswirkungen eines Beitritts auf die geografische Wirtschaftsstruktur und das Wachstum dieser L{\"a}nder in theoretischen Modellen abgeleitet. Die direkten Effekte des Beitritts werden in einem angewandt-theoretischen Modell zudem quantifiziert. Insgesamt wird der Beitritt aus drei verschiedenen Perspektiven analysiert: Erstens werden die Konsequenzen der W{\"a}hrungsunion f{\"u}r die Stabilit{\"a}tspolitik der neuen Mitgliedsl{\"a}nder im Rahmen eines neukeynesianischen Modells betrachtet. Zweitens werden die mit der Transaktionskostensenkung verbundenen Gewinne in einem angewandten Gleichgewichtsmodell quantifiziert. Drittens werden die wachstumstheoretischen Wirkungen der Finanzmarktintegration in einem dynamischen Gleichgewichtsmodell untersucht. Da die drei Aspekte der makro{\"o}konomischen Stabilit{\"a}t, der Transaktionskostensenkung und der dynamischen Wirkungen der Finanzmarktintegration weitgehend unabh{\"a}ngig voneinander auftreten, ist die Verwendung verschiedener Modelle mit geringen Kosten verbunden. In der Gesamtbeurteilung des EWU-Beitritts der neuen EU-L{\"a}nder kommt diese Arbeit zu einer anderen Einsch{\"a}tzung als bisherige Studien. Die in Teil eins ermittelten stabilit{\"a}tspolitischen Konsequenzen sind entweder neutral oder implizieren bei Beitritt zur W{\"a}hrungsunion eine gr{\"o}ßere Stabilit{\"a}t. 
Die in Teil zwei und drei ermittelten statischen und dynamischen Gewinne eines Beitritts sind zudem erheblich, so dass ein schneller Beitritt zur W{\"a}hrungsunion f{\"u}r die neuen EU-Mitgliedsl{\"a}nder vorteilhaft ist. Unter Ber{\"u}cksichtigung der Ziele der Europ{\"a}ischen Wirtschafts- und W{\"a}hrungsunion (EWWU) m{\"u}ssen hierzu jedoch zwei Bedingungen erf{\"u}llt sein. Einerseits sind hinreichend entwickelte Finanzm{\"a}rkte notwendig, um das Ziel einer Konvergenz der neuen und alten EU-Mitgliedsl{\"a}nder zu erreichen. Andererseits wird der Gesamtraum von einer st{\"a}rkeren Finanzmarktintegration und einer Senkung der Transaktionskosten profitieren, jedoch durch die {\"U}bertragung von Schocks der neuen Mitgliedsl{\"a}nder instabiler. Daher kann der Beitritt der neuen Mitgliedsl{\"a}nder zur EWU f{\"u}r den Gesamtraum negativ sein. Diese Kosten sind nur dann zu rechtfertigen, falls {\"u}ber die schnellere Entwicklung der neuen Mitgliedsstaaten eine h{\"o}here Stabilit{\"a}t des W{\"a}hrungsraumes erzielt wird. Das neukeynesianische Wachstumsmodell gibt Hinweise, dass eine solche Entwicklung eintreten k{\"o}nnte.}, language = {de} } @phdthesis{Berger2010, author = {Berger, Anja}, title = {Entwicklung und Validierung eines Inventars zur Erfassung positiver und negativer Attribute des Geschlechtsrollenselbstkonzepts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51215}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Das Geschlechtsrollenselbstkonzept, das sich im Laufe der Sozialisation in Auseinandersetzung mit den vorherrschenden Vorstellungen der umgebenden Kultur entwickelt, steht in Beziehung zu Affekten, Kognitionen und Verhaltensweisen in einer Vielzahl von Bereichen. Bisherige GSK-Instrumente messen jedoch nahezu ausschließlich den positiven Aspekt von Maskulinit{\"a}t und Femininit{\"a}t. 
Die Definition des allgemeinen Selbstkonzepts gibt diese Limitierung auf positive Valenz nicht vor, und aus gesundheitspsychologischer Sicht sowie der Gruppenforschung ist die Bedeutung negativer Eigenschaften zur Selbstbeschreibung bekannt. Vor diesem Hintergrund wurden sieben aufeinander aufbauende Studien durchgef{\"u}hrt mit dem Ziel ein neues Instrument zu entwickeln, deren Items zum einen kulturell aktuellen Eigenschaften zur Selbstbeschreibung entsprechen und zum anderen die Valenzunterschiede dieser Merkmalsbeschreibungen ber{\"u}cksichtigen. Nach einer kritischen empirischen {\"U}berpr{\"u}fung des deutschen BSRI, um Schw{\"a}chen der Items ausschließlich positiver Valenz aufzudecken, wurde eine neue Skala entwickelt, die von Beginn an auch negative Selbstbeschreibungen ber{\"u}cksichtigte um der Komplexit{\"a}t des geschlechtlichen Selbst gerecht zu werden. Aufgrund der Einsch{\"a}tzungen zur Typizit{\"a}t und sozialen Erw{\"u}nschtheit sowie mit ersten Resultaten aus der Selbstbeschreibung wurde die Auswahl der Items f{\"u}r die Teilskalen vorgenommen. In zwei weiteren Studien wurden schließlich die vier neu entwickelten Teilskalen des neuen GSK-Inventars einer Validierung unterzogen. Jeder der Teilskalen wurden theoriegeleitet spezifische Konstrukte zugeordnet und es konnte nachgewiesen werden, dass alle Teilskalen ihren eigenen Beitrag zur Vorhersage psychologischer Konzepte leisten k{\"o}nnen. So standen beispielsweise die negativen maskulinen Eigenschaften in engerer Beziehung zu Aggressivit{\"a}t und machtbezogenen Werten als die positiven Aspekte der Maskulinit{\"a}t. 
Als Ergebnis dieser Entwicklung stehen am Ende vier kurze, unabh{\"a}ngige, reliable Teilskalen, die positive als auch negative Aspekte von Maskulinit{\"a}t und Femininit{\"a}t abbilden und mittels sehr unterschiedlicher psychologischer Erlebens- und Verhaltenskonstrukte validiert wurden, die die Unabh{\"a}ngigkeit der Skalen belegen und diese f{\"u}r einen Einsatz in der Forschung empfehlen. Die Einf{\"u}hrung einer individuellen Wertkomponente im Zuge der Selbstbeschreibung, angelehnt an das bekannte Erwartungs-mal-Wert Modell der Motivations- und Einstellungsforschung, und die daraus m{\"o}gliche multiplikative Verkn{\"u}pfung von Selbsteinsch{\"a}tzung und pers{\"o}nlicher Wichtigkeit der Eigenschaften konnten den Aufkl{\"a}rungswert in Bezug auf unterschiedliche Validierungskonstrukte dagegen nicht verbessern und wurden daher nicht ins das Instrument integriert.}, language = {de} } @phdthesis{Berndorff2010, author = {Berndorff, Lothar}, title = {Die Prediger der Grafschaft Mansfeld}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-008-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33896}, school = {Universit{\"a}t Potsdam}, pages = {404}, year = {2010}, abstract = {Am 22. Oktober 1565 beauftragte der Herzog Julius von Braunschweig-Wolfenb{\"u}ttel seinen Prediger Martin Chemnitz, das literarische Oeuvre des Magisters Cyriacus Spangenberg auf dem Buchmarkt ausfindig zu machen, prunkvoll binden zu lassen und in den herz{\"o}glichen Bibliotheksbestand aufzunehmen. 64 Werke mit gut 6000 Seiten hatte der Mansfelder Generaldekan Spangenberg zu diesem Zeitpunkt bereits verfasst, seine Amtskollegen in der s{\"a}chsischen Grafschaft hatten ihrerseits 64 B{\"u}cher ver{\"o}ffentlicht. Bis zum Abgang Spangenbergs aus Mansfeld 1574 verdoppelte sich die Anzahl geistlicher Ver{\"o}ffentlichungen Mansfelder Provenienz. 
Obwohl zu Lebzeiten breit rezipiert, hat die Publizistik der geistlichen "Druckmetropole" Mansfeld in der Geschichte und Kirchengeschichte wenig Beachtung gefunden. Die vorliegende Dissertation will diese Forschungsl{\"u}cke schließen. Die Mansfelder Prediger verfassten Lehrpredigten, Festpredigten, Trostpredigten, Katechismen, theologische Disputationen, historische Abhandlungen und geistliche Spiele in hoher Zahl und publizierten diese unter geschickter Ausnutzung der Mechanismen der fr{\"u}hneuzeitlichen Buchmarktes reichsweit. Ihre Ver{\"o}ffentlichungen richteten sich an Theologen, "Weltkinder" und "Einf{\"a}ltige". Sie generierten Verbindungen zu den Kirchen und Potentaten Nord- und S{\"u}ddeutschlands, Frankreichs und der Niederlande und f{\"u}hrten zu Kontroversen mit den großen Bildungszentren Mitteldeutschlands Wittenberg, Leipzig und Jena und deren Landesherren. Die Frage nach der Motivation f{\"u}r das Engagement der Mansfelder Prediger auf dem Buchmarkt steht im Zentrum der Untersuchung, die in einem synoptischen Verfahren den Wunsch nach Teilhaberschaft an der Ausbildung der kirchlichen, herrschaftlichen, sozialen und kommunikativen Strukturen als zentrales Motiv der schreibenden Theologen herausarbeitet, aber auch die Absicht der Autoren beweist, der Grafschaft Mansfeld {\"u}ber das Medium Buch als lutherischem Bildungszentrum in Europa Geltung zu verschaffen.}, language = {de} } @phdthesis{Blessmann2010, author = {Bleßmann, Daniela}, title = {Der Einfluss der Dynamik auf die stratosph{\"a}rische Ozonvariabilit{\"a}t {\"u}ber der Arktis im Fr{\"u}hwinter}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51394}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Der fr{\"u}hwinterliche Ozongehalt ist ein Indikator f{\"u}r den Ozongehalt im Sp{\"a}twinter/Fr{\"u}hjahr. Jedoch weist dieser aufgrund von Absinkprozessen, chemisch bedingten Ozonabbau und Wellenaktivit{\"a}t von Jahr zu Jahr starke Schwankungen auf. 
Die vorliegende Arbeit zeigt, dass diese Variabilit{\"a}t weitestgehend auf dynamische Prozesse w{\"a}hrend der Wirbelbildungsphase des arktischen Polarwirbels zur{\"u}ckgeht. Ferner wird der bisher noch ausstehende Zusammenhang zwischen dem fr{\"u}h- und sp{\"a}twinterlichen Ozongehalt bez{\"u}glich Dynamik und Chemie aufgezeigt. F{\"u}r die Untersuchung des Zusammenhangs zwischen der im Polarwirbel eingeschlossenen Luftmassenzusammensetzung und Ozonmenge wurden Beobachtungsdaten von Satellitenmessinstrumenten und Ozonsonden sowie Modellsimulationen des Lagrangschen Chemie/Transportmodells ATLAS verwandt. Die {\"u}ber die Fl{\"a}che (45-75°N) und Zeit (August-November) gemittelte Vertikalkomponente des Eliassen-Palm-Flussvektors durch die 100hPa-Fl{\"a}che zeigt eine Verbindung zwischen der fr{\"u}hwinterlichen wirbelinneren Luftmassenzusammensetzung und der Wirbelbildungsphase auf. Diese ist jedoch nur f{\"u}r die untere Stratosph{\"a}re g{\"u}ltig, da die Vertikalkomponente die sich innerhalb der Stratosph{\"a}re {\"a}ndernden Wellenausbreitungsbedingungen nicht erfasst. F{\"u}r eine verbesserte H{\"o}hendarstellung des Signals wurde eine neue integrale auf der Wellenamplitude und dem Charney-Drazin-Kriterium basierende Gr{\"o}ße definiert. Diese neue Gr{\"o}ße verbindet die Wellenaktivit{\"a}t w{\"a}hrend der Wirbelbildungsphase sowohl mit der Luftmassenzusammensetzung im Polarwirbel als auch mit der Ozonverteilung {\"u}ber die Breite. Eine verst{\"a}rkte Wellenaktivit{\"a}t f{\"u}hrt zu mehr Luft aus niedrigeren ozonreichen Breiten im Polarwirbel. Aber im Herbst und Fr{\"u}hwinter zerst{\"o}ren chemische Prozesse, die das Ozon ins Gleichgewicht bringen, die interannuale wirbelinnere Ozonvariablit{\"a}t, die durch dynamische Prozesse w{\"a}hrend der arktischen Polarwirbelbildungsphase hervorgerufen wird. 
Eine Analyse in Hinblick auf den Fortbestand einer dynamisch induzierten Ozonanomalie bis in den Mittwinter erm{\"o}glicht eine Absch{\"a}tzung des Einflusses dieser dynamischen Prozesse auf den arktischen Ozongehalt. Zu diesem Zweck wurden f{\"u}r den Winter 1999-2000 Modelll{\"a}ufe mit dem Lagrangesche Chemie/Transportmodell ATLAS gerechnet, die detaillierte Informationen {\"u}ber den Erhalt der k{\"u}nstlichen Ozonvariabilit{\"a}t hinsichtlich Zeit, H{\"o}he und Breite liefern. Zusammengefasst, besteht die dynamisch induzierte Ozonvariabilit{\"a}t w{\"a}hrend der Wirbelbildungsphase l{\"a}nger im Inneren als im {\"A}ußeren des Polarwirbels und verliert oberhalb von 750K potentieller Temperatur ihre signifikante Wirkung auf die mittwinterliche Ozonvariabilit{\"a}t. In darunterliegenden H{\"o}henbereichen ist der Anteil an der urspr{\"u}nglichen St{\"o}rung groß, bis zu 90\% auf der 450K. Innerhalb dieses H{\"o}henbereiches {\"u}ben die dynamischen Prozesse w{\"a}hrend der Wirbelbildungsphase einen entscheidenden Einfluss auf den Ozongehalt im Mittwinter aus.}, language = {de} } @phdthesis{Blum2010, author = {Blum, Niklas}, title = {Formalization of a converged internet and telecommunications service environment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51146}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The programmable network envisioned in the 1990s within standardization and research for the Intelligent Network is currently coming into reality using IPbased Next Generation Networks (NGN) and applying Service-Oriented Architecture (SOA) principles for service creation, execution, and hosting. SOA is the foundation for both next-generation telecommunications and middleware architectures, which are rapidly converging on top of commodity transport services. 
Services such as triple/quadruple play, multimedia messaging, and presence are enabled by the emerging service-oriented IPMultimedia Subsystem (IMS), and allow telecommunications service providers to maintain, if not improve, their position in the marketplace. SOA becomes the de facto standard in next-generation middleware systems as the system model of choice to interconnect service consumers and providers within and between enterprises. We leverage previous research activities in overlay networking technologies along with recent advances in network abstraction, service exposure, and service creation to develop a paradigm for a service environment providing converged Internet and Telecommunications services that we call Service Broker. Such a Service Broker provides mechanisms to combine and mediate between different service paradigms from the two domains Internet/WWW and telecommunications. Furthermore, it enables the composition of services across these domains and is capable of defining and applying temporal constraints during creation and execution time. By adding network-awareness into the service fabric, such a Service Broker may also act as a next generation network-to-service element allowing the composition of crossdomain and cross-layer network and service resources. The contribution of this research is threefold: first, we analyze and classify principles and technologies from Information Technologies (IT) and telecommunications to identify and discuss issues allowing cross-domain composition in a converging service layer. Second, we discuss service composition methods allowing the creation of converged services on an abstract level; in particular, we present a formalized method for model-checking of such compositions. Finally, we propose a Service Broker architecture converging Internet and Telecom services. 
This environment enables cross-domain feature interaction in services through formalized obligation policies acting as constraints during service discovery, creation, and execution time.}, language = {en} } @phdthesis{Borchers2010, author = {Borchers, Andreas}, title = {Glaciomarine sedimentation at the continental margin of Prydz Bay, East Antarctica : implications on palaeoenvironmental changes during the Quaternary}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52620}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The Antarctic plays an important role in the global climate system. On the one hand, the Antarctic Ice Sheet is the largest freshwater reservoir on Earth. On the other hand, a major proportion of the global bottom-water formation takes place in Antarctic shelf regions, forcing the global thermohaline circulation. The main goal of this dissertation is to provide new insights into the dynamics and stability of the EAIS during the Quaternary. Additionally, variations in the activity of bottom-water formation and their causes are investigated. The dissertation is a German contribution to the International Polar Year 2007/ 2008 and was funded by the 'Deutsche Forschungsgesellschaft' (DFG) within the scope of priority program 1158 'Antarctic research with comparative studies in Arctic ice regions'. During RV Polarstern expedition ANT-XXIII/9, glaciomarine sediments were recovered from the Prydz Bay-Kerguelen region. Prydz Bay is a key region for the study of East EAIS dynamics, as 16\% of the EAIS are drained through the Lambert Glacier into the bay. Thereby, the glacier transports sediment into Prydz Bay which is then further distributed by calving icebergs or by current transport. The scientific approach of this dissertation is the reconstruction of past glaciomarine environments to infer on the response of the Lambert Glacier-Amery Ice Shelf system to climate shifts during the Quaternary. 
To characterize the depositional setting, sedimentological methods are used and statistical analyses are applied. Mineralogical and (bio)geochemical methods provide a means to reconstruct sediment provenances and to provide evidence on changes in the primary production in the surface water column. Age-depth models were constructed based on palaeomagnetic and palaeointensity measurements, diatom stratigraphy and radiocarbon dating. Sea-bed surface sediments in the investigation area show distinct variations in terms of their clay minerals and heavy-mineral assemblages. Considerable differences in the mineralogical composition of surface sediments are determined on the continental shelf. Clay minerals as well as heavy minerals provide useful parameters to differentiate between sediments which originated from erosion of crystalline rocks and sediments originating from Permo-Triassic deposits. Consequently, mineralogical parameters can be used to reconstruct the provenance of current-transported and ice-rafted material. The investigated sediment cores cover the time intervals of the last 1.4 Ma (continental slope) and the last 12.8 cal. ka BP (MacRobertson shelf). The sediment deposits were mainly influenced by glacial and oceanographic processes and further by biological activity (continental shelf), meltwater input and possibly gravitational transport. Sediments from the continental slope document two major deglacial events: the first deglaciation is associated with the mid-Pleistocene warming recognized around the Antarctic. In Prydz Bay, the Lambert Glacier-Amery Ice Shelf retreated far to the south and high biogenic productivity commenced or biogenic remains were better preserved due to increased sedimentation rates. Thereafter, stable glacial conditions continued until 400 - 500 ka BP. Calving of icebergs was restricted to the western part of the Lambert Glacier. 
The deeper bathymetry in this area allows for floating ice shelf even during times of decreased sea-level. Between 400 - 500 ka BP and the last interglacial (marine isotope stage 5) the glacier was more dynamic. During or shortly after the last interglacial the LAIS retreated again due to sea-level rise of 6 - 9 m. Both deglacial events correlate with a reduction in the thickness of ice masses in the Prince Charles Mountains. It indicates that a disintegration of the Amery Ice Shelf possibly led to increased drainage of ice masses from the Prydz Bay hinterland. A new end-member modelling algorithm was successfully applied on sediments from the MacRobertson shelf used to unmix the sand grain size fractions sorted by current activity and ice transport, respectively. Ice retreat on MacRobertson Shelf commenced 12.8 cal. ka BP and ended around 5.5 cal. ka BP. During the Holocene, strong fluctuations of the bottom-water activity were observed, probably related to variations of sea-ice formation in the Cape Darnley polynya. Increased activity of bottom-water flow was reconstructed at transitions from warm to cool conditions, whereas bottom-water activity receded during the mid-Holocene climate optimum. It can be concluded that the Lambert Glacier-Amery Ice Shelf system was relatively stable in terms of climate variations during the Quaternary. In contrast, bottom-water formation due to polynya activity was very sensitive to changes in atmospheric forcing and should gain more attention in future research.}, language = {en} } @phdthesis{Borchert2010, author = {Borchert, Manuela}, title = {Interactions between aqueous fluids and silicate melts : equilibration, partitioning and complexation of trace elements}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42088}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The origin and evolution of granites has been widely studied because granitoid rocks constitute a major portion of the Earth's crust. 
The formation of granitic magma is, besides temperature, mainly triggered by the water content of these rocks. The presence of water in magmas plays an important role due to the ability of aqueous fluids to change the chemical composition of the magma. The exsolution of aqueous fluids from melts is closely linked to a fractionation of elements between the two phases. Then, aqueous fluids migrate to shallower parts of the Earth's crust because of its lower density compared to that of melts and adjacent rocks. This process separates fluids and melts, and furthermore, during the ascent, aqueous fluids can react with the adjacent rocks and alter their chemical signature. This is particularly important during the formation of magmatic-hydrothermal ore deposits or in the late stages of the evolution of magmatic complexes. For a deeper insight to these processes, it is essential to improve our knowledge on element behavior in such systems. In particular, trace elements are used for these studies and petrogenetic interpretations because, unlike major elements, they are not essential for the stability of the phases involved and often reflect magmatic processes with less ambiguity. However, for the majority of important trace elements, the dependence of the geochemical behavior on temperature, pressure, and in particular on the composition of the system are only incompletely or not at all experimentally studied. Former studies often focus on the determination of fluid-melt partition coefficients (Df/m=cfluid/cmelt) of economically interesting elements, e.g., Mo, Sn, Cu, and there are some partitioning data available for elements that are also commonly used for petrological interpretations. At present, no systematic experimental data on trace element behavior in fluid-melt systems as function of pressure, temperature, and chemical composition are available. Additionally, almost all existing data are based on the analysis of quenched phases. 
This results in substantial uncertainties, particularly for the quenched aqueous fluid because trace element concentrations may change upon cooling. The objective of this PhD thesis consisted in the study of fluid-melt partition coefficients between aqueous solutions and granitic melts for different trace elements (Rb, Sr, Ba, La, Y, and Yb) as a function of temperature, pressure, salinity of the fluid, composition of the melt, and experimental and analytical approach. The latter included the refinement of an existing method to measure trace element concentrations in fluids equilibrated with silicate melts directly at elevated pressures and temperatures using a hydrothermal diamond-anvil cell and synchrotron radiation X-ray fluorescence microanalysis. The application of this in-situ method enables to avoid the main source of error in data from quench experiments, i.e., trace element concentration in the fluid. A comparison of the in-situ results to data of conventional quench experiments allows a critical evaluation of quench data from this study and literature data. In detail, starting materials consisted of a suite of trace element doped haplogranitic glasses with ASI varying between 0.8 and 1.4 and H2O or a chloridic solution with m NaCl/KCl=1 and different salinities (1.16 to 3.56 m (NaCl+KCl)). Experiments were performed at 750 to 950{\textdegree}C and 0.2 or 0.5 GPa using conventional quench devices (externally and internally heated pressure vessels) with different quench rates, and at 750{\textdegree}C and 0.2 to 1.4 GPa with in-situ analysis of the trace element concentration in the fluids. The fluid-melt partitioning data of all studied trace elements show 1. a preference for the melt (Df/m < 1) at all studied conditions, 2. one to two orders of magnitude higher Df/m using chloridic solutions compared to experiments with H2O, 3. a clear dependence on the melt composition for fluid-melt partitioning of Sr, Ba, La, Y, and Yb in experiments using chloridic solutions, 4. 
quench rate-related differences of fluid-melt partition coefficients of Rb and Sr, and 5. distinctly higher fluid-melt partitioning data obtained from in-situ experiments than from comparable quench runs, particularly in the case of H2O as starting solution. The data point to a preference of all studied trace elements for the melt even at fairly high salinities, which contrasts with other experimental studies, but is supported by data from studies of natural co-genetically trapped fluid and melt inclusions. The in-situ measurements of trace element concentrations in the fluid verify that aqueous fluids will change their composition upon cooling, which is in particular important for Cl free systems. The distinct differences of the in-situ results to quench data of this study as well as to data from the literature signify the importance of a careful fluid sampling and analysis. Therefore, the direct measurement of trace element contents in fluids equilibrated with silicate melts at elevated PT conditions represents an important development to obtain more reliable fluid-melt partition coefficients. For further improvement, both the aqueous fluid and the silicate melt need to be analyzed in-situ because partitioning data that are based on the direct measurement of the trace element content in the fluid and analysis of a quenched melt are still not completely free of quench effects. At present, all available data on element complexation in aqueous fluids in equilibrium with silicate melts at high PT are indirectly derived from partitioning data, which involves in these experiments assumptions on the species present in the fluid. However, the activities of chemical components in these partitioning experiments are not well constrained, which is required for the definition of exchange equilibria between melt and fluid species. 
For example, the melt-dependent variation of partition coefficient observed for Sr imply that this element can not only be complexed by Cl- as suggested previously. The data indicate a more complicated complexation of Sr in the aqueous fluid. To verify this hypothesis, the in-situ setup was also used to determine strontium complexation in fluids equilibrated with silicate melts at desired PT conditions by the application of X-ray absorption near edge structure (XANES) spectroscopy. First results show a strong effect of both fluid and melt composition on the resulting XANES spectra, which indicates different complexation environments for Sr.}, language = {en} } @phdthesis{Borkovic2010, author = {Borkovi{\'c}, Vladimir}, title = {Evaluation kommunaler Sportprojekte zur sozialen Integration von Heranwachsenden}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-051-9}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48186}, school = {Universit{\"a}t Potsdam}, pages = {372}, year = {2010}, abstract = {Gegenstand der Studie ist die Evaluation eines kommunalen Sportprojekts. Die Forschungsarbeit entstand aus der wachsenden Erkenntnis heraus, dass es nicht mehr nur um die Entwicklung und Durchf{\"u}hrung kommunaler oder sozialer Projekte geht, sondern zunehmend darauf ankommt, die Projektarbeit zu evaluieren, um ihren Einfluss auf die kommunale, soziale und personale Entwicklung zu pr{\"u}fen und in der Folge die Implementierung zu optimieren. Die unterschiedlichen Schritte in der Definition des theoretischen Rahmens, der Datenanalyse sowie der Erarbeitung der evaluativen Empfehlungen wurden unternommen mit dem Anspruch auf Modellcharakter, um f{\"u}r zuk{\"u}nftige Evaluationsvorhaben entsprechende Standards zu setzen. Die Grundidee des kommunalen Sportprojekts „Straßenfußball f{\"u}r Toleranz" ist innovativ: M{\"a}dchen und Jungen erobern durch gemeinsames Fußballspielen den {\"o}ffentlichen Raum. 
Sie spielen ohne Schiedsrichter und nach speziellen Regeln. Das Projekt richtet sich ausdr{\"u}cklich an sozial benachteiligte Jugendliche und bezieht gleichermaßen Jungen wie M{\"a}dchen ein.}, language = {de} } @phdthesis{Brauer2010, author = {Brauer, Falk}, title = {Extraktion und Identifikation von Entit{\"a}ten in Textdaten im Umfeld der Enterprise Search}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51409}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Die automatische Informationsextraktion (IE) aus unstrukturierten Texten erm{\"o}glicht v{\"o}llig neue Wege, auf relevante Informationen zuzugreifen und deren Inhalte zu analysieren, die weit {\"u}ber bisherige Verfahren zur Stichwort-basierten Dokumentsuche hinausgehen. Die Entwicklung von Programmen zur Extraktion von maschinenlesbaren Daten aus Texten erfordert jedoch nach wie vor die Entwicklung von dom{\"a}nenspezifischen Extraktionsprogrammen. Insbesondere im Bereich der Enterprise Search (der Informationssuche im Unternehmensumfeld), in dem eine große Menge von heterogenen Dokumenttypen existiert, ist es oft notwendig ad-hoc Programmmodule zur Extraktion von gesch{\"a}ftsrelevanten Entit{\"a}ten zu entwickeln, die mit generischen Modulen in monolithischen IE-Systemen kombiniert werden. Dieser Umstand ist insbesondere kritisch, da potentiell f{\"u}r jeden einzelnen Anwendungsfall ein von Grund auf neues IE-System entwickelt werden muss. Die vorliegende Dissertation untersucht die effiziente Entwicklung und Ausf{\"u}hrung von IE-Systemen im Kontext der Enterprise Search und effektive Methoden zur Ausnutzung bekannter strukturierter Daten im Unternehmenskontext f{\"u}r die Extraktion und Identifikation von gesch{\"a}ftsrelevanten Entit{\"a}ten in Dokumenten. Grundlage der Arbeit ist eine neuartige Plattform zur Komposition von IE-Systemen auf Basis der Beschreibung des Datenflusses zwischen generischen und anwendungsspezifischen IE-Modulen. 
Die Plattform unterst{\"u}tzt insbesondere die Entwicklung und Wiederverwendung von generischen IE-Modulen und zeichnet sich durch eine h{\"o}here Flexibilit{\"a}t und Ausdrucksm{\"a}chtigkeit im Vergleich zu vorherigen Methoden aus. Ein in der Dissertation entwickeltes Verfahren zur Dokumentverarbeitung interpretiert den Datenaustausch zwischen IE-Modulen als Datenstr{\"o}me und erm{\"o}glicht damit eine weitgehende Parallelisierung von einzelnen Modulen. Die autonome Ausf{\"u}hrung der Module f{\"u}hrt zu einer wesentlichen Beschleunigung der Verarbeitung von Einzeldokumenten und verbesserten Antwortzeiten, z. B. f{\"u}r Extraktionsdienste. Bisherige Ans{\"a}tze untersuchen lediglich die Steigerung des durchschnittlichen Dokumentendurchsatzes durch verteilte Ausf{\"u}hrung von Instanzen eines IE-Systems. Die Informationsextraktion im Kontext der Enterprise Search unterscheidet sich z. B. von der Extraktion aus dem World Wide Web dadurch, dass in der Regel strukturierte Referenzdaten z. B. in Form von Unternehmensdatenbanken oder Terminologien zur Verf{\"u}gung stehen, die oft auch die Beziehungen von Entit{\"a}ten beschreiben. Entit{\"a}ten im Unternehmensumfeld haben weiterhin bestimmte Charakteristiken: Eine Klasse von relevanten Entit{\"a}ten folgt bestimmten Bildungsvorschriften, die nicht immer bekannt sind, auf die aber mit Hilfe von bekannten Beispielentit{\"a}ten geschlossen werden kann, so dass unbekannte Entit{\"a}ten extrahiert werden k{\"o}nnen. Die Bezeichner der anderen Klasse von Entit{\"a}ten haben eher umschreibenden Charakter. Die korrespondierenden Umschreibungen in Texten k{\"o}nnen variieren, wodurch eine Identifikation derartiger Entit{\"a}ten oft erschwert wird. 
Zur effizienteren Entwicklung von IE-Systemen wird in der Dissertation ein Verfahren untersucht, das alleine anhand von Beispielentit{\"a}ten effektive Regul{\"a}re Ausdr{\"u}cke zur Extraktion von unbekannten Entit{\"a}ten erlernt und damit den manuellen Aufwand in derartigen Anwendungsf{\"a}llen minimiert. Verschiedene Generalisierungs- und Spezialisierungsheuristiken erkennen Muster auf verschiedenen Abstraktionsebenen und schaffen dadurch einen Ausgleich zwischen Genauigkeit und Vollst{\"a}ndigkeit bei der Extraktion. Bekannte Regellernverfahren im Bereich der Informationsextraktion unterst{\"u}tzen die beschriebenen Problemstellungen nicht, sondern ben{\"o}tigen einen (annotierten) Dokumentenkorpus. Eine Methode zur Identifikation von Entit{\"a}ten, die durch Graph-strukturierte Referenzdaten vordefiniert sind, wird als dritter Schwerpunkt untersucht. Es werden Verfahren konzipiert, welche {\"u}ber einen exakten Zeichenkettenvergleich zwischen Text und Referenzdatensatz hinausgehen und Teil{\"u}bereinstimmungen und Beziehungen zwischen Entit{\"a}ten zur Identifikation und Disambiguierung heranziehen. Das in der Arbeit vorgestellte Verfahren ist bisherigen Ans{\"a}tzen hinsichtlich der Genauigkeit und Vollst{\"a}ndigkeit bei der Identifikation {\"u}berlegen.}, language = {de} } @phdthesis{Boeniger2010, author = {B{\"o}niger, Urs}, title = {Attributes and their potential to analyze and interpret 3D GPR data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50124}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Based on technological advances made within the past decades, ground-penetrating radar (GPR) has become a well-established, non-destructive subsurface imaging technique. Catalyzed by recent demands for high-resolution, near-surface imaging (e.g., the detection of unexploded ordnances and subsurface utilities, or hydrological investigations), the quality of today's GPR-based, near-surface images has significantly matured. 
At the same time, the analysis of oil and gas related reflection seismic data sets has experienced significant advances. Considering the sensitivity of attribute analysis with respect to data positioning in general, and multi-trace attributes in particular, trace positioning accuracy is of major importance for the success of attribute-based analysis flows. Therefore, to study the feasibility of GPR-based attribute analyses, I first developed and evaluated a real-time GPR surveying setup based on a modern tracking total station (TTS). The combination of current GPR systems capability of fusing global positioning system (GPS) and geophysical data in real-time, the ability of modern TTS systems to generate a GPS-like positional output and wireless data transmission using radio modems results in a flexible and robust surveying setup. To elaborate the feasibility of this setup, I studied the major limitations of such an approach: system cross-talk and data delays known as latencies. Experimental studies have shown that when a minimal distance of ~5 m between the GPR and the TTS system is considered, the signal-to-noise ratio of the acquired GPR data using radio communication equals the one without radio communication. To address the limitations imposed by system latencies, inherent to all real-time data fusion approaches, I developed a novel correction (calibration) strategy to assess the gross system latency and to correct for it. This resulted in the centimeter trace accuracy required by high-frequency and/or three-dimensional (3D) GPR surveys. Having introduced this flexible high-precision surveying setup, I successfully demonstrated the application of attribute-based processing to GPR specific problems, which may differ significantly from the geological ones typically addressed by the oil and gas industry using seismic data. In this thesis, I concentrated on archaeological and subsurface utility problems, as they represent typical near-surface geophysical targets. 
Enhancing 3D archaeological GPR data sets using a dip-steered filtering approach, followed by calculation of coherency and similarity, allowed me to conduct subsurface interpretations far beyond those obtained by classical time-slice analyses. I could show that the incorporation of additional data sets (magnetic and topographic) and attributes derived from these data sets can further improve the interpretation. In a case study, such an approach revealed the complementary nature of the individual data sets and, for example, allowed conclusions about the source location of magnetic anomalies by concurrently analyzing GPR time/depth slices to be made. In addition to archaeological targets, subsurface utility detection and characterization is a steadily growing field of application for GPR. I developed a novel attribute called depolarization. Incorporation of geometrical and physical feature characteristics into the depolarization attribute allowed me to display the observed polarization phenomena efficiently. Geometrical enhancement makes use of an improved symmetry extraction algorithm based on Laplacian high-boosting, followed by a phase-based symmetry calculation using a two-dimensional (2D) log-Gabor filterbank decomposition of the data volume. To extract the physical information from the dual-component data set, I employed a sliding-window principal component analysis. The combination of the geometrically derived feature angle and the physically derived polarization angle allowed me to enhance the polarization characteristics of subsurface features. Ground-truth information obtained by excavations confirmed this interpretation. In the future, inclusion of cross-polarized antennae configurations into the processing scheme may further improve the quality of the depolarization attribute. 
In addition to polarization phenomena, the time-dependent frequency evolution of GPR signals might hold further information on the subsurface architecture and/or material properties. High-resolution, sparsity promoting decomposition approaches have recently had a significant impact on the image and signal processing community. In this thesis, I introduced a modified tree-based matching pursuit approach. Based on different synthetic examples, I showed that the modified tree-based pursuit approach clearly outperforms other commonly used time-frequency decomposition approaches with respect to both time and frequency resolutions. Apart from the investigation of tuning effects in GPR data, I also demonstrated the potential of high-resolution sparse decompositions for advanced data processing. Frequency modulation of individual atoms themselves allows to efficiently correct frequency attenuation effects and improve resolution based on shifting the average frequency level. GPR-based attribute analysis is still in its infancy. Considering the growing widespread realization of 3D GPR studies there will certainly be an increasing demand towards improved subsurface interpretations in the future. Similar to the assessment of quantitative reservoir properties through the combination of 3D seismic attribute volumes with sparse well-log information, parameter estimation in a combined manner represents another step in emphasizing the potential of attribute-driven GPR data analyses.}, language = {en} } @phdthesis{Childs2010, author = {Childs, Liam H.}, title = {Bioinformatics approaches to analysing RNA mediated regulation of gene expression}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41284}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {The genome can be considered the blueprint for an organism. Composed of DNA, it harbours all organism-specific instructions for the synthesis of all structural components and their associated functions. 
The role of carriers of actual molecular structure and functions was believed to be exclusively assumed by proteins encoded in particular segments of the genome, the genes. In the process of converting the information stored in genes into functional proteins, RNA - a third major molecule class - was discovered early on to act as a messenger by copying the genomic information and relaying it to the protein-synthesizing machinery. Furthermore, RNA molecules were identified to assist in the assembly of amino acids into native proteins. For a long time, these - rather passive - roles were thought to be the sole purpose of RNA. However, in recent years, new discoveries have led to a radical revision of this view. First, RNA molecules with catalytic functions - thought to be the exclusive domain of proteins - were discovered. Then, scientists realized that much more of the genomic sequence is transcribed into RNA molecules than there are proteins in cells, begging the question what the function of all these molecules is. Furthermore, very short and altogether new types of RNA molecules seemingly playing a critical role in orchestrating cellular processes were discovered. Thus, RNA has become a central research topic in molecular biology, even to the extent that some researchers dub cells as "RNA machines". This thesis aims to contribute towards our understanding of RNA-related phenomena by applying Bioinformatics means. First, we performed a genome-wide screen to identify sites at which the chemical composition of DNA (the genotype) critically influences phenotypic traits (the phenotype) of the model plant Arabidopsis thaliana. Whole genome hybridisation arrays were used and an informatics strategy developed, to identify polymorphic sites from hybridisation to genomic DNA. 
Following this approach, not only were genotype-phenotype associations discovered across the entire Arabidopsis genome, but also regions not currently known to encode proteins, thus representing candidate sites for novel RNA functional molecules. By statistically associating them with phenotypic traits, clues as to their particular functions were obtained. Furthermore, these candidate regions were subjected to a novel RNA-function classification prediction method developed as part of this thesis. While determining the chemical structure (the sequence) of candidate RNA molecules is relatively straightforward, the elucidation of its structure-function relationship is much more challenging. Towards this end, we devised and implemented a novel algorithmic approach to predict the structural and, thereby, functional class of RNA molecules. In this algorithm, the concept of treating RNA molecule structures as graphs was introduced. We demonstrate that this abstraction of the actual structure leads to meaningful results that may greatly assist in the characterization of novel RNA molecules. Furthermore, by using graph-theoretic properties as descriptors of structure, we identified particular structural features of RNA molecules that may determine their function, thus providing new insights into the structure-function relationships of RNA. The method (termed Grapple) has been made available to the scientific community as a web-based service. RNA has taken centre stage in molecular biology research and novel discoveries can be expected to further solidify the central role of RNA in the origin and support of life on earth. 
As illustrated by this thesis, Bioinformatics methods will continue to play an essential role in these discoveries.}, language = {en} } @phdthesis{Chirvasa2010, author = {Chirvasa, Mihaela}, title = {Finite difference methods for 1st Order in time, 2nd order in space, hyperbolic systems used in numerical relativity}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-42135}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis is concerned with the development of numerical methods using finite difference techniques for the discretization of initial value problems (IVPs) and initial boundary value problems (IBVPs) of certain hyperbolic systems which are first order in time and second order in space. This type of system appears in some formulations of Einstein equations, such as ADM, BSSN, NOR, and the generalized harmonic formulation. For IVP, the stability method proposed in [14] is extended from second and fourth order centered schemes, to 2n-order accuracy, including also the case when some first order derivatives are approximated with off-centered finite difference operators (FDO) and dissipation is added to the right-hand sides of the equations. For the model problem of the wave equation, special attention is paid to the analysis of Courant limits and numerical speeds. Although off-centered FDOs have larger truncation errors than centered FDOs, it is shown that in certain situations, off-centering by just one point can be beneficial for the overall accuracy of the numerical scheme. The wave equation is also analyzed in respect to its initial boundary value problem. All three types of boundaries - outflow, inflow and completely inflow that can appear in this case, are investigated. Using the ghost-point method, 2n-accurate (n = 1, 4) numerical prescriptions are prescribed for each type of boundary. The inflow boundary is also approached using the SAT-SBP method. 
In the end of the thesis, a 1-D variant of BSSN formulation is derived and some of its IBVPs are considered. The boundary procedures, based on the ghost-point method, are intended to preserve the interior 2n-accuracy. Numerical tests show that this is the case if sufficient dissipation is added to the rhs of the equations.}, language = {en} } @phdthesis{Creutzfeldt2010, author = {Creutzfeldt, Noah Angelo Benjamin}, title = {The effect of water storages on temporal gravity measurements and the benefits for hydrology}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48575}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Temporal gravimeter observations, used in geodesy and geophysics to study variation of the Earth's gravity field, are influenced by local water storage changes (WSC) and - from this perspective - add noise to the gravimeter signal records. At the same time, the part of the gravity signal caused by WSC may provide substantial information for hydrologists. Water storages are the fundamental state variable of hydrological systems, but comprehensive data on total WSC are practically inaccessible and their quantification is associated with a high level of uncertainty at the field scale. This study investigates the relationship between temporal gravity measurements and WSC in order to reduce the hydrological interfering signal from temporal gravity measurements and to explore the value of temporal gravity measurements for hydrology for the superconducting gravimeter (SG) of the Geodetic Observatory Wettzell, Germany. A 4D forward model with a spatially nested discretization domain was developed to simulate and calculate the local hydrological effect on the temporal gravity observations. An intensive measurement system was installed at the Geodetic Observatory Wettzell and WSC were measured in all relevant storage components, namely groundwater, saprolite, soil, top soil and snow storage. 
The monitoring system also comprised a suction-controlled, weighable, monolith-filled lysimeter, allowing a first-ever comparison of a lysimeter and a gravimeter. Lysimeter data were used to estimate WSC at the field scale in combination with complementary observations and a hydrological 1D model. Total local WSC were derived, uncertainties were assessed and the hydrological gravity response was calculated from the WSC. A simple conceptual hydrological model was calibrated and evaluated against records of a superconducting gravimeter, soil moisture and groundwater time series. The model was evaluated by a split sample test and validated against independently estimated WSC from the lysimeter-based approach. A simulation of the hydrological gravity effect showed that WSC of one meter height along the topography caused a gravity response of 52 $\mu$Gal, whereas, generally in geodesy, on flat terrain, the same water mass variation causes a gravity change of only 42 $\mu$Gal (Bouguer approximation). The radius of influence of local water storage variations can be limited to 1000 m and 50 \% to 80 \% of the local hydrological gravity signal is generated within a radius of 50 m around the gravimeter. At the Geodetic Observatory Wettzell, WSC in the snow pack, top soil, unsaturated saprolite and fractured aquifer are all important terms of the local water budget. With the exception of snow, all storage components have gravity responses of the same order of magnitude and are therefore relevant for gravity observations. The comparison of the total hydrological gravity response to the gravity residuals obtained from the SG showed similarities in both short-term and seasonal dynamics. However, the results demonstrated the limitations of estimating total local WSC using hydrological point measurements. The results of the lysimeter-based approach showed that gravity residuals are caused to a larger extent by local WSC than previously estimated. 
A comparison of the results with other methods used in the past to correct temporal gravity observations for the local hydrological influence showed that the lysimeter measurements improved the independent estimation of WSC significantly and thus provided a better way of estimating the local hydrological gravity effect. In the context of hydrological noise reduction, at sites where temporal gravity observations are used for geophysical studies beyond local hydrology, the installation of a lysimeter in combination with complementary hydrological measurements is recommended. From the hydrological viewpoint, using gravimeter data as a calibration constraint improved the model results in comparison to hydrological point measurements. Thanks to their capacity to integrate over different storage components and a larger area, gravimeters provide generalized information on total WSC at the field scale. Due to their integrative nature, gravity data must be interpreted with great care in hydrological studies. However, gravimeters can serve as a novel measurement instrument for hydrology and the application of gravimeters especially designed to study open research questions in hydrology is recommended.}, language = {en} } @phdthesis{DiGiacomo2010, author = {Di Giacomo, Domenico}, title = {Determination of the energy magnitude ME: application to rapid response purposes and insights to regional/local variabilities}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-50768}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Recent large earthquakes have highlighted the need to improve and develop robust and rapid procedures to properly calculate the magnitude of an earthquake in a short time after its occurrence. 
The most famous example is the 26 December 2004 Sumatra earthquake, when the standard procedures adopted at that time by many agencies failed to provide accurate magnitude estimates of this exceptional event in time to launch early enough warnings and appropriate response. Being related to the radiated seismic energy ES, the energy magnitude ME is a good estimator of the high frequency content radiated by the source which goes into the seismic waves. However, a procedure to rapidly determine ME (that is to say, within 15 minutes after the earthquake occurrence) was required. Here a procedure is presented that is able to provide in a rapid way the energy magnitude ME for shallow earthquakes by analyzing teleseismic P-waves in the distance range 20--98. To account for the energy loss experienced by the seismic waves from the source to the receivers, spectral amplitude decay functions obtained from numerical simulations of Green's functions based on the average global model AK135Q are used. The proposed method has been tested using a large global dataset (~1000 earthquakes) and the obtained rapid ME estimations have been compared to other magnitude scales from different agencies. Special emphasis is given to the comparison with the moment magnitude MW, since the latter is very popular and extensively used in common seismological practice. However, it is shown that MW alone provides only limited information about the seismic source properties, and that disaster management organizations would benefit from a combined use of MW and ME in the prompt evaluation of an earthquake's tsunami and shaking potential. In addition, since the proposed approach for ME is intended to work without knowledge of the fault plane geometry (often available only hours after an earthquake occurrence), the suitability of this method is discussed by grouping the analyzed earthquakes according to their type of mechanism (strike-slip, normal faulting, thrust faulting, etc.). 
No clear trend is found in the rapid ME estimates across the different fault plane solution groups. This is not the case for the ME routinely determined by the U.S. Geological Survey, which uses specific radiation pattern corrections. Further studies are needed to verify the effect of such corrections on ME estimates. Finally, exploiting the redundancy of the information provided by the analyzed dataset, the components of variance on the single station ME estimates are investigated. The largest component of variance is due to the intra-station (record-to-record) error, although the inter-station (station-to-station) error is not negligible and is of several magnitude units for some stations. Moreover, it is shown that the intra-station component of error is not random but depends on the travel path from a source area to a given station. Consequently, empirical corrections may be used to account for the heterogeneities of the real Earth not considered in the theoretical calculations of the spectral amplitude decay functions used to correct the recorded data for the propagation effects.}, language = {en} }