@phdthesis{Fischer2008, author = {Fischer, Anna}, title = {"Reactive hard templating" : from carbon nitrides to metal nitrides}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-19777}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Nanostructured inorganic materials are routinely synthesized by the use of templates. Depending on the synthesis conditions of the product material, either "soft" or "hard" templates can be applied. For sol-gel processes, usually "soft" templating techniques are employed, while "hard" templates are used for high-temperature synthesis pathways. In classical templating approaches, the template has the unique role of structure-directing agent, in the sense that it does not participate in the chemical formation of the resulting material. This work investigates a new templating pathway to nanostructured materials, where the template is also a reagent in the formation of the final material. This concept is described as "reactive templating" and opens a synthetic path toward materials which cannot be synthesized on a nanometer scale by classical templating approaches. Metal nitrides are one such class of materials. They are usually produced by the conversion of metals or metal oxides in ammonia flow at high temperature (T > 1000°C), which makes the application of classical templating techniques difficult. Graphitic carbon nitride, g-C3N4, is, beyond its fundamental and theoretical importance, probably one of the most promising materials to complement carbon in materials science, and much effort has been put into the synthesis of this material. A simple polyaddition/elimination reaction path at high temperature (T = 550°C) allows the polymerization of cyanamide toward graphitic carbon nitride solids. By hard templating, using nanostructured silica or aluminium oxide as nanotemplates, a variety of nanostructured graphitic carbon nitrides such as nanorods, nanotubes, and meso- and macroporous powders could be obtained by nanocasting or nanocoating. Due to the special semiconducting properties of the graphitic carbon nitride matrix, the nanostructured graphitic carbon nitrides show unexpected catalytic activity for the activation of benzene in Friedel-Crafts-type reactions, making this material an interesting metal-free catalyst. Furthermore, due to the chemical composition of g-C3N4 and the fact that it decomposes completely at temperatures between 600°C and 800°C even under inert atmosphere, g-C3N4 was shown to be a good nitrogen donor for the synthesis of early transition metal nitrides at high temperatures. Thus, using the nanostructured carbon nitrides as "reactive templates" or "nanoreactors", various metal nitride nanostructures, such as nanoparticles and porous frameworks, could be obtained at high temperature.
In this approach, the carbon nitride nanostructure played the roles of both nitrogen source and exotemplate, imprinting its size and shape onto the resulting metal nitride nanostructure.}, language = {en} } @phdthesis{Zeiske2008, author = {Zeiske, Anja}, title = {"Sexualit{\"a}t im angehenden Erwachsenenalter" : die sexuelle Handlungsf{\"a}higkeit junger Frauen und M{\"a}nner}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52346}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This thesis examined the relationships between the sexual experiences of young women and men, their personality traits and their sexual-moral attitudes on the one hand, and their assessment of their sexual agency ("sexuelle Handlungsf{\"a}higkeit") on the other. The model of sexual agency was based on the ideas of the research group around Matthias Grundmann (Grundmann et al. 2006) as well as those of Emirbayer and Mische (1998). The model of sexual agency developed in this thesis is a multidimensional construct composed of the components "sexual communication", "sexual satisfaction", "sexual reciprocity" and "sexual self-responsibility". "Sexual communication" comprises the ability to express one's sexual wishes. "Sexual satisfaction" describes the degree of satisfaction with one's own sex life. "Sexual reciprocity" refers to the ability both to accept and to give sexual attention. "Sexual self-responsibility", finally, emphasizes the assessment of the extent to which one's own sexuality can be shaped in a self-determined way. Following Emirbayer and Mische, the sexual experiences of the women and men are regarded as correlates of the assessment of the dimensions of sexual agency. Following Grundmann et al., it is moreover various personality traits and sexual-moral attitudes whose characteristics permit statements about sexual agency. To examine the topic of sexual agency empirically, 695 young residents of Potsdam aged 19 to 21 were surveyed in 2006 about their sexual and relationship experiences within the framework of a standardized survey. The empirical analyses support a co-constructive view of the development of sexual agency: it does not arise within the individual alone, but within the individual's processes of interaction and negotiation with the others of his or her social and sexual environment. Both the experiences of one's sexual biography and the personality traits of each individual prove to be important.
Nur geringf{\"u}gig erscheinen die erfragten sexualmoralischen Ansichten von Bedeutung.}, language = {de} } @phdthesis{Reininghaus2009, author = {Reininghaus, Karl}, title = {"So sind die Zeichen in der Welt" : Beobachtungen zur Rezeption von eher Wenigem Friedrich H{\"o}lderlins}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-35723}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Wenn in einem verbrannten Geb{\"a}ude, in dem sich unliebsame Untermieter breit gemacht haben, diese damit beginnen, Stein f{\"u}r Stein das restliche Gem{\"a}uer abzutragen, um die verbliebenen Fenster zuzumauern, wird es Zeit, mit mehr oder weniger freundlichen Worten die Bewohner des Hauses zu verweisen, die nur verhindern, dass sich neue Besucher dem Gel{\"a}nde n{\"a}hern. Daf{\"u}r muss freilich alter und neuer Behang von den W{\"a}nden genommen werden; und eben durch die Verbannung all dessen, was nicht an diesen Ort geh{\"o}rt, kann ein freundliches Bild des Dichters Friedrich H{\"o}lderlin erhalten bleiben, der nicht nur poetisch, sondern auch zwischenmenschlich einigen bedeutenden Pers{\"o}nlichkeiten seiner Zeit im Wege stand, was ihm wohl in beiden Aspekten zum Verh{\"a}ngnis geworden ist, weil er sich weder verwandtschaftlich noch im Rahmen des poetischen Gesch{\"a}fts gegen seine intrigante Umgebung zur Wehr zu setzen wusste. Der etwas l{\"a}nger geratene Aufsatz „So sind die Zeichen in der Welt" soll weder ein neues Heiligenbild schaffen noch einen frisch aus der Tasche gezogenen Popstar zum Liebhaben, sondern will behutsam einige Fresken des Gedankengeb{\"a}udes H{\"o}lderlin f{\"u}r diejenigen freilegen, deren Bild des Dichters noch nicht v{\"o}llig von der Vorstellung des wahnsinnig gewordenen Dichters {\"u}bert{\"u}ncht worden ist. Obwohl sich die Arbeit damit ganz bewusst den H{\"o}lderlin - Studien von Pierre Bertaux anschließt, setzt sie sich auch mit dieser Wahrnehmungslinie kritisch auseinander, indem sie neben biographischen Anmerkungen auch stil- und ideologiekritische Methoden einsetzt, um die manchmal un{\"u}bersichtliche Quellenlage ein wenig durchsichtiger zu machen, als dies bisher der Fall ist. {\"U}ber eine solche, in Einzelheiten vielleicht unorthodox wirkende Darstellung hinaus will die Arbeit die Behandlungsm{\"o}glichkeit von Friedrich H{\"o}lderlin im Deutschunterricht des Gymnasiums nahe legen, weil selbst J{\"u}ngeres {\"u}ber ihn behandelt wird, das darauf hinweist, inwiefern die Marginalisierung dieses Poeten damit zu tun hat, dass er w{\"a}hrend eines langen Abschnitts der Literaturgeschichte auch daf{\"u}r verantwortlich gemacht wurde, was er weder geschrieben hat noch meinte. Die Intention der Arbeit besteht insgesamt in der Vorstellung, das Gedankengut H{\"o}lderlins m{\"u}sse aus dem breiten Strom einer konservativen Wahrnehmungstradition entfernt werden (zu der beispielsweise auch die dramatische H{\"o}lderlin - Bearbeitung E. Jelineks Wolken.Heim. gez{\"a}hlt werden kann, selbst wenn sie widerborstig gemeint sein sollte) und dieser Dichter sei als realistischer Denker zu restaurieren, der sich deshalb dem Literaturbetrieb seiner Zeit entgezogen hat, weil er, selbst der Lebensf{\"u}hrung nach, sehr fr{\"u}h die Bewegungen gegen franz{\"o}sische Aufkl{\"a}rung und Revolution begriffen hat - und von deren massiver Ablehnung H{\"o}lderlin bis heute getroffen wird. 
Da Friedrich H{\"o}lderlin aber nicht nur ideologischer Betrachtung, Kritik und Verf{\"a}lschung ausgesetzt ist, sondern auch regelm{\"a}ßig Gegenstand umfangreicher biographisch - psychologischer Spekulationen darstellt, wurde dieser Aspekt nicht nur bezogen auf die Rezeptionsgeschichte untersucht, sondern am Gegenstand selbst. In diesem Zusammenhang konnte nicht nur eine bislang vernachl{\"a}ssigte Beziehung H{\"o}lderlins zu Sophie Mereau rekonstruiert und der Verdacht zur{\"u}ckgewiesen werden, es habe zur selben Zeit eine homoerotische Beziehung zu Isaak Sinclair bestanden, vielmehr gelang auch der Nachweis, dass das Verh{\"a}ltnis des Dichters zu Susette Gontard weder singul{\"a}r noch konkurrenzlos gewesen ist, weshalb sich eine eindeutige Zuordnung dieser Frau zur poetischen Figur der Diotima verbietet. Dazu wurde einerseits der Umstand entmythologisiert, nach dem die Liebe zur Frankfurter Zeit platonisch betrieben wurde, andererseits aber diese Affaire den Verh{\"a}ltnissen H{\"o}lderlins zu anderen Frauen zugeordnet, mit denen sich Frau Gontard - letztlich erfolglos - dadurch auseinander zu setzen versuchte, dass sie die Rolle Diotimas okkupierte. Dabei ließ sich schließlich der Verdacht erh{\"a}rten, die stabilste Bindung des Dichters an eine Frau sei die zur eigenen Schwester Heinrike gewesen, mit der bis zum Bruch von Bordeaux aus zwar unregelm{\"a}ßig, aber emotional immer wieder ausufernde Briefe getauscht wurden. Es ist nicht ohne Ironie, wenn ausgerechnet im vielleicht bekanntesten Gedicht H{\"o}lderlins, der „H{\"a}lfte des Lebens", in dem regelm{\"a}ßig ein bedeutender philosophischer Entwurf gesehen wird, Rudimente eines Textes enthalten sind, der - eindeutig erotisch konnotiert - an die eigene Schwester gerichtet ist.}, language = {de} } @phdthesis{Lontsi2016, author = {Lontsi, Agostiny Marrios}, title = {1D shallow sedimentary subsurface imaging using ambient noise and active seismic data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103807}, school = {Universit{\"a}t Potsdam}, pages = {xix, 119}, year = {2016}, abstract = {The Earth's shallow subsurface with sedimentary cover acts as a waveguide to any incoming wavefield. Within the framework of my thesis, I focused on the characterization of this shallow subsurface within tens to few hundreds of meters of sediment cover. I imaged the seismic 1D shear wave velocity (and possibly the 1D compressional wave velocity). This information is not only required for any seismic risk assessment, geotechnical engineering or microzonation activities, but also for exploration and global seismology where site effects are often neglected in seismic waveform modeling. First, the conventional frequency-wavenumber (f - k) technique is used to derive the dispersion characteristic of the propagating surface waves recorded using distinct arrays of seismometers in 1D and 2D configurations. Further, the cross-correlation technique is applied to seismic array data to estimate the Green's function between receivers pairs combination assuming one is the source and the other the receiver. With the consideration of a 1D media, the estimated cross-correlation Green's functions are sorted with interstation distance in a virtual 1D active seismic experiment. The f - k technique is then used to estimate the dispersion curves. This integrated analysis is important for the interpretation of a large bandwidth of the phase velocity dispersion curves and therefore improving the resolution of the estimated 1D Vs profile. 
Second, a new theoretical approach based on the Diffuse Field Assumption (DFA) is used for the interpretation of the observed microtremor H/V spectral ratio. The theory is further extended in this research work to include not only the interpretation of the H/V measured at the surface, but also the H/V measured at depth and in marine environments. Modeling and inversion of synthetic H/V spectral ratio curves on simple predefined geological structures show an almost perfect recovery of the model parameters (mainly Vs and, to a lesser extent, Vp). These results are obtained after information from a receiver at depth has been considered in the inversion. Finally, the Rayleigh wave phase velocity information, estimated from array data, and the H/V(z, f) spectral ratio, estimated from single-station data, are combined and inverted for the velocity profile information. The obtained results indicate an improved depth resolution in comparison to estimations using the phase velocity dispersion curves only. The overall estimated sediment thickness is comparable to estimations obtained by inverting the full microtremor H/V spectral ratio.}, language = {en} } @phdthesis{Amour2013, author = {Amour, Fr{\'e}d{\'e}ric}, title = {3-D modeling of shallow-water carbonate systems : a scale-dependent approach based on quantitative outcrop studies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66621}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The study of outcrop modeling is located at the interface between two fields of expertise, Sedimentology and Computing Geoscience, which respectively investigate and simulate the geological heterogeneity observed in the sedimentary record. Over the past years, modeling tools and techniques have been constantly improved. In parallel, the study of Phanerozoic carbonate deposits has emphasized the common occurrence of a random facies distribution along a single depositional domain. Although both fields of expertise are intrinsically linked during outcrop simulation, their respective advances have not been combined in the literature to enhance carbonate modeling studies. The present study re-examines the modeling strategy adapted to the simulation of shallow-water carbonate systems, based on a close relationship between field sedimentology and modeling capabilities. In the present study, the evaluation of three commonly used algorithms, Truncated Gaussian Simulation (TGSim), Sequential Indicator Simulation (SISim), and Indicator Kriging (IK), was performed for the first time using visual and quantitative comparisons on an ideally suited carbonate outcrop. The results show that the heterogeneity of carbonate rocks cannot be fully simulated using one single algorithm. The operating mode of each algorithm involves capabilities as well as drawbacks, and none is able to match all field observations carried out across the modeling area. Two end members in the spectrum of carbonate depositional settings, a low-angle Jurassic ramp (High Atlas, Morocco) and a Triassic isolated platform (Dolomites, Italy), were investigated to obtain a complete overview of the geological heterogeneity in shallow-water carbonate systems. Field sedimentology and statistical analysis, performed on the type, morphology, distribution, and association of carbonate bodies and combined with palaeodepositional reconstructions, emphasize similar results.
At the basin scale (~1 km), facies associations, composed of facies recording similar depositional conditions, display linear and ordered transitions between depositional domains. In contrast, at the bedding scale (~0.1 km), individual lithofacies types show a mosaic-like distribution consisting of an arrangement of spatially independent lithofacies bodies along the depositional profile. The increase in spatial disorder from the basin scale to the bedding scale results from the influence of autocyclic factors on the transport and deposition of carbonate sediments. These scale-dependent types of carbonate heterogeneity are linked with the evaluation of the algorithms in order to establish a modeling strategy that considers both the sedimentary characteristics of the outcrop and the modeling capabilities. A surface-based modeling approach was used to model depositional sequences. Facies associations were populated using TGSim to preserve the ordered trends between depositional domains. At the lithofacies scale, a fully stochastic approach with SISim was applied to simulate a mosaic-like lithofacies distribution. This new workflow is designed to improve the simulation of carbonate rocks by modeling each scale of heterogeneity individually. In contrast to the simulation methods applied in the literature, the present study considers that the use of one single simulation technique is unlikely to correctly model the natural patterns and variability of carbonate rocks. The implementation of different techniques customized for each level of the stratigraphic hierarchy provides the essential computing flexibility to model carbonate systems. Closer feedback between advances in the fields of Sedimentology and Computing Geoscience should be promoted during future outcrop simulations to enhance 3-D geological models.}, language = {en} } @phdthesis{Koyan2024, author = {Koyan, Philipp}, title = {3D attribute analysis and classification to interpret ground-penetrating radar (GPR) data collected across sedimentary environments: Synthetic studies and field examples}, doi = {10.25932/publishup-63948}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-639488}, school = {Universit{\"a}t Potsdam}, pages = {xi, 115, A51}, year = {2024}, abstract = {Today, the shallow subsurface is often investigated with non-destructive or minimally invasive methods of applied geophysics when questions from civil engineering, archaeology, geology or hydrology are addressed. One area that is taking on an increasingly central role in research and engineering is the investigation of sedimentary environments, for example for the characterization of shallow groundwater systems. A method frequently employed in this context is ground-penetrating radar (GPR). Here, short electromagnetic pulses are emitted into the subsurface by an antenna, where they are reflected, refracted or scattered at contrasts in electromagnetic properties (such as the groundwater table). A receiving antenna records these signals in the form of their amplitudes and travel times. An analysis of the recorded signals permits statements about the subsurface, for example about the depth of the groundwater table or the bedding and characteristics of near-surface sediment layers.
Thanks to the high resolution of the GPR method and continuous technological developments, GPR data are nowadays increasingly recorded in 3D. Despite the considerable time and technical effort required for data acquisition and processing, the resulting 3D data sets, which image the subsurface at high resolution, are typically interpreted by hand. As a rule, this is an extremely time-consuming analysis step. Therefore, representative 2D sections in which prominent reflection structures are marked are often selected from the 3D data set. Based on these structures, similar regions of the subsurface are then grouped into so-called radar facies. The results obtained from the 2D sections are then regarded as representative of the entire investigated area. Interpretations carried out in this way are consequently often incomplete and, moreover, highly dependent on the expertise of the interpreters, and therefore generally not reproducible. A promising alternative or complement to manual interpretation is the use of so-called GPR attributes. Here, it is not the recorded data themselves that are used for interpretation, but quantities derived from them that characterize the prominent reflection structures in 3D. Using various field and model data sets, this thesis investigates which attributes are particularly suitable for this purpose. In addition, this thesis shows how selected attributes can be used, by means of dedicated processing and classification methods, to construct 3D facies models. Thanks to the possibility of constructing such attribute-based 3D GPR facies models, future interpretations can be partly automated and thus carried out more efficiently. Furthermore, the results obtained in this way describe the investigated subsurface in a reproducible manner and more comprehensively than has typically been possible so far with manual interpretation methods.}, language = {en} } @phdthesis{Holz2013, author = {Holz, Christian}, title = {3D from 2D touch}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67796}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {While interaction with computers used to be dominated by mice and keyboards, new types of sensors now allow users to interact through touch, speech, or using their whole body in 3D space. These new interaction modalities are often referred to as "natural user interfaces" or "NUIs." While 2D NUIs have experienced major success on billions of mobile touch devices sold, 3D NUI systems have so far been unable to deliver a mobile form factor, mainly due to their use of cameras. The fact that cameras require a certain distance from the capture volume has prevented 3D NUI systems from reaching the flat form factor mobile users expect. In this dissertation, we address this issue by sensing 3D input using flat 2D sensors. The systems we present observe the input from 3D objects as 2D imprints upon physical contact. By sampling these imprints at very high resolutions, we obtain the objects' textures. In some cases, a texture uniquely identifies a biometric feature, such as the user's fingerprint. In other cases, an imprint stems from the user's clothing, such as when walking on multitouch floors.
By analyzing from which part of the 3D object the 2D imprint results, we reconstruct the object's pose in 3D space. While our main contribution is a general approach to sensing 3D input on 2D sensors upon physical contact, we also demonstrate three applications of our approach. (1) We present high-accuracy touch devices that allow users to reliably touch targets that are a third of the size of those on current touch devices. We show that different users and 3D finger poses systematically affect touch sensing, which current devices perceive as random input noise. We introduce a model for touch that compensates for this systematic effect by deriving the 3D finger pose and the user's identity from each touch imprint. We then investigate this systematic effect in detail and explore how users conceptually touch targets. Our findings indicate that users aim by aligning visual features of their fingers with the target. We present a visual model for touch input that eliminates virtually all systematic effects on touch accuracy. (2) From each touch, we identify users biometrically by analyzing their fingerprints. Our prototype Fiberio integrates fingerprint scanning and a display into the same flat surface, solving a long-standing problem in human-computer interaction: secure authentication on touchscreens. Sensing 3D input and authenticating users upon touch allows Fiberio to implement a variety of applications that traditionally require the bulky setups of current 3D NUI systems. (3) To demonstrate the versatility of 3D reconstruction on larger touch surfaces, we present a high-resolution pressure-sensitive floor that resolves the texture of objects upon touch. Using the same principles as before, our system GravitySpace analyzes all imprints and identifies users based on their shoe soles, detects furniture, and enables accurate touch input using feet. By classifying all imprints, GravitySpace detects the users' body parts that are in contact with the floor and then reconstructs their 3D body poses using inverse kinematics. GravitySpace thus enables a range of applications for future 3D NUI systems based on a flat sensor, such as smart rooms in future homes. We conclude this dissertation by looking ahead to the future of mobile devices. Focusing on the mobility aspect of our work, we explore how NUI devices may one day augment users directly in the form of implanted devices.}, language = {en} } @phdthesis{Vranic2019, author = {Vranic, Marija}, title = {3D Structure of the biomarker hepcidin-25 in its native state}, doi = {10.25932/publishup-45929}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-459295}, school = {Universit{\"a}t Potsdam}, pages = {xii, 135}, year = {2019}, abstract = {Hepcidin-25 (Hep-25) plays a crucial role in the control of iron homeostasis. Since dysfunction of the hepcidin pathway leads to multiple diseases as a result of iron imbalance, hepcidin represents a potential target for the diagnosis and treatment of disorders of iron metabolism. Despite intense research over the last decade aimed at developing a selective immunoassay for the diagnosis and treatment of iron disorders and at better understanding the ferroportin-hepcidin interaction, questions remain. The key to resolving these questions is exact knowledge of the 3D structure of native Hep-25.
Since it was determined that the N-terminus, which is responsible for the bioactivity of Hep-25, contains a small Cu(II)-binding site known as the ATCUN motif, it was assumed that the Hep-25-Cu(II) complex is the native, bioactive form of hepcidin. This structure has thus far not been elucidated in detail. Owing to the lack of structural information on metal-bound Hep-25, little is known about its possible biological role in iron metabolism. Therefore, this work focuses on the structural characterization of metal-bound Hep-25 by NMR spectroscopy and molecular dynamics simulations. For the present work, a protocol was developed to prepare and purify properly folded Hep-25 in high quantities. In order to overcome the low solubility of Hep-25 at neutral pH, we introduced a C-terminal DEDEDE solubility tag. The metal binding was investigated through a series of NMR spectroscopic experiments to identify the amino acids most affected by, and mediating, metal coordination. Based on the obtained NMR data, a structural calculation was performed in order to generate a model structure of the Hep-25-Ni(II) complex. The DEDEDE tag was excluded from the structural calculation due to a lack of NMR restraints. The dynamic nature and fast exchange of some of the amide protons with the solvent reduced the overall number of NMR restraints available for a high-quality structure. The NMR data revealed that the 20 C-terminal Hep-25 amino acids experienced no significant conformational changes, compared to published results, as a result of a pH change from pH 3 to pH 7 and metal binding. A 3D model of the Hep-25-Ni(II) complex was constructed from NMR data recorded for the hexapeptide-Ni(II) complex and the Hep-25-DEDEDE-Ni(II) complex in combination with the fixed conformation of the 19 C-terminal amino acids. The NMR data of the Hep-25-DEDEDE-Ni(II) complex indicate that the ATCUN motif moves independently of the rest of the structure. The 3D model structure of metal-bound Hep-25 provides a basis for future work to elucidate hepcidin's interaction with its receptor ferroportin and should serve as a starting point for the development of antibodies with improved selectivity.}, language = {en} } @phdthesis{Schmoll2001, author = {Schmoll, J{\"u}rgen}, title = {3D-Spektrofotometrie extragalaktischer Emissionslinienobjekte}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000372}, school = {Universit{\"a}t Potsdam}, year = {2001}, abstract = {Popular-science abstract: Observational optical astronomy has so far followed two different approaches: on the one hand, objects are imaged with cameras; on the other hand, spectra are obtained by decomposing their light as a function of wavelength. The integral field technique is a relatively new method that unites these two modes of observation. The object image in the telescope focus is dissected spatially, and each spatial element is fed to a common spectrograph. In this way the object is not only captured in two spatial dimensions; in addition, the spectral component is obtained as a third dimension, which is why the technique is also referred to as the 3D method. The resulting data can be pictured as an image in which each individual pixel no longer contains just a single intensity value but an entire spectrum. Unlike conventional slit spectrographs, this technique makes it possible to capture extended objects in their entirety.
Die besondere St{\"a}rke der Methode ist die M{\"o}glichkeit, die Hintergrundkontamination der unmittelbaren Umgebung des Objektes zu erfassen und in der Auswertung zu ber{\"u}cksichtigen. Durch diese F{\"a}higkeit erscheint die 3D-Methode pr{\"a}destiniert f{\"u}r den durch moderne Großteleskope erschlossenen Bereich der extragalaktischen Stellarastronomie. Die detaillierte Untersuchung aufgel{\"o}ster stellare Populationen in nahegelegenen Galaxien ist erst seit kurzer Zeit dank der Fortschritte mit modernen Grossteleskopen und fortschrittlicher Instrumentierung m{\"o}glich geworden. Wegen der Bedeutung f{\"u}r die Entstehung und Evolution von Galaxien werden diese Arbeiten zuk{\"u}nftig weiter an Bedeutung gewinnen. In der vorliegenden Arbeit wurde die Integral-Field-Spektroskopie an zwei planetarischen Nebeln in der n{\"a}chstgelegenen großen Spiralgalaxie M31 (NGC 224) getestet, deren Helligkeiten und Koordinaten aus einer Durchmusterung vorlagen. Hierzu wurden Beobachtungen mit dem MPFS-Instrument am russischen 6m - Teleskop in Selentschuk/Kaukasus sowie mit INTEGRAL/WYFFOS am englischen William-Herschel-Teleskop auf La Palma gewonnen. Ein {\"u}berraschendes Ergebnis war, daß eins der beiden Objekte falsch klassifiziert wurde. Sowohl die meßbare r{\"a}umliche Ausdehnung des Objektes als auch das spektrale Erscheinungsbild schlossen die Identit{\"a}t mit einem planetarischen Nebel aus. Mit hoher Wahrscheinlichkeit handelt es sich um einen Supernova{\"u}berrest, zumal im Rahmen der Fehler an gleicher Stelle eine vom R{\"o}ntgensatelliten ROSAT detektierte R{\"o}ntgenquelle liegt. Die in diesem Projekt verwendeten Integral-Field-Instrumente wiesen zwei verschiedene Bauweisen auf, die sich miteinander vergleichen ließen. Ein Hauptkritikpunkt der verwendeten Instrumente war ihre geringe Lichtausbeute. Die gesammelten Erfahrung fanden Eingang in das Konzept des derzeit in Potsdam in der Fertigung befindlichen 3D-Instruments PMAS (Potsdamer Multi - Apertur - Spektrophotometer), welcher zun{\"a}chst f{\"u}r das 3.5m-Teleskop des Calar - Alto - Observatoriums in S{\"u}dspanien vorgesehen ist. Um die Effizienz dieses Instrumentes zu verbessern, wurde in dieser Arbeit die Kopplung der zum Bildrasterung verwendeten Optik zu den Lichtleitfasern im Labor untersucht. Die Untersuchungen zur Maximierung von Lichtausbeute und Stabilit{\"a}t zeigen, daß sich die Effizienz durch Auswahl einer geeigneten Koppelmethode um etwa 20 Prozent steigern l{\"a}sst.}, language = {de} } @phdthesis{Engelhardt2018, author = {Engelhardt, Jonathan}, title = {40Ar/39Ar geochronology of ICDP PALEOVAN drilling cores}, doi = {10.25932/publishup-42953}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429539}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 338}, year = {2018}, abstract = {The scientific drilling campaign PALEOVAN was conducted in the summer of 2010 and was part of the international continental drilling programme (ICDP). The main goal of the campaign was the recovery of a sensitive climate archive in the East of Anatolia. Lacustrine deposits underneath the lake floor of 'Lake Van' constitute this archive. The drilled core material was recovered from two locations: the Ahlat Ridge and the Northern Basin. A composite core was constructed from cored material of seven parallel boreholes at the Ahlat Ridge and covers an almost complete lacustrine history of Lake Van. 
The composite record offered sensitive climate proxies such as variations in total organic carbon, K/Ca ratios, or the relative abundance of arboreal pollen. These proxies revealed patterns that are similar to climate proxy variations from Greenland ice cores. Climate variations in Greenland ice cores have been dated by modelling the timing of orbital forcing of the climate. Volatiles from melted ice aliquots are often taken as high-resolution proxies and provide a basis for fitting the corresponding temporal models. The ICDP PALEOVAN scientific team fitted proxy data from the lacustrine drilling record to ice core data and constructed an age model. Embedded volcaniclastic layers had to be dated radiometrically in order to provide independent age constraints for the climate-stratigraphic age model. Solving this task by applying the 40Ar/39Ar method was the main objective of this thesis. Earlier efforts to apply 40Ar/39Ar dating resulted in inaccuracies that could not be explained satisfactorily. The absence of K-rich feldspars in suitable tephra layers implied that feldspar crystals needed to be at least 500 μm in size in order to apply single-crystal 40Ar/39Ar dating. Some of the samples did not contain any crystals of these grain sizes, or only very few. In order to overcome this problem, this study applied a combined single-crystal and multi-crystal approach with different crystal fractions from the same sample. The preferred method, stepwise heating analysis of an aliquot of feldspar crystals, was applied to three samples. The Na-rich crystals and their young geological age required 20 mg of inclusion-free, non-corroded feldspars. Small sample volumes (usually 25 \% aliquots of 5 cm3 of sample material - a spoonful of tephra) and the widespread presence of melt inclusions led to the application of combined single- and multigrain total fusion analyses. 40Ar/39Ar analyses on single crystals have the advantage of being able to monitor the presence of excess 40Ar and of detrital or xenocrystic contamination in the samples. Multigrain analyses may hide these effects. The results from the multigrain analyses are therefore discussed with respect to the findings from the corresponding cogenetic single-crystal ages. Some of the samples in this study were dated by applying 40Ar/39Ar to multigrain feldspar separates, (where available) in combination with a few single crystals. The 40Ar/39Ar ages of two of the samples deviated statistically from the age model; all other samples resulted in identical ages. The deviating samples yielded older ages than those obtained from the age model. t-Tests compared the radiometric ages with available age control points from various proxies and from the relative paleointensity of the Earth's magnetic field within a stratigraphic range of ± 10 m. Concordant age control points from different relative chronometers indicated that the deviations are a result of erroneous 40Ar/39Ar ages. The thesis discusses two potential reasons for these ages: (1) the irregular occurrence of 40Ar from rare melt and fluid inclusions and (2) the contamination of the samples with older crystals due to a rapid combination of assimilation and ejection. Another aliquot of the feldspar crystals that underwent separation for 40Ar/39Ar dating was investigated for geochemical inhomogeneities. Magmatic zoning is ubiquitous in the volcaniclastic feldspar crystals. Four different types of magmatic zoning were detected.
The zoning types are compositional zoning (C-type zoning), pseudo-oscillatory zoning of trace element concentrations (PO-type zoning), chaotic and patchy zoning of major and trace element concentrations (R-type zoning) and concentric zoning of trace elements (CC-type zoning). Samples with deviating 40Ar/39Ar ages showed C-type zoning, R-type zoning or a mix of different types of zoning (C-type and PO-type). Feldspars showing PO-type zoning typically represent the smallest grain-size fractions in the samples. The constant major element compositions of these crystals are interpreted to represent the latest stages in the compositional evolution of feldspars in a peralkaline melt. PO-type crystals contain fewer melt inclusions than other zoning types and are rarely corroded. This thesis concludes that feldspars showing PO-type zoning are the most promising chronometers for the 40Ar/39Ar method if samples provide mixed zoning types of Quaternary anorthoclase feldspars. Five samples were dated by applying the 40Ar/39Ar method to volcanic glass. High fractions of atmospheric Ar (typically > 98\%) significantly hampered the precision of the 40Ar/39Ar ages and resulted in rough age estimates that widely overlap with the age model. The Ar isotopes indicated that the glasses bore a chlorine-rich Ar end member. The chlorine-derived 38Ar indicated chlorine-rich fluid inclusions or hydration of the volcanic glass shards. This strengthened the evidence that irregularly distributed melt inclusions, and thus irregularly distributed excess 40Ar, influenced the problematic feldspar 40Ar/39Ar ages. Whether a connection exists between the corrected initial 40Ar/36Ar ratio from the glasses and the 40Ar/36Ar ratios from pore waters remains unclear. This thesis offers another age model, which is similarly based on the interpolation of temporal tie points from geophysical and climate-stratigraphic data. This model used a PCHIP interpolation (piecewise cubic Hermite interpolating polynomial), whereas the older age model used a spline interpolation. Samples whose feldspar 40Ar/39Ar ages match the earlier published age model were additionally assigned an age from the PCHIP interpolation. These modelled ages allowed a recalculation of the Alder Creek sanidine mineral standard. The climate-stratigraphic calibration of a 40Ar/39Ar mineral standard proved that the age-versus-depth interpolations from the PALEOVAN drilling cores are accurate and that the applied chronometers recorded the temporal evolution of Lake Van synchronously. A petrochemical discrimination of the sampled volcaniclastic material is also given in this thesis. 41 of the 57 sampled volcaniclastic layers indicate Nemrut as their provenance. The criteria that served for the provenance assignment are provided and reviewed critically. Detailed correlations of selected PALEOVAN volcaniclastics with onshore samples that were described in detail by earlier studies are also discussed. The sampled volcaniclastics predominantly have a thickness of < 40 cm and were ejected by small- to medium-sized eruptions. Onshore deposits from these types of eruptions are potentially eroded due to the predominant strong winds on the Nemrut and S{\"u}phan slopes. An exact correlation with the data presented here is therefore equivocal or not possible at all. Deviating feldspar 40Ar/39Ar ages can possibly be explained by inherited 40Ar from feldspar xenocrysts contaminating the samples.
In order to test this hypothesis, diffusion couples of Ba were investigated in compositionally zoned feldspar crystals. The diffusive behaviour of Ba in feldspar is known, and gradients in the changing concentrations allowed the duration of a crystal's magmatic development since the formation of the zoning interface to be calculated. These durations were compared with degassing scenarios that model the Ar loss during assimilation and subsequent ejection of the xenocrysts. Diffusive equilibration of the contrasting Ba concentrations is assumed to yield maximum durations, as the gradient could have developed in several growth and heating stages. The modelling does not show any indication of an involvement of inherited 40Ar in any of the deviating samples. However, the analytical set-up represents the lower limit of the required spatial resolution. Therefore, it cannot be excluded that the degassing modelling relies on a significant overestimation of the maximum duration of the magmatic history. Nevertheless, the modelling of xenocryst degassing provides evidence that the irregular incorporation of excess 40Ar by melt and fluid inclusions represents the most critical problem that needs to be overcome in dating volcaniclastic feldspars from the PALEOVAN drill cores. This thesis provides the complete background for generating and presenting 40Ar/39Ar ages that are compared to age data from a climate-stratigraphic model. Deviations are identified statistically and then discussed in order to find explanations from the age model and/or from 40Ar/39Ar geochronology. Most of the PALEOVAN stratigraphy provides several chronometers that have been proven to be synchronous. Lacustrine deposits from Lake Van represent a key archive for reconstructing the climate evolution of the eastern Mediterranean and the Near East. The PALEOVAN record offers a climate-stratigraphic age model with remarkable accuracy and resolution.}, language = {en} } @phdthesis{Swierczynski2012, author = {Swierczynski, Tina}, title = {A 7000 yr runoff chronology from varved sediments of Lake Mondsee (Upper Austria)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66702}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {The potential increase in the frequency and magnitude of extreme floods is currently discussed in the context of global warming and the intensification of the hydrological cycle. Profound knowledge of the past natural variability of floods is of utmost importance for assessing future flood risk. Since instrumental flood series cover only the last ~150 years, other approaches are needed to reconstruct historical and pre-historical flood events. Annually laminated (varved) lake sediments are meaningful natural geoarchives because they provide continuous records of environmental changes spanning more than 10000 years, down to seasonal resolution. Since lake basins additionally act as natural sediment traps, the riverine sediment supply, which is preserved as detrital event layers in the lake sediments, can be used as a proxy for extreme discharge events. Within my thesis I examined a ~ 8.50 m long sedimentary record from the pre-Alpine Lake Mondsee (Northeast European Alps), which covers the last 7000 years. This sediment record consists of calcite varves and intercalated detrital layers, which range in thickness from 0.05 to 32 mm.
The deposition of detrital layers was analysed by a combined approach of microfacies analysis via thin sections, scanning electron microscopy (SEM), μX-ray fluorescence (μXRF) scanning and magnetic susceptibility measurements. This approach allows individual detrital event layers to be characterized and a corresponding input mechanism and catchment to be assigned. Using a chronology based on varve counting and controlled by 14C age dates, the main goals of this thesis are (i) to identify the seasonal runoff processes that lead to significant sediment supply from the catchment into the lake basin and (ii) to investigate flood frequency under changing climate boundary conditions. This thesis proceeds along different time slices, presenting an integrative approach that links instrumental and historical flood data from Lake Mondsee in order to evaluate the flood record inferred from the Lake Mondsee sediments. The investigation of eleven short cores covering the last 100 years reveals 12 detrital layers. Two types of detrital layers are distinguished therein by grain size, geochemical composition and distribution pattern within the lake basin. Detrital layers that are enriched in siliciclastic and dolomitic material reveal sediment supply from the Flysch sediments and the Northern Calcareous Alps into the lake basin. These layers are thicker in the northern lake basin (0.1-3.9 mm) and thinner in the southern lake basin (0.05-1.6 mm). Detrital layers that are enriched in dolomitic components and form graded detrital layers (turbidites) indicate a provenance from the Northern Calcareous Alps. These layers are generally thicker (0.65-32 mm) and are recorded solely within the southern lake basin. Comparison with instrumental data shows that thicker graded layers result from local debris flow events in summer, whereas thin layers are deposited during regional flood events in spring/summer. Extreme summer floods, as recorded by flood layer deposition, are principally caused by cyclonic activity from the Mediterranean Sea, e.g. in July 1954, July 1997 and August 2002. During the last two millennia, the Lake Mondsee sediments reveal two significant flood intervals with decadal-scale flood episodes, during the Dark Ages Cold Period (DACP) and during the transition from the Medieval Climate Anomaly (MCA) into the Little Ice Age (LIA), suggesting a link between transitions to cooler climate and summer flood recurrence in the Northeastern Alps. In contrast, intermediate or decreased flood activity appeared during the MCA and the LIA. This indicates a non-straightforward relationship between temperature and flood recurrence, suggesting higher cyclonic activity during climate transitions in the Northeastern Alps. The 7000-year flood chronology reveals 47 debris flows and 269 floods, with shifts towards increased flood activity around 3500 and 1500 varve yr BP (varve yr BP = varve years before present, before present = AD 1950). This significant increase in flood activity coincides with millennial-scale climate cooling that is reported from major Alpine glacier advances and lower tree lines in the European Alps since about 3300 cal. yr BP (calibrated years before present). Despite the relatively low flood occurrence prior to 1500 varve yr BP, floods at Lake Mondsee could also have influenced human life in the early Neolithic lake dwellings (5750-4750 cal. yr BP).
While the first lake dwellings were constructed on wetlands, the later lake dwellings were built on piles in the water, suggesting an early flood-risk adaptation by humans and/or a general change of the Late Neolithic culture of lake-dwellers for socio-economic reasons. However, a direct relationship between the final abandonment of the lake dwellings and higher flood frequencies is not evidenced.}, language = {en} } @phdthesis{Hesse2022, author = {Hesse, G{\"u}nter}, title = {A benchmark for enterprise stream processing architectures}, doi = {10.25932/publishup-56600}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-566000}, school = {Universit{\"a}t Potsdam}, pages = {ix, 148}, year = {2022}, abstract = {Data stream processing systems (DSPSs) are a key enabler for integrating continuously generated data, such as sensor measurements, into enterprise applications. DSPSs allow information from data streams to be analyzed continuously, e.g., to monitor manufacturing processes and enable fast reactions to anomalous behavior. Moreover, DSPSs continuously filter, sample, and aggregate incoming streams of data, which reduces the data size, and thus data storage costs. The growing volumes of generated data have increased the demand for high-performance DSPSs, leading to a higher interest in these systems and to the development of new DSPSs. While having more DSPSs is favorable for users as it allows choosing the system that best satisfies their requirements, it also introduces the challenge of identifying the most suitable DSPS regarding current needs as well as future demands. Having a solution to this challenge is important because replacing a DSPS requires the costly re-writing of applications if no abstraction layer is used for application development. However, quantifying performance differences between DSPSs is a difficult task. Existing benchmarks fail to integrate all core functionalities of DSPSs and lack tool support, which hinders objective result comparisons. Moreover, no current benchmark covers the combination of streaming data with existing structured business data, which is particularly relevant for companies. This thesis proposes a performance benchmark for enterprise stream processing called ESPBench. With enterprise stream processing, we refer to the combination of streaming and structured business data. Our benchmark design represents real-world scenarios and allows for an objective result comparison as well as scaling of data. The defined benchmark query set covers all core functionalities of DSPSs. The benchmark toolkit automates the entire benchmark process and provides important features, such as query result validation and a configurable data ingestion rate. To validate ESPBench and to ease the use of the benchmark, we propose an example implementation of the ESPBench queries leveraging the Apache Beam software development kit (SDK). The Apache Beam SDK is an abstraction layer designed for developing stream processing applications that is applied in academic as well as enterprise contexts. It allows the defined applications to run on any of the supported DSPSs. The performance impact of Apache Beam is studied in this dissertation as well. The results show that there is a significant influence that differs among DSPSs and stream processing applications. For validating ESPBench, we use the example implementation of the ESPBench queries developed using the Apache Beam SDK.
We benchmark the implemented queries executed on three modern DSPSs: Apache Flink, Apache Spark Streaming, and Hazelcast Jet. The results of the study demonstrate the functioning of ESPBench and its toolkit. ESPBench is capable of quantifying performance characteristics of DSPSs and of unveiling differences among systems. The benchmark proposed in this thesis covers all requirements for application in enterprise stream processing settings, and thus represents an improvement over the current state of the art.}, language = {en} } @phdthesis{VillatoroLeal2018, author = {Villatoro Leal, Jos{\'e} Andr{\'e}s}, title = {A combined approach for the analysis of biomolecules using IR-MALDI ion mobility spectrometry and molecular dynamics simulations of peptide ions in the gas phase}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-419723}, school = {Universit{\"a}t Potsdam}, pages = {133}, year = {2018}, abstract = {The aim of this doctoral thesis was to establish a technique for the analysis of biomolecules with infrared matrix-assisted laser desorption/ionization (IR-MALDI) ion mobility (IM) spectrometry. The main components of the work were the characterization of the IR-MALDI process, the development and characterization of different ion mobility spectrometers, the use of IR-MALDI-IM spectrometry as a robust, standalone analytical method, and the development of a collision cross-section estimation approach for peptides based on molecular dynamics and thermodynamic reweighting. First, the IR-MALDI source was studied with atmospheric pressure ion mobility spectrometry and shadowgraphy. It consisted of a metal capillary, at the tip of which a self-renewing droplet of analyte solution was met by an IR laser beam. A relationship between peak shape, ion desolvation, diffusion and extraction pulse delay time (pulse delay) was established. First-order desolvation kinetics were observed and related to peak broadening by diffusion, both influenced by the pulse delay. The transport mechanisms in IR-MALDI were then studied by relating different laser impact positions on the droplet surface to the corresponding ion mobility spectra. Two different transport mechanisms were identified: phase explosion due to the laser pulse and electrical transport due to delayed ion extraction. The velocity of the ions stemming from the phase explosion was then measured by ion mobility and shadowgraphy at different time scales and distances from the source capillary, showing an initially very high but rapidly decaying velocity. Finally, the anatomy of the dispersion plume was observed in detail with shadowgraphy and general conclusions about the process were drawn. Understanding the IR-MALDI process enabled the optimization of the different IM spectrometers at atmospheric and reduced pressure (AP and RP, respectively). At reduced pressure, both an AP and an RP IR-MALDI source were used. The influence of the pulsed ion extraction parameters (pulse delay, width and amplitude) on peak shape, resolution and area was systematically studied in both AP and RP IM spectrometers and discussed in the context of the IR-MALDI process. Under RP conditions, the influence of the closing field and of the pressure was also examined for both AP and RP sources. For the AP ionization RP IM spectrometer, the influence of the inlet field (IF) in the source region was examined as well. All of these studies led to the determination of optimal analytical parameters and to a better understanding of the initial ion cloud anatomy.
The analytical performance of the spectrometer was then studied. Limits of detection (LOD) and linear ranges were determined under static and pulsed ion injection conditions and interpreted in the context of the IR-MALDI mechanism. Applications in the separation of simple mixtures were also illustrated, demonstrating good isomer separation capabilities and the advantages of singly charged peaks. The possibility of coupling high-performance liquid chromatography (HPLC) to IR-MALDI-IM spectrometry was also demonstrated. Finally, the reduced-pressure spectrometer was used to study the effect of high reduced field strengths on the mobility of polyatomic ions in polyatomic gases. The last focus point was the study of peptide ions. A dataset obtained with electrospray IM spectrometry was characterized and used for the calibration of a collision cross-section (CCS) determination method based on molecular dynamics (MD) simulations at high temperature. Instead of producing candidate structures that are evaluated one by one, this semi-automated method uses the simulation as a whole to determine a single average collision cross-section value by reweighting the CCS of a few representative structures. The method was compared to the intrinsic size parameter (ISP) method and to experimental results. Additional MD data obtained from the simulations were also used to further analyze the peptides and to understand the experimental results, an advantage over the ISP method. Finally, the CCS of peptide ions analyzed by IR-MALDI were also evaluated with both the ISP and MD methods and the results compared to experiment, resulting in a first validation of the MD method. Thus, this thesis brings together the soft ionization technique IR-MALDI, which produces mostly singly charged peaks, ion mobility spectrometry, which can distinguish between isomers, and a collision cross-section determination method that also provides structural information on the analyte at hand.}, language = {en} } @phdthesis{Pilz2010, author = {Pilz, Marco}, title = {A comparison of proxies for seismic site conditions and amplification for the large urban area of Santiago de Chile}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52961}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Situated in an active tectonic region, Santiago de Chile, the country's capital with more than six million inhabitants, faces tremendous earthquake hazard. Macroseismic data for the 1985 Valparaiso and the 2010 Maule events show large variations in the distribution of damage to buildings within short distances, indicating a strong influence of the local sediments and of the shape of the sediment-bedrock interface on ground motion. Therefore, a temporary seismic network was installed in the urban area for recording earthquake activity, and a study was carried out aiming to estimate site amplification from earthquake data and ambient noise. The analysis of the earthquake data shows a significant dependence of amplitude and duration on the local geological structure. Moreover, the analysis of noise spectral ratios shows that they can provide a lower bound in amplitude for site amplification and, since no variability in terms of time and amplitude is observed, that it is possible to map the fundamental resonance frequency of the soil for a 26 km x 12 km area in the northern part of the Santiago de Chile basin.
By inverting the noise spectral ratios, local shear wave velocity profiles could be derived under the constraint of the thickness of the sedimentary cover which had previously been determined by gravimetric measurements. The resulting 3D model was derived by interpolation between the single shear wave velocity profiles and shows locally good agreement with the few existing velocity profile data, but allows the entire area, as well as deeper parts of the basin, to be represented in greater detail. The wealth of available data further allowed checking whether any correlation between the shear wave velocity in the uppermost 30 m (vs30) and the slope of topography, a technique recently proposed by Wald and Allen (2007), exists on a local scale. While a single lithology might show considerable scatter in the velocity values for the investigated area, almost no correlation between the topographic gradient and the calculated vs30 exists, whereas a better link is found between vs30 and the local geology. When comparing the vs30 distribution with the MSK intensities for the 1985 Valparaiso event, it becomes clear that high intensities are found where the expected vs30 values are low and over a thick sedimentary cover. Although this evidence cannot be generalized for all possible earthquakes, it indicates the influence of site effects modifying the ground motion when earthquakes occur well outside of the Santiago basin. Using the attained knowledge on the basin characteristics, simulations of strong ground motion within the Santiago Metropolitan area were carried out by means of the spectral element technique. The simulation of a regional event, which has also been recorded by a dense network installed in the city of Santiago for recording aftershock activity following the 27 February 2010 Maule earthquake, shows that the model is capable of realistically calculating ground motion in terms of amplitude, duration, and frequency and, moreover, that the surface topography and the shape of the sediment-bedrock interface strongly modify ground motion in the Santiago basin. An examination of the dependency of ground motion on the hypocenter location for a hypothetical event occurring along the active San Ram{\'o}n fault, which crosses the eastern outskirts of the city, shows that the unfavorable interaction between fault rupture, radiation mechanism, and complex geological conditions in the near-field may give rise to large values of peak ground velocity and therefore considerably increase the level of seismic risk for Santiago de Chile.}, language = {en} } @phdthesis{Awad2010, author = {Awad, Ahmed Mahmoud Hany Aly}, title = {A compliance management framework for business process models}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-49222}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Companies develop process models to explicitly describe their business operations. At the same time, business operations, i.e., business processes, must adhere to various types of compliance requirements. Regulations, e.g., the Sarbanes-Oxley Act of 2002, internal policies, and best practices are just a few sources of compliance requirements. In some cases, non-adherence to compliance requirements makes the organization subject to legal punishment. In other cases, non-adherence leads to loss of competitive advantage and thus loss of market share. Unlike the classical domain-independent behavioral correctness of business processes, compliance requirements are domain-specific. 
Moreover, compliance requirements change over time. New requirements might appear due to changes in laws and the adoption of new policies. Compliance requirements are issued or enforced by different entities that pursue different objectives with these requirements. Finally, compliance requirements might affect different aspects of business processes, e.g., control flow and data flow. As a result, it is infeasible to hard-code compliance checks in tools. Rather, a repeatable process of modeling compliance rules and checking them against business processes automatically is needed. This thesis provides a formal approach to support process design-time compliance checking. Using visual patterns, it is possible to model compliance requirements concerning control flow, data flow and conditional flow rules. Each pattern is mapped into a temporal logic formula. The thesis addresses the problem of consistency checking among various compliance requirements, as they might stem from divergent sources. Also, the thesis contributes an approach to automatically check compliance requirements against process models using model checking. We show that extra domain knowledge, other than that expressed in compliance rules, is needed to reach correct decisions. In case of violations, we are able to provide useful feedback to the user. The feedback is in the form of parts of the process model whose execution causes the violation. In some cases, our approach is capable of providing an automated remedy for the violation.}, language = {en} } @phdthesis{Koc2021, author = {Ko{\c{c}}, Gamze}, title = {A comprehensive analysis of severe flood events in Turkey}, doi = {10.25932/publishup-51785}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-517853}, school = {Universit{\"a}t Potsdam}, pages = {209}, year = {2021}, abstract = {Over the past decades, natural hazards, many of which are aggravated by climate change and reveal an increasing trend in frequency and intensity, have caused significant human and economic losses and pose a considerable obstacle to sustainable development. Hence, dedicated action toward disaster risk reduction is needed to understand the underlying drivers and create efficient risk mitigation plans. Such action is requested by the Sendai Framework for Disaster Risk Reduction 2015-2030 (SFDRR), a global agreement launched in 2015 that establishes priorities for action, e.g., an improved understanding of disaster risk. Turkey is one of the SFDRR contracting countries and has been severely affected by many natural hazards, in particular earthquakes and floods. However, disproportionately little is known about flood hazards and risks in Turkey. Therefore, this thesis aims to carry out the first comprehensive analysis of flood hazards in Turkey, from triggering drivers to impacts. It is intended to contribute to a better understanding of flood risks, improvements in flood risk mitigation, and easier monitoring of progress and achievements in implementing the SFDRR. In order to investigate the occurrence and severity of flooding in comparison to other natural hazards in Turkey and provide an overview of the temporal and spatial distribution of flood losses, the Turkey Disaster Database (TABB) was examined for the years 1960-2014. The TABB database was reviewed through comparison with the Emergency Events Database (EM-DAT), the Dartmouth Flood Observatory database, the scientific literature and news archives. 
In addition, data on the most severe flood events between 1960 and 2014 were retrieved. These served as a basis for analyzing triggering mechanisms (i.e. atmospheric circulation and precipitation amounts) and aggravating pathways (i.e. topographic features, catchment size, land use types and soil properties). For this, a new approach was developed and the events were classified using hierarchical cluster analyses to identify the main influencing factor per event and provide additional information about the dominant flood pathways for severe floods. The main idea of the study was to start with the event impacts based on a bottom-up approach and identify the causes that created damaging events, instead of applying a model chain with long-term series as input and searching for potentially impacting events as model outcomes. However, within the frequency analysis of the flood-triggering circulation pattern types, it was discovered that several heavy-precipitation events were not included in the list of the most severe floods, i.e., their impacts were not recorded in national and international loss databases but were mentioned in news archives and reported by the Turkish State Meteorological Service. This finding challenges bottom-up modelling approaches and underlines the urgent need for consistent event and loss documentation. Therefore, as a next step, the aim was to enhance the flood loss documentation by calibrating, validating and applying the United Nations Office for Disaster Risk Reduction (UNDRR) loss estimation method for the recent severe flood events (2015-2020). This provided a consistent flood loss estimation model for Turkey, allowing governments to estimate losses as quickly as possible after events, e.g., to better coordinate financial aid. This thesis reveals that, after earthquakes, floods have the second most destructive effects in Turkey in terms of human and economic impacts, with over 800 fatalities and US\$ 885.7 million in economic losses between 1960 and 2020, and that they deserve more attention on the national scale. The clustering results of the dominant flood-producing mechanisms (e.g. circulation pattern types, extreme rainfall, sudden snowmelt) provide crucial information for source and pathway identification, which can be used as base information for hazard identification in the preliminary risk assessment process. The implementation of the UNDRR loss estimation model shows that the model with country-specific parameters, calibrated damage ratios and sufficient event documentation (i.e. physically damaged units) can be recommended in order to provide first estimates of the magnitude of direct economic losses, even shortly after events have occurred, since it performed well when estimates were compared to documented losses. The presented results can contribute to improving the national disaster loss database in Turkey and thus enable better monitoring of the national progress and achievements with regard to the targets stated by the SFDRR. In addition, the outcomes can be used to better characterize and classify flood events. Information on the main underlying factors and aggravating flood pathways further supports the selection of suitable risk reduction policies. All input variables used in this thesis were obtained from publicly available data. The results are openly accessible and can be used for further research. 
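The direct-loss logic described above can be sketched in a few lines; the sectors, damage ratios, and unit costs below are hypothetical placeholders, not the calibrated country-specific values of the thesis.

# Direct economic loss in the spirit of the UNDRR method:
# physically damaged units x calibrated damage ratio x replacement cost.
damaged_units = {"housing": 1200, "agriculture_ha": 850, "roads_km": 40}
damage_ratio  = {"housing": 0.35, "agriculture_ha": 0.60, "roads_km": 0.25}
unit_cost_usd = {"housing": 45_000, "agriculture_ha": 2_500, "roads_km": 310_000}

direct_loss = sum(damaged_units[s] * damage_ratio[s] * unit_cost_usd[s]
                  for s in damaged_units)
print(f"Estimated direct loss: US$ {direct_loss:,.0f}")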
As an overall conclusion, it can be stated that consistent loss data collection and better event documentation should receive more attention to enable reliable monitoring of the implementation of the SFDRR. Better event documentation should be established according to a globally accepted standard for disaster classification and loss estimation in Turkey. Ultimately, this enables stakeholders to create better risk mitigation actions based on clear hazard definitions, flood event classification and consistent loss estimations.}, language = {en} } @phdthesis{Yadav2023, author = {Yadav, Himanshu}, title = {A computational evaluation of feature distortion and cue weighting in sentence comprehension}, doi = {10.25932/publishup-58505}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-585055}, school = {Universit{\"a}t Potsdam}, pages = {iv, 115}, year = {2023}, abstract = {Successful sentence comprehension requires the comprehender to correctly figure out who did what to whom. For example, in the sentence John kicked the ball, the comprehender has to figure out who did the action of kicking and what was being kicked. This process of identifying and connecting the syntactically-related words in a sentence is called dependency completion. What are the cognitive constraints that determine dependency completion? A widely-accepted theory is cue-based retrieval. The theory maintains that dependency completion is driven by a content-addressable search for the co-dependents in memory. The cue-based retrieval theory explains a wide range of empirical data from several constructions including subject-verb agreement, subject-verb non-agreement, plausibility mismatch configurations, and negative polarity items. However, there are two major empirical challenges to the theory: (i) Grammatical sentences' data from subject-verb number agreement dependencies, where the theory predicts a slowdown at the verb in sentences like the key to the cabinet was rusty compared to the key to the cabinets was rusty, but the data are inconsistent with this prediction; and, (ii) Data from antecedent-reflexive dependencies, where a facilitation in reading times is predicted at the reflexive in the bodybuilder who worked with the trainers injured themselves vs. the bodybuilder who worked with the trainer injured themselves, but the data do not show a facilitatory effect. The work presented in this dissertation is dedicated to building a more general theory of dependency completion that can account for the above two datasets without losing the original empirical coverage of the cue-based retrieval assumption. In two journal articles, I present computational modeling work that addresses the above two empirical challenges. To explain the grammatical sentences' data from subject-verb number agreement dependencies, I propose a new model that assumes that the cue-based retrieval operates on a probabilistically distorted representation of nouns in memory (Article I). This hybrid distortion-plus-retrieval model was compared against the existing candidate models using data from 17 studies on subject-verb number agreement in 4 languages. I find that the hybrid model outperforms the existing models of number agreement processing, suggesting that the cue-based retrieval theory must incorporate a feature distortion assumption. To account for the absence of a facilitatory effect in antecedent-reflexive dependencies, I propose an individual difference model, which was built within the cue-based retrieval framework (Article II). 
The model assumes that individuals may differ in how strongly they weigh a syntactic cue over a number cue. The model was fitted to data from two studies on antecedent-reflexive dependencies, and the participant-level cue-weighting was estimated. We find that one-fourth of the participants, in both studies, weigh the syntactic cue higher than the number cue in processing reflexive dependencies, and the remaining participants weigh the two cues equally. The result indicates that the absence of the predicted facilitatory effect at the level of grouped data is driven by some, not all, participants who weigh the syntactic cue higher than the number cue. More generally, the result demonstrates that the assumption of differential cue weighting is important for a theory of dependency completion processes. This differential cue weighting idea was independently supported by a modeling study on subject-verb non-agreement dependencies (Article III). Overall, cue-based retrieval, a general theory of dependency completion, needs to incorporate two new assumptions: (i) the nouns stored in memory can undergo probabilistic feature distortion, and (ii) the linguistic cues used for retrieval can be weighted differentially. This is the cumulative result of the modeling work presented in this dissertation. The dissertation makes an important theoretical contribution: Sentence comprehension in humans is driven by a mechanism that assumes cue-based retrieval, probabilistic feature distortion, and differential cue weighting. This insight is theoretically important because there is some independent support for these three assumptions in sentence processing and the broader memory literature. The modeling work presented here is also methodologically important because for the first time, it demonstrates (i) how complex models of sentence processing can be evaluated using data from multiple studies simultaneously, without oversimplifying the models, and (ii) how the inferences drawn from the individual-level behavior can be used in theory development.}, language = {en} } @phdthesis{Mertzen2022, author = {Mertzen, Daniela}, title = {A cross-linguistic investigation of similarity-based interference in sentence comprehension}, doi = {10.25932/publishup-55668}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-556685}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 129}, year = {2022}, abstract = {The aim of this dissertation was to conduct a larger-scale cross-linguistic empirical investigation of similarity-based interference effects in sentence comprehension. Interference studies can offer valuable insights into the mechanisms that are involved in long-distance dependency completion. Many studies have investigated similarity-based interference effects, showing that syntactic and semantic information are employed during long-distance dependency formation (e.g., Arnett \& Wagers, 2017; Cunnings \& Sturt, 2018; Van Dyke, 2007, Van Dyke \& Lewis, 2003; Van Dyke \& McElree, 2011). Nevertheless, there are some important open questions in the interference literature that are critical to our understanding of the constraints involved in dependency resolution. The first research question concerns the relative timing of syntactic and semantic interference in online sentence comprehension. Only a few interference studies have investigated this question, and, to date, there are not enough data to draw conclusions about the time course (Van Dyke, 2007; Van Dyke \& McElree, 2011). 
Our first cross-linguistic study explores the relative timing of syntactic and semantic interference in two eye-tracking reading experiments that implement the study design used in Van Dyke (2007). The first experiment tests English sentences. The second, larger-sample experiment investigates the two interference types in German. Overall, the data suggest that syntactic and semantic interference can arise simultaneously during retrieval. The second research question concerns a special case of semantic interference: We investigate whether cue-based retrieval interference can be caused by semantically similar items which are not embedded in a syntactic structure. This second interference study builds on a landmark study by Van Dyke \& McElree (2006). Their study design is unique in that it is able to pin down the source of interference as a consequence of cue overload during retrieval, when semantic retrieval cues do not uniquely match the retrieval target. Unlike most other interference studies, this design is able to rule out encoding interference as an alternative explanation. Encoding accounts postulate that it is not cue overload at the retrieval site but the erroneous encoding of similar linguistic items in memory that leads to interference (Lewandowsky et al., 2008; Oberauer \& Kliegl, 2006). While Van Dyke \& McElree (2006) reported cue-based retrieval interference from sentence-external distractors, the evidence for this effect was weak. A subsequent study did not show interference of this type (Van Dyke et al., 2014). Given these inconclusive findings, further research is necessary to investigate semantic cue-based retrieval interference. The second study in this dissertation provides a larger-scale cross-linguistic investigation of cue-based retrieval interference from sentence-external items. Three larger-sample eye-tracking studies in English, German, and Russian tested cue-based interference in the online processing of filler-gap dependencies. This study further extends the previous research by investigating interference in each language under varying task demands (Logačev \& Vasishth, 2016; Swets et al., 2008). Overall, we see some very modest support for proactive cue-based retrieval interference in English. Unexpectedly, this was observed only under a low task demand. In German and Russian, there is some evidence against the interference effect. It is possible that interference is attenuated in languages with richer case marking. In sum, the cross-linguistic experiments on the time course of syntactic and semantic interference from sentence-internal distractors support existing evidence of syntactic and semantic interference during sentence comprehension. Our data further show that both types of interference effects can arise simultaneously. Our cross-linguistic experiments investigating semantic cue-based retrieval interference from sentence-external distractors suggest that this type of interference may arise only in specific linguistic contexts.}, language = {en} } @phdthesis{Hakansson2017, author = {H{\aa}kansson, Nils}, title = {A Dark Matter line search using 3D-modeling of Cherenkov showers below 10 TeV with VERITAS}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397670}, school = {Universit{\"a}t Potsdam}, pages = {107, xxxvi}, year = {2017}, abstract = {Dark matter (DM) has not yet been directly observed, but it has a very solid theoretical basis. 
There are observations that provide indirect evidence, like galactic rotation curves that show that the galaxies are rotating too fast to hold on to their constituent parts, and galaxy clusters that bend the light coming from galaxies behind them more strongly than expected from their visible mass. These observations, among many others, can be explained with theories that include DM. The missing piece is to detect something that can exclusively be explained by DM. Direct observation in a particle accelerator is one way, and indirect detection using telescopes is another. This thesis is focused on the latter method. The Very Energetic Radiation Imaging Telescope Array System, VERITAS, is a telescope array that detects Cherenkov radiation. Theory predicts that DM particles annihilate into, e.g., a γγ pair and create a distinctive energy spectrum when detected by such telescopes, i.e., a monoenergetic line at an energy equal to the particle mass. This so-called "smoking-gun" signature is sought with a sliding window line search within the sub-range ∼ 0.3 - 10 TeV of the VERITAS energy range, ∼ 0.01 - 30 TeV. Standard analysis within the VERITAS collaboration uses Hillas analysis and look-up tables, acquired by analysing particle simulations, to calculate the energy of the particle causing the Cherenkov shower. In this thesis, an improved analysis method has been used. Modelling each shower as a 3D Gaussian should increase the energy reconstruction quality. Five dwarf spheroidal galaxies were chosen as targets with a total of ∼ 224 hours of data. The targets were analysed individually and stacked. Particle simulations were based on two simulation packages, CARE and GrISU. Improvements have been made to the energy resolution and bias correction, up to a few percent each, in comparison to standard analysis. Nevertheless, no line with a relevant significance has been detected. The most promising line is at an energy of ∼ 422 GeV with an upper limit cross section of 8.10 · 10^-24 cm^3 s^-1 and a significance of ∼ 2.73 σ before trials correction and ∼ 1.56 σ after. Upper limit cross sections have also been calculated for the γγ annihilation process and four other outcomes. The limits are in line with current limits using other methods, ranging from ∼ 8.56 · 10^-26 to 6.61 · 10^-23 cm^3 s^-1. Future larger telescope arrays, like the upcoming Cherenkov Telescope Array, CTA, will provide better results with the help of this analysis method.}, language = {en} } @phdthesis{Krentz2019, author = {Krentz, Konrad-Felix}, title = {A Denial-of-Sleep-Resilient Medium Access Control Layer for IEEE 802.15.4 Networks}, doi = {10.25932/publishup-43930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439301}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 187}, year = {2019}, abstract = {With the emergence of the Internet of Things (IoT), plenty of battery-powered and energy-harvesting devices are being deployed to fulfill sensing and actuation tasks in a variety of application areas, such as smart homes, precision agriculture, smart cities, and industrial automation. In this context, a critical issue is that of denial-of-sleep attacks. Such attacks temporarily or permanently deprive battery-powered, energy-harvesting, or otherwise energy-constrained devices of entering energy-saving sleep modes, thereby draining their charge. At the very least, a successful denial-of-sleep attack causes a long outage of the victim device. 
Moreover, to put battery-powered devices back into operation, their batteries have to be replaced. This is tedious and may even be infeasible, e.g., if a battery-powered device is deployed at an inaccessible location. While the research community came up with numerous defenses against denial-of-sleep attacks, most present-day IoT protocols include no denial-of-sleep defenses at all, presumably due to a lack of awareness and unsolved integration problems. After all, although many denial-of-sleep defenses exist, effective defenses against certain kinds of denial-of-sleep attacks are yet to be found. The overall contribution of this dissertation is to propose a denial-of-sleep-resilient medium access control (MAC) layer for IoT devices that communicate over IEEE 802.15.4 links. Internally, our MAC layer comprises two main components. The first main component is a denial-of-sleep-resilient protocol for establishing session keys among neighboring IEEE 802.15.4 nodes. The established session keys serve the dual purpose of implementing (i) basic wireless security and (ii) complementary denial-of-sleep defenses that belong to the second main component. The second main component is a denial-of-sleep-resilient MAC protocol. Notably, this MAC protocol not only incorporates novel denial-of-sleep defenses, but also state-of-the-art mechanisms for achieving low energy consumption, high throughput, and high delivery ratios. Altogether, our MAC layer resists, or at least greatly mitigates, all denial-of-sleep attacks against it that we are aware of. Furthermore, our MAC layer is self-contained and thus can act as a drop-in replacement for IEEE 802.15.4-compliant MAC layers. In fact, we implemented our MAC layer in the Contiki-NG operating system, where it seamlessly integrates into an existing protocol stack.}, language = {en} } @phdthesis{Klar2012, author = {Klar, Jochen}, title = {A detailed view of filaments and sheets of the warm-hot intergalactic medium}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58038}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {In the context of cosmological structure formation, sheets, filaments, and eventually halos form due to gravitational instabilities. It is noteworthy that, at all times, the majority of the baryons in the universe does not reside in the dense halos but in the filaments and the sheets of the intergalactic medium. While at higher redshifts of z > 2, these baryons can be detected via the absorption of light (originating from more distant sources) by neutral hydrogen at temperatures of T ~ 10^4 K (the Lyman-alpha forest), at lower redshifts only about 20 \% can be found in this state. The remainder (about 50 to 70 \% of the total baryon mass) is unaccounted for by observational means. Numerical simulations predict that these missing baryons could reside in the filaments and sheets of the cosmic web at high temperatures of T = 10^4.5 - 10^7 K, but only at low to intermediate densities, constituting the warm-hot intergalactic medium (WHIM). The high temperatures of the WHIM are caused by the formation of shocks and the subsequent shock-heating of the gas. This results in a high degree of ionization and renders the reliable detection of the WHIM a challenging task. Recent high-resolution hydrodynamical simulations indicate that, at redshifts of z ~ 2, filaments are able to provide very massive galaxies with a significant amount of cool gas at temperatures of T ~ 10^4 K. 
This could have an important impact on star formation in those galaxies. It is therefore of principal importance to investigate the particular hydro- and thermodynamical conditions of these large filament structures. Density and temperature profiles, and velocity fields, are expected to leave their characteristic imprint on spectroscopic observations. A potential multiphase structure may act as a tracer in observational studies of the WHIM. In the context of cold streams, it is important to explore the processes that regulate the amount of gas transported by the streams. This includes the time evolution of filaments, as well as possible quenching mechanisms. In this context, the halo mass range in which cold stream accretion occurs is of particular interest. In order to address these questions, we perform dedicated hydrodynamical simulations of very high resolution and investigate the formation and evolution of prototype structures representing the typical filaments and sheets of the WHIM. We start with a comprehensive study of the one-dimensional collapse of a sinusoidal density perturbation (pancake formation) and examine the influence of radiative cooling, heating due to a UV background, thermal conduction, and the effect of small-scale perturbations given by the cosmological power spectrum. We use a set of simulations, parametrized by the wavelength of the initial perturbation L. For L ~ 2 Mpc/h the collapse leads to shock-confined structures. As a result of radiative cooling and of heating due to a UV background, a relatively cold and dense core forms. With increasing L the core becomes denser and more concentrated. Thermal conduction enhances this trend and may lead to an evaporation of the core at very large L ~ 30 Mpc/h. When extending our simulations into three dimensions, instead of a pancake structure, we obtain a configuration consisting of well-defined sheets, filaments, and a gaseous halo. For L > 4 Mpc/h filaments form, which are fully confined by an accretion shock. As with the one-dimensional pancakes, they exhibit an isothermal core. Thus, our results confirm a multiphase structure, which may generate particular spectral tracers. We find that, after its formation, the core becomes shielded against further infall of gas onto the filament, and its mass content decreases with time. In the vicinity of the halo, the filament's core can be attributed to the cold streams found in other studies. We show that the basic structure of these cold streams exists from the very beginning of the collapse process. Further on, the cross section of the streams is constricted by the outward-moving accretion shock of the halo. Thermal conduction leads to a complete evaporation of the cold stream for L > 6 Mpc/h. This corresponds to halos with a total mass higher than M_halo = 10^13 M_sun, and predicts that in more massive halos star formation cannot be sustained by cold streams. Far away from the gaseous halo, the temperature gradients in the filament are not sufficiently strong for thermal conduction to be effective.}, language = {en} } @phdthesis{Biewald2008, author = {Biewald, Anne}, title = {A dynamic life cycle model for Germany with unemployment uncertainty}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33111}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This work analyzes the saving and consumption behavior of agents faced with the possibility of unemployment in a dynamic and stochastic life cycle model. 
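As the following lines explain, the optimization rests on backward recursion with Markov employment transitions and CRRA utility; a stripped-down sketch with a single consumption good and purely hypothetical parameter values looks like this.

import numpy as np

beta, gamma, r = 0.96, 2.0, 0.03         # discount factor, risk aversion, interest rate
income = np.array([1.0, 0.4])            # employed / unemployed income
P = np.array([[0.9, 0.1],                # Markov transition probabilities
              [0.4, 0.6]])               # between employment states
assets = np.linspace(0.0, 10.0, 200)     # asset grid
T = 60                                   # number of life-cycle periods

def u(c):
    c_safe = np.maximum(c, 1e-9)         # guard against negative consumption
    return np.where(c > 1e-9, c_safe ** (1 - gamma) / (1 - gamma), -1e10)

V = np.zeros((2, assets.size))           # terminal value (no bequest in this sketch)
for t in reversed(range(T)):
    V_new = np.empty_like(V)
    for s in range(2):
        # consumption for every (current assets, next assets) pair
        c = (1 + r) * assets[:, None] + income[s] - assets[None, :]
        EV = P[s] @ V                    # expected continuation value
        V_new[s] = np.max(u(c) + beta * EV[None, :], axis=1)
    V = V_new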
The intertemporal optimization is based on Dynamic Programming with a backward recursion algorithm. The implemented uncertainty is not based on income shocks, as is done in traditional life cycle models, but on Markov probabilities, where the probability of the agent's next employment status depends on the current status. The utility function is a CRRA function (constant relative risk aversion) combined with a CES function (constant elasticity of substitution), and it includes several consumption goods, a subsistence level, money, and a bequest function.}, language = {en} } @phdthesis{Muksin2014, author = {Muksin, Umar}, title = {A fault-controlled geothermal system in Tarutung (North Sumatra, Indonesia) investigated by seismological analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72065}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The seismic structure (Vp, Vp/Vs, and Qp anomalies) reflects the physical properties and lithology of the rocks and the possible fluid distribution in the region. The Vp model images the geometry of the Tarutung and the Sarulla basins. Both basins have a depth of around 2.0 km. High Vp/Vs and high attenuation (low Qp) anomalies are observed along the Sarulla graben, associated with a weak zone caused by volcanic activity along the graben. Low Vp/Vs and low conductivity anomalies are found in the west of the Tarutung basin. This anomaly is interpreted as dry, compact, and rigid granitic rock in the region, as also found by geological observations. Low Vp, high Vp/Vs and low Qp anomalies are found to the east of the Tarutung basin, which appear to be associated with the three large geothermal manifestations in the Sipoholon, Hutabarat, and Panabungan area. These anomalies are connected with high Vp/Vs and low Qp anomalies below the Tarutung basin at depths of around 3 - 10 km. This suggests that these geothermal manifestations are fed by the same source of hot fluid below the Tarutung basin. The hot fluids from below the Tarutung basin propagate to the more dilatational and more permeable zone in the northeast. Granite found in the west of the Tarutung basin could also be abundant underneath the basin at a certain depth, so that it prevents the hot fluid from being transported directly to the Tarutung basin. High seismic attenuation and low Vp/Vs anomalies are found in the southwest of the Tarutung basin below the Martimbang volcano. These anomalies are associated with hot rock below the volcano with little or no partial melting. There is no indication that the volcano controls the geothermal system around the Tarutung basin. The geothermal system around the Tarutung basin is fault-controlled, resulting from the deep circulation of fluids. Outside of the basin, the delineation of the seismicity and the focal mechanisms correlate with the shape and the characteristics of the strike-slip Sumatran fault. Within the Tarutung basin, the seismicity is distributed more broadly, coinciding with the margin of the basin. An extensional duplex system in the Tarutung basin is derived from the seismicity and focal mechanism analysis, which is also consistent with the geological observations. 
The vertical distribution of the seismicity suggests the presence of a negative flower structure within the Tarutung basin.}, language = {de} } @phdthesis{Beyhl2017, author = {Beyhl, Thomas}, title = {A framework for incremental view graph maintenance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405929}, school = {Universit{\"a}t Potsdam}, pages = {VII, 293}, year = {2017}, abstract = {Nowadays, graph data models are employed when relationships between entities have to be stored and are within the scope of queries. For each entity, this graph data model locally stores relationships to adjacent entities. Users employ graph queries to query and modify these entities and relationships. These graph queries employ graph patterns to look up all subgraphs in the graph data that satisfy certain graph structures. These subgraphs are called graph pattern matches. However, this graph pattern matching is NP-complete for subgraph isomorphism. Thus, graph queries can suffer long response times when the number of entities and relationships in the graph data or in the graph patterns increases. One possibility to improve graph query performance is to employ graph views that keep graph pattern matches for complex graph queries ready for later retrieval. However, these graph views must be maintained by means of incremental graph pattern matching to keep them consistent with the graph data from which they are derived when the graph data changes. This maintenance adds subgraphs that satisfy a graph pattern to the graph views and removes subgraphs that do not satisfy a graph pattern anymore from the graph views. Current approaches for incremental graph pattern matching employ Rete networks. Rete networks are discrimination networks that enumerate and maintain all graph pattern matches of certain graph queries by employing a network of condition tests, which implement partial graph patterns that together constitute the overall graph query. Each condition test stores all subgraphs that satisfy the partial graph pattern. Thus, Rete networks suffer from high memory consumption because they store a large number of partial graph pattern matches. However, it is precisely these partial graph pattern matches that enable Rete networks to update the stored graph pattern matches efficiently, because the network maintenance exploits the already stored partial matches to find new graph pattern matches. However, other kinds of discrimination networks exist that can perform better in time and space than Rete networks. Currently, these other kinds of networks are not used for incremental graph pattern matching. This thesis employs generalized discrimination networks for incremental graph pattern matching. These discrimination networks permit a generalized network structure of condition tests to enable users to steer the trade-off between memory consumption and execution time for the incremental graph pattern matching. For that purpose, this thesis contributes a modeling language for the effective definition of generalized discrimination networks. Furthermore, this thesis contributes an efficient and scalable incremental maintenance algorithm, which updates the (partial) graph pattern matches that are stored by each condition test. Moreover, this thesis provides a modeling evaluation, which shows that the proposed modeling language enables the effective modeling of generalized discrimination networks. 
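A toy sketch may help to illustrate the caching idea behind such networks; the class below is a hypothetical two-condition join node, not the thesis's actual API. It stores the partial matches of each input condition so that an inserted edge only has to be joined against the cached partial matches of the other side.

class JoinNode:
    def __init__(self):
        self.left = set()     # partial matches (a, b) of condition 1
        self.right = set()    # partial matches (b, c) of condition 2
        self.matches = set()  # complete matches (a, b, c)

    def insert_left(self, a, b):
        self.left.add((a, b))
        self.matches |= {(a, b, c) for (b2, c) in self.right if b2 == b}

    def insert_right(self, b, c):
        self.right.add((b, c))
        self.matches |= {(a, b, c) for (a, b2) in self.left if b2 == b}

    def remove_left(self, a, b):
        self.left.discard((a, b))
        self.matches = {m for m in self.matches if (m[0], m[1]) != (a, b)}

node = JoinNode()
node.insert_left("u1", "u2")   # e.g., edge u1 -[knows]-> u2
node.insert_right("u2", "u3")  # e.g., edge u2 -[worksAt]-> u3
assert node.matches == {("u1", "u2", "u3")}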
Furthermore, this thesis provides a performance evaluation, which shows that a) the incremental maintenance algorithm scales when the graph data becomes large, and b) generalized discrimination network structures can outperform Rete network structures in both time and space for incremental graph pattern matching.}, language = {en} } @phdthesis{Tattarini2022, author = {Tattarini, Giulia}, title = {A job is good, but is a good job healthier?}, doi = {10.25932/publishup-53672}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-536723}, school = {Universit{\"a}t Potsdam}, pages = {182}, year = {2022}, abstract = {What are the consequences of unemployment and precarious employment for individuals' health in Europe? What are the moderating factors that may offset (or increase) the health consequences of labor-market risks? How do the effects of these risks vary across different contexts, which differ in their institutional and cultural settings? Does gender, regarded as a social structure, play a role, and how? Answering these questions is the aim of my cumulative thesis. This study aims to advance our knowledge about the health consequences that unemployment and precariousness cause over the life course. In particular, I investigate how several moderating factors, such as gender, the family, and the broader cultural and institutional context, may offset or increase the impact of employment instability and insecurity on individual health. In my first paper, 'The buffering role of the family in the relationship between job loss and self-perceived health: Longitudinal results from Europe, 2004-2011', my co-authors and I measure the causal effect of job loss on health and the role of the family and welfare states (regimes) as moderating factors. Using EU-SILC longitudinal data (2004-2011), we estimate the probability of experiencing 'bad health' following a transition to unemployment by applying linear probability models and undertake separate analyses for men and women. Firstly, we measure whether changes in the independent variable 'job loss' lead to changes in the dependent variable 'self-rated health' for men and women separately. Then, by adding different interaction terms to the model, we measure the moderating effect of the family, both in terms of emotional and economic support, and how much it varies across different welfare regimes. As an identification strategy, we first implement static fixed-effect panel models, which control for time-varying observables and indirect health selection—i.e., constant unobserved heterogeneity. Secondly, to control for reverse causality and path dependency, we implement dynamic fixed-effect panel models, adding a lagged dependent variable to the model. We explore the role of the family by focusing on close ties within households: we consider the presence of a stable partner and his/her working status as a source of social and economic support. According to previous literature, having a partner should reduce the stress from adverse events, thanks to the symbolic and emotional dimensions that such a relationship entails, regardless of any economic benefits. Our results, however, suggest that benefits linked to the presence of a (female) partner also come from the financial stability that (s)he can provide in terms of a second income. 
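As an illustration of this identification strategy, the following minimal sketch demonstrates the within transformation behind a fixed-effects linear probability model with an interaction term; the toy data and variable names are hypothetical.

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "pid":        [1, 1, 1, 2, 2, 2, 3, 3, 3],
    "bad_health": [0, 1, 1, 0, 0, 1, 0, 0, 0],
    "job_loss":   [0, 1, 1, 0, 1, 1, 0, 0, 1],
    "partner":    [0, 0, 0, 1, 1, 1, 1, 1, 1],
})
df["job_loss_x_partner"] = df["job_loss"] * df["partner"]

# Demean every variable by person: this sweeps out constant unobserved
# heterogeneity (the individual fixed effect).
cols = ["bad_health", "job_loss", "job_loss_x_partner"]
demeaned = df[cols] - df.groupby("pid")[cols].transform("mean")

X = demeaned[["job_loss", "job_loss_x_partner"]].to_numpy()
y = demeaned["bad_health"].to_numpy()
beta, *_ = np.linalg.lstsq(X, y, rcond=None)
print(dict(zip(["job_loss", "job_loss_x_partner"], beta.round(3))))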
Furthermore, we find the partner's employment to be at least as important as the partner's mere presence in reducing the negative effect of job loss on the individual's health, by maintaining the household's standard of living and decreasing economic strain on the family. Our results are in line with previous research, which has highlighted that some people cope better than others with adverse life circumstances, and the support provided by the family is a crucial resource in that regard. We also reported an important interaction between the family and the welfare state in moderating the health consequences of unemployment, showing how the compensation effect of the family varies across welfare regimes. The family plays a decisive role in cushioning the adverse consequences of labor market risks in Southern and Eastern welfare states, characterized by less developed social protection systems and - especially in the Southern regime - a high level of familialism. The first paper also found important gender differences concerning job loss, family and welfare effects. Of particular interest is the evidence suggesting that health selection works differently for men and women, playing a more prominent role for women than for men in explaining the relationship between job loss and self-perceived health. The second paper, 'Gender roles and selection mechanisms across contexts: A comparative analysis of the relationship between unemployment, self-perceived health, and gender.' investigates in more depth the gender differential in health driven by unemployment. As this is a highly contested issue in the literature, we study whether men are more penalized than women or the other way around, and which mechanisms may explain the gender difference. To do that, we rely on two theoretical arguments: the availability of alternative roles and social selection. The first argument builds on the idea that men and women may compensate for the detrimental health consequences of unemployment through the commitment to 'alternative roles,' which can provide the resources needed to fulfill people's socially constructed needs. Notably, the availability of alternative options depends on the different positions that men and women have in society. Further, we merge the 'alternative roles' argument with the health selection argument. We assume that health selection could be contingent on people's social position as defined by gender and, thus, explain the gender differential in the relationship between unemployment and health. Ill people might be less reluctant to fall or remain (i.e., self-select) in unemployment if they have alternative roles. In Western societies, women generally have more alternative roles than men and thus more discretion in their labor market attachment. Therefore, health selection should be stronger for them, explaining why unemployment is less of a menace for women than for their male counterparts. Finally, relying on the idea of different gender regimes, we extended these arguments to comparisons across contexts. For example, in contexts where being a caregiver is assumed to be women's traditional and primary role and the primary breadwinner role is reserved for men, unemployment is less stigmatized, and taking up alternative roles is more socially accepted for women than for men (Hp.1). 
Accordingly, social (self)selection should be stronger for women than for men in traditional contexts, where, in the case of ill-health, the separation from work is eased by the availability of alternative roles (Hp.2). By focusing on contexts that are representative of different gender regimes, we implement a multiple-step comparative approach. Firstly, by using EU-SILC longitudinal data (2004-2015), our analysis tests gender roles and selection mechanisms for Sweden and Italy, representing radically different gender regimes, thus providing institutional and cultural variation. Then, we limit institutional heterogeneity by focusing on Germany and comparing East and West Germany as well as older and younger cohorts within West Germany (SOEP data 1995-2017). Next, to assess the differential impact of unemployment for men and women, we compare (unemployed and employed) men with (unemployed and employed) women. To do so, we calculate predicted probabilities and average marginal effects from two distinct random-effects probit models. Our first step is estimating random-effects models that assess the association between unemployment and self-perceived health, controlling for observable characteristics. In the second step, our fully adjusted model controls for both direct and indirect selection. We do this using dynamic correlated random-effects (CRE) models. Further, based on the fully adjusted model, we test our hypotheses on alternative roles (Hp.1) by comparing several contexts - models are estimated separately for each context. For this hypothesis, we pool men and women and include an interaction term between unemployment and gender, which has the advantage of allowing a direct test of whether gender differences in the effect of unemployment exist and are statistically significant. Finally, we test the role of selection mechanisms (Hp.2), using the KHB method to compare coefficients across nested nonlinear models. Specifically, we test the role of selection for the relationship between unemployment and health by comparing the partially-adjusted and fully-adjusted models. To allow selection mechanisms to operate differently between genders, we estimate separate models for men and women. We found support for our first hypothesis—the context in which people are embedded structures the relationship between unemployment, health, and gender. We found no gendered effect of unemployment on health in the egalitarian context of Sweden. Conversely, in the traditional context of Italy, we observed substantive and statistically significant gender differences in the effect of unemployment on bad health, with women suffering less than men. We found the same pattern when comparing East and West Germany and younger and older cohorts in West Germany. By contrast, our results did not support our theoretical argument on social selection. We found that in Sweden, women are more selected out of employment than men. In contrast, in Italy, health selection does not seem to be the primary mechanism behind the gender differential—Italian men and women seem to be selected out of employment to the same extent. Namely, we do not find any evidence that health selection is stronger for women in more traditional countries (Hp2), despite the fact that the institutional and the cultural context would offer them a wider range of 'alternative roles' relative to men. 
Moreover, our second hypothesis is also rejected in the second and third comparisons, where the cross-country heterogeneity is reduced to maximize cultural differences within the same institutional context. Further research that addresses selection into inactivity is needed to evaluate the interplay between selection and social roles across gender regimes. While the health consequences of unemployment have been on the research agenda for a long time, the interest in precarious employment—defined as the linking of the vulnerable worker to work that is characterized by uncertainty and insecurity concerning pay, the stability of the work arrangement, limited access to social benefits, and statutory protections—emerged only later. Since the 1980s, scholars from different disciplines have raised concerns about the social consequences of de-standardization of employment relationships. However, while work has undoubtedly become more precarious, very little is known about its causal effect on individual health and the role of gender as a moderator. These questions are at the core of my third paper: 'Bad job, bad health? A longitudinal analysis of the interaction between precariousness, gender and self-perceived health in Germany'. Herein, I investigate the multidimensional nature of precarious employment and its causal effect on health, particularly focusing on gender differences. With this paper, I aim to overcome three major shortcomings of earlier studies: The first one regards the cross-sectional nature of the data, which prevents the authors from ruling out unobserved heterogeneity as a mechanism for the association between precarious employment and health. Indeed, several unmeasured individual characteristics—such as cognitive abilities—may confound the relationship between precarious work and health, leading to biased results. Secondly, only a few studies have directly addressed the role of gender in shaping the relationship. Moreover, available results on the gender differential are mixed and inconsistent: some found precarious employment to be more detrimental to women's health, while others found no gender differences or a stronger negative association for men. Finally, previous attempts at an empirical translation of the employment precariousness (EP) concept have not always been coherent with their theoretical framework. EP is usually assumed to be a multidimensional and continuous phenomenon; it is characterized by different dimensions of insecurity that may overlap in the same job and lead to different "degrees of precariousness." However, researchers have predominantly focused on one-dimensional indicators—e.g., temporary employment, subjective job insecurity—to measure EP and study the association with health. Besides the fact that this approach only partially grasps the phenomenon's complexity, the major problem is the inconsistent evidence it has produced. Indeed, this line of inquiry generally reveals an ambiguous picture, with some studies finding substantial adverse effects of temporary over permanent employment, while others report only minor differences. To measure the (causal) effect of precarious work on self-rated health and its variation by gender, I focus on Germany and use four waves from SOEP data (2003, 2007, 2011, and 2015). Germany is a suitable context for my study. Indeed, since the 1980s, the labor market and welfare system have been restructured in many ways to increase the German economy's competitiveness in the global market. 
As a result, the (standard) employment relationship has been de-standardized: non-standard and atypical employment arrangements—i.e., part-time work, fixed-term contracts, mini-jobs, and agency work—have increased over time while wages have fallen, even among workers with standard work. In addition, the power of unions has also fallen over the last three decades, leaving a large share of workers without collective protection. Because of this process of de-standardization, the link between wage employment and strong social rights has eroded, making workers more powerless and more vulnerable to labor market risks than in the past. EP refers to this uneven distribution of power in the employment relationship, which can be detrimental to workers' health. Indeed, by affecting individuals' access to power and other resources, EP puts precarious workers at risk of experiencing health shocks and influences their ability to gain and accumulate health advantages (Hp.1). Further, the focus on Germany allows me to investigate my second research question on the gender differential. Germany is usually regarded as a traditionalist gender regime: a context characterized by a traditional configuration of roles. Here, being a caregiver is assumed to be women's primary role, whereas the primary breadwinner role is reserved for men. Although much progress has been made over the last decades towards a greater equalization of opportunities and more egalitarianism, the breadwinner model has barely changed, evolving only into a modified version. Thus, women usually take on the double role of workers (the so-called secondary earner) and caregivers, and men still devote most of their time to paid work activities. Moreover, the overall upward trend towards more egalitarian gender ideologies has leveled off over the last decades, moving notably towards more traditional gender ideologies. In this setting, two alternative hypotheses are possible. Firstly, I assume that the negative relationship between EP and health is stronger for women than for men. This is because women are systematically more disadvantaged than men in the public and private spheres of life, having less access to formal and informal sources of power. These gender-related power asymmetries may interact with EP-related power asymmetries, resulting in a stronger effect of EP on women's health than on men's health (Hp.2). An alternative way of looking at the gender differential is to consider the interaction that precariousness might have with men's and women's gender identities. According to this view, the negative relationship between EP and health is weaker for women than for men (Hp.2a). In a society with a gendered division of labor and a strong link between masculine identities and a stable and well-rewarded job—i.e., a job that confers the role of primary family provider—a male worker with precarious employment might violate the traditional male gender role. Men in precarious jobs may be perceived by themselves (and by others) as possessing a socially undesirable characteristic, which conflicts with the stereotypical idea of themselves as the male breadwinner. Engaging in behaviors that contradict stereotypical gender identity may decrease self-esteem and foster feelings of inferiority, helplessness, and jealousy, leading to poor health. I develop a new indicator of EP that empirically translates a definition of EP as a multidimensional and continuous phenomenon. 
I assume that EP is a latent construct composed of seven dimensions of insecurity chosen according to the theory and previous empirical research: income insecurity, social insecurity, legal insecurity, employment insecurity, working-time insecurity, representation insecurity, and workers' vulnerability. The seven dimensions are proxied by eight indicators available in the four waves of the SOEP dataset. The EP composite indicator is obtained by performing a multiple correspondence analysis (MCA) on the eight indicators. This approach aims to construct a summary scale in which all dimensions contribute jointly to the measured experience of precariousness and its health impact. Further, the relationship between EP and 'general self-perceived health' is estimated by applying ordered probit random-effects estimators and calculating average marginal effects (AME). Then, to control for unobserved heterogeneity, I implement correlated random-effects models that add the within-individual means of the time-varying independent variables to the model. To test the significance of the gender differential, I add an interaction term between EP and gender in the fully adjusted model in the pooled sample. My correlated random-effects models showed EP's negative and substantial 'effect' on self-perceived health for both men and women. Although nonsignificant, the evidence seems in line with previous cross-sectional literature. It supports the hypothesis that employment precariousness could be detrimental to workers' health. Further, my results showed the crucial role of unobserved heterogeneity in shaping the health consequences of precarious employment. This is particularly important as evidence accumulates, yet it is still mostly descriptive. Moreover, my results revealed a substantial difference between men and women in the relationship between EP and health: when EP increases, the risk of experiencing poor health increases much more for men than for women. This evidence falsifies previous theory according to which the gender differential is contingent on the structurally disadvantaged position of women in Western societies. In contrast, they seem to confirm the idea that men in precarious work could experience role conflict to a larger extent than women, as their self-standard is supposed to be the stereotypical breadwinner worker with a good and well-rewarded job. Finally, results from the multiple correspondence analysis contribute to the methodological debate on precariousness, showing that a multidimensional and continuous indicator can express a latent variable of EP. All in all, complementarities are revealed in the results on unemployment and employment precariousness, which have two implications: Policy-makers need to be aware that the total costs of unemployment and precariousness go far beyond the economic and material realm, penetrating other fundamental life domains such as individual health. Moreover, they need to balance the trade-off between adequately protecting unemployed people and fostering high-quality employment in reaction to the highlighted market pressures. In this sense, the further development of a (universalistic) welfare state certainly helps mitigate the adverse health effects of unemployment and, therefore, the future costs in terms of both individual health and welfare spending. In addition, the presence of a working partner is crucial for reducing the health consequences of employment instability. 
Therefore, policies aiming to increase female labor market participation should be promoted, especially in contexts where the welfare state is less developed. Moreover, my results underline the importance of adopting a gender perspective in health research. The findings of the three articles show that job loss, unemployment, and precarious employment in general have adverse effects on men's health but weaker or absent consequences for women's health. This suggests the importance of labor and health policies that consider and further distinguish the specific needs of the male and female labor force in Europe. Nevertheless, a further implication emerges: the health consequences of employment instability and de-standardization need to be investigated in light of the gender arrangements and the transforming gender relationships in specific cultural and institutional contexts. Indeed, my results suggest that women's health advantage may be a transitory phenomenon, contingent on the predominant gendered institutional and cultural context. As the structural difference between men's and women's positions in society erodes and egalitarianism becomes the dominant normative standard, the gender difference in the health consequences of job loss and precariousness will probably erode as well. Therefore, while gender equality in opportunities and roles is desirable for contemporary societies and a political goal that cannot be postponed further, this thesis raises a further and perhaps more crucial question: what kind of equality should be pursued to provide men and women with both a good quality of life and equal chances in the public and private spheres? In this sense, I believe that social and labor policies aiming to reduce gender inequality should focus not only on improving women's integration into the labor market, but also on implementing policies that target men and facilitate their involvement in the private sphere of life. An equal redistribution of social roles could trigger a crucial transformation of gender roles and of the cultural models that sustain and still legitimate gender inequality in Western societies.}, language = {en} } @phdthesis{Reike2017, author = {Reike, Dennis}, title = {A look behind perceptual performance in numerical cognition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407821}, school = {Universit{\"a}t Potsdam}, pages = {vi, 136}, year = {2017}, abstract = {Recognizing, understanding, and responding to quantities are essential skills for human beings. We can easily communicate quantities, and we are extremely efficient in adapting our behavior to number-related tasks. One common task is to compare quantities. We also use symbols like digits in number-related tasks. To solve tasks involving digits, we must rely on our previously learned internal number representations. This thesis elaborates on the process of number comparison using noisy mental representations of numbers, the interaction of number and size representations, and how we use mental number representations strategically. For this, three studies were carried out. In the first study, participants had to decide which of two presented digits was numerically larger. They had to respond with a saccade in the direction of the anticipated answer. Using only a small set of meaningfully interpretable parameters, a variant of random walk models is described that accounts for response time, error rate, and variance of response time for the full matrix of 72 digit pairs.
In addition, the random walk model used predicts a numerical distance effect even for error response times, and this effect clearly occurs in the observed data. Error responses were systematically faster than the corresponding correct responses. However, unlike standard assumptions often made in random walk models, this account required the step-size distributions of the induced random walks to be asymmetric in order to capture this asymmetry between correct and incorrect responses. Furthermore, the presented model provides a well-defined framework to investigate the nature and scale (e.g., linear vs. logarithmic) of the mapping of numerical magnitude onto its internal representation. Comparing the fits of the proposed models with linear and logarithmic mapping suggests that the logarithmic mapping is to be preferred. Finally, we discuss how our findings can help interpret complex findings (e.g., conflicting speed vs. accuracy trends) in applied studies that use number comparison as a well-established diagnostic tool. Furthermore, a novel oculomotor effect is reported, namely the saccadic overshoot effect: participants responded by saccadic eye movements, and the amplitude of these saccadic responses decreased with numerical distance. For the second study, an experimental design was developed that allows us to apply signal detection theory to a task where participants had to decide whether a presented digit was physically smaller or larger. A remaining question is whether the benefit in (numerical magnitude - physical size) congruent conditions reflects better perception than in incongruent conditions, or whether, alternatively, the number-size congruency effect is mediated by response biases due to number magnitude. Signal detection theory is well suited to distinguish between these two alternatives. It describes two parameters, namely sensitivity and response bias. Changes in sensitivity reflect actual task performance due to real differences in perceptual processes, whereas changes in response bias merely reflect strategic effects, such as stronger preparation (activation) of an anticipated answer. Our results clearly demonstrate that the number-size congruency effect cannot be reduced to mere response bias effects, and that genuine sensitivity gains for congruent number-size pairings contribute to the number-size congruency effect. Third, participants had to perform a SNARC task, deciding whether a presented digit was odd or even. The local transition probability of irrelevant attributes (magnitude) was varied, while the local transition probability of relevant attributes (parity) and the global occurrence probability of each stimulus were kept constant. Participants were quite sensitive in recognizing the underlying local transition probability of the irrelevant attribute. A gain in performance was observed for actual repetitions of the irrelevant attribute, relative to changes of the irrelevant attribute, in high-repetition compared to low-repetition conditions. One interpretation of these findings is that information about the irrelevant attribute (magnitude) in the previous trial is used as an informative precue, so that participants can prepare early processing stages in the current trial, with the corresponding benefits and costs typical of standard cueing studies.
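As an illustration of the modelling idea summarized above (not the author's actual implementation), the following Python sketch simulates a bounded random walk for digit comparison whose drift grows with the logarithmic distance between the digits; for simplicity it uses symmetric Gaussian steps, whereas the thesis requires asymmetric step distributions to reproduce fast errors.

import numpy as np

rng = np.random.default_rng(0)

def compare(d1, d2, bound=10.0, gain=1.5, noise=1.0):
    # Accumulate noisy evidence until one of the two bounds is reached;
    # drift is proportional to the signed logarithmic distance.
    drift = gain * (np.log(d1) - np.log(d2))
    x, t = 0.0, 0
    while abs(x) < bound:
        x += drift + noise * rng.standard_normal()
        t += 1
    return x > 0, t  # (decided "first larger"?, response time in steps)

# Larger numerical distance -> faster and more accurate decisions
for pair in [(8, 7), (9, 2)]:
    runs = [compare(*pair) for _ in range(2000)]
    rt = np.mean([t for _, t in runs])
    acc = np.mean([c for c, _ in runs])
    print(pair, "mean RT:", round(rt, 1), "accuracy:", round(acc, 3))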
Finally, the results reported in this thesis are discussed in relation to recent studies in numerical cognition.}, language = {en} } @phdthesis{Andjelkovic2021, author = {Andjelkovic, Marko}, title = {A methodology for characterization, modeling and mitigation of single event transient effects in CMOS standard combinational cells}, doi = {10.25932/publishup-53484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534843}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 216}, year = {2021}, abstract = {With the downscaling of CMOS technologies, radiation-induced Single Event Transient (SET) effects in combinational logic have become a critical reliability issue for modern integrated circuits (ICs) intended for operation under harsh radiation conditions. The SET pulses generated in combinational logic may propagate through the circuit and eventually result in soft errors. It has thus become imperative to address SET effects in the early phases of radiation-hard IC design. In general, soft error mitigation solutions should accommodate both static and dynamic measures to ensure the optimal utilization of available resources. An efficient soft-error-aware design should address three main aspects synergistically: (i) characterization and modeling of soft errors, (ii) multi-level soft error mitigation, and (iii) online soft error monitoring. Although significant results have been achieved, the effectiveness of SET characterization methods, the accuracy of predictive SET models, and the efficiency of SET mitigation measures are still critical issues. Therefore, this work addresses the following topics: (i) characterization and modeling of SET effects in standard combinational cells, (ii) static mitigation of SET effects in standard combinational cells, and (iii) online particle detection, as a support for dynamic soft error mitigation. Since standard digital libraries are widely used in the design of radiation-hard ICs, the characterization of SET effects in standard cells and the availability of accurate SET models for Soft Error Rate (SER) evaluation are the main prerequisites for efficient radiation-hard design. This work introduces an approach for SPICE-based standard cell characterization with a reduced number of simulations, improved SET models, and an optimized SET sensitivity database. It has been shown that the inherent similarities in the SET response of logic cells for different input levels can be utilized to reduce the number of required simulations. Based on the characterization results, fitting models for the SET sensitivity metrics (critical charge, generated SET pulse width, and propagated SET pulse width) have been developed. The proposed models are based on the principle of superposition, and they express explicitly the dependence of the SET sensitivity of individual combinational cells on design, operating, and irradiation parameters. In contrast to state-of-the-art characterization methodologies, which employ extensive look-up tables (LUTs) for storing the simulation results, this work proposes the use of LUTs for storing the fitting coefficients of the SET sensitivity models derived from the characterization results. In that way, the amount of characterization data in the SET sensitivity database is reduced significantly. The initial step in enhancing the robustness of combinational logic is the application of gate-level mitigation techniques.
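As a schematic illustration of the coefficient-based database idea described above, compare storing a few fitting coefficients per cell with storing raw simulation results; the model form and all numbers below are invented for illustration, not taken from the thesis.

import math
from dataclasses import dataclass

@dataclass
class SETModel:
    a: float  # fitted coefficients per cell (toy values)
    b: float
    c: float

    def pulse_width_ps(self, let: float, vdd: float) -> float:
        # Toy fitting model: pulse width grows with LET, shrinks with VDD.
        return self.a * math.log1p(self.b * let) / vdd + self.c

# The LUT stores three coefficients per cell instead of a dense grid of
# simulated pulse widths over all (LET, VDD, ...) combinations.
db = {"INV_X1": SETModel(150.0, 1.1, 4.0),
      "NAND2_X1": SETModel(120.0, 0.8, 5.0)}

print(db["NAND2_X1"].pulse_width_ps(let=20.0, vdd=1.2))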
As a result, a significant improvement of the overall SER can be achieved with minimal area, delay, and power overheads. For SET mitigation in standard cells, it is essential to employ techniques that do not require modifying the cell structure. This work introduces the use of decoupling cells for improving the robustness of standard combinational cells. By inserting two decoupling cells at the output of a target cell, the critical charge of the cell's output node is increased and the attenuation of short SETs is enhanced. In comparison to the most common gate-level techniques (gate upsizing and gate duplication), the proposed approach provides better SET filtering. However, as there is no single gate-level mitigation technique with optimal performance, a combination of multiple techniques is required. This work introduces a comprehensive characterization of gate-level mitigation techniques aimed at quantifying their impact on SET robustness improvement, as well as the introduced area, delay, and power overhead per gate. By characterizing the gate-level mitigation techniques together with the standard cells, the required effort in the subsequent SER analysis of a target design can be reduced. The characterization database of the hardened standard cells can be utilized as a guideline for selecting the most appropriate mitigation solution for a given design. As a support for dynamic soft error mitigation techniques, it is important to enable the online detection of the energetic particles causing the soft errors. This allows activating the power-hungry fault-tolerant configurations based on N-modular redundancy only at high radiation levels. To enable such functionality, it is necessary to monitor both the particle flux and the variation of particle LET, as these two parameters contribute significantly to the system SER. In this work, a particle detection approach based on custom-sized pulse stretching inverters is proposed. Employing pulse stretching inverters connected in parallel makes it possible to measure the particle flux in terms of the number of detected SETs, while the particle LET variations can be estimated from the distribution of SET pulse widths. This approach requires purely digital processing logic, in contrast to standard detectors, which require complex mixed-signal processing. Besides the possibility of LET monitoring, additional advantages of the proposed particle detector are its low detection latency and power consumption, and its immunity to error accumulation. The results achieved in this thesis can serve as a basis for the establishment of an overall soft-error-aware database for a given digital library, and for a comprehensive multi-level radiation-hard design flow that can be implemented with standard IC design tools. The next step will be to evaluate the achieved results with irradiation experiments.}, language = {en} } @phdthesis{Kirschbaum2009, author = {Kirschbaum, Michael}, title = {A microfluidic approach for the initiation and investigation of surface-mediated signal transduction processes on a single-cell level}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-39576}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {For the elucidation of the dynamics of signal transduction processes that are induced by cellular interactions, defined events along the signal transduction cascade and subsequent activation steps have to be analyzed and correlated with each other.
This cannot be achieved by ensemble measurements, because averaging biological data ignores the variability in timing and response patterns of individual cells and leads to highly blurred results. Instead, only a multi-parameter analysis at the single-cell level can exploit the information that is crucially needed for deducing the signaling pathways involved. The aim of this work was to develop a process line that allows the initiation of cell-cell or cell-particle interactions while, at the same time, the induced cellular reactions can be analyzed at various stages along the signal transduction cascade and correlated with each other. As this approach requires the gentle handling of individually addressable cells, a dielectrophoresis (DEP)-based microfluidic system was employed that provides the manipulation of microscale objects with very high spatiotemporal precision and without the need to contact the cell membrane. The system offers a high potential for automation and parallelization. This is essential for achieving a high level of robustness and reproducibility, which are key requirements to qualify this approach for biomedical applications. As an example process for intercellular communication, T cell activation was chosen. The activation of single T cells was triggered by contacting them individually with microbeads coated with antibodies directed against specific cell surface proteins, such as the T cell receptor-associated CD3 complex and the costimulatory molecule CD28 (CD: cluster of differentiation). The stimulation of the cells with the functionalized beads led to a rapid rise of their cytosolic Ca2+ concentration, which was analyzed by a dual-wavelength ratiometric fluorescence measurement of the Ca2+-sensitive dye Fura-2. After Ca2+ imaging, the cells were isolated individually from the microfluidic system and cultivated further. Cell division and the expression of the marker molecule CD69, a late activation event of great significance, were analyzed the following day and correlated with the previously recorded Ca2+ traces for each individual cell. It turned out that the temporal profiles of the Ca2+ traces differed significantly between activated and non-activated cells, as well as between dividing and non-dividing cells. This shows that the pattern of Ca2+ signals in T cells can provide early information about a later reaction of the cell. As isolated cells are highly delicate objects, a precondition for these experiments was the successful adaptation of the system to maintain the vitality of single cells during and after manipulation. In this context, the influences of the microfluidic environment and of the applied electric fields on the vitality of the cells and on the cytosolic Ca2+ concentration, as crucially important physiological parameters, were thoroughly investigated. While short-term DEP manipulation did not affect the vitality of the cells, the cells showed irregular Ca2+ transients upon mere exposure to the DEP field. The rate and strength of these Ca2+ signals depended on exposure time, electric field strength, and field frequency. By minimizing the occurrence rate of these signals, experimental conditions were identified that caused the least interference with the physiology of the cell.
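For context, dual-wavelength ratiometric Fura-2 measurements such as those described above are commonly converted into Ca2+ concentrations with the Grynkiewicz equation; the following generic Python sketch uses placeholder calibration constants, not values from this thesis.

import numpy as np

def ca_concentration_nM(R, R_min, R_max, Kd_nM, beta):
    # Grynkiewicz et al. (1985): [Ca2+] = Kd * beta * (R - Rmin) / (Rmax - R),
    # with R = F340/F380 and beta = F380(Ca-free) / F380(Ca-saturated).
    return Kd_nM * beta * (R - R_min) / (R_max - R)

f340 = np.array([1.10, 1.45, 1.80])  # emission upon 340 nm excitation
f380 = np.array([0.90, 0.75, 0.60])  # emission upon 380 nm excitation
R = f340 / f380
print(ca_concentration_nM(R, R_min=0.3, R_max=6.0, Kd_nM=224.0, beta=5.0))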
The possibility to precisely control the exact time point of stimulus application, to simultaneously analyze short-term reactions, and to correlate them with later events of the signal transduction cascade at the level of individual cells makes this approach unique among previously described applications and offers new possibilities for unraveling the mechanisms underlying intercellular communication.}, language = {en} } @phdthesis{Gerling2022, author = {Gerling, Marten Tobias}, title = {A microfluidic system for high-precision image-based live cell sorting using dielectrophoretic forces}, doi = {10.25932/publishup-58742}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-587421}, school = {Universit{\"a}t Potsdam}, pages = {vii, 87, VI}, year = {2022}, abstract = {An important goal in biotechnology and (bio-)medical research is the isolation of single cells from a heterogeneous cell population. These specialised cells are of great interest for bioproduction, diagnostics, drug development, (cancer) therapy, and research. To tackle emerging questions, an ever finer differentiation between target cells and non-target cells is required. This precise differentiation is a challenge for a growing number of available methods. Since the physiological properties of cells are closely linked to their morphology, it is beneficial to include their appearance in the sorting decision. For established methods, appearance is a parameter that cannot be addressed, requiring new methods for the identification and isolation of target cells. Consequently, a variety of new flow-based methods that utilise 2D imaging data to identify target cells within a sample have been developed and presented in recent years. As these methods aim for high throughput, the devices developed typically require highly complex fluid handling techniques, making them expensive while offering limited image quality. In this work, a new continuous-flow system for image-based cell sorting was developed that uses dielectrophoresis to precisely handle cells in a microchannel. Dielectrophoretic forces are exerted by inhomogeneous alternating electric fields on polarisable particles (here: cells). In the present system, the electric fields can be switched on and off precisely and quickly by a signal generator. In addition to the resulting simple and effective cell handling, the system is characterised by the outstanding quality of the image data generated and its compatibility with standard microscopes. These aspects result in low complexity, making it both affordable and user-friendly. With the developed cell sorting system, cells could be sorted reliably and efficiently according to their cytosolic staining as well as their morphological properties at different optical magnifications. The achieved purity of the target cell population was up to 95\% and about 85\% of the sorted cells could be recovered from the system. Good agreement was achieved between the obtained results and theoretical considerations. The achieved throughput of the system was up to 12,000 cells per hour. Cell viability studies indicated a high biocompatibility of the system. The results presented demonstrate the potential of image-based cell sorting using dielectrophoresis. The outstanding image quality and the highly precise yet gentle handling of the cells set the system apart from other technologies.
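For reference, the time-averaged dielectrophoretic force on a spherical particle of radius r in a medium of permittivity \varepsilon_m is commonly written in the standard textbook form (not specific to this thesis) as

\langle F_{\mathrm{DEP}} \rangle = 2\pi \varepsilon_m r^3 \, \mathrm{Re}[K(\omega)] \, \nabla |E_{\mathrm{rms}}|^2, \qquad K(\omega) = \frac{\varepsilon_p^* - \varepsilon_m^*}{\varepsilon_p^* + 2\varepsilon_m^*},

where K(\omega) is the Clausius-Mossotti factor formed from the complex permittivities \varepsilon^* = \varepsilon - i\sigma/\omega of particle (p) and medium (m). Depending on the sign of \mathrm{Re}[K(\omega)], cells are attracted to or repelled from field maxima, which is what allows them to be deflected at switchable electrodes.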
This results in enormous potential for processing valuable and sensitive cell samples.}, language = {en} } @phdthesis{Mauri2014, author = {Mauri, Marco}, title = {A model for sigma factor competition in bacterial cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72098}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2014}, abstract = {Bacteria respond to changing environmental conditions by switching the global pattern of expressed genes. In response to specific environmental stresses, the cell activates several stress-specific molecules such as sigma factors. These reversibly bind the RNA polymerase to form the so-called holoenzyme and direct it towards the appropriate stress response genes. In exponentially growing E. coli cells, the majority of the transcriptional activity is carried out by the housekeeping sigma factor, while stress responses are often under the control of alternative sigma factors. Different sigma factors compete for binding to a limited pool of RNA polymerase (RNAP) core enzymes, providing a mechanism for crosstalk between genes or gene classes via the sharing of expression machinery. To quantitatively analyze the contribution of sigma factor competition to global changes in gene expression, we develop a thermodynamic model that describes the binding between sigma factors and core RNAP at equilibrium, transcription, non-specific binding to DNA, and the modulation of the availability of the molecular components. The association of the housekeeping sigma factor with RNAP is generally favored by its abundance and its higher binding affinity to the core. In order to promote transcription by alternative sigma subunits, the bacterial cell modulates transcriptional efficiency in a reversible manner through several strategies, such as anti-sigma factors, 6S RNA, and transcriptional regulators in general (e.g., activators or inhibitors). By shifting the outcome of the sigma factor competition for the core, these modulators bias the transcriptional program of the cell. The model is validated by comparison with in vitro competition experiments, with which excellent agreement is found. We observe that transcription is affected via the modulation of the concentrations of the different types of holoenzymes, so that saturated promoters are only weakly affected by sigma factor competition. However, in the case of overlapping promoters or promoters recognized by two types of sigma factors, we find that even saturated promoters are strongly affected. Active transcription effectively lowers the affinity between the sigma factor driving it and the core RNAP, resulting in complex crosstalk effects and raising the question of how relevant their in vitro measurement is in the cell. We also estimate that sigma factor competition is not strongly affected by non-specific binding of core RNAPs, sigma factors, and holoenzymes to DNA. Finally, we analyze the role of the increased availability of core RNAP upon the shut-down of ribosomal RNA transcription during the stringent response. We find that passive up-regulation of alternative sigma-dependent transcription is not only possible, but also displays hypersensitivity based on sigma factor competition.
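A minimal numerical sketch of the competition mechanism described above: several sigma factors bind a shared pool of core RNAP at equilibrium, and the holoenzyme levels follow from mass balance. The concentrations and dissociation constants below are invented for illustration, not taken from the thesis.

from scipy.optimize import brentq

E_tot = 1.0  # total core RNAP (arbitrary units)
sigmas = {"sigma70": (3.0, 0.01),   # (total amount, Kd): housekeeping factor
          "sigma38": (0.5, 0.10)}   # alternative factor, weaker binder

def residual(E_free):
    # Mass balance: E_tot = E_free + sum_i [E:sigma_i], where each
    # holoenzyme obeys [E:sigma_i] = E_free * sigma_tot_i / (Kd_i + E_free)
    bound = sum(E_free * s_tot / (Kd + E_free) for s_tot, Kd in sigmas.values())
    return E_free + bound - E_tot

E_free = brentq(residual, 1e-12, E_tot)  # solve for free core concentration
for name, (s_tot, Kd) in sigmas.items():
    print(name, "holoenzyme:", round(E_free * s_tot / (Kd + E_free), 3))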
Our theoretical analysis thus provides support for a significant role of passive control during this global switch of the gene expression program and gives new insights into RNAP partitioning in the cell.}, language = {en} } @phdthesis{Zass2021, author = {Zass, Alexander}, title = {A multifaceted study of marked Gibbs point processes}, doi = {10.25932/publishup-51277}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512775}, school = {Universit{\"a}t Potsdam}, pages = {vii, 104}, year = {2021}, abstract = {This thesis focuses on the study of marked Gibbs point processes, in particular presenting results on their existence and uniqueness, with ideas and techniques drawn from different areas of statistical mechanics: the entropy method from large deviations theory, cluster expansion and the Kirkwood--Salsburg equations, the Dobrushin contraction principle, and disagreement percolation. We first present an existence result for infinite-volume marked Gibbs point processes. More precisely, we use the so-called entropy method (and large-deviation tools) to construct marked Gibbs point processes in R^d under quite general assumptions. In particular, the random marks belong to a general normed space S and are not bounded. Moreover, we allow for interaction functionals that may be unbounded and whose range is finite but random. The entropy method relies on showing that a family of finite-volume Gibbs point processes belongs to sequentially compact entropy level sets, and is therefore tight. We then present infinite-dimensional Langevin diffusions, which we put in interaction via a Gibbsian description. In this setting, we are able to adapt the general result above to show the existence of the associated infinite-volume measure. We also study its correlation functions via cluster expansion techniques, and obtain the uniqueness of the Gibbs process for all inverse temperatures β and activities z below a certain threshold. This method relies on first showing that the correlation functions of the process satisfy a so-called Ruelle bound, and then using it to solve a fixed point problem in an appropriate Banach space. The uniqueness domain we obtain then consists of the model parameters z and β for which this problem has exactly one solution. Finally, we explore further the question of uniqueness of infinite-volume Gibbs point processes on R^d, in the unmarked setting. We present, in the context of repulsive interactions with a hard-core component, a novel approach to uniqueness: applying the discrete Dobrushin criterion in the continuum framework. We first fix a discretisation parameter a>0 and then study the behaviour of the uniqueness domain as a goes to 0. With this technique we are able to obtain explicit thresholds for the parameters z and β, which we then compare to existing results coming from the different methods of cluster expansion and disagreement percolation.
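For orientation, the finite-volume Gibbs measures underlying this discussion can be written, in standard notation (not specific to this thesis), as absolutely continuous with respect to a Poisson point process \Pi_z with activity z:

dP_\Lambda(\omega) = \frac{1}{Z_\Lambda(\beta, z)} \, e^{-\beta H_\Lambda(\omega)} \, d\Pi_z(\omega),

where H_\Lambda is the energy of the configuration \omega in the window \Lambda, \beta the inverse temperature, and Z_\Lambda the normalizing partition function; the existence question concerns the infinite-volume limit of this family of measures.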
Throughout this thesis, we illustrate our theoretical results with various examples both from classical statistical mechanics and stochastic geometry.}, language = {en} } @phdthesis{Thiede2019, author = {Thiede, Tobias}, title = {A multiscale analysis of additively manufactured lattice structures}, doi = {10.25932/publishup-47041}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470418}, school = {Universit{\"a}t Potsdam}, pages = {xi, 97, LIII}, year = {2019}, abstract = {Additive manufacturing (AM) in terms of laser powder-bed fusion (L-PBF) offers new prospects regarding the design of parts and therefore enables the production of lattice structures. These lattice structures are to be implemented in various industrial applications (e.g. gas turbines) for reasons such as material savings or integrated cooling channels. However, internal defects, residual stress, and structural deviations from the nominal geometry are unavoidable. In this work, the structural integrity of lattice structures manufactured by means of L-PBF was non-destructively investigated using a multiscale approach. A workflow for quantitative 3D powder analysis in terms of particle size, particle shape, particle porosity, inter-particle distance, and packing density was established. Synchrotron computed tomography (CT) was used to correlate the packing density with the particle size and particle shape. It was also observed that at least about 50\% of the powder porosity was released during production of the struts. Struts are the basic components of lattice structures and were investigated by means of laboratory CT. The focus was on the influence of the build angle on part porosity and surface quality. The surface topography analysis was advanced by the quantitative characterisation of re-entrant surface features. This characterisation was compared with conventional surface parameters, showing their complementary information, but also the need for AM-specific surface parameters. The mechanical behaviour of the lattice structure was investigated with in-situ CT under compression and subsequent digital volume correlation (DVC). The deformation was found to be knot-dominated, so that the lattice folds unit-cell layer by layer. The residual stress was determined experimentally for the first time in such lattice structures. Neutron diffraction was used for the non-destructive 3D stress investigation. The principal stress directions and values were determined in dependence of the number of measured directions. While a significant uniaxial stress state was found in the strut, a more hydrostatic stress state was found in the knot. In both cases, strut and knot, at least seven directions were needed to find reliable principal stress directions.}, language = {en} } @phdthesis{Ghasemzadeh2005, author = {Ghasemzadeh, Mohammad}, title = {A new algorithm for the quantified satisfiability problem, based on zero-suppressed binary decision diagrams and memoization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6378}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Quantified Boolean formulas (QBFs) play an important role in theoretical computer science. QBF extends propositional logic in such a way that many advanced forms of reasoning can be easily formulated and evaluated. In this dissertation we present ZQSAT, an algorithm for evaluating quantified Boolean formulas. ZQSAT is based on ZBDDs (zero-suppressed binary decision diagrams), a variant of BDDs, and an adapted version of the DPLL algorithm.
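To illustrate the memoization idea on which ZQSAT rests, here is a deliberately naive Python sketch for plain (unquantified) SAT, memoizing the satisfiability of already-solved clause sets; ZQSAT does this for QBF with ZBDD-encoded clause sets instead of Python frozensets.

def dpll(clauses, memo=None):
    # clauses: frozenset of frozensets of integer literals (-v means "not v")
    if memo is None:
        memo = {}
    if clauses in memo:
        return memo[clauses]          # reuse a previously solved subformula
    if not clauses:
        return True                   # no clauses left: satisfiable
    if frozenset() in clauses:
        return False                  # empty clause: contradiction
    lit = next(iter(next(iter(clauses))))
    result = False
    for choice in (lit, -lit):        # try both truth values of the literal
        reduced = frozenset(c - {-choice} for c in clauses if choice not in c)
        if dpll(reduced, memo):
            result = True
            break
    memo[clauses] = result
    return result

# (p or q) and (not p or q) and (not q or r)
print(dpll(frozenset({frozenset({1, 2}), frozenset({-1, 2}), frozenset({-2, 3})})))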
It has been implemented in C using the CUDD (Colorado University Decision Diagram) package. The capability of ZBDDs to store sets of subsets efficiently enabled us to store the clauses of a QBF very compactly and to embed the notion of memoization into the DPLL algorithm. These points led us to implement the search algorithm in such a way that we could store and reuse the results of all previously solved subformulas with little overhead. ZQSAT can solve some sets of standard QBF benchmark problems (known to be hard for DPLL-based algorithms) faster than the best existing solvers. In addition to prenex-CNF, ZQSAT accepts prenex-NNF formulas. We show and prove how this capability can be exponentially beneficial.}, subject = {Bin{\"a}res Entscheidungsdiagramm}, language = {en} } @phdthesis{Falter2016, author = {Falter, Daniela}, title = {A novel approach for large-scale flood risk assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90239}, school = {Universit{\"a}t Potsdam}, pages = {95}, year = {2016}, abstract = {In the past, floods were mainly managed by flood control mechanisms. The focus was set on the reduction of flood hazard; the potential consequences were of minor interest. Nowadays, river flooding is increasingly seen from a risk perspective, including possible consequences. Moreover, the large-scale picture of flood risk has become increasingly important for disaster management planning, national risk developments, and the (re-)insurance industry. It is therefore widely accepted that risk-oriented flood management approaches at the basin scale are needed. However, large-scale flood risk assessment methods for areas of several 10,000 km² are still in their early stages. Traditional flood risk assessments are performed reach-wise, assuming constant probabilities for the entire reach or basin. This might be helpful on a local basis, but where large-scale patterns are important, this approach is of limited use. Assuming a T-year flood (e.g. 100 years) for the entire river network is unrealistic and would lead to an overestimation of flood risk at the large scale. Additionally, due to the lack of damage data, the probability of peak discharge or rainfall is usually used as a proxy for damage probability when deriving flood risk. With a continuous and long-term simulation of the entire flood risk chain, the spatial variability of probabilities can be considered and flood risk can be derived directly from damage data in a consistent way. The objective of this study is the development and application of a full flood risk chain, appropriate for the large scale and based on long-term continuous simulation. The novel approach of 'derived flood risk based on continuous simulations' is introduced, where a synthetic discharge time series is used as input to flood impact models and flood risk is derived directly from the resulting synthetic damage time series. The bottleneck at this scale is the hydrodynamic simulation. To find suitable hydrodynamic approaches for the large scale, a benchmark study with simplified 2D hydrodynamic models was performed. A raster-based approach with inertia formulation and a relatively high resolution of 100 m, in combination with a fast 1D channel routing model, was chosen. To investigate the suitability of the continuous simulation of a full flood risk chain for the large scale, all model parts were integrated into a new framework, the Regional Flood Model (RFM).
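The core of the 'derived flood risk' idea introduced above, computing risk statistics directly from a long synthetic damage series, reduces to a few lines of Python once such a series exists; the series below is random toy data standing in for RFM output.

import numpy as np

rng = np.random.default_rng(42)
years = 10_000  # standing in for 100 x 100 years of continuous simulation
# Most years see no damage; a few see large, lognormally distributed losses.
damage = np.where(rng.random(years) < 0.05,
                  rng.lognormal(mean=15.0, sigma=1.0, size=years), 0.0)

ead = damage.mean()  # expected annual damage, directly from the damage data
# Empirical risk curve: damage versus return period
sorted_damage = np.sort(damage)[::-1]
return_period = (years + 1) / (np.arange(years) + 1)  # Weibull plotting position
idx = np.argmin(np.abs(return_period - 100.0))
print(f"EAD: {ead:.3e}")
print(f"Damage at ~100-year return period: {sorted_damage[idx]:.3e}")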
RFM consists of the hydrological model SWIM, a 1D hydrodynamic river network model, a 2D raster-based inundation model, and the flood loss model FELMOps+r. Subsequently, the model chain was applied to the Elbe catchment, one of the largest catchments in Germany. As a proof of concept, a continuous simulation was performed for the period 1990-2003. Results were evaluated and validated, as far as possible, with the observed data available for this period. Although each model part introduced its own uncertainties, the results and runtime were generally found to be adequate for the purpose of continuous simulation at the large catchment scale. Finally, RFM was applied to a meso-scale catchment in the east of Germany to perform, for the first time, a flood risk assessment with the novel approach of 'derived flood risk assessment based on continuous simulations'. For this purpose, RFM was driven by long-term synthetic meteorological input data generated by a weather generator. Thereby, a virtual time series of climate data of 100 x 100 years was generated and served as input to RFM, providing in turn 100 x 100 years of spatially consistent river discharge series, inundation patterns, and damage values. On this basis, flood risk curves and the expected annual damage could be derived directly from damage data, providing a large-scale picture of flood risk. In contrast to traditional flood risk analyses, where homogeneous return periods are assumed for the entire basin, the presented approach provides a coherent large-scale picture of flood risk in which the spatial variability of occurrence probability is respected and data and methods are consistent. Catchment and floodplain processes are represented in a holistic way. Antecedent catchment conditions are implicitly taken into account, as are physical processes like storage effects, flood attenuation or channel-floodplain interactions and the related damage-influencing effects. Finally, the simulation of a virtual period of 100 x 100 years, and the consequently large data set of flood loss events, enabled the calculation of flood risk directly from damage distributions. Problems associated with transferring probabilities of rainfall or peak runoff into probabilities of damage, as is often done in traditional approaches, are bypassed. RFM and the 'derived flood risk approach based on continuous simulations' have the potential to provide flood risk statements for national planning, reinsurance aspects, or other questions where spatially consistent, large-scale assessments are required.}, language = {en} } @phdthesis{RudolphMohr2013, author = {Rudolph-Mohr, Nicole}, title = {A novel non-invasive optical method for quantitative visualization of pH and oxygen dynamics in soils}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66993}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In soils and sediments there is a strong coupling between local biogeochemical processes and the distribution of water, electron acceptors, acids, and nutrients. The two sides are closely related and affect each other across scales, from the small scale to larger scales. Soil structures such as aggregates, roots, layers, or macropores enhance the patchiness of these distributions. At the same time, it is difficult to access the spatial distribution and temporal dynamics of these parameters. Non-invasive imaging techniques with high spatial and temporal resolution overcome these limitations.
New non-invasive techniques are needed to study the dynamic interaction of plant roots with the surrounding soil, but also the complex physical and chemical processes in structured soils. In this study we developed an efficient, non-destructive, in-situ method to determine biogeochemical parameters relevant to plant roots growing in soil: a quantitative fluorescence imaging method suitable for visualizing spatial and temporal pH changes around roots. We adapted the fluorescence imaging set-up and coupled it with neutron radiography to simultaneously study root growth, oxygen depletion by respiration activity, and root water uptake. The combined set-up was subsequently applied to a structured soil system to map the patchy structure of oxic and anoxic zones induced by a chemical oxygen-consumption reaction for spatially varying water contents. Moreover, results from a similar fluorescence imaging technique for nitrate detection were complemented by a numerical modeling study in which we used the imaging data to simulate biodegradation under anaerobic, nitrate-reducing conditions.}, language = {en} } @phdthesis{Gebauer2008, author = {Gebauer, Denis}, title = {A novel view on the early stage of crystallization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-19818}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This thesis provides a novel view on the early stage of crystallization, utilizing calcium carbonate as a model system. Calcium carbonate is of great economic, scientific and ecological importance, because it is a major contributor to water hardness, is the most abundant biomineral, and forms huge amounts of geological sediments, thus binding large amounts of carbon dioxide. The primary experiments are based on the evolution of supersaturation via slow addition of dilute calcium chloride solution into dilute carbonate buffer. The time-dependent measurement of the Ca2+ potential and a concurrent pH = constant titration facilitate the calculation of the amount of calcium and carbonate ions bound in pre-nucleation-stage clusters, which had never been detected experimentally before, and in the new phase after nucleation, respectively. Analytical ultracentrifugation independently proves the existence of pre-nucleation-stage clusters, and shows that the clusters forming at pH = 9.00 have an approximate time-averaged size of altogether 70 calcium and carbonate ions. Both experiments show that pre-nucleation-stage cluster formation can be described by means of equilibrium thermodynamics. Effectively, the cluster formation equilibrium is physico-chemically characterized by means of a multiple-binding equilibrium of calcium ions to a 'lattice' of carbonate ions. The evaluation gives the Gibbs standard energy for the formation of calcium/carbonate ion pairs in clusters, which exhibits a maximal value of approximately 17.2 kJ mol^-1 at pH = 9.75, relating to a minimal binding strength in clusters at this pH value. Nucleated calcium carbonate particles are amorphous at first and subsequently become crystalline. At high binding strength in clusters, only calcite (the thermodynamically stable polymorph) is finally obtained, while with decreasing binding strength in clusters, vaterite (the thermodynamically least stable polymorph) and presumably aragonite (the polymorph of intermediate thermodynamic stability) are additionally obtained.
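As a quick worked conversion (standard thermodynamics, not an additional result of the thesis), a Gibbs standard energy of magnitude 17.2 kJ mol^-1 corresponds at T = 298 K to an equilibrium constant of

K = \exp\!\left(\frac{17.2 \times 10^{3}\ \mathrm{J\,mol^{-1}}}{8.314\ \mathrm{J\,mol^{-1}\,K^{-1}} \times 298\ \mathrm{K}}\right) \approx e^{6.9} \approx 1.0 \times 10^{3},

i.e. an ion-pair association constant on the order of 10^3 (in units of inverse molarity), consistent with substantial but reversible binding of calcium to carbonate within the clusters; the sign in the exponent depends on whether the quoted energy refers to association or dissociation.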
Concurrently, two different solubility products of nucleated amorphous calcium carbonate (ACC) are detected at low and at high binding strength in clusters (ACC I: 3.1 x 10^-8 M^2, ACC II: 3.8 x 10^-8 M^2), respectively, indicating the precipitation of at least two different ACC species, with the clusters providing the precursor species of ACC. It is plausible that ACC I relates to calcitic ACC, i.e. ACC exhibiting short-range order similar to the long-range order of calcite, and that ACC II relates to vateritic ACC, each subsequently transforming into the corresponding crystalline polymorph as discussed in the literature. Detailed analysis of nucleated particles forming at minimal binding strength in clusters (pH = 9.75) by means of SEM, TEM, WAXS and light microscopy shows that predominantly vaterite with traces of calcite forms. The crystalline particles of the early stages are composed of nano-crystallites of approximately 5 to 10 nm in size, which are aligned in high mutual order, as in mesocrystals. The analysis of precipitation at pH = 9.75 in the presence of additives (polyacrylic acid (pAA) as a model compound for scale inhibitors, and peptides exhibiting calcium carbonate binding affinity as model compounds for crystal modifiers) shows that ACC I and ACC II are precipitated in parallel: pAA stabilizes ACC II particles against crystallization, leading to their dissolution in favor of crystals that form from ACC I, and exclusively calcite is finally obtained. Analogously, the peptide additives inhibit the formation of calcite, and in the case of one of the peptide additives exclusively vaterite is finally obtained. These findings show that classical nucleation theory is hardly applicable to the nucleation of calcium carbonate. The metastable system is stabilized remarkably due to cluster formation, and the clusters forming by means of equilibrium thermodynamics, not the ions, are the nucleation-relevant species. Most likely, cluster formation is a common phenomenon occurring during the precipitation of sparingly soluble compounds, as qualitatively shown for calcium oxalate and calcium phosphate. This finding is important for the fundamental understanding of crystallization and of nucleation inhibition and modification by additives, with impact on materials of great scientific and industrial importance, as well as for a better understanding of mass transport in crystallization, and it can provide a novel basis for simulation and modelling approaches. New mechanisms of scale formation in bio- and geomineralization, and also of scale inhibition, need to be considered on the basis of the newly reported reaction channel.}, language = {en} } @phdthesis{TabaresJimenez2021, author = {Tabares Jimenez, Ximena del Carmen}, title = {A palaeoecological approach to savanna dynamics and shrub encroachment in Namibia}, doi = {10.25932/publishup-49281}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492815}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2021}, abstract = {The spread of shrubs in Namibian savannas raises questions about the resilience of these ecosystems to global change. This makes it necessary to understand the past dynamics of the vegetation, since there is no consensus on whether shrub encroachment is a new phenomenon, nor on its main drivers.
However, the lack of long-term vegetation datasets for the region and the scarcity of suitable palaeoecological archives make reconstructing the past vegetation and land cover of the savannas a challenge. To help meet this challenge, this study addresses three main research questions: 1) Is pollen analysis a suitable tool to reflect the vegetation change associated with shrub encroachment in savanna environments? 2) Does the current encroached landscape correspond to an alternative stable state of savanna vegetation? 3) To what extent do pollen-based quantitative vegetation reconstructions reflect changes in past land cover? The research focuses on north-central Namibia where, despite it being the region most affected by shrub invasion, particularly since the beginning of the 21st century, little is known about the dynamics of this phenomenon. Field-based vegetation data were compared with modern pollen data to assess their correspondence in terms of composition and diversity along precipitation and grazing-intensity gradients. In addition, two sediment cores from Lake Otjikoto were analysed to reveal changes in vegetation composition that have occurred in the region over the past 170 years, and their possible drivers. For this, a multiproxy approach (fossil pollen, sedimentary ancient DNA (sedaDNA), biomarkers, compound-specific carbon (δ13C) and deuterium (δD) isotopes, bulk carbon isotopes (δ13Corg), grain size, and geochemical properties) was applied at high taxonomic and temporal resolution. REVEALS modelling of the fossil pollen record from Lake Otjikoto was run to quantitatively reconstruct past vegetation cover. For this, we first derived pollen productivity estimates (PPEs) of the most relevant savanna taxa in the region, using the extended R-value model and two pollen dispersal options (Gaussian plume model and Lagrangian stochastic model). The REVEALS-based vegetation reconstruction was then validated using remote-sensing-based regional vegetation data. The results show that modern pollen reflects the composition of the vegetation well, but diversity less well. Interestingly, precipitation and grazing explain a significant amount of the compositional change in the pollen and vegetation spectra. The multiproxy record shows that a state change from open Combretum woodland to encroached Terminalia shrubland can occur within a century, and that the transition between states spans around 80 years and is characterized by a unique vegetation composition. This transition was driven by gradual environmental changes induced by management (i.e. broad-scale logging for the mining industry, selective grazing, and reduced fire activity associated with intensified farming) and the related land-use change. The resulting environmental changes (i.e. reduced soil moisture, reduced grass cover, changes in species composition and competitiveness, reduced fire intensity) may have affected the resilience of open Combretum woodlands, making them more susceptible to being pushed into an encroached state by stochastic events, such as consecutive years of high precipitation and of drought, and by high pCO2 concentrations. We assume that the resulting encroached state was further stabilized by feedback mechanisms that favour the establishment and competitiveness of woody vegetation. The REVEALS-based quantitative estimates of plant taxa indicate the predominance of a semi-open landscape throughout the 20th century and a reduction in grass cover below 50\% since the beginning of the 21st century, associated with the spread of encroacher woody taxa.
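The essence of the REVEALS correction used above is to down-weight high pollen producers: divide each taxon's pollen proportion by its PPE, then renormalize. The following deliberately stripped-down Python sketch ignores the dispersal-deposition term of the full model, and the numbers are invented.

import numpy as np

taxa = ["Combretum", "Terminalia", "Poaceae"]
pollen_share = np.array([0.20, 0.30, 0.50])  # proportions in a pollen sample
ppe = np.array([0.8, 1.2, 4.0])              # relative pollen productivity

veg = pollen_share / ppe
veg /= veg.sum()  # normalized estimate of vegetation cover
for taxon, cover in zip(taxa, veg):
    print(f"{taxon}: {cover:.2f}")  # the prolific Poaceae are down-weighted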
Cover estimates show a close match with regional vegetation data, supporting the vegetation dynamics inferred from the multiproxy analyses. Reasonable PPEs were obtained for all woody taxa, but not for Poaceae. In conclusion, pollen analysis is a suitable tool to reconstruct past vegetation dynamics in savannas. However, because pollen cannot identify grasses beyond the family level, a multiproxy approach, particularly the use of sedaDNA, is required. I was able to separate stable encroached states from mere woodland phases, and could identify drivers and speculate about related feedbacks. In addition, the REVEALS-based quantitative vegetation reconstruction clearly reflects the magnitude of the changes in vegetation cover that occurred during the last 130 years, despite the limitations of some PPEs. This research provides new insights into pollen-vegetation relationships in savannas and highlights the importance of multiproxy approaches when reconstructing past vegetation dynamics in semi-arid environments. It also provides the first time series with sufficient taxonomic resolution to show changes in vegetation composition during shrub encroachment, as well as the first quantitative reconstruction of past land cover in the region. These results help to identify the different stages in savanna dynamics and can be used to calibrate predictive models of vegetation change, which are highly relevant to land management.}, language = {en} } @phdthesis{D'Agata2014, author = {D'Agata, Valeria Costanza}, title = {A partire dalla Somaestetica di Shusterman}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72650}, school = {Universit{\"a}t Potsdam}, pages = {280}, year = {2014}, abstract = {With his proposal of a somaesthetics, fundamentally articulated into analytic, pragmatic and practical branches, Richard Shusterman intends first of all to provide and create a methodological framework, a unifying orientation capable of tracing, reconstructing and bringing to light, within heterogeneous theoretical reflections and somatic practices, the shared need to restore the bodily dimension as a primary mode of being in the world. Recovering Baumgarten's understanding of Aesthetica as inferior gnoseology, art of the analogue of reason, and science of sensible knowledge, somaesthetics seeks to give new impulse to the deepest root of aesthetics and philosophy, one that grasps life in its process of continuous metamorphosis and regeneration, in that vital breath which, however conscious it may become, is never fully graspable by discursive reason, being situated rather in that primordial space in which consciousness and body belong to each other, in which the subject cannot yet be individualized because it is fused with its environment, and cannot be fully privatized because it is intrinsically shaped by the social fabric to which it itself dynamically gives form. Starting from this re-evaluation of the concept of aisthesis, the somaesthetic discipline aims at an intensification of sensoriality, perception, emotion and affect, locating precisely in the soma the source of those 'inferior' faculties, irreducible to the purely intellectual ones, which give access to the qualitative dimensions of experience, and which allow the human being to manifest and mature as an indivisible being that cannot be grasped by a mode of thought that denies its unity in the name of fictitious and lacerating dichotomous distinctions.
Indeed, rules, conventions, norms and sociocultural values take root silently in the body, determining and at times limiting the configuration and expression of the sensations, perceptions, cognitions, thoughts, actions, volitions and dispositions of a subject who is always already embedded in a Mitwelt (shared world); it is therefore precisely to the body that one must turn in order to reconfigure more authentic modes of expression of the subject, who creates dynamic equilibria in order to maintain a coherent relationship with the broader social, cultural and environmental context. Its openness to dialogue with heterogeneous philosophical positions and its intrinsic multidisciplinarity explain the centrality of somaesthetics in the contemporary international debate in aesthetics: aiming at both theoretical formulation and concrete practical application, it seeks to revalue the soma as intelligent, sentient, intentional and active corporeality, which cannot be reduced to the sinful connotation of caro (mere physical flesh devoid of life and sensation). Through the reflection on, and practice of, techniques of somatic consciousness, the ways are brought to the fore in which an increasingly conscious relationship with one's own corporeality, as mediately experienced and immediately lived and felt, offers authentic occasions for the progressive realization of oneself: first of all as a person capable of self-cultivation, of conscious reflection on one's own incorporated habits, of creative self-restructuring, and of intensified perception and sensory appreciation, both in concrete everyday action and in the more properly aesthetic dimension of the reception, enjoyment and creation of art. The essentially pragmatist orientation of Shusterman's reflection thus outlines a fundamentally relational conception of aesthetics, capable of placing itself within the continuously becoming movement and relationship of genuine transformation and passage between the physical, bodily, psychic and spiritual dimensions of the subject, whose interaction, and whose reciprocal flowing into one another, can be profoundly enriched through a progressive and ever-growing awareness of the richness of the bodily dimension as intentional, perceptive, sentient and volitional, as much as vulnerable, limiting, transient and pathic. The present work intends to retrace and deepen some of Shusterman's principal points of reference, focusing mainly on the pragmatist root of his proposal and on the dialogue with the German-language debate between aesthetics, philosophical anthropology, neophenomenology and medical anthropology, in order to regain a notion of the soma which, precisely by starting from the contrast, from the irreducible impact with the annihilating power of limit situations and of crisis, can acquire a more complex and richer capacity to harmonize the intrinsic and manifold dimensions that constitute the fabric of embodied subjectivity. In particular, the first chapter (1. Somaestetica) clarifies the essentially pragmatist roots of Shusterman's proposal and shows how deeply rooted modes of experience can be destructured and thus reconfigured, making conscious those habits and ways of living that become fixed at the somatic level in a largely unnoticed manner.
The comparison with the notion of habitus, whose invisible and socially determined somatic matrix Pierre Bourdieu brilliantly brings to light, reveals how every human manifestation is sustained by the incorporation of norms, beliefs and values that determine, and at times limit, the expression, the development, and even the predispositions and inclinations of individuals. And it is precisely by intervening at this level that freedom can be restored to our choices, thus opening us to the essentially qualitative dimensions of experience, which, in Dewey's sense, is a unitary and cohesive holistic whole forming the background of organism-environment relations, an inextricable interweaving of theory and praxis, particular and universal, psyche and soma, reason and emotion, the perceptual and the conceptual: in short, that immediate bodily knowledge which structures the background against which consciousness manifests itself.}, language = {it} } @phdthesis{Schoenheit2011, author = {Sch{\"o}nheit, J{\"o}rg}, title = {A phagocyte-specific Irf8 gene enhancer establishes early conventional dendritic cell commitment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55482}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Haematopoietic development is a complex, strictly hierarchically organized process. The phagocyte lineages form a very heterogeneous cell compartment with specialized functions in innate immunity and in the induction of adaptive immune responses. Their generation from a common precursor must be tightly controlled. Interference with lineage formation programs, for example by mutation or by changes in the expression levels of transcription factors (TFs), can cause leukaemia. However, the molecular mechanisms driving specification into distinct phagocytes remain poorly understood. In the present study I identify the transcription factor Interferon Regulatory Factor 8 (IRF8) as the specification factor of dendritic cell (DC) commitment in early phagocyte precursors. Employing an IRF8 reporter mouse, I characterized the distinct Irf8 expression during haematopoietic lineage diversification and isolated a novel bone-marrow-resident progenitor which selectively differentiates into CD8α+ conventional dendritic cells (cDCs) in vivo. This progenitor strictly depends on Irf8 expression to properly establish its transcriptional DC program while suppressing a lineage-inappropriate neutrophil program. Moreover, I demonstrated that Irf8 expression during this cDC commitment step depends on a newly discovered myeloid-specific cis-enhancer which is controlled by the haematopoietic transcription factors PU.1 and RUNX1. Interference with their binding leads to the abrogation of Irf8 expression and subsequently to disturbed cell fate decisions, demonstrating the importance of these factors for proper phagocyte development. Collectively, these data delineate a transcriptional program establishing cDC fate choice with IRF8 at its center.}, language = {en} } @phdthesis{SanchezBarriga2010, author = {S{\´a}nchez-Barriga, Jaime}, title = {A photoemission study of quasiparticle excitations, electron-correlation effects and magnetization dynamics in thin magnetic systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48499}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis is focused on the electronic, spin-dependent and dynamical properties of thin magnetic systems.
Photoemission-related techniques are combined with synchrotron radiation to study the spin-dependent properties of these systems in the energy and time domains. In the first part of this thesis, the strength of electron correlation effects in the spin-dependent electronic structure of ferromagnetic bcc Fe(110) and hcp Co(0001) is investigated by means of spin- and angle-resolved photoemission spectroscopy. The experimental results are compared to theoretical calculations within the three-body scattering approximation and within dynamical mean-field theory, together with one-step model calculations of the photoemission process. From this comparison it is demonstrated that present state-of-the-art many-body calculations, although improving the description of correlation effects in Fe and Co, yield too small mass renormalizations and scattering rates, thus demanding more refined many-body theories that include nonlocal fluctuations. In the second part, it is shown in detail, monitored by photoelectron spectroscopy, how graphene can be grown by chemical vapour deposition on the transition-metal surfaces Ni(111) and Co(0001) and intercalated with a monoatomic layer of Au. For both systems, a linear E(k) dispersion of massless Dirac fermions is observed in the graphene pi-band in the vicinity of the Fermi energy. Spin-resolved photoemission from the graphene pi-band shows that the ferromagnetic polarization of graphene/Ni(111) and graphene/Co(0001) is negligible and that, after intercalation of Au, graphene on Ni(111) is spin-orbit split by the Rashba effect. In the last part, a time-resolved x-ray magnetic circular dichroic-photoelectron emission microscopy study of a permalloy platelet comprising three cross-tie domain walls is presented. It is shown how a fast picosecond magnetic response in the precessional motion of the magnetization can be induced by means of a laser-excited photoswitch. From a comparison with micromagnetic calculations it is demonstrated that the relatively high precessional frequency observed in the experiments is directly linked to the nature of the vortex/antivortex dynamics and its response to the magnetic perturbation. This includes the time-dependent reversal of the vortex core polarization, a process which is beyond the limit of detection in the present experiments.}, language = {en} } @phdthesis{Chen2023, author = {Chen, Junchao}, title = {A self-adaptive resilient method for implementing and managing the high-reliability processing system}, doi = {10.25932/publishup-58313}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583139}, school = {Universit{\"a}t Potsdam}, pages = {XXIII, 167}, year = {2023}, abstract = {As a result of CMOS scaling, radiation-induced Single-Event Effects (SEEs) in electronic circuits have become a critical reliability issue for modern Integrated Circuits (ICs) operating under harsh radiation conditions. SEEs can be triggered in combinational or sequential logic by the impact of high-energy particles, leading to destructive or non-destructive faults that result in data corruption or even system failure. Typically, SEE mitigation methods are deployed statically in processing architectures based on worst-case radiation conditions; most of the time this is unnecessary and results in a resource overhead. Moreover, space radiation conditions change dynamically, especially during Solar Particle Events (SPEs). 
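To illustrate the kind of run-time decision logic that such dynamically adapted mitigation implies (the approach is developed below), here is a minimal sketch in Python; the flux thresholds, units, mode names and monitor interface are hypothetical illustrations, not taken from the thesis:

```python
# Minimal sketch of run-time mitigation-mode selection driven by a
# radiation monitor; thresholds, units and mode names are hypothetical.
from enum import Enum

class Mode(Enum):
    PERFORMANCE = 0  # no redundancy, all cores compute independently
    DMR = 1          # dual modular redundancy with result checking
    TMR = 2          # triple modular redundancy with majority voting

def select_mode(particle_flux: float, spe_predicted: bool) -> Mode:
    """Map a measured particle flux and an SPE forecast flag to a
    mitigation mode (hypothetical thresholds)."""
    if spe_predicted or particle_flux > 1e3:  # severe conditions
        return Mode.TMR
    if particle_flux > 1e1:                   # elevated conditions
        return Mode.DMR
    return Mode.PERFORMANCE                   # quiet conditions

print(select_mode(0.5, spe_predicted=False))  # Mode.PERFORMANCE
```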
The intensity of space radiation can differ by over five orders of magnitude within a few hours or days, resulting in fault probability variations of several orders of magnitude in ICs during SPEs. This thesis introduces a comprehensive approach for designing a self-adaptive, fault-resilient multiprocessing system to overcome the static mitigation overhead issue. This work mainly addresses the following topics: (1) the design of an on-chip radiation particle monitor for real-time radiation environment detection, (2) the investigation of a space environment predictor to support solar particle event forecasting, and (3) dynamic mode configuration in the resilient multiprocessing system. Thus, according to the detected and predicted in-flight space radiation conditions, the target system can be configured to use no mitigation or low-overhead mitigation during non-critical periods of time. The redundant resources can then be used to improve system performance or save power. During periods of increased radiation activity, such as SPEs, the mitigation methods can be dynamically and appropriately configured depending on the real-time space radiation environment, resulting in higher system reliability. Thus, a dynamic real-time trade-off between reliability, performance and power consumption can be achieved in the target system. All results of this work are evaluated in a highly reliable quad-core multiprocessing system that allows the self-adaptive setting of optimal radiation mitigation mechanisms at run time. The proposed methods can serve as a basis for establishing a comprehensive self-adaptive resilient system design process. The successful implementation of the proposed design in the quad-core multiprocessor demonstrates its applicability to other designs as well.}, language = {en} } @phdthesis{Buschmann2018, author = {Buschmann, Stefan}, title = {A software framework for GPU-based geo-temporal visualization techniques}, doi = {10.25932/publishup-44340}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-443406}, school = {Universit{\"a}t Potsdam}, pages = {viii, 99}, year = {2018}, abstract = {Spatio-temporal data are data with both a spatial and a temporal reference. For example, time series of geodata, thematic maps that change over time, or recordings of moving objects can be regarded as spatio-temporal data. In today's automated world there is a growing number of data sources that constantly generate spatio-temporal data. These include, for example, traffic surveillance systems that record the movements of people or vehicles, remote sensing systems that regularly scan our environment and produce digital representations such as city and landscape models, and sensor networks in a wide variety of application domains, such as logistics, animal behavior research, or climate research. For the analysis of spatio-temporal data, besides automatic analysis by means of statistical methods and data mining, explorative methods based on interactive visualization of the data are also applied. This kind of analysis lets users explore the data through interactive visualization, exploiting human perception and the users' knowledge to recognize patterns and thereby gain insight into the data. 
This thesis describes a software framework for the visualization of spatio-temporal data that incorporates GPU-based techniques to enable the interactive visualization and exploration of large spatio-temporal datasets. The developed techniques cover data management, processing, and rendering, and make it possible to process and visualize large amounts of data in real time. The main contributions of the thesis comprise: - Concept and implementation of a GPU-centered visualization pipeline. The described techniques are based on the concept of a GPU-centered visualization pipeline in which all stages -- processing, mapping, rendering -- are executed on the GPU. In this concept, the spatio-temporal data are stored directly in GPU memory; during the rendering process, shader programs process and filter the data, map them to visual attributes, and finally generate the geometries for the visualization. Data processing, filtering, and mapping can therefore be executed in real time, which enables users to interactively steer and control the mapping parameters as well as the entire visualization process. - Interactive visualization of attributed 3D trajectories. A visualization method was developed for the interactive exploration of large numbers of 3D movement trajectories. The trajectories are displayed within a virtual geographic environment as simple geometries such as lines, ribbons, spheres, or tubes. Through interactive mapping, attribute values of the trajectories or of individual measurement points can be mapped to visual properties; shape, height, size, color, texture, and animation are available for this purpose. Using this dynamic mapping, several visualization methods were additionally implemented, such as a focus+context visualization of trajectories based on interactive density maps, and a space-time cube visualization depicting the temporal course of individual movements. - Interactive visualization of geographic networks. A visualization method was developed for the interactive exploration of geo-referenced networks that enables the visualization of networks with large numbers of nodes and edges. To support the analysis of networks of different sizes and in different contexts, several virtual geographic environments are available, such as a virtual 3D globe as well as 2D maps with different geographic projections. Interactive tools such as filtering, mapping, and selection are available for the interactive analysis of these networks, and visualization methods for different kinds of networks, such as 3D networks and time-varying networks, were implemented. To demonstrate the concept, interactive tools were developed for two different use cases. The first comprises the visualization of attributed 3D trajectories describing the movements of aircraft around an airport; it enables users to interactively explore and analyze the trajectories of arriving and departing aircraft over the period of one month. 
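A hedged sketch of the attribute-to-visual-property mapping described above: in the framework this mapping runs on the GPU in shader programs, whereas the NumPy stand-in below only illustrates the idea on the CPU; the attribute (speed) and the blue-to-red color ramp are hypothetical choices:

```python
# Sketch of mapping a per-point trajectory attribute (e.g. speed) onto
# a color ramp; in the framework this happens in GPU shaders, this
# NumPy version is a CPU stand-in for illustration only.
import numpy as np

def map_to_color(values: np.ndarray) -> np.ndarray:
    """Normalize attribute values to [0, 1] and map them linearly onto
    a blue-to-red ramp, returning one RGB triple per input value."""
    span = np.ptp(values) or 1.0           # avoid division by zero
    v = (values - values.min()) / span
    rgb = np.empty((len(v), 3))
    rgb[:, 0] = v                          # red grows with the attribute
    rgb[:, 1] = 0.2                        # fixed green component
    rgb[:, 2] = 1.0 - v                    # blue shrinks with the attribute
    return rgb

speeds = np.array([3.0, 10.0, 25.0, 18.0])  # hypothetical speeds on a track
print(map_to_color(speeds))
```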
Using the interactive visualization methods for 3D trajectories and interactive density maps, insights into the data can be gained, such as frequently used flight corridors, typical as well as atypical movement patterns, or unusual incidents such as missed approaches. The second use case comprises the visualization of climate networks, which are geographic networks used in climate research. Climate networks represent the dynamics of the climate system through a network structure that describes the statistical relationships between locations. The developed tool enables analysts to interactively explore these large networks, analyze their structure, and relate it to the geographic data. Interactive filtering and selection make it possible to identify patterns in the data and thus to detect, for example, clusters in the network structure or flow patterns.}, language = {en} } @phdthesis{Herenz2014, author = {Herenz, Peter}, title = {A study of the absorption characteristics of gaseous galaxy halos in the local Universe}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70513}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Today, it is well known that galaxies like the Milky Way consist not only of stars but also of gas and dust. The galactic halo, a sphere of gas that surrounds the stellar disk of a galaxy, is especially interesting: it provides a wealth of information about gaseous material flowing towards and away from galaxies and about their hierarchical evolution. For the Milky Way, the so-called high-velocity clouds (HVCs), fast-moving neutral gas complexes in the halo that can be traced by absorption-line measurements, are believed to play a crucial role in the overall matter cycle of our Galaxy. Over the last decades, the properties of these halo structures and their connection to the local circumgalactic and intergalactic medium (CGM and IGM, respectively) have been investigated in great detail by many different groups. So far it remains unclear, however, to what extent the results of these studies can be transferred to other galaxies in the local Universe. In this thesis, we study the absorption properties of Galactic HVCs and compare the HVC absorption characteristics with those of intervening QSO absorption-line systems at low redshift. The goal of this project is to improve our understanding of the spatial extent and physical conditions of gaseous galaxy halos in the local Universe. In the first part of the thesis we use HST/STIS ultraviolet spectra of more than 40 extragalactic background sources to statistically analyze the absorption properties of the HVCs in the Galactic halo. We determine fundamental absorption-line parameters, including covering fractions of different weakly/intermediately/highly ionized metals, with a particular focus on SiII and MgII. Due to the similarity in the ionization properties of SiII and MgII, we are able to estimate the contribution of HVC-like halo structures to the cross section of intervening strong MgII absorbers at z = 0. Our study implies that only the most massive HVCs would be regarded as strong MgII absorbers if the Milky Way halo were seen as a QSO absorption-line system from an exterior vantage point. 
Combining the observed absorption cross section of Galactic HVCs with the well-known number density of intervening strong MgII absorbers at z = 0, we conclude that the contribution of infalling gas clouds (i.e., HVC analogs) in the halos of Milky Way-type galaxies to the cross section of strong MgII absorbers is 34\%. This result indicates that only about one third of the strong MgII absorption can be associated with HVC analogs around other galaxies, while the majority of the strong MgII systems is possibly related to galaxy outflows and winds. The second part of this thesis focuses on the properties of intervening metal absorbers at low redshift. The analysis of the frequency and physical conditions of intervening metal systems in QSO spectra and their relation to nearby galaxies offers new insights into the typical conditions of gaseous galaxy halos. One major aspect of our study was to regard intervening metal systems as possible HVC analogs. We perform a detailed analysis of absorption-line properties and line statistics for 57 metal absorbers along 78 QSO sightlines, using newly obtained HST/COS ultraviolet spectra. We find clear evidence for a bimodal distribution of the HI column densities of the absorbers, a trend that we interpret as a sign of two different classes of absorption systems (with HVC analogs at the high column density end). With the help of the strong transitions of SiII λ1260, SiIII λ1206, and CIII λ977, we have set up Cloudy photoionization models to estimate the local ionization conditions, gas densities, and metallicities. We find that the intervening absorption systems studied by us have, on average, physical conditions similar to those of Galactic HVC absorbers, providing evidence that many of them represent HVC analogs in the vicinity of other galaxies. We therefore determine typical halo sizes for SiII, SiIII, and CIII for L = 0.01L∗ and L = 0.05L∗ galaxies. Based on the covering fractions of the different ions in the Galactic halo, we find, for example, that the typical halo size for SiIII is ∼160 kpc for L = 0.05L∗ galaxies. We test the plausibility of this result by searching for known galaxies close to the QSO sightlines and at redshifts similar to those of the absorbers. We find that more than 34\% of the measured SiIII absorbers have galaxies associated with them, with the majority of the absorbers indeed being at impact parameters ρ ≤ 160 kpc.}, language = {en} } @phdthesis{Stange2024, author = {Stange, Maike}, title = {A study on Coronin-A and Aip1 function in motility of Dictyostelium discoideum and on Aip1 interchangeability between Dictyostelium discoideum and Arabidopsis thaliana}, doi = {10.25932/publishup-62856}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-628569}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 168}, year = {2024}, abstract = {Actin is one of the most highly conserved proteins in eukaryotes, and distinct actin-related proteins with filament-forming properties are even found in prokaryotes. Due to these commonalities, actin-modulating proteins of many species share similar structural properties and proposed functions. The polymerization and depolymerization of actin are critical processes for a cell, as they contribute to shape changes in response to the environment and to the movement and distribution of nutrients and cellular components within the cell. However, to what extent the functions of actin-binding proteins are conserved between distantly related species has only been addressed in a few cases. 
In this work, functions of Coronin-A (CorA) and Actin-interacting protein 1 (Aip1), two proteins involved in actin dynamics, were characterized. In addition, the interchangeability and function of Aip1 were investigated in two phylogenetically distant model organisms: the flowering plant Arabidopsis thaliana (encoding two homologs, AIP1-1 and AIP1-2) and the amoeba Dictyostelium discoideum (encoding one homolog, DdAip1) were chosen because the functions of their actin cytoskeletons may differ in many aspects. Cross-species functional analyses were conducted for the AIP1 homologs, as flowering plants do not harbor a CorA gene. In the first part of the study, the effects of four different mutation methods on the function of the Coronin-A protein and the resulting phenotypes in D. discoideum were revealed using two genetic knockouts, one RNAi knockdown, and a sudden loss-of-function mutant created by chemical-induced dislocation (CID). The advantages and disadvantages of the different mutation methods with respect to the motility, appearance and development of the amoebae were investigated, and the results showed that not all observed properties were affected with the same intensity. Remarkably, a new combination of Selection-Linked Integration and CID could be established. In the second and third parts of the thesis, the exchange of Aip1 between plant and amoeba was carried out. The two A. thaliana homologs (AIP1-1 and AIP1-2) were analyzed for functionality both in the plant and in D. discoideum. In the Aip1-deficient amoeba, rescue with AIP1-1 was more effective than with AIP1-2. The main results in the plant showed that, in the aip1-2 mutant background, reintroduced AIP1-2 displayed the most efficient rescue, and A. thaliana AIP1-1 rescued better than DdAip1. The choice of the tagging site was important for Aip1 function, as steric hindrance can be a problem: DdAip1 was less effective when tagged at the C-terminus, while the plant AIP1s showed mixed results depending on the tag position. In conclusion, the foreign proteins partially rescued the phenotypes of mutant plants and mutant amoebae, despite the organisms being only very distantly related in evolutionary terms.}, language = {en} } @phdthesis{Andorf2011, author = {Andorf, Sandra}, title = {A systems biological approach towards the molecular basis of heterosis in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51173}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Heterosis is defined as the superiority in performance of heterozygous genotypes compared to their corresponding genetically different homozygous parents. This phenomenon has been known since the beginning of the last century and has been widely used in plant breeding, but the underlying genetic and molecular mechanisms are not well understood. In this work, a systems biological approach based on molecular network structures is proposed to contribute to the understanding of heterosis. Hybrids are likely to contain additional regulatory possibilities compared to their homozygous parents and may therefore be able to correctly respond to a larger number of environmental challenges, which leads to a higher adaptability and thus to the heterosis phenomenon. In the network hypothesis for heterosis presented in this work, more regulatory interactions are expected in the molecular networks of the hybrids compared to the homozygous parents. 
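As a hedged illustration of how such differences in global interaction structure can be quantified (the partial-correlation approach used for this purpose is described next), the following minimal sketch estimates pairwise partial correlations from the inverse of a covariance matrix; the data are random stand-ins for metabolite or expression profiles:

```python
# Minimal sketch: partial correlations from the precision (inverse
# covariance) matrix; data are hypothetical stand-ins for metabolite
# or gene expression profiles (rows: samples, columns: variables).
import numpy as np

def partial_correlations(data: np.ndarray) -> np.ndarray:
    """Correlation of each variable pair conditioned on all others,
    via pcorr_ij = -p_ij / sqrt(p_ii * p_jj) on the precision matrix."""
    precision = np.linalg.pinv(np.cov(data, rowvar=False))
    d = np.sqrt(np.diag(precision))
    pcorr = -precision / np.outer(d, d)
    np.fill_diagonal(pcorr, 1.0)
    return pcorr

rng = np.random.default_rng(0)
profiles = rng.normal(size=(50, 5))  # 50 samples, 5 variables
print(np.round(partial_correlations(profiles), 2))
```

Counting strong partial correlations in the hybrid versus the parental networks would then be one way to operationalize the hypothesis of 'more regulatory interactions'.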
Partial correlations were used to assess this difference in the global interaction structure of regulatory networks between the hybrids and the homozygous genotypes. The network hypothesis for heterosis was tested on metabolite profiles as well as gene expression data of the two parental Arabidopsis thaliana accessions C24 and Col-0 and their reciprocal crosses. These plants are known to show a heterosis effect in their biomass phenotype. The hypothesis was confirmed for mid-parent and best-parent heterosis in both hybrids, for both the experimental metabolite and the gene expression data. It was shown that this result is influenced by the cutoffs used during the analyses: too strict filtering resulted in sets of metabolites and genes for which the network hypothesis for heterosis does not hold true for either hybrid, regarding both mid-parent and best-parent heterosis. In an over-representation analysis, the genes that show the largest heterosis effects according to our network hypothesis were compared to genes of heterotic quantitative trait loci (QTL) regions. Separately for either hybrid, regarding both mid-parent and best-parent heterosis, a significantly larger overlap between the resulting gene lists of the two different approaches towards biomass heterosis was detected than expected by chance. This suggests that each heterotic QTL region contains many genes influencing biomass heterosis in the early development of Arabidopsis thaliana. Furthermore, this integrative analysis led to a confinement of, and an increased confidence in, the group of candidate genes for biomass heterosis in Arabidopsis thaliana identified by both approaches.}, language = {en} } @phdthesis{Kraus2021, author = {Kraus, Sara Milena}, title = {A Systems Medicine approach for heart valve diseases}, doi = {10.25932/publishup-52226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-522266}, school = {Universit{\"a}t Potsdam}, pages = {xi, 186}, year = {2021}, abstract = {In Systems Medicine, in addition to high-throughput molecular data (*omics), the wealth of clinical characterization plays a major role in the overall understanding of a disease. Unique problems and challenges arise from the heterogeneity of the data and require new solutions in software and analysis methods. The SMART and EurValve studies establish a Systems Medicine approach to valvular heart disease -- the primary cause of subsequent heart failure. With the aim of attaining a holistic understanding, different *omics data as well as the clinical picture of patients with aortic stenosis (AS) and mitral regurgitation (MR) are collected. Our task within the SMART consortium was to develop an IT platform for Systems Medicine as a basis for data storage, processing, and analysis, and as a prerequisite for collaborative research. Based on this platform, this thesis deals on the one hand with transferring established Systems Biology methods to the Systems Medicine context and on the other hand with the clinical and biomolecular differences between the two heart valve diseases. To advance differential expression/abundance (DE/DA) analysis software for use in Systems Medicine, we state 21 general software requirements and features of automated DE/DA software, including a novel concept for the simple formulation of experimental designs that can represent complex hypotheses, such as comparisons of multiple experimental groups, and we demonstrate our handling of the wealth of clinical data in the two research applications DEAME and Eatomics. 
In user interviews, we show that novice users are empowered to formulate and test their multiple DE hypotheses based on clinical phenotype. Furthermore, we describe insights into users' general impression and expectation of the software's performance and show their intention to continue using the software for their work in the future. Both research applications cover most of the features of existing tools or even extend them, especially with respect to complex experimental designs. Eatomics is freely available to the research community as a user-friendly R Shiny application. Eatomics subsequently helped drive the collaborative analysis and interpretation of the proteomic profile of 75 human left-myocardial tissue samples from the SMART and EurValve studies. Here, we investigate molecular changes within the two most common types of valvular heart disease: aortic valve stenosis (AS) and mitral valve regurgitation (MR). Through DE/DA analyses, we explore shared and disease-specific protein alterations, particularly signatures that could only be found in the sex-stratified analysis. In addition, we relate changes in the myocardial proteome to parameters from clinical imaging. We find comparable cardiac hypertrophy but differences in ventricular size, the extent of fibrosis, and cardiac function. We find that AS and MR show many shared remodeling effects, the most prominent of which is an increase in the extracellular matrix and a decrease in metabolism; both effects are stronger in AS. In muscle and cytoskeletal adaptations, we see a greater increase in mechanotransduction in AS and an increase in the cortical cytoskeleton in MR. The decrease in proteostasis proteins is mainly attributable to the signature of female patients with AS. We also identify relevant therapeutic targets. In addition to the new findings, our work confirms several concepts from animal and heart failure studies by providing the largest collection to date of human tissue from in vivo collected biopsies. Our dataset contributes a resource for isoform-specific protein expression in two of the most common valvular heart diseases. Beyond the general proteomic landscape, we demonstrate the added value of the dataset by showing proteomic and transcriptomic evidence for increased expression of the SARS-CoV-2 receptor under pressure load, but not under volume load, in the left ventricle, and we also provide the basis for a newly developed metabolic model of the heart.}, language = {en} } @phdthesis{Vu2022, author = {Vu, Nils Leif}, title = {A task-based parallel elliptic solver for numerical relativity with discontinuous Galerkin methods}, doi = {10.25932/publishup-56226}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-562265}, school = {Universit{\"a}t Potsdam}, pages = {172}, year = {2022}, abstract = {Elliptic partial differential equations are ubiquitous in physics. In numerical relativity---the study of computational solutions to the Einstein field equations of general relativity---elliptic equations govern the initial data that seed every simulation of merging black holes and neutron stars. In the quest to produce detailed numerical simulations of these most cataclysmic astrophysical events in our Universe, numerical relativists resort to the vast computing power offered by current and future supercomputers. To leverage these computational resources, numerical codes for the time evolution of general-relativistic initial value problems are being developed with a renewed focus on parallelization and computational efficiency. 
Their capability to solve elliptic problems for accurate initial data must keep pace with the increasing detail of the simulations, but elliptic problems are traditionally hard to parallelize effectively. In this thesis, I develop new numerical methods to solve elliptic partial differential equations on computing clusters, with a focus on initial data for orbiting black holes and neutron stars. I develop a discontinuous Galerkin scheme for a wide range of elliptic equations, and a stack of task-based parallel algorithms for their iterative solution. The resulting multigrid-Schwarz preconditioned Newton-Krylov elliptic solver proves capable of parallelizing over 200 million degrees of freedom to at least a few thousand cores, and already solves initial data for a black hole binary about ten times faster than the numerical relativity code SpEC. I also demonstrate the applicability of the new elliptic solver across physical disciplines, simulating the thermal noise in thin mirror coatings of interferometric gravitational-wave detectors to unprecedented accuracy. The elliptic solver is implemented in the new open-source SpECTRE numerical relativity code, and set up to support simulations of astrophysical scenarios for the emerging era of gravitational-wave and multimessenger astronomy.}, language = {en} } @phdthesis{Hu2006, author = {Hu, Ji}, title = {A virtual machine architecture for IT-security laboratories}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-7818}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {This thesis discusses challenges in IT security education, points out a gap between e-learning and practical education, and presents a work to fill that gap. E-learning is a flexible and personalized alternative to traditional education. Nonetheless, existing e-learning systems for IT security education have difficulties in delivering hands-on experience because learners lack physical proximity to laboratory environments. Laboratory environments and practical exercises are indispensable instruction tools for IT security education, but security education in conventional computer laboratories poses particular problems, such as immobility as well as high creation and maintenance costs. Hence, there is a need to effectively transform security laboratories and practical exercises into e-learning forms. In this thesis, we introduce the Tele-Lab IT-Security architecture, which allows students not only to learn IT security principles but also to gain hands-on security experience through exercises in an online laboratory environment. In this architecture, virtual machines are used instead of real computers to provide safe user work environments. Thus, traditional laboratory environments can be cloned onto the Internet by software, which increases the accessibility of laboratory resources and greatly reduces investment and maintenance costs. Within the Tele-Lab IT-Security framework, a set of technical solutions is also proposed to provide effective functionality, reliability, security, and performance. Virtual machines with appropriate resource allocation, software installation, and system configurations are used to build lightweight security laboratories on a hosting computer. Reliability and availability of the laboratory platforms are ensured by a virtual machine management framework, which provides the monitoring and administration services necessary to detect and recover from critical failures of virtual machines at run time. 
Considering the risk that virtual machines can be misused to compromise production networks, we present a security management solution that prevents the misuse of laboratory resources through security isolation at the system and network levels. This work is an attempt to bridge the gap between e-learning/tele-teaching and practical IT security education. It is not meant to substitute conventional teaching in laboratories but to add practical features to e-learning. This thesis demonstrates the possibility of implementing hands-on security laboratories on the Internet reliably, securely, and economically.}, subject = {Computersicherheit}, language = {en} } @phdthesis{Kneis2007, author = {Kneis, David}, title = {A water quality model for shallow river-lake systems and its application in river basin management}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-14647}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This work documents the development and application of a new model for simulating mass transport and turnover in rivers and shallow lakes. The simulation tool, called 'TRAM', is intended to complement mesoscale eco-hydrological catchment models in studies on river basin management. TRAM aims at describing the water quality of individual water bodies, using problem- and scale-adequate approaches for representing their hydrological and ecological characteristics. The need for such flexible water quality analysis and prediction tools is expected to increase further during the implementation of the European Water Framework Directive (WFD) as well as in the context of climate change research. The developed simulation tool consists of a transport and a reaction module, with the latter being highly flexible with respect to the description of turnover processes in the aquatic environment. Therefore, simulation approaches of different complexity can easily be tested, and model formulations can be chosen in consideration of the problem at hand, the knowledge of process functioning, and data availability. Consequently, TRAM is suitable both for heavily simplified engineering applications and for scientific ecosystem studies involving a large number of state variables, interactions, and boundary conditions. TRAM can easily be linked to catchment models offline, and it requires the use of external hydrodynamic simulation software. Parametrization of the model and visualization of simulation results are facilitated by the use of geographic information systems as well as specific pre- and post-processors. TRAM was developed within the research project 'Management Options for the Havel River Basin', funded by the German Ministry of Education and Research. The project focused on the analysis of different options for reducing the nutrient load of surface waters and was intended to support the implementation of the WFD in the lowland catchment of the Havel River in North-East Germany. Within the above-mentioned study, TRAM was applied with two goals in mind. In a first step, the model was used to identify the magnitude as well as the spatial and temporal patterns of nitrogen retention and sediment phosphorus release in a 100~km stretch of the highly eutrophic Lower Havel River. From this system analysis, strongly simplified conceptual approaches for modeling N-retention and P-remobilization in the studied river-lake system were obtained. 
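To give a concrete flavor of such a strongly simplified conceptual approach, here is a hedged sketch of first-order, temperature-dependent nitrogen retention in a well-mixed compartment; the rate law and all coefficients are illustrative assumptions, not the calibrated TRAM formulation:

```python
# Hedged sketch: first-order, temperature-dependent N retention in a
# well-mixed compartment; rate law and coefficients are illustrative
# assumptions, not the calibrated TRAM formulation.
def step_nitrogen(c_mg_per_l: float, temp_c: float, dt_days: float,
                  k20: float = 0.05, theta: float = 1.07) -> float:
    """Advance the N concentration by one explicit Euler step of
    dC/dt = -k(T) * C, with k(T) = k20 * theta**(T - 20)."""
    k = k20 * theta ** (temp_c - 20.0)
    return c_mg_per_l * (1.0 - k * dt_days)

c = 4.0  # mg/L, hypothetical starting concentration
for _ in range(10):
    c = step_nitrogen(c, temp_c=18.0, dt_days=1.0)
print(round(c, 3))  # concentration after ten days of retention
```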
In a second step, the impact of reduced external nutrient loading on the nitrogen and phosphorus concentrations of the Havel River was simulated (scenario analysis), taking into account internal retention/release. The boundary conditions for the scenario analysis, such as runoff and nutrient emissions from river basins, were computed by project partners using the catchment models SWIM and ArcEGMO-Urban. Based on the output of TRAM, the considered options of emission control could finally be evaluated using a site-specific assessment scale compatible with the requirements of the WFD. Uncertainties in the model predictions were also examined. According to the simulation results, the target of the WFD -- with respect to total phosphorus concentrations in the Lower Havel River -- could be achieved in the medium term if the full potential for reducing point and non-point emissions were tapped. Furthermore, model results suggest that internal phosphorus loading will ease off noticeably by 2015 due to a declining pool of sedimentary mobile phosphate. Mass balance calculations revealed that the lakes of the Lower Havel River are an important nitrogen sink; this natural retention effect contributes significantly to the efforts aimed at reducing the river's nitrogen load. If a sustainable improvement of the river system's water quality is to be achieved, enhanced measures to further reduce the inputs of both phosphorus and nitrogen are required.}, language = {en} } @phdthesis{Amaechi2020, author = {Amaechi, Mary Chimaobi}, title = {A'-movement dependencies and their reflexes in Igbo}, doi = {10.25932/publishup-47152}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471524}, school = {Universit{\"a}t Potsdam}, pages = {i, 195}, year = {2020}, abstract = {In this thesis, I examine different A-bar movement dependencies in Igbo, a Benue-Congo language spoken in southern Nigeria. Movement dependencies are found in constructions where an element is moved to the left edge of the clause to express information-structural categories, as in questions, relativization and focus. I show that these constructions in Igbo are very uniform from a syntactic point of view: they are built on two basic fronting operations, relativization and focus movement, and are biclausal. I further investigate several morphophonological effects that are found in these A-bar constructions. I propose that these effects are reflexes of movement that are triggered when an element is moved overtly in relativization or focus. This proposal helps to explain the tone patterns that have previously been assumed to be a property of relative clauses. The thesis adds to the growing body of evidence on tonal reflexes of A-bar movement reported for a few African languages. It also provides insight into the complementizer domain (C-domain) of Igbo.}, language = {en} } @phdthesis{Hasnat2021, author = {Hasnat, Muhammad Abrar}, title = {A-Type Carrier Proteins are involved in [4Fe-4S] Cluster insertion into the Radical S-adenosylmethionine (SAM) Protein MoaA and other molybdoenzymes}, doi = {10.25932/publishup-53079}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-530791}, school = {Universit{\"a}t Potsdam}, pages = {200}, year = {2021}, abstract = {Iron-sulfur clusters are essential enzyme cofactors. The most common and stable clusters found in nature are [2Fe-2S] and [4Fe-4S]. 
They are involved in crucial biological processes like respiration, gene regulation, protein translation, replication and DNA repair in prokaryotes and eukaryotes. In Escherichia coli, Fe-S clusters are essential for molybdenum cofactor (Moco) biosynthesis, which is a ubiquitous and highly conserved pathway. The first step of Moco biosynthesis is catalyzed by the MoaA protein to produce cyclic pyranopterin monophosphate (cPMP) from 5'GTP. MoaA is a [4Fe-4S] cluster-containing radical S-adenosyl-L-methionine (SAM) enzyme. The focus of this study was to investigate Fe-S cluster insertion into MoaA under nitrate and TMAO respiratory conditions, using E. coli as a model organism. Nitrate and TMAO respiration usually occur under anaerobic conditions, when oxygen is depleted; under these conditions, E. coli uses nitrate and TMAO as terminal electron acceptors. Previous studies revealed that Fe-S cluster insertion is performed by Fe-S cluster carrier proteins. In E. coli, these proteins have been identified as A-type carrier proteins (ATCs) through phylogenomic and genetic studies. So far, three of them have been characterized in detail in E. coli, namely IscA, SufA, and ErpA. This study shows that ErpA and IscA are involved in Fe-S cluster insertion into MoaA under nitrate and TMAO respiratory conditions. ErpA and IscA can partially replace each other in their role of providing [4Fe-4S] clusters for MoaA. SufA is not able to replace the functions of IscA or ErpA under nitrate respiratory conditions. Nitrate reductase is a molybdoenzyme that coordinates Moco and Fe-S clusters. Under nitrate respiratory conditions, the expression of nitrate reductase is significantly increased in E. coli. Nitrate reductase is encoded by the narGHJI genes, the expression of which is regulated by the transcriptional regulator fumarate and nitrate reduction (FNR). The activation of FNR under conditions of nitrate respiration requires one [4Fe-4S] cluster. In this part of the study, we analyzed the insertion of the Fe-S cluster into FNR for the expression of the narGHJI genes in E. coli. The results indicate that ErpA is essential for the FNR-dependent expression of the narGHJI genes, a role that can be partially replaced by IscA and SufA when they are sufficiently produced under the conditions tested. This observation suggests that ErpA indirectly regulates nitrate reductase expression by inserting Fe-S clusters into FNR. Most molybdoenzymes are complex multi-subunit, multi-cofactor-containing enzymes that coordinate Fe-S clusters, which function as electron transfer chains for catalysis. In E. coli, periplasmic aldehyde oxidoreductase (PaoABC) is a heterotrimeric molybdoenzyme that comprises flavin, two [2Fe-2S] clusters, one [4Fe-4S] cluster and Moco. In the last part of this study, we investigated the insertion of Fe-S clusters into E. coli periplasmic aldehyde oxidoreductase (PaoABC). 
The results show that SufA and ErpA are involved in inserting the [4Fe-4S] and [2Fe-2S] clusters into PaoABC, respectively, under aerobic respiratory conditions.}, language = {en} } @phdthesis{Schwandt2003, author = {Schwandt, Daniel}, title = {Abflußentwicklung in Teileinzugsgebieten des Rheins : Simulationen f{\"u}r den Ist-Zustand und f{\"u}r Klimaszenarien}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001001}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {The present thesis, 'Abflußentwicklung in Teileinzugsgebieten des Rheins - Simulationen f{\"u}r den Ist-Zustand und f{\"u}r Klimaszenarien' (Runoff evolution in sub-catchments of the Rhine: simulations for the present state and for climate scenarios), investigates the effects of possible future climate changes on runoff in selected sub-catchments of the Rhine dominated by low mountain ranges: the Mosel (down to the Cochem gauge), the Sieg (down to the Menden 1 gauge) and the Main (down to the Kemmern gauge). In a first step, using the hydrological model HBV-D, important model processes are parametrized according to the catchment characteristics, and a representation of the catchment hydrology is created that can simulate a time series of gauge discharges from time series of measured daily values (temperature, precipitation). The quality of the simulation of the present state (standard measurement period 1.1.1961-31.12.1999) is good to very good for the calibration and validation periods in all study areas. To facilitate the extensive, time-consuming catchment-related data preparation for the hydrological model HBV-D, a working environment was developed based on program extensions of the geographic information system ArcView and additional utility programs. The working environment HBV-Params provides a graphical user interface and gives both experienced hydrologists and hydrologically trained users, e.g. students specializing in hydrology, flexibility and full control in deriving parameter values and editing parameter and control files. Thus, in contrast to predecessor versions with rudimentary working environments, HBV-D can also be used outside research for teaching and exercise purposes. In a second step, areal precipitation totals, areal temperatures and simulated mean discharges (MQ) of the present state are compared with the states of two climate scenarios for the scenario period 100 years later (2061-2099). The climate scenarios are based on simulated circulation patterns from one model run each of two global circulation models (GCMs), which were converted by a statistical regionalization method into daily-value scenarios (temperature, precipitation) at measurement stations in the study areas and are used as input data for the hydrological model. For the second half of the 21st century, both regionalized climate scenarios show an increase in the annual means of areal temperature as well as an increase in the annual totals of areal precipitation, which is accompanied by a high variability. 
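The scenario runs rest on modulating measured daily series with such change signals; a hedged sketch of this standard delta-change idea follows, where the monthly offsets and factors are made up for illustration and are not the regionalized values of the thesis:

```python
# Hedged sketch of the delta-change approach: measured daily series are
# modulated with monthly change signals to create scenario input for a
# hydrological model; all numbers are illustrative, not thesis values.
def apply_delta_change(dates, temp_c, precip_mm, dT_by_month, fP_by_month):
    """Add a monthly temperature offset (K) and scale precipitation by
    a monthly factor; dates are (year, month, day) tuples."""
    temp_scen = [t + dT_by_month[m] for (_, m, _), t in zip(dates, temp_c)]
    prec_scen = [p * fP_by_month[m] for (_, m, _), p in zip(dates, precip_mm)]
    return temp_scen, prec_scen

dT = {m: 2.0 for m in range(1, 13)}  # +2 K in every month (made up)
fP = {m: 1.1 for m in range(1, 13)}  # +10 % precipitation (made up)
dates = [(1999, 1, d) for d in (1, 2, 3)]
print(apply_delta_change(dates, [0.5, -1.0, 2.0], [4.0, 0.0, 1.2], dT, fP))
```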
An examination of the seasonal (monthly) changes in temperature, precipitation and mean discharge between the scenario period (2061-2099) and the present state yields, in all study areas, a temperature increase (higher in summer than in winter) and a general increase in precipitation totals (with strong fluctuations between individual months), which in the hydrological simulation lead to clearly higher mean discharges from November to March and slightly increased mean discharges in the remaining months. The magnitude of the discharge increase differs between the individual climate scenarios and shows opposite tendencies in the summer and winter half-years. The main cause of the simulated strong increase in mean discharges in the winter half-year is the low winter evapotranspiration, which persists despite the temperature increase of the climate scenarios, so that increased precipitation can be transformed directly into increased discharge. The comparison of the study areas shows, in individual months, changes in precipitation totals decreasing from west to east, which could be interpreted as an indication of the importance of continentality influences in southwestern Germany even under changed climatic conditions. From the regionalized climate scenarios, change signals are derived for modulating measured time series by means of synthetic scenarios, which can be converted into hydrological model responses with little computational effort. The direct derivation of synthetic scenarios from GCM output values (near-surface temperature and total precipitation) at individual GCM grid points yielded unsatisfactory results. Whether, to what extent, and with which temporal distribution the precipitation and temperature changes used in the (synthetic) scenarios will actually occur, only the future can tell. However, an assessment of how the runoff conditions, and in particular the mean discharges of the study areas, would develop under possible changes can already be made today. Scenario-based simulations are one way to partially assess unknown future boundary conditions as well as regional effects of possible changes in the climate system, and to develop corresponding risk reduction strategies. Any modelling and simulation of natural systems is, however, associated with considerable uncertainties. Comparatively large uncertainties are connected with the future development of the socio-economic system and the complexity of the climate system. 
Furthermore, uncertainties of the individual components of the model chain emission scenarios/gas cycle models - global circulation models/regionalization - hydrological model, which add up to a cascade of uncertainties, together with data uncertainties in the measurement of hydrometeorological quantities, have a considerable influence on the trustworthiness of the simulation results, which are to be interpreted as one displayed value within a band of possible results. For reasons of scientific rigor, but also for better comparability of the results of regional studies in the still young research field of climate impact research, attention should be paid to (1) the use of robust hydrological models that adequately describe temperature-influenced processes in particular, (2) the use of long time series (at least 30 years) of measured values, and (3) the simultaneous comparative consideration of climate scenarios based on different GCMs (and, where possible, taking different emission scenarios into account).}, language = {de} } @phdthesis{Klisch2003, author = {Klisch, Anja}, title = {Ableitung von Blattfl{\"a}chenindex und Bedeckungsgrad aus Fernerkundungsdaten f{\"u}r das Erosionsmodell EROSION 3D}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001455}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {In recent years, relatively complex erosion models have been developed whose sub-processes are increasingly based on physically founded approaches. This entails a larger number of current input parameters, whose determination in the field is labor-intensive and costly. Moreover, these parameters are recorded pointwise, i.e. at specific locations, and not area-wide as in remote sensing. This thesis shows how satellite data can be used as a relatively inexpensive supplement or alternative to conventional parameter acquisition. To this end, the leaf area index (LAI) and the ground cover fraction are derived, by way of example, for the physically based erosion model EROSION 3D. The focus of interest is on identifying existing methods that form the basis for an operational provision of such quantities, not only for erosion models but for process models in general. The study area is the primarily agricultural catchment of the Mehltheuer Bach, located in the Saxon loess region, for which simulation runs with conventionally acquired input parameters are available for 29 precipitation events in 1999 [MICHAEL et al. 2000]. The remote sensing data basis consists of Landsat-5 TM data from 13.03.1999, 30.04.1999 and 19.07.1999. Since the vegetation parameters are required for all precipitation events, they are interpolated in time based on the development of the LAI. For this purpose, the LAI is first derived for all crop types present according to the semi-empirical models of CLEVERS [1986] and BARET \& GUYOT [1991], with coefficients taken from the literature. Furthermore, a method is investigated by which the coefficients for the Clevers model are determined from the TM data and a simplified growth model. The ground cover fraction is determined from the LAI according to ROSS [1981]. 
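A hedged sketch of this LAI-to-cover conversion: the common form is an exponential light-extinction law, and the extinction coefficient below (k = 0.5) is a typical textbook value, not necessarily the coefficient used in the thesis:

```python
# Hedged sketch: ground cover fraction from the leaf area index via an
# exponential extinction law; k = 0.5 is a typical textbook value, not
# necessarily the coefficient used in the thesis.
import math

def cover_fraction(lai: float, k: float = 0.5) -> float:
    """Fraction of the soil shaded by the canopy for a given LAI."""
    return 1.0 - math.exp(-k * lai)

for lai in (0.5, 1.0, 2.0, 4.0):
    print(f"LAI {lai:.1f} -> cover {cover_fraction(lai):.2f}")
```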
The temporal interpolation of the LAI is implemented through field-specific fitting of a simplified growth model, which originates from the hydrological model SWIM [KRYSANOVA et al. 1999] and takes average daily temperatures as input. With the methods mentioned, dead plant parts remain unaccounted for. Compared to conventional terrestrial parameter acquisition, these methods allow a more differentiated representation of the spatial variability and temporal course of the vegetation parameters. The simulation runs are carried out both with the cover fractions taken directly from the TM data (pixel-based) and with the temporally interpolated cover fractions for all events (field-based). Compared to the previous estimation, both procedures improve the spatial distribution of the parameters and thus lead to a spatial redistribution of erosion and deposition areas. For the spatial heterogeneity present in the study area (e.g. field sizes), Landsat TM data offer a sufficiently accurate spatial resolution. This demonstrates that satellite-based remote sensing can be usefully employed within the scope of these investigations. For an operational provision of the parameters at reasonable cost, the methods need to be further validated and automated as far as possible.}, language = {de} } @phdthesis{Katzmann2023, author = {Katzmann, Maximilian}, title = {About the analysis of algorithms on networks with underlying hyperbolic geometry}, doi = {10.25932/publishup-58296}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-582965}, school = {Universit{\"a}t Potsdam}, pages = {xi, 191}, year = {2023}, abstract = {Many complex systems that we encounter in the world can be formalized using networks. Consequently, they have been a focus of computer science for decades, where algorithms are developed to understand and utilize these systems. Surprisingly, our theoretical understanding of these algorithms and their behavior in practice often diverge significantly. In fact, they tend to perform much better on real-world networks than one would expect when considering the theoretical worst-case bounds. One way of capturing this discrepancy is the average-case analysis, where the idea is to acknowledge the differences between practical and worst-case instances by focusing on networks whose properties match those of real graphs. Recent observations indicate that good representations of real-world networks are obtained by assuming that a network has an underlying hyperbolic geometry. In this thesis, we demonstrate that the connection between networks and hyperbolic space can be utilized as a powerful tool for average-case analysis. To this end, we first introduce strongly hyperbolic unit disk graphs and identify the famous hyperbolic random graph model as a special case of them. We then consider four problems where recent empirical results highlight a gap between theory and practice and use hyperbolic graph models to explain these phenomena theoretically. First, we develop a routing scheme, used to forward information in a network, and analyze its efficiency on strongly hyperbolic unit disk graphs. For the special case of hyperbolic random graphs, our algorithm beats existing performance lower bounds. 
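To give a feel for routing with hyperbolic coordinates (the thesis's scheme is more refined than this classic greedy baseline), here is a minimal sketch in which each node forwards to the neighbor closest, in hyperbolic distance, to the target; the graph and coordinates are made up:

```python
# Minimal sketch of greedy routing in the hyperbolic plane: forward to
# the neighbor closest (in hyperbolic distance) to the target. This is
# the classic greedy baseline, not necessarily the refined scheme of
# the thesis; graph and coordinates are made up.
import math

def hyp_dist(a, b):
    """Hyperbolic distance between points given as (radius, angle)."""
    (r1, p1), (r2, p2) = a, b
    dphi = math.pi - abs(math.pi - abs(p1 - p2) % (2 * math.pi))
    arg = (math.cosh(r1) * math.cosh(r2)
           - math.sinh(r1) * math.sinh(r2) * math.cos(dphi))
    return math.acosh(max(arg, 1.0))

def greedy_route(adj, coords, src, dst):
    path = [src]
    while path[-1] != dst:
        here = path[-1]
        nxt = min(adj[here], key=lambda v: hyp_dist(coords[v], coords[dst]))
        if hyp_dist(coords[nxt], coords[dst]) >= hyp_dist(coords[here], coords[dst]):
            return None  # stuck in a local minimum: delivery fails
        path.append(nxt)
    return path

coords = {0: (0.1, 0.0), 1: (1.0, 1.0), 2: (1.5, 2.0), 3: (2.0, 2.1)}
adj = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}
print(greedy_route(adj, coords, 0, 3))  # e.g. [0, 2, 3]
```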
Afterwards, we use the hyperbolic random graph model to theoretically explain empirical observations about the performance of the bidirectional breadth-first search. Finally, we develop algorithms for computing optimal and nearly optimal vertex covers (problems known to be NP-hard) and show that, on hyperbolic random graphs, they run in polynomial and quasi-linear time, respectively. Our theoretical analyses reveal interesting properties of hyperbolic random graphs, and our empirical studies present evidence that these properties, as well as our algorithmic improvements, translate back into practice.}, language = {en} } @phdthesis{Poltrock2010, author = {Poltrock, Silvana}, title = {About the relation between implicit Theory of Mind \& the comprehension of complement sentences}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-52293}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Previous studies on the relation between language and social cognition have shown that children's mastery of embedded sentential complements plays a causal role in the development of a Theory of Mind (ToM). Children start to succeed on complementation tasks, in which they are required to report the content of an embedded clause, in the second half of their fourth year. Traditional ToM tasks test the child's ability to predict that a person who holds a false belief (FB) about a situation will act "falsely". In these tasks, children do not represent FBs until the age of 4 years. According to the linguistic determinism hypothesis, only the unique syntax of complement sentences provides the format for representing FBs. However, experiments measuring children's looking behavior instead of their explicit predictions have provided evidence that already 2-year-olds possess an implicit ToM. This dissertation examined whether there is also an interrelation between implicit ToM and the comprehension of complement sentences in typically developing German preschoolers. Two studies were conducted. In a correlational study (Study 1), 3-year-old children's performance on a traditional (explicit) FB task, on an implicit FB task and on language tasks measuring the comprehension of tensed sentential complements was collected and tested for interdependence. Eye-tracking methodology was used to assess implicit ToM by measuring participants' spontaneous anticipatory eye movements while they were watching FB movies. Two central findings emerged. First, predictive looking (implicit ToM) was not correlated with complement mastery, although both measures were associated with explicit FB task performance. This pattern of results suggests that explicit, but not implicit, ToM is language dependent. Second, as a group, 3-year-olds did not display implicit FB understanding; that is, previous findings of a precocious reasoning ability could not be replicated. This indicates that the characteristics of predictive looking tasks play a role in eliciting implicit FB understanding, as the current task was completely nonverbal and as complex as traditional FB tasks. Study 2 took a methodological approach by investigating whether children display an earlier comprehension of sentential complements when the same means of measurement is used as in experimental tasks tapping implicit ToM, namely anticipatory looking. Two experiments were conducted. 3-year-olds were confronted either with a complement sentence expressing the protagonist's FB (Exp. 1) 
1) or with a complex sentence expressing the protagonist's belief without giving any information about the truth/falsity of the belief (Exp. 2). Afterwards, their expectations about the protagonist's future behavior were measured. Overall, the implicit measures revealed no considerably earlier understanding of sentential complementation. Whereas 3-year-olds did not display a comprehension of complex sentences if these embedded a false proposition, children from 3;9 years on were proficient in processing complement sentences if the truth value of the embedded proposition could not be evaluated. This pattern of results suggests that (1) the linguistic expression of a person's FB does not elicit implicit FB understanding and that (2) the assessment of the purely syntactic understanding of complement sentences is affected by competing reality information. In conclusion, this dissertation found no evidence that implicit ToM is related to the comprehension of sentential complementation. The findings suggest that implicit ToM might be based on nonlinguistic processes. Results are discussed in the light of recently proposed dual-process models that assume two cognitive mechanisms accounting for different levels of ToM task performance.}, language = {en} } @phdthesis{Jehannin2015, author = {Jehannin, Marie}, title = {About the role of physico-chemical properties and hydrodynamics on the progress of a precipitation reaction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-88364}, school = {Universit{\"a}t Potsdam}, pages = {xii, 130}, year = {2015}, abstract = {The control of the size and morphology of precipitated solid particles is a major economic issue for numerous industries. For instance, it is of interest to the nuclear industry for the recovery of radioactive species from spent nuclear fuel. The precipitate features, which are a key parameter for post-precipitation processing, depend on the local mixing conditions of the process. So far, the relationship between precipitate features and hydrodynamic conditions has not been investigated. In this study, a new experimental configuration consisting of coalescing drops is set up to investigate the link between reactive crystallization and hydrodynamics. Two configurations of aqueous drops are examined. The first one corresponds to high-contact-angle drops (>90°) in oil, as a model system for flowing drops; the second one corresponds to sessile drops in air with a low contact angle (<25°). In both cases, one reactant is dissolved in each drop, namely oxalic acid and cerium nitrate. When both drops get into contact, they may coalesce; the dissolved species then mix and react to produce insoluble cerium oxalate. The precipitate features and their effect on hydrodynamics are investigated as a function of the solvent. In the case of sessile drops in air, the surface tension difference between the drops generates a gradient which induces a Marangoni flow from the low-surface-tension drop over the high-surface-tension drop. By setting the surface tension difference between the two drops, and thus the Marangoni flow, the hydrodynamic conditions during drop coalescence could be modified. Diol/water mixtures are used as solvents in order to fix the surface tension difference between the liquids of both drops independently of the reactant concentration. More precisely, the diols used, 1,2-propanediol and 1,3-propanediol, are isomers with identical density and similar viscosity.
By keeping the water volume fraction constant and varying the 1,2-propanediol and 1,3-propanediol volume fractions of the solvents, the surface tensions of the mixtures differ by up to 10 mN/m for identical reactant concentration, density and viscosity. Three precipitation behaviors were identified for the coalescence of water/diol/reactant drops, depending on the oxalic excess. The corresponding precipitate patterns are visualized by optical microscopy, and the precipitates are characterized by confocal microscopy, SEM, XRD and SAXS measurements. In the intermediate oxalic-excess regime, the formation of periodic patterns can be observed. These patterns consist of alternating cerium oxalate precipitates with distinct morphologies, namely needles and "microflowers". Such periodic fringes can be explained by a feedback mechanism between convection, reaction and diffusion.}, language = {en} } @phdthesis{Mueller2022, author = {M{\"u}ller, Daniela}, title = {Abrupt climate changes and extreme events in two different varved lake sediment records}, doi = {10.25932/publishup-55833}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-558331}, school = {Universit{\"a}t Potsdam}, pages = {XVIII, 209}, year = {2022}, abstract = {Different lake systems might reflect different climate elements of climate changes, while the responses of lake systems are also diverse and not yet completely understood. Therefore, a comparison of lakes in different climate zones during the high-amplitude and abrupt climate fluctuations of the Last Glacial to Holocene transition provides an exceptional opportunity to investigate distinct natural lake-system responses to different abrupt climate changes. The aim of this doctoral thesis was to reconstruct climatic and environmental fluctuations down to (sub-)annual resolution in two different lake systems during the Last Glacial-Interglacial transition (~17 to 11 ka). Lake Gościąż, situated in temperate central Poland, developed in the Aller{\o}d after recession of the Last Glacial ice sheets. The Dead Sea is located in the Levant (eastern Mediterranean) within a steep gradient from sub-humid to hyper-arid climate, and formed in the mid-Miocene. Despite their differences in sedimentation processes, both lakes form annual laminations (varves), which are crucial for studies of abrupt climate fluctuations. This doctoral thesis was carried out within the DFG project PALEX-II (Paleohydrology and Extreme Floods from the Dead Sea ICDP Core), which investigates extreme hydro-meteorological events in the ICDP core in relation to climate changes, and ICLEA (Virtual Institute of Integrated Climate and Landscape Evolution Analyses), which aims to improve the understanding of climate dynamics and landscape evolution in north-central Europe since the Last Glacial. Further, it contributes to the Helmholtz Climate Initiative REKLIM (Regional Climate Change and Humans) Research Theme 3 "Extreme events across temporal and spatial scales", which investigates extreme events using climate data, paleo-records and model-based simulations. The three main aims were to (1) establish robust chronologies of the lakes, (2) investigate how major and abrupt climate changes affected the lake systems, and (3) compare the responses of the two varved lakes to these hemispheric-scale climate changes. Robust chronologies are a prerequisite for highly resolved climate and environmental reconstructions, as well as for archive comparisons.
Thus, addressing the first aim, the novel chronology of Lake Gościąż was established by microscopic varve counting and Bayesian age-depth modelling in Bacon for a non-varved section, and was corroborated by independent age constraints from 137Cs activity concentration measurements, AMS radiocarbon dating and pollen analysis. The varve chronology reaches from the late Aller{\o}d until AD 2015, revealing more Holocene varves than a previous study of Lake Gościąż had suggested. Varve formation throughout the complete Younger Dryas (YD) even allowed the identification of annually- to decadally-resolved leads and lags in proxy responses at the YD transitions. The lateglacial chronology of the Dead Sea (DS) had thus far been based mainly on radiocarbon and U/Th dating. In the unique ICDP core from the deep lake centre, a continuous search for cryptotephra was carried out in lateglacial sediments between two prominent gypsum deposits - the Upper and Additional Gypsum Units (UGU and AGU, respectively). Two cryptotephras were identified whose glass analyses correlate with tephra deposits from the S{\"u}phan and Nemrut volcanoes, indicating that the AGU is ~1000 years younger than previously assumed; this shifts the AGU into the YD and the underlying varved interval into the B{\o}lling/Aller{\o}d, contradicting previous assumptions. The second aim was achieved at Lake Gościąż using microfacies analyses, stable isotopes and temperature reconstructions. The YD lake system was dynamic and characterized by higher aquatic bioproductivity, more re-suspended material and less anoxia than during the Aller{\o}d and Early Holocene, mainly influenced by stronger water circulation and catchment erosion due to stronger westerly winds and less lake sheltering. Cooling at the YD onset took ~100 years longer than the final warming, while environmental proxies lagged the onset of cooling by ~90 years but responded contemporaneously at the termination of the YD. Chironomid-based temperature reconstructions support recent studies indicating mild YD summer temperatures. Such a comparison of annually-resolved proxy responses to both abrupt YD transitions is rare, because most European lake archives do not preserve varves during the YD. To accomplish the second aim at the DS, microfacies analyses were performed between the UGU (~17 ka) and the Holocene onset (~11 ka) in shallow-water (Masada) and deep-water (ICDP core) environments. This time interval is marked by a large but fluctuating lake-level drop, and therefore the complete transition into the Holocene is only recorded in the deep-basin ICDP core. In this thesis, this transition was investigated continuously and in detail for the first time. The final two pronounced lake-level drops, recorded by deposition of the UGU and AGU, were separated by one millennium of relative depositional stability and a positive water budget, as recorded by aragonite varve deposition interrupted by only a few event layers. Further, the intercalation of aragonite varves between the gypsum beds of the UGU and AGU shows that these generally dry intervals were also marked by decadal- to centennial-long rises in lake level. While continuous aragonite varves indicate decadal-long stable phases, the occurrence of thicker and more frequent event layers suggests generally more instability during the gypsum units. These results suggest a pattern of complex and variable hydroclimate at different time scales during the Lateglacial at the DS.
The third aim was accomplished based on the individual studies above, which jointly provide an integrated picture of different lake responses to different climate elements of hemispheric-scale abrupt climate changes during the Last Glacial-Interglacial transition. In general, climatically-driven facies changes are more dramatic in the DS than at Lake Gościąż. Further, Lake Gościąż is characterized by continuous varve formation nearly throughout the complete profile, whereas the DS record is largely characterized by extreme event layers, hampering the establishment of a continuous varve chronology. The lateglacial sedimentation in Lake Gościąż is influenced mainly by westerly winds and to a lesser extent by changes in catchment vegetation, whereas the DS is influenced primarily by changes in winter precipitation, which are caused by temperature variations in the Mediterranean. Interestingly, sedimentation in both archives is more stable during the B{\o}lling/Aller{\o}d and more dynamic during the YD, even though the sedimentation processes are different. In summary, this doctoral thesis presents seasonally-resolved records from two lake archives during the Lateglacial (ca 17-11 ka) to investigate the impact of abrupt climate changes on different lake systems. New age constraints from the identification of volcanic glass shards in the lateglacial sediments of the DS allowed the first lithology-based interpretation of the YD in the DS record and its comparison to Lake Gościąż. This highlights the importance of constructing a robust chronology and provides a first step for the synchronization of the DS with other eastern Mediterranean archives. Further, climate reconstructions from the lake sediments showed variability on different time scales in the different archives, i.e. decadal- to millennial-scale fluctuations in the lateglacial DS, and even annual variations and sub-decadal leads and lags in proxy responses during the rapid YD transitions in Lake Gościąż. This shows the importance of comparing different lake archives to better understand the regional and local impacts of hemispheric-scale climate variability. An unprecedented example is demonstrated here of how different lake systems respond differently, and to different climate elements, during abrupt climate changes. This further highlights the importance of understanding the respective lake system for climate reconstructions.}, language = {en} } @phdthesis{Regenstein1972, author = {Regenstein, Wolfgang}, title = {Absorptionsspektroskopische Untersuchungen zum Einfluß des Mediums und des Aggregatzustandes auf die Gestalt und Lage der Charge-Transfer-Bande}, doi = {10.25932/publishup-49670}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-496702}, school = {Universit{\"a}t Potsdam}, pages = {56}, year = {1972}, abstract = {An einigen CT-Modellkomplexen in verschiedenen L{\"o}sungsmitteln und bei Temperaturen von 113-300 K sollte der Einfluß der Umgebung auf die Form und Lage der Absorption von CT-Komplexen unterschiedlicher Bindungsfestigkeit untersucht werden. Dazu wurden bekannte Bandenprofilfunktionen auf ihre Anwendbarkeit gepr{\"u}ft. Da eine optimale Anpassung nicht m{\"o}glich war, wurde eine neue Profilfunktion entwickelt, die eine bessere Beschreibung ergab. Nach der Bestimmung der Gleichgewichtskonstante und des Extinktionskoeffizienten konnte mit der Profilfl{\"a}che das {\"U}bergangsmoment berechnet werden.
Die L{\"o}sungsmittelabh{\"a}ngigkeit wurde bei verschiedenen Brechzahlen und Dielektrizit{\"a}tskonstanten untersucht. F{\"u}r feste Komplexe wurde eine spezielle Pr{\"a}parationstechnik gew{\"a}hlt. Die beobachteten Feinstrukturen und der auftretende Streuuntergrund werden diskutiert.}, language = {de} } @phdthesis{Lemke2013, author = {Lemke, Karina}, title = {Abtrennung und Charakterisierung von Polyelektrolyt-modifizierten Nanopartikeln}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-68133}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Gegenstand der Dissertation ist die gr{\"o}ßen- und eigenschaftsoptimierte Synthese und Charakterisierung von anorganischen Nanopartikeln in einer geeigneten Polyelektrolytmodifizierten Mikroemulsion. Das Hauptziel bildet dabei die Auswahl einer geeigneten Mikroemulsion, zur Synthese von kleinen, stabilen, reproduzierbaren Nanopartikeln mit besonderen Eigenschaften. Die vorliegende Arbeit wurde in zwei Haupteile gegliedert. Der erste Teil befasst sich mit der Einmischung von unterschiedlichen Polykationen (lineares Poly (diallyldimethylammoniumchlorid) (PDADMAC) und verzweigtes Poly (ethylenimin) (PEI)) in verschiedene, auf unterschiedlichen Tensiden (CTAB - kationisch, SDS - anionisch, SB - zwitterionisch) basierenden, Mikroemulsionssysteme. Dabei zeigt sich, dass das Einmischen der Polykationen in die Wassertr{\"o}pfchen der Wasser-in-{\"O}l (W/O) Mikroemulsion prinzipiell m{\"o}glich ist. Der Einfluss der verschiedenen Polykationen auf das Phasenverhalten der W/O Mikroemulsion ist jedoch sehr unterschiedlich. In Gegenwart des kationischen Tensids f{\"u}hren die repulsiven Wechselwirkungen mit den Polykationen zu einer Destabilisierung des Systems, w{\"a}hrend die ausgepr{\"a}gten Wechselwirkungen mit dem anionischen Tensid in einer deutlichen Stabilisierung des Systems resultieren. F{\"u}r das zwitterionische Tensid f{\"u}hren die moderaten Wechselwirkungen mit den Polykationen zu einer partiellen Stabilisierung. Der zweite Teil der Arbeit besch{\"a}ftigt sich mit dem Einsatz der unterschiedlichen, Polyelektrolyt- modifizierten Mikroemulsionen als Templatphase f{\"u}r die Herstellung verschiedener, anorganischer Nanopartikel. Die CTAB-basierte Mikroemulsion erweist sich dabei als ungeeignet f{\"u}r die Herstellung von CdS Nanopartikeln, da zum einen nur eine geringe Toleranz gegen{\"u}ber den Reaktanden vorhanden ist (Destabilisierungseffekt) und zum anderen das Partikelwachstum durch den Polyelektrolyt-Tensid-Film nicht ausreichend begrenzt wird. Zudem zeigt sich, dass eine Abtrennung der Partikel aus der Mikroemulsion nicht m{\"o}glich ist. Die SDS-basierten Mikroemulsionen, erweisen sich als geeignete Templatphase zur Synthese kleiner anorganischer Nanopartikel (3 - 20 nm). Sowohl CdS Quantum Dots, als auch Gold Nanopartikel konnten erfolgreich in der Mikroemulsion synthetisiert werden, wobei das verzweigte PEI einen interessanten Templat-Effekt in der Mikroemulsion hervorruft. Als deutlicher Nachteil der SDS-basierten Mikroemulsionen offenbaren sich die starken Wechselwirkungen zwischen dem Tensid und den Polyelektrolyten w{\"a}hrend der Aufarbeitung der Nanopartikel aus der Mikroemulsion. Dabei erweist sich die Polyelektrolyt-Tensid-Komplexbildung als hinderlich f{\"u}r die Redispergierung der CdS Quantum Dots in Wasser, so dass Partikelaggregation einsetzt. 
Die SB-basierten Mikroemulsionen erweisen sich als g{\"u}nstige Templatphase f{\"u}r die Bildung von gr{\"o}ßen- und eigenschaftsoptimierten Nanopartikeln (< 4 nm), wobei insbesondere eine Modifizierung mit PEI als ideal betrachtet werden kann. In Gegenwart des verzweigten PEI gelang es erstmals, ultrakleine, fluoreszierende Gold-Cluster (< 2 nm) in einer SB-basierten Mikroemulsion als Templatphase herzustellen. Als besonderer Vorteil der SB-basierten Mikroemulsion zeigen sich die moderaten Wechselwirkungen zwischen dem zwitterionischen Tensid und den Polyelektrolyten, welche eine anschließende Abtrennung der Partikel aus der Mikroemulsion unter Erhalt der Gr{\"o}ße und ihrer optischen Eigenschaften erm{\"o}glichen. In der redispergierten w{\"a}ssrigen L{\"o}sung gelang somit eine Auftrennung der PEI-modifizierten Partikel mit Hilfe der asymmetrischen Fluss-Feldflussfraktionierung (aF FFF). Die gebildeten Nanopartikel zeigen interessante optische Eigenschaften und k{\"o}nnen zum Beispiel erfolgreich zur Modifizierung von Biosensoren eingesetzt werden.}, language = {de} } @phdthesis{Gunold2022, author = {Gunold, Sascha}, title = {Abzug unter Beobachtung}, doi = {10.25932/publishup-57197}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-571977}, school = {Universit{\"a}t Potsdam}, pages = {391}, year = {2022}, abstract = {Mehr als vier Jahrzehnte lang beobachteten die Streitkr{\"a}fte und Milit{\"a}rnachrichtendienste der NATO-Staaten die sowjetischen Truppen in der DDR. Hierf{\"u}r {\"u}bernahm in der Bundesrepublik Deutschland der Bundesnachrichtendienst (BND) die milit{\"a}rische Auslandsaufkl{\"a}rung unter Anwendung nachrichtendienstlicher Mittel und Methoden. Die Bundeswehr betrieb dagegen taktische Fernmelde- und elektronische Aufkl{\"a}rung und h{\"o}rte vor allem den Funkverkehr der „Gruppe der sowjetischen Streitkr{\"a}fte in Deutschland" (GSSD) ab. Mit der Aufstellung einer zentralen Dienststelle f{\"u}r das milit{\"a}rische Nachrichtenwesen, dem Amt f{\"u}r Nachrichtenwesen der Bundeswehr, b{\"u}ndelte und erweiterte das Bundesministerium f{\"u}r Verteidigung in den 1980er Jahren zugleich seine analytischen Kapazit{\"a}ten. Das Monopol des BND in der milit{\"a}rischen Auslandsaufkl{\"a}rung wurde von der Bundeswehr dadurch zunehmend infrage gestellt. Nach der deutschen Wiedervereinigung am 3. Oktober 1990 befanden sich immer noch mehr als 300.000 sowjetische Soldaten auf deutschem Territorium. Die 1989 in Westgruppe der Truppen (WGT) umbenannte GSSD sollte - so der Zwei-plus-Vier-Vertrag - bis 1994 vollst{\"a}ndig abziehen. Der Vertrag verbot auch den drei Westm{\"a}chten, in den neuen Bundesl{\"a}ndern milit{\"a}risch t{\"a}tig zu sein. Die f{\"u}r die Milit{\"a}raufkl{\"a}rung bis dahin unverzichtbaren Milit{\"a}rverbindungsmissionen der Westm{\"a}chte mussten ihre Dienste einstellen. Doch was geschah mit diesem „alliierten Erbe"? Wer {\"u}bernahm auf deutscher Seite die Aufkl{\"a}rung der sowjetischen Truppen und wer kontrollierte den Truppenabzug? Die Studie untersucht die Rolle von Bundeswehr und BND beim Abzug der WGT zwischen 1990 und 1994 und fragt dabei nach Kooperation und Konkurrenz zwischen Streitkr{\"a}ften und Nachrichtendiensten. Welche milit{\"a}rischen und nachrichtendienstlichen Mittel und F{\"a}higkeiten stellte die Bundesregierung zur Bew{\"a}ltigung des Truppenabzugs zur Verf{\"u}gung, nachdem die westlichen Milit{\"a}rverbindungsmissionen aufgel{\"o}st wurden?
Wie ver{\"a}nderten sich die Anforderungen an die milit{\"a}rische Auslandsaufkl{\"a}rung des BND? Inwieweit setzten sich Konkurrenz und Kooperation von Bundeswehr und BNDbeim Truppenabzug fort? Welche Rolle spielten dabei die einstigen Westm{\"a}chte? Die Arbeit versteht sich nicht nur als Beitrag zur Milit{\"a}rgeschichte, sondern auch zur deutschen Nachrichtendienstgeschichte.}, language = {de} } @phdthesis{Stanke2023, author = {Stanke, Sandra}, title = {AC electrokinetic immobilization of influenza viruses and antibodies on nanoelectrode arrays for on-chip immunoassays}, doi = {10.25932/publishup-61716}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-617165}, school = {Universit{\"a}t Potsdam}, pages = {x, 115}, year = {2023}, abstract = {In the present thesis, AC electrokinetic forces, like dielectrophoresis and AC electroosmosis, were demonstrated as a simple and fast method to functionalize the surface of nanoelectrodes with submicrometer sized biological objects. These nanoelectrodes have a cylindrical shape with a diameter of 500 nm arranged in an array of 6256 electrodes. Due to its medical relevance influenza virus as well as anti-influenza antibodies were chosen as a model organism. Common methods to bring antibodies or proteins to biosensor surfaces are complex and time-consuming. In the present work, it was demonstrated that by applying AC electric fields influenza viruses and antibodies can be immobilized onto the nanoelectrodes within seconds without any prior chemical modification of neither the surface nor the immobilized biological object. The distribution of these immobilized objects is not uniform over the entire array, it exhibits a decreasing gradient from the outer row to the inner ones. Different causes for this gradient have been discussed, such as the vortex-shaped fluid motion above the nanoelectrodes generated by, among others, electrothermal fluid flow. It was demonstrated that parts of the accumulated material are permanently immobilized to the electrodes. This is a unique characteristic of the presented system since in the literature the AC electrokinetic immobilization is almost entirely presented as a method just for temporary immobilization. The spatial distribution of the immobilized viral material or the anti-influenza antibodies at the electrodes was observed by either the combination of fluorescence microscopy and deconvolution or by super-resolution microscopy (STED). On-chip immunoassays were performed to examine the suitability of the functionalized electrodes as a potential affinity-based biosensor. Two approaches were pursued: A) the influenza virus as the bio-receptor or B) the influenza virus as the analyte. Different sources of error were eliminated by ELISA and passivation experiments. Hence, the activity of the immobilized object was inspected by incubation with the analyte. This resulted in the successful detection of anti-influenza antibodies by the immobilized viral material. On the other hand, a detection of influenza virus particles by the immobilized anti-influenza antibodies was not possible. The latter might be due to lost activity or wrong orientation of the antibodies. Thus, further examinations on the activity of by AC electric fields immobilized antibodies should follow. When combined with microfluidics and an electrical read-out system, the functionalized chips possess the potential to serve as a rapid, portable, and cost-effective point-of-care (POC) device. 
This device can be utilized as a basis for diverse applications in the diagnosis and treatment of influenza as well as of various other pathogens.}, language = {en} } @phdthesis{Kellermann2011, author = {Kellermann, Thorsten}, title = {Accurate numerical relativity simulations of non-vacuum space-times in two dimensions and applications to critical collapse}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59578}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {This thesis puts its focus on the physics of neutron stars and its description with methods of numerical relativity. In a first step, a new numerical framework, the Whisky2D code, is developed; it solves the relativistic equations of hydrodynamics in axisymmetry. To this end, we consider an improved formulation of the conserved form of these equations. The second part uses the new code to investigate the critical behaviour of two colliding neutron stars. Considering the analogy to phase transitions in statistical physics, we investigate the evolution of the entropy of the neutron stars during the whole process. A better understanding of the evolution of thermodynamical quantities, like the entropy in critical processes, should provide a deeper understanding of thermodynamics in relativity. More specifically, we have written the Whisky2D code, which solves the general-relativistic hydrodynamics equations in a flux-conservative form and in cylindrical coordinates. This, of course, brings in 1/r singular terms, where r is the radial cylindrical coordinate, which must be dealt with appropriately. In the above-referenced works, the flux operator is expanded and the 1/r terms, not containing derivatives, are moved to the right-hand side of the equation (the source term), so that the left-hand side assumes a form identical to the one of the three-dimensional (3D) Cartesian formulation. We call this the standard formulation. Another possibility is not to split the flux operator and to redefine the conserved variables via a multiplication by r. We call this the new formulation. The new equations are solved with the same methods as in the Cartesian case. From a mathematical point of view, one would not expect differences between the two ways of writing the differential operator, but, of course, a difference is present at the numerical level. Our tests show that the new formulation yields results with a global truncation error which is one or more orders of magnitude smaller than those of alternative and commonly used formulations. The second part of the Thesis uses the new code for investigations of critical phenomena in general relativity. In particular, we consider the head-on collision of two neutron stars in a region of the parameter space where the two final states, a new stable neutron star or a black hole, lie close to each other. In 1993, Choptuik considered one-parameter families of solutions, S[P], of the Einstein-Klein-Gordon equations for a massless scalar field in spherical symmetry, such that for every P > P⋆, S[P] contains a black hole and for every P < P⋆, S[P] is a solution not containing singularities. He studied numerically the behavior of S[P] as P → P⋆ and found that the critical solution, S[P⋆], is universal, in the sense that it is approached by all nearly-critical solutions regardless of the particular family of initial data considered.
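The scaling relations at stake here can be stated compactly (standard results from the critical-collapse literature, quoted only to fix notation): in type-II phenomena such as Choptuik's massless scalar field, the black-hole mass scales as
\[ M_{\mathrm{BH}} \propto |P - P_\star|^{\gamma}, \qquad \gamma \simeq 0.37, \]
whereas in type-I phenomena, the case relevant to the neutron-star collisions studied in this thesis, it is the lifetime \tau of the near-critical configuration that scales,
\[ \tau \simeq -\frac{1}{\lambda} \ln|P - P_\star| + \mathrm{const}, \]
with \lambda the growth rate of the single unstable mode of the critical solution.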
All these phenomena have the common property that, as P approaches P⋆, S[P] approaches a universal solution S[P⋆] and that all the physical quantities of S[P] depend only on |P - P⋆|. The first study of critical phenomena concerning the head-on collision of NSs was carried out by Jin and Suen in 2007. In particular, they considered a series of families of equal-mass NSs, modeled with an ideal-gas EOS, boosted towards each other, and varied the mass of the stars, their separation, velocity and the polytropic index in the EOS. In this way they could observe a critical phenomenon of type I near the threshold of black-hole formation, with the putative critical solution being a nonlinearly oscillating star. In a subsequent work, they performed similar simulations, but considering the head-on collision of Gaussian distributions of matter. Also in this case they found the appearance of type-I critical behaviour, but they also performed a perturbative analysis of the initial distributions of matter and of the merged object. Because of the considerable difference found between the eigenfrequencies in the two cases, they concluded that the critical solution does not represent a system near equilibrium and, in particular, not a perturbed Tolman-Oppenheimer-Volkoff (TOV) solution. In this Thesis we study the dynamics of the head-on collision of two equal-mass NSs using a setup which is as similar as possible to the one considered above. While we confirm that the merged object exhibits a type-I critical behaviour, we also argue against the conclusion that the critical solution cannot be described in terms of an equilibrium solution. Indeed, we show that, in analogy with earlier findings, the critical solution is effectively a perturbed unstable solution of the TOV equations. Our analysis also considers the fine structure of the scaling relation of type-I critical phenomena, and we show that it exhibits oscillations in a similar way to the ones studied in the context of scalar-field critical collapse.}, language = {en} } @phdthesis{Antonelli2021, author = {Antonelli, Andrea}, title = {Accurate waveform models for gravitational-wave astrophysics: synergetic approaches from analytical relativity}, doi = {10.25932/publishup-57667}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576671}, school = {Universit{\"a}t Potsdam}, pages = {XII, 259, LXXV}, year = {2021}, abstract = {Gravitational-wave (GW) astrophysics is a field in full blossom. Since the landmark detection of GWs from a binary black hole on September 14th, 2015, fifty-two compact-object binaries have been reported by the LIGO-Virgo collaboration. Such events carry astrophysical and cosmological information about how black holes and neutron stars are formed, what neutron stars are composed of, and how the Universe expands, and they allow testing general relativity in the highly-dynamical strong-field regime. It is the goal of GW astrophysics to extract such information as accurately as possible. Yet, this is only possible if the tools and technology used to detect and analyze GWs are advanced enough. A key aspect of GW searches are waveform models, which encapsulate our best predictions for the gravitational radiation under a certain set of parameters, and which need to be cross-correlated with data to extract GW signals. Waveform models must be very accurate to avoid missing important physics in the data, which might be the key to answering the fundamental questions of GW astrophysics.
The continuous improvements of the current LIGO-Virgo detectors, the development of next-generation ground-based detectors such as the Einstein Telescope or the Cosmic Explorer, as well as the development of the Laser Interferometer Space Antenna (LISA), demand accurate waveform models. While available models are enough to capture the low-spin, comparable-mass binaries routinely detected in LIGO-Virgo searches, those for sources from both current and next-generation ground-based and spaceborne detectors must be accurate enough to detect binaries with large spins and asymmetry in the masses. Moreover, the thousands of sources that we expect to detect with future detectors demand accurate waveforms to mitigate biases in the estimation of signals' parameters due to the presence of a foreground of many sources that overlap in the frequency band. This is recognized as one of the biggest challenges for the analysis of future detectors' data, since such biases might hinder the extraction of important astrophysical and cosmological information. In the first part of this thesis, we discuss how to improve waveform models for binaries with high spins and asymmetry in the masses. In the second, we present the first generic metrics that have been proposed to predict biases in the presence of a foreground of many overlapping signals in GW data. For the first task, we focus on several classes of analytical techniques. Current models for LIGO and Virgo studies are based on the post-Newtonian (PN, weak-field, small velocities) approximation, which is most natural for the bound orbits that are routinely detected in GW searches. However, two other approximations have risen in prominence: the post-Minkowskian (PM, weak-field only) approximation, natural for unbound (scattering) orbits, and the small-mass-ratio (SMR) approximation, typical of binaries in which the mass of one body is much larger than that of the other. These are most appropriate for binaries with high asymmetry in the masses that challenge current waveform models. Moreover, they allow one to "cover" regions of the parameter space of coalescing binaries, thereby improving the interpolation (and faithfulness) of waveform models. The analytical approximations to the relativistic two-body problem can be synergistically included within the effective-one-body (EOB) formalism, in which the two-body information from each approximation can be recast into an effective problem of a mass orbiting a deformed Schwarzschild (or Kerr) black hole. The hope is that the resultant models can cover both the low-spin, comparable-mass binaries that are routinely detected and the ones that challenge current models. The first part of this thesis is dedicated to a study of how to best incorporate information from the PN, PM, SMR and EOB approaches in a synergistic way. We also discuss how accurate the resulting waveforms are, as compared against numerical-relativity (NR) simulations. We begin by comparing PM models, whether alone or recast in the EOB framework, against PN models and NR simulations. We show that PM information has the potential to improve currently employed models for LIGO and Virgo, especially if recast within the EOB formalism. This is very important, as the PM approximation comes with a host of new computational techniques from particle physics to exploit.
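For reference, the EOB "recasting" mentioned above rests on the standard energy map of the formalism (Buonanno and Damour), which relates the real two-body Hamiltonian to an effective Hamiltonian H_eff of a test mass \mu in a deformed Schwarzschild (or Kerr) background:
\[ H_{\mathrm{EOB}} = M\sqrt{1 + 2\nu\left(\frac{H_{\mathrm{eff}}}{\mu} - 1\right)}, \qquad M = m_1 + m_2, \quad \mu = \frac{m_1 m_2}{M}, \quad \nu = \frac{\mu}{M}. \]
Information from the PN, PM and SMR approximations enters through the potentials that deform the effective background, which is what allows the different expansions to be combined in one model.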
Then, we show how a combination of PM and SMR approximations can be employed to access previously unknown PN orders, deriving the third subleading PN dynamics for spin-orbit and (aligned) spin1-spin2 couplings. Such new results can then be included in the EOB models currently used in GW searches and parameter-estimation studies, thereby improving them when the binaries have high spins. Finally, we build an EOB model for quasi-circular nonspinning binaries based on the SMR approximation (rather than the PN one, as usually done). We show in detail how this is done without incurring the divergences that affected previous attempts, and we compare the resultant model against NR simulations. We find that the SMR approximation is an excellent approximation for all (quasi-circular nonspinning) binaries, including both the equal-mass binaries that are routinely detected in GW searches and the ones with highly asymmetric masses. In particular, the SMR-based models compare much better against NR than the PN-based ones, suggesting that SMR-informed EOB models might be the key to modeling binaries in the future. In the second task of this thesis, we work within the linear-signal approximation and describe generic metrics to predict inference biases on the parameters of a GW source of interest in the presence of confusion noise from unfitted foregrounds and from residuals of other signals that have been incorrectly fitted out. We illustrate the formalism with simple (yet realistic) LISA sources, and demonstrate its validity against Monte-Carlo simulations. The metrics we describe pave the way for more realistic studies to quantify the biases with future ground-based and spaceborne detectors.}, language = {en} } @phdthesis{Chen2022, author = {Chen, Hui Ching}, title = {Acquisition of focus - in a cross-linguistic perspective}, doi = {10.25932/publishup-55345}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-553458}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 130}, year = {2022}, abstract = {In this dissertation, we investigate how native speakers of Chinese and of German, both adults and children, correctly interpret different linguistic means in language comprehension, such as word-order information and prosodic and lexical cues.}, language = {en} } @phdthesis{Sawade2012, author = {Sawade, Christoph}, title = {Active evaluation of predictive models}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-255-1}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-65583}, school = {Universit{\"a}t Potsdam}, pages = {ix, 157}, year = {2012}, abstract = {The field of machine learning studies algorithms that infer predictive models from data. Predictive models are applicable to many practical tasks such as spam filtering, face and handwritten-digit recognition, and personalized product recommendation. In general, they are used to predict a target label for a given data instance. In order to make an informed decision about the deployment of a predictive model, it is crucial to know the model's approximate performance. To evaluate performance, a set of labeled test instances is required that is drawn from the distribution the model will be exposed to at application time. In many practical scenarios, unlabeled test instances are readily available, but the process of labeling them can be a time- and cost-intensive task and may involve a human expert.
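The instrumental-sampling idea developed in the following can be illustrated by a self-normalized importance-weighted estimate of the error rate; a minimal Python sketch, with all names illustrative and not taken from the thesis' actual implementation:
\begin{verbatim}
import numpy as np

def active_error_estimate(pool, model, oracle, q, n_queries, seed=0):
    """Estimate the error rate of `model` on `pool` by sampling from an
    instrumental distribution q instead of the test distribution.

    pool: list of unlabeled instances; q: array of probabilities over
    the pool (summing to one); oracle(x) returns the true label of x
    and is the expensive step that active evaluation tries to ration."""
    rng = np.random.default_rng(seed)
    n = len(pool)
    idx = rng.choice(n, size=n_queries, replace=True, p=q)
    w = (1.0 / n) / q[idx]  # importance weights p(x)/q(x), p uniform
    losses = np.array([model(pool[i]) != oracle(pool[i]) for i in idx],
                      dtype=float)
    return float(np.sum(w * losses) / np.sum(w))  # self-normalized estimate
\end{verbatim}
Choosing q in proportion to each instance's expected contribution to the estimation error, rather than uniformly, is what reduces the variance of this estimate for a fixed labeling budget.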
This thesis addresses the problem of evaluating a given predictive model accurately with minimal labeling effort. We study an active model evaluation process that selects certain instances of the data according to an instrumental sampling distribution and queries their labels. We derive sampling distributions that minimize the estimation error with respect to different performance measures such as error rate, mean squared error, and F-measures. An analysis of the distribution that governs the estimator leads to confidence intervals, which indicate how precise the error estimation is. Labeling costs may vary across different instances depending on certain characteristics of the data. For instance, documents differ in their length, comprehensibility, and technical requirements; these attributes affect the time a human labeler needs to judge relevance or to assign topics. To address this, the sampling distribution is extended to incorporate instance-specific costs. We empirically study conditions under which the active evaluation processes are more accurate than a standard estimate that draws equally many instances from the test distribution. We also address the problem of comparing the risks of two predictive models. The standard approach would be to draw instances according to the test distribution, label the selected instances, and apply statistical tests to identify significant differences. Drawing instances according to an instrumental distribution affects the power of a statistical test. We derive a sampling procedure that maximizes test power when used to select instances, and thereby minimizes the likelihood of choosing the inferior model. Furthermore, we investigate the task of comparing several alternative models; the objective of an evaluation could be to rank the models according to the risk that they incur or to identify the model with the lowest risk. An experimental study shows that the active procedure leads to higher test power than the standard test in many application domains. Finally, we study the problem of evaluating the performance of ranking functions, which are used for example in web search. In practice, ranking performance is estimated by applying a given ranking model to a representative set of test queries and manually assessing the relevance of all retrieved items for each query. We apply the concepts of active evaluation and active comparison to ranking functions and derive optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.}, language = {en} } @phdthesis{Riedl2021, author = {Riedl, Simon}, title = {Active tectonics in the Kenya Rift}, doi = {10.25932/publishup-53855}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-538552}, school = {Universit{\"a}t Potsdam}, pages = {xi, 207}, year = {2021}, abstract = {Magmatically and tectonically active rift zones are the precursors of nascent plate boundaries. These extensional tectonic provinces are characterized by ubiquitous normal faulting, and the spatial distribution, geometry, and age of these normal faults allow inferences about the spatial and temporal relationships between tectonic deformation, magmatism, and long-wavelength crustal deformation in rifts.
This thesis focuses on fault activity in the Kenya Rift of the Cenozoic East African Rift System between the Middle Pleistocene and the Holocene. To investigate the early stages of the formation of continental plate boundaries, a time-averaged minimum extension rate for the inner graben of the Northern Kenya Rift (NKR) is derived for the last 0.5 Myr. The analysis relies on measurements based on the digital TanDEM-X elevation model to map the normal faults along the volcano-tectonic axis of the inner graben of the NKR and to determine their displacements. Time-averaged extension rates are calculated using existing geochronological data for the deformed volcanic units as well as ⁴⁰Ar/³⁹Ar dates obtained in this work. The analyses show that, in the inner graben of the NKR, the long-term extension rate for Middle Pleistocene to recent faults reaches minimum values of 1.0 to 1.6 mm yr⁻¹, with values of up to 2.0 mm yr⁻¹ occurring locally. Given the nearly inactive border faults of the NKR, extension is thus concentrated in the region of the active volcano-tectonic axis in the inner graben, indicating an advanced stage of continental extension in the NKR. This spatially focused extension is further examined in a fault analysis of the youngest volcanic features of the Kenya Rift. Using field mapping and a terrain model derived from aerial imagery, the thesis analyzes the fault characteristics of the approximately 36 thousand-year-old Menengai caldera and of the surrounding areas in the central Kenya Rift. In general, the Holocene faults within the rift are pure, NNE-striking normal faults that reflect the present-day tectonic stress field; within the Menengai caldera, however, the young structures are overprinted by ongoing magmatic activity and doming. The caldera lies at the center of an actively extending rift segment, and together with the other Quaternary volcanoes of the Kenya Rift these areas can be understood as focal points of extensional faulting that will ultimately lead to a more advanced stage of magma-assisted continental break-up. Fault activity in the Kenya Rift, ongoing since the Tertiary, dissects the larger rift depressions into smaller segments and influences the sedimentology and hydrology of these rift basins. At present, most fault-bounded basins of the Kenya Rift are hydrologically isolated, but they were hydrologically connected during humid climate phases; in this thesis I therefore also investigate this hydrological connectivity of the rift basins during the early Holocene African Humid Period. Based on the analysis of digital elevation models, taking into account geomorphological indicators of lake high stands, radiocarbon dating, and a review of fossil data, two cascading river systems could be derived from these data: one river cascade directed to the south and one to the north.
During the early Holocene, both cascades connected the presently isolated basins through overflowing lakes and incised gorges. This hydrological connection led to the dispersal of aquatic fauna along the rift, while at the same time the watershed between the two river systems constituted the only terrestrial dispersal corridor that allowed a crossing of the Kenya Rift. This tectono-geomorphological reconstruction explains the now isolated occurrences of Nilotic fish species in the Kenyan rift lakes, as well as the isolated occurrences of Guineo-Congolian mammals in forests east of the Kenya Rift, which were able to disperse across the watershed within the rift. On longer time scales, such phases of hydrological connection and phases of isolation occurred repeatedly and are reflected in changing paleoecological indicators in sediment cores. Here I present a sediment core from the Koora basin of the Southern Kenya Rift that contains a record of the paleoenvironmental conditions of the last 1 million years. This record shows that around 400 thousand years ago the previously relatively stable environmental conditions came to an end, and tectonic, hydrological, and ecological changes caused water availability, grassland communities, and tree cover to undergo increasingly stronger and more frequent fluctuations. These major changes coincide with phases in which the southern basin of the Kenya Rift was particularly affected by volcanic and tectonic activity. This thesis therefore clearly demonstrates how the tectonic and geomorphological conditions arising from long-lasting extension can influence the hydrology, the paleoenvironmental conditions, and the biodiversity of a rift zone.}, language = {en} } @phdthesis{Schirmack2015, author = {Schirmack, Janosch}, title = {Activity of methanogenic archaea under simulated Mars analog conditions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-73010}, school = {Universit{\"a}t Potsdam}, pages = {VI, 108}, year = {2015}, abstract = {The assumed comparable environmental conditions of early Mars and early Earth around 3.7 Ga - a time from which the first fossil records of life on Earth are known - suggest the possibility of life emerging on both planets in parallel. As conditions changed, the hypothetical life on Mars either became extinct or was able to adapt and might still exist in biological niches. The controversially discussed detection of methane on Mars led to the assumption that it must have a recent origin - either abiotic, through active volcanism or chemical processes, or through biogenic production. Spatial and seasonal variations in the detected methane concentrations, and correlations between the presence of water vapor and geological features such as subsurface hydrogen that occur together with locally increased methane concentrations, fueled the hypothesis of a possible biological source of the methane on Mars. Therefore, the phylogenetically ancient methanogenic archaea, which evolved under early Earth conditions, are often used as model organisms in astrobiological studies to investigate the potential of life to exist in possible extraterrestrial habitats on our neighboring planet.
In this thesis, methanogenic archaea originating from two extreme environments on Earth were investigated to test their ability to be active under simulated Mars analog conditions. These extreme environments - Siberian permafrost-affected soil and the chemoautotrophically based terrestrial ecosystem of Movile Cave, Romania - are regarded as analogs for possible Martian (subsurface) habitats. Two novel species of methanogenic archaea isolated from these environments were described within the framework of this thesis. It could be shown that concentrations of up to 1 wt\% of Mars regolith analogs added to the growth media had a positive influence on the methane production rates of the tested methanogenic archaea, whereas higher concentrations resulted in decreasing rates. Nevertheless, it was possible for the organisms to metabolize when incubated on water-saturated soil matrices made of Mars regolith analogs without any additional nutrients. Long-term desiccation resistance of more than 400 days was proven by reincubation and indirect counting of viable cells through a combined treatment with propidium monoazide (to inactivate the DNA of destroyed cells) and quantitative PCR. Phyllosilicate-rich regolith analogs seem to be the best soil mixtures for the tested methanogenic archaea to be active under Mars analog conditions. Furthermore, in a simulation chamber experiment, the activity of the permafrost methanogen strain Methanosarcina soligelidi SMA-21 under Mars subsurface analog conditions could be proven. Through real-time wavelength modulation spectroscopy measurements, the increase in the methane concentration could be detected at temperatures down to -5 °C. The results presented in this thesis contribute to the understanding of the activity potential of methanogenic archaea under Mars analog conditions and therefore provide insights into the possible habitability of present-day Mars (near-)subsurface environments. Thus, they also contribute to the data interpretation of future life-detection missions on that planet, for example the ExoMars mission of the European Space Agency (ESA) and Roscosmos, which is planned to be launched in 2018 and aims to drill into the Martian subsurface.}, language = {en} } @phdthesis{Markov2023, author = {Markov, Adrian}, title = {Acute effects of exercise order in concurrent training on immunological stress responses and measures of muscular fitness in youth athletes of both sexes}, doi = {10.25932/publishup-61851}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-618517}, school = {Universit{\"a}t Potsdam}, pages = {X, 81}, year = {2023}, abstract = {Background and aims: To succeed in competition, elite team and individual athletes often seek to develop both high levels of muscle strength and power and high cardiorespiratory endurance. In this context, concurrent training (CT) is a commonly applied and effective training approach. Although they are exposed to high training loads, youth athletes (≤ 18 years) are still underrepresented in the scientific literature. In addition, immunological responses to CT have received little attention. Therefore, the aims of this work were to examine the acute (< 15 min) and delayed (≥ 6 hours) effects of different exercise orders in CT on immunological stress responses, muscular fitness, metabolic responses, and ratings of perceived exertion (RPE) in highly trained youth male and female judo athletes.
Methods: A total of twenty male and thirteen female participants, with an average age of 16 ± 1.8 years and 14.4 ± 2.1 years, respectively, were included in the study. They were randomly assigned to two CT sessions: power-endurance versus endurance-power (i.e., study 1), or strength-endurance versus endurance-strength (i.e., study 2). Markers of immune response (i.e., white-blood-cells, granulocytes, lymphocytes, monocytes, granulocyte-lymphocyte-ratio, and systemic-inflammation-index), muscular fitness (i.e., counter-movement jump [CMJ]), metabolic responses (i.e., blood lactate, glucose), and RPE were collected at different time points (i.e., PRE12H, PRE, MID, POST, POST6H, POST22H). Results (study 1): There were significant time*order interactions for white-blood-cells, lymphocytes, granulocytes, monocytes, granulocyte-lymphocyte-ratio, and systemic-inflammation-index. The power-endurance order resulted in significantly larger PRE-to-POST increases in white-blood-cells, monocytes, and lymphocytes, while the endurance-power order resulted in significantly larger PRE-to-POST increases in the granulocyte-lymphocyte-ratio and systemic-inflammation-index. Likewise, significantly larger increases from PRE-to-POST6H in white-blood-cells and granulocytes were observed following the power-endurance order compared to the endurance-power order. All markers of immune response returned toward baseline values at POST22H. Moreover, there was a significant time*order interaction for blood glucose and lactate. Following the endurance-power order, blood lactate and glucose increased from PRE-to-MID but not from PRE-to-POST. Meanwhile, in the power-endurance order, blood lactate and glucose increased from PRE-to-POST but not from PRE-to-MID. A significant time*order interaction was observed for CMJ-force, with larger PRE-to-POST decreases in the endurance-power order compared to the power-endurance order. Further, CMJ-power showed larger PRE-to-MID performance decreases following the power-endurance order compared to the endurance-power order. Regarding RPE, significant time*order interactions were noted, with larger PRE-to-MID values following the endurance-power order and larger PRE-to-POST values following the power-endurance order. Results (study 2): There were significant time*order interactions for lymphocytes, monocytes, granulocyte-lymphocyte-ratio, and systemic-inflammation-index. The strength-endurance order resulted in significantly larger PRE-to-POST increases in lymphocytes, while the endurance-strength order resulted in significantly larger PRE-to-POST increases in the granulocyte-lymphocyte-ratio and systemic-inflammation-index. All markers of the immune system returned toward baseline values at POST22H. Moreover, there was a significant time*order interaction for blood glucose and lactate. From PRE-to-MID, there was a significantly greater increase in blood lactate and glucose following the endurance-strength order compared to the strength-endurance order. Meanwhile, from PRE-to-POST, there was a significantly higher increase in blood glucose following the strength-endurance order compared to the endurance-strength order. Regarding physical fitness, a significant time*order interaction was observed for CMJ-force and CMJ-power, with larger PRE-to-MID increases following the endurance-strength order compared to the strength-endurance order.
For RPE, significant time*order interactions were noted, with larger PRE-to-MID values following the endurance-power order and larger PRE-to-POST values following the power-endurance order. Conclusions: The primary findings from both studies revealed order-dependent effects on immune responses. In male youth judo athletes, the results demonstrated greater immunological stress responses, both immediately (≤ 15 min) and delayed (≥ 6 hours), following the power-endurance order compared to the endurance-power order. For female youth judo athletes, the results indicated higher acute, but not delayed, order-dependent changes in immune responses following the strength-endurance order compared to the endurance-strength order. It is worth noting that in both studies, all markers of immune system response returned to baseline levels within 22 hours. This suggests that successful recovery from the exercise-induced immune stress response was achieved within 22 hours. Regarding metabolic responses, physical fitness, and perceived exertion, the findings from both studies indicated acute (≤ 15 minutes) alterations that were dependent on the exercise order. These alterations were primarily influenced by the endurance exercise component. Moreover, study 1 provided substantial evidence suggesting that internal load measures, such as immune markers, may differ from external load measures. This indicates a disparity between immunological, perceived, and physical responses following both concurrent training orders. Therefore, it is crucial for practitioners to acknowledge these differences and take them into consideration when designing training programs.}, language = {en} } @phdthesis{ArboledaZapata2023, author = {Arboleda Zapata, Mauricio}, title = {Adapted inversion strategies for electrical resistivity data to explore layered near-surface environments}, doi = {10.25932/publishup-58135}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-581357}, school = {Universit{\"a}t Potsdam}, pages = {115}, year = {2023}, abstract = {The electrical resistivity tomography (ERT) method is widely used to investigate geological, geotechnical, and hydrogeological problems in inland and aquatic environments (i.e., lakes, rivers, and seas). The objective of the ERT method is to obtain reliable resistivity models of the subsurface that can be interpreted in terms of the subsurface structure and petrophysical properties. The reliability of the resulting resistivity models depends not only on the quality of the acquired data, but also on the employed inversion strategy. Inversion of ERT data results in multiple solutions that explain the measured data equally well. Typical inversion approaches rely on deterministic (local) schemes that use different smoothing and damping strategies to stabilize the inversion. However, such strategies suffer from the trade-off of smearing possible sharp subsurface interfaces separating layers with resistivity contrasts of up to several orders of magnitude. When prior information (e.g., from outcrops, boreholes, or other geophysical surveys) suggests sharp resistivity variations, it might be advantageous to adapt the parameterization and inversion strategies to obtain more stable and geologically reliable model solutions. Adaptations of traditional local inversions, for example by using different structural and/or geostatistical constraints, may help to retrieve sharper model solutions.
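Schematically, the deterministic (local) inversions referred to here minimize a regularized least-squares objective of the form (generic notation, not specific to this thesis):
\[ \Phi(\mathbf{m}) = \left\| \mathbf{W}_d\big(\mathbf{d} - f(\mathbf{m})\big) \right\|^2 + \lambda \left\| \mathbf{W}_m\big(\mathbf{m} - \mathbf{m}_{\mathrm{ref}}\big) \right\|^2, \]
where \mathbf{d} are the measured apparent resistivities, f(\mathbf{m}) is the forward response of model \mathbf{m}, \mathbf{W}_d is a data-weighting matrix, and \mathbf{W}_m is a roughness (smoothing) operator or, in the geostatistical case, an inverse-covariance operator whose correlation lengths control how sharp the recovered interfaces can be; a large damping factor \lambda stabilizes the inversion but smears sharp resistivity contrasts.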
In addition, layer-based model parameterization in combination with local or global inversion approaches can be used to obtain models with sharp boundaries. In this thesis, I study three typical layered near-surface environments in which prior information is used to adapt 2D inversion strategies to favor layered model solutions. In cooperation with the coauthors of Chapters 2-4, I consider two general strategies. Our first approach uses a layer-based model parameterization and a well-established global inversion strategy to generate ensembles of model solutions and assess uncertainties related to the non-uniqueness of the inverse problem. We apply this method to invert ERT data sets collected in an inland coastal area of northern France (Chapter~2) and offshore of two Arctic regions (Chapter~3). Our second approach consists of using geostatistical regularizations with different correlation lengths. We apply this strategy to a more complex subsurface scenario on a local intermountain alluvial fan in southwestern Germany (Chapter~4). Overall, our inversion approaches allow us to obtain resistivity models that agree with the general geological understanding of the studied field sites. These strategies are rather general and can be applied to various geological environments where a layered subsurface structure is expected. The flexibility of our strategies allows adaptations to invert other kinds of geophysical data sets such as seismic refraction or electromagnetic induction methods, and could be considered for joint inversion approaches.}, language = {en} } @phdthesis{Lamanna2015, author = {Lamanna, Francesco}, title = {Adaptive radiation and speciation in African weakly-electric fish}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-80097}, school = {Universit{\"a}t Potsdam}, pages = {114}, year = {2015}, abstract = {The rise of evolutionary novelties is one of the major drivers of evolutionary diversification. African weakly-electric fishes (Teleostei, Mormyridae) have undergone an outstanding adaptive radiation, putatively owing to their ability to communicate through species-specific Electric Organ Discharges (EODs) produced by a novel, muscle-derived electric organ. Indeed, such EODs might have acted as effective pre-zygotic isolation mechanisms, hence favoring ecological speciation in this group of fishes. Despite the evolutionary importance of this organ, genetic investigations regarding its origin and function have remained limited. The ultimate aim of this study is to better understand the genetic basis of EOD production by exploring the transcriptomic profiles of the electric organ and of its ancestral counterpart, the skeletal muscle, in the genus Campylomormyrus. After having established a set of reference transcriptomes using "Next-Generation Sequencing" (NGS) technologies, I performed in silico analyses of differential expression, in order to identify sets of genes that might be responsible for the functional differences observed between these two kinds of tissues. 
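(Editorial note: before the results, a minimal sketch of such a differential-expression comparison, using invented normalized count data and a simple log2 fold-change filter; the thesis relies on dedicated NGS analysis pipelines rather than this toy criterion.)

import numpy as np

# invented normalized expression counts per gene (electric organ vs muscle)
genes = {"ion_channel_A": (950.0, 120.0),
         "contractile_B": (15.0, 820.0),
         "housekeeping_C": (400.0, 390.0)}

def log2_fold_change(eo, muscle, pseudo=1.0):
    # pseudo-count avoids division by zero for unexpressed genes
    return np.log2((eo + pseudo) / (muscle + pseudo))

for gene, (eo, muscle) in genes.items():
    lfc = log2_fold_change(eo, muscle)
    status = "up in EO" if lfc > 1 else "down in EO" if lfc < -1 else "similar"
    print(f"{gene}: log2FC = {lfc:+.2f} ({status})")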
The results of such analyses indicate that: i) the loss of contractile activity and the decoupling of the excitation-contraction processes are reflected by the down-regulation of the corresponding genes in the electric organ; ii) the metabolic activity of the electric organ might be specialized towards the production and turnover of membrane structures; iii) several ion channels are highly expressed in the electric organ in order to increase excitability, and iv) several myogenic factors might be down-regulated by transcription repressors in the electric organ. A secondary task of this study is to improve the genus-level phylogeny of Campylomormyrus by applying new methods of inference based on the multispecies coalescent model, in order to reduce the conflict among gene trees and to reconstruct a phylogenetic tree as close as possible to the actual species tree. By using 1 mitochondrial and 4 nuclear markers, I was able to resolve the phylogenetic relationships among most of the currently described Campylomormyrus species. Additionally, I applied several coalescent-based species delimitation methods, in order to test the hypothesis that putatively cryptic species, which are distinguishable only by their EOD, belong to independently evolving lineages. The results of this analysis were additionally validated by investigating patterns of diversification at 16 microsatellite loci. The results suggest the presence of a new, as yet undescribed species of Campylomormyrus.}, language = {en} } @phdthesis{Feulner2006, author = {Feulner, Philine}, title = {Adaptive radiation, speciation, and reproductive isolation in African weakly electric fish : (Genus Campylomormyrus, Mormyridae, Teleostei)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-9560}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The ultimate aim of this study is to better understand the relevance of weak electricity in the adaptive radiation of the African mormyrid fish. The chosen model taxon, the genus Campylomormyrus, exhibits a wide diversity of electric organ discharge (EOD) waveform types. Their EOD is age-, sex-, and species-specific and is an important character for discriminating among species that are otherwise cryptic. After having established a complementary set of molecular markers, I examined the radiation of Campylomormyrus by a combined approach of molecular data (sequence data from the mitochondrial cytochrome b and the nuclear S7 ribosomal protein gene, as well as 18 microsatellite loci, specifically developed for the genus Campylomormyrus), observation of ontogeny and diversification of EOD waveform, and morphometric analysis of relevant morphological traits. I built the first convincing phylogenetic hypothesis for the genus Campylomormyrus. Taking advantage of microsatellite data, the identified phylogenetic clades proved to be reproductively isolated biological species. In this way, I detected at least six species occurring in sympatry near Brazzaville/Kinshasa (Congo Basin). By combining molecular data and EOD analyses, I could show that there are three cryptic species, characterised by their own adult EOD types, hidden under a common juvenile EOD form. In addition, I confirmed that adult male EOD is species-specific and is more different among closely related species than among more distantly related ones. This result and the observation that the EOD changes with maturity suggest its function as a reproductive isolation mechanism.
As a result of my morphometric shape analysis, I could assign species types to the identified reproductively isolated groups to produce a sound taxonomy of the group. Besides this, I could also identify morphological traits relevant for the divergences between the identified species. Among them, the variations I found in the shape of the trunk-like snout suggest the presence of different trophic specializations; therefore, this trait might have been involved in the ecological radiation of the group. In conclusion, I provided a convincing scenario envisioning an adaptive radiation of weakly electric fish triggered by sexual selection via assortative mating due to differences in EOD characteristics, but caused by a divergent selection of morphological traits correlated with the feeding ecology.}, subject = {Phylogenie}, language = {en} } @phdthesis{Teppner2000, author = {Teppner, Randolf}, title = {Adsorptionsschichten an fluiden Grenzfl{\"a}chen : Skalengesetze und Ionenverteilungen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000117}, school = {Universit{\"a}t Potsdam}, year = {2000}, abstract = {This work addressed two topics: 1. Ellipsometry of adsorption layers of low-molecular-weight surfactants at the water/air interface (ellipsometry is suited to measuring adsorbed amounts of non-ionic and zwitterionic surfactants; for ionic surfactants the counterions are additionally captured; ellipsometry measures changing counterion distributions). 2. Ellipsometric investigation of end-adsorbed polymer brushes at the water/oil interface (ellipsometry is not able to resolve different segment concentration profiles within the brush, but it is well suited to testing scaling laws for thicknesses and pressures as functions of the anchor density and chain length of the polymers; for polyisobutene brushes swollen in heptane it could be shown that they behave according to the theoretical predictions for brushes in a theta solvent)}, language = {de} } @phdthesis{Donner2006, author = {Donner, Reik Volker}, title = {Advanced methods for analysing and modelling multivariate palaeoclimatic time series}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12560}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The separation of natural and anthropogenically caused climatic changes is an important task of contemporary climate research. For this purpose, a detailed knowledge of the natural variability of the climate during warm stages is a necessary prerequisite. Besides model simulations and historical documents, this knowledge is mostly derived from analyses of so-called climatic proxy data like tree rings or sediment as well as ice cores. In order to be able to appropriately interpret such sources of palaeoclimatic information, suitable approaches of statistical modelling as well as methods of time series analysis are necessary, which are applicable to short, noisy, and non-stationary uni- and multivariate data sets. Correlations between different climatic proxy data within one or more climatological archives contain significant information about the climatic change on longer time scales. Based on an appropriate statistical decomposition of such multivariate time series, one may estimate dimensions in terms of the number of significant, linearly independent components of the considered data set.
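(Editorial note: a minimal sketch of this kind of dimension estimate, assuming a simple PCA/SVD decomposition with a fixed variance threshold; the thesis introduces a more careful criterion.)

import numpy as np

def linear_dimension(data, threshold=0.95):
    # estimate the number of significant, linearly independent components
    # of a multivariate record (rows = time, columns = proxies)
    centered = data - data.mean(axis=0)
    # singular values measure the variance carried by each component
    s = np.linalg.svd(centered, compute_uv=False)
    var = s**2 / np.sum(s**2)
    # smallest number of components explaining `threshold` of the variance
    return int(np.searchsorted(np.cumsum(var), threshold) + 1)

rng = np.random.default_rng(0)
proxies = rng.normal(size=(500, 8))  # hypothetical proxy record
print(linear_dimension(proxies))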
In the presented work, a corresponding approach is introduced, critically discussed, and extended with respect to the analysis of palaeoclimatic time series. Temporal variations of the resulting measures allow information about climatic changes to be derived. For an example of trace element abundances and grain-size distributions obtained near Cape Roberts (East Antarctica), it is shown that the variability of the dimensions of the investigated data sets clearly correlates with the Oligocene/Miocene transition about 24 million years before present as well as regional deglaciation events. Grain-size distributions in sediments give information about the predominance of different transportation as well as deposition mechanisms. Finite mixture models may be used to approximate the corresponding distribution functions appropriately. In order to give a complete description of the statistical uncertainty of the parameter estimates in such models, the concept of asymptotic uncertainty distributions is introduced. The relationship with the mutual component overlap as well as with the information missing due to grouping and truncation of the measured data is discussed for a particular geological example. An analysis of a sequence of grain-size distributions obtained in Lake Baikal reveals that there are certain problems accompanying the application of finite mixture models, which cause an extended climatological interpretation of the results to fail. As an appropriate alternative, a linear principal component analysis is used to decompose the data set into suitable fractions whose temporal variability correlates well with the variations of the average solar insolation on millennial to multi-millennial time scales. The abundance of coarse-grained material is obviously related to the annual snow cover, whereas a significant fraction of fine-grained sediments is likely transported from the Taklamakan desert via dust storms in the spring season.}, language = {en} } @phdthesis{Hecher2021, author = {Hecher, Markus}, title = {Advanced tools and methods for treewidth-based problem solving}, doi = {10.25932/publishup-51251}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512519}, school = {Universit{\"a}t Potsdam}, pages = {xv, 184}, year = {2021}, abstract = {In recent decades, there has been notable progress in solving the well-known Boolean satisfiability (Sat) problem, which can be witnessed by powerful Sat solvers. One of the reasons why these solvers are so fast are structural properties of instances that are exploited by the solvers' internals. This thesis deals with the well-studied structural property treewidth, which measures the closeness of an instance to being a tree. In fact, many problems are solvable in polynomial time in the instance size when parameterized by treewidth. In this work, we study advanced treewidth-based methods and tools for problems in knowledge representation and reasoning (KR). Thereby, we provide means to establish precise runtime results (upper bounds) for canonical problems relevant to KR. Then, we present a new type of problem reduction, which we call decomposition-guided (DG), which allows us to precisely monitor the treewidth when reducing from one problem to another. This new reduction type will be the basis for a long-open lower bound result for quantified Boolean formulas and allows us to design a new methodology for establishing runtime lower bounds for problems parameterized by treewidth.
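(Editorial note: as a small illustration of the parameter itself, not of the thesis' algorithms, a tree decomposition and an upper bound on treewidth can be computed with a standard heuristic, e.g. via networkx.)

import networkx as nx
from networkx.algorithms.approximation import treewidth_min_degree

# a cycle with a chord; its treewidth is 2
G = nx.cycle_graph(6)
G.add_edge(0, 3)

# min-degree heuristic: returns an upper bound and a tree decomposition
width, decomposition = treewidth_min_degree(G)
print(width)                      # heuristic upper bound on the treewidth
print(list(decomposition.nodes))  # bags of the tree decomposition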
Finally, despite these lower bounds, we provide an efficient implementation of algorithms that adhere to treewidth. Our approach finds suitable abstractions of instances, which are subsequently refined in a recursive fashion, and it uses Sat solvers for solving subproblems. It turns out that our resulting solver is quite competitive for two canonical counting problems related to Sat.}, language = {en} } @phdthesis{Yadavalli2014, author = {Yadavalli, Nataraja Sekhar}, title = {Advances in experimental methods to probe surface relief grating formation mechanism in photosensitive materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71213}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {When azobenzene-modified photosensitive polymer films are irradiated with light interference patterns, topographic variations in the film develop that follow the electric field vector distribution, resulting in the formation of a surface relief grating (SRG). The exact correspondence between the electric field vector orientation in the interference pattern and the local topographic minima or maxima of the SRG is in general difficult to determine. In my thesis, we have established a systematic procedure to accomplish the correlation between different interference patterns and the topography of the SRG. For this, we devise a new setup combining an atomic force microscope and a two-beam interferometer (IIAFM). With this set-up, it is possible to track the topography change in-situ, while at the same time changing polarization and phase of the impinging interference pattern. To validate our results, we have compared two photosensitive materials named in short as PAZO and trimer. This is the first time that an absolute correspondence between the local distribution of electric field vectors of the interference pattern and the local topography of the relief grating could be established exhaustively. In addition, using our IIAFM we found that for a certain polarization combination of two orthogonally polarized interfering beams, namely the SP (↕, ↔) interference pattern, the topography forms an SRG with only half the period of the interference pattern. Exploiting this phenomenon we are able to fabricate surface relief structures below the diffraction limit, with characteristic features measuring only 140 nm, by using far-field optics with a wavelength of 491 nm. We have also probed the stresses induced during the polymer mass transport by placing an ultra-thin gold film on top (5-30 nm). During irradiation, the metal film not only deforms along with the SRG formation, but ruptures in a regular and complex manner. The morphology of the cracks differs strongly depending on the electric field distribution in the interference pattern, even when the magnitude and the kinetics of the strain are kept constant. This implies a complex local distribution of the opto-mechanical stress along the topography grating.
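(Editorial note: for orientation, the period of a two-beam interference pattern with symmetric incidence at half-angle \theta is \Lambda = \lambda / (2 \sin\theta), so a half-period structure has a pitch of \Lambda/2 = \lambda / (4 \sin\theta). With \lambda = 491 nm, a 140 nm pitch would correspond to \sin\theta \approx 0.88 under this simplified geometry; the actual beam geometry of the setup may differ.)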
The neutron reflectivity measurements of the metal/polymer interface indicate penetration of the metal layer into the polymer, resulting in the formation of a bonding layer that confirms the transduction of light-induced stresses from the polymer layer to the metal film.}, language = {en} } @phdthesis{Reich2023, author = {Reich, Marvin}, title = {Advances in hydrogravimetry}, doi = {10.25932/publishup-60479}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604794}, school = {Universit{\"a}t Potsdam}, pages = {ix, 142}, year = {2023}, abstract = {The interest of the hydrological community in the gravimetric method has steadily increased within the last decade. This is reflected by numerous studies from many different groups with a broad range of approaches and foci. Many of those are traditionally rather hydrology-oriented groups who recognized gravimetry as a potential added value for their hydrological investigations. While this resulted in a variety of interesting and useful findings that extended the respective knowledge and confirmed the methodological potential, many interesting questions remained unresolved. This thesis presents efforts, analyses, and solutions carried out in this regard. Addressing and evaluating many of those unresolved questions, the research contributes to advancing hydrogravimetry, the combination of gravimetric and hydrological methods, by showing how gravimeters are a highly useful tool for applied hydrological field research. In the first part of the thesis, traditional setups of stationary terrestrial superconducting gravimeters are addressed. They are commonly installed within a dedicated building, the impermeable structure of which shields the underlying soil from natural exchange of water masses (infiltration, evapotranspiration, groundwater recharge). As gravimeters are most sensitive to mass changes directly beneath the meter, this could impede their suitability for local hydrological process investigations, especially for near-surface water storage changes (WSC). By studying temporal local hydrological dynamics at a dedicated site equipped with traditional hydrological measurement devices, both below and next to the building, the impact of these absent natural dynamics on the gravity observations was quantified. A comprehensive analysis with both a data-based and model-based approach led to the development of an alternative method for dealing with this limitation. Based on determinable parameters, this approach can be transferred to a broad range of measurement sites where gravimeters are deployed in similar structures. Furthermore, the extensive considerations on this topic enabled a more profound understanding of this so-called umbrella effect. The second part of the thesis is a pilot study on the field deployment of a superconducting gravimeter. A newly developed field enclosure for this gravimeter was tested in an outdoor installation adjacent to the building used to investigate the umbrella effect. Analyzing and comparing the gravity observations from both indoor and outdoor gravimeters showed that performance with respect to noise and stable environmental conditions was equivalent, while the sensitivity to near-surface WSC was greatly increased for the field-deployed instrument. Furthermore, it was demonstrated that the latter setup showed gravity changes independent of the depth at which mass changes occurred, given their sufficiently wide horizontal extent.
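(Editorial note: the depth independence noted above is what an idealized Bouguer plate predicts. For a laterally extensive water layer of thickness h and density \rho, the gravity effect is \Delta g = 2\pi G \rho h, independent of the layer's depth; with \rho = 1000 kg m^{-3} this amounts to roughly 0.42 µGal per centimeter of water, a common rule of thumb in hydrogravimetry, stated here as an illustrative approximation rather than the thesis' exact model.)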
As a consequence, the field setup is much better suited to monitoring WSC over both short and longer time periods. Based on a coupled data-modeling approach, its gravity time series was successfully used to infer and quantify local water budget components (evapotranspiration, lateral subsurface discharge) on the daily to annual time scale. The third part of the thesis applies data from a gravimeter field deployment to applied hydrological process investigations. To this end, again at the same site, a sprinkling experiment was conducted in a 15 x 15 m area around the gravimeter. A simple hydro-gravimetric model was developed for calculating the gravity response resulting from water redistribution in the subsurface. It was found that, from a theoretical point of view, different subsurface water distribution processes (macropore flow, preferential flow, wetting front advancement, bypass flow and perched water table rise) lead to a characteristic shape of their resulting gravity response curve. Although by using this approach it was possible to identify a dominating subsurface water distribution process for this site, some clear limitations stood out. Despite the advantage for field installations that gravimetry is a non-invasive and integral method, the problem of non-uniqueness could only be overcome by additional measurements (soil moisture, electrical resistivity tomography) within a joint evaluation. Furthermore, the simple hydrological model was efficient for theoretical considerations but lacked the capability to resolve some heterogeneous spatial structures of water distribution up to the needed scale. Nevertheless, this unique setup for plot- to small-scale hydrological process research underlines the high potential of gravimetry and the benefit of a field deployment. The fourth and last part is dedicated to the evaluation of potential uncertainties arising from the processing of gravity observations. The gravimeter senses all mass variations in an integral way, with the gravitational attraction being directly proportional to the magnitude of the change and inversely proportional to the square of the distance of the change. Consequently, all gravity effects (for example, tides, atmosphere, non-tidal ocean loading, polar motion, global hydrology and local hydrology) are included in an aggregated manner. To isolate the signal components of interest for a particular investigation, all non-desired effects have to be removed from the observations. This process is called reduction. The large-scale effects (tides, atmosphere, non-tidal ocean loading and global hydrology) cannot be measured directly, and global model data is used to describe and quantify each effect. Within the reduction process, model errors and uncertainties propagate into the residual, the result of the reduction. The focus of this part of the thesis is quantifying the resulting, propagated uncertainty for each individual correction. Different superconducting gravimeter installations were evaluated with respect to their topography, distance to the ocean and the climate regime. Furthermore, different time periods of aggregated gravity observation data were assessed, ranging from 1 hour up to 12 months. It was found that uncertainties were highest for a frequency of 6 months and smallest for hourly frequencies. Distance to the ocean influences the uncertainty of the non-tidal ocean loading component, while geographical latitude affects uncertainties of the global hydrological component.
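(Editorial note: schematically, the reduction described above can be written as g_{res}(t) = g_{obs}(t) - g_{tide}(t) - g_{atm}(t) - g_{ntol}(t) - g_{pol}(t) - g_{hyd,glob}(t) - d(t), where d(t) is the instrumental drift; the uncertainty of each model-based correction propagates additively into the residual g_{res}. This is a schematic decomposition assuming the usual correction chain, not necessarily the thesis' exact processing.)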
It is important to highlight that the resulting correction-induced uncertainties in the residual have the potential to mask the signal of interest, depending on the signal magnitude and its frequency. These findings can be used to assess the value of gravity data across a range of applications and geographic settings. In an overarching synthesis, all results and findings are discussed with a general focus on their added value for bringing hydrogravimetric field research to a new level. The conceptual and applied methodological benefits for hydrological studies are highlighted. Within an outlook on future setups and study designs, the enormous potential offered by gravimeters as hydrological field tools is highlighted once again.}, language = {en} } @phdthesis{Martin2017, author = {Martin, Thorsten}, title = {Advances in spatial econometrics and the political economy of local housing supply}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406836}, school = {Universit{\"a}t Potsdam}, pages = {207}, year = {2017}, abstract = {This cumulative dissertation consists of five chapters. In terms of research content, my thesis can be divided into two parts. Part one examines local interactions and spillover effects between small regional governments using spatial econometric methods. The second part focuses on patterns within municipalities and examines which institutions of citizen participation, namely elections and local petitions, influence local housing policies.}, language = {en} } @phdthesis{Kegelmann2019, author = {Kegelmann, Lukas}, title = {Advancing charge selective contacts for efficient monolithic perovskite-silicon tandem solar cells}, doi = {10.25932/publishup-42642}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426428}, school = {Universit{\"a}t Potsdam}, pages = {v, 155}, year = {2019}, abstract = {Hybrid organic-inorganic perovskites are one of the most promising material classes for photovoltaic energy conversion. In solar cells, the perovskite absorber is sandwiched between n- and p-type contact layers which selectively transport electrons and holes to the cell's cathode and anode, respectively. This thesis aims to advance contact layers in perovskite solar cells and unravel the impact of interface and contact properties on the device performance. Further, the contact materials are applied in monolithic perovskite-silicon heterojunction (SHJ) tandem solar cells, which can overcome the single-junction efficiency limits and attract increasing attention. Therefore, all contact layers must be highly transparent to foster light harvesting in the tandem solar cell design. In addition, the SHJ device restricts processing temperatures for the selective contacts to below 200°C. A comparative study of various electron selective contact materials, all processed below 180°C, in n-i-p type perovskite solar cells highlights that selective contacts and their interfaces to the absorber govern the overall device performance. Combining fullerenes and metal-oxides in a TiO2/PC60BM (phenyl-C60-butyric acid methyl ester) double-layer contact makes it possible to merge good charge extraction with minimized interface recombination. The layer sequence thereby achieved high stabilized solar cell performances up to 18.0\% and negligible current-voltage hysteresis, an otherwise pronounced phenomenon in this device design. Double-layer structures are therefore emphasized as a general concept to establish efficient and highly selective contacts.
Based on this success, the concept of combining desired properties of different materials is transferred to the p-type contact. Here, a mixture of the small molecule Spiro-OMeTAD [2,2',7,7'-tetrakis(N,N-di-p-methoxyphenylamine)-9,9'-spirobifluoren] and the doped polymer PEDOT [poly(3,4-ethylenedioxythiophene)] is presented as a novel hole selective contact. PEDOT thereby remarkably suppresses charge recombination at the perovskite surface, allowing an increase of quasi-Fermi level splitting in the absorber. Further, the addition of Spiro-OMeTAD into the PEDOT layer is shown to enhance charge extraction at the interface and allow high efficiencies up to 16.8\%. Finally, the knowledge on contact properties is applied to monolithic perovskite-SHJ tandem solar cells. The main goal is to optimize the top contact stack of doped Spiro-OMeTAD/molybdenum oxide (MoOx)/ITO towards higher transparency by two different routes. First, fine-tuning of the ITO deposition to mitigate chemical reduction of MoOx and increase the transmittance of MoOx/ITO stacks by 25\%. Second, replacing Spiro-OMeTAD with the alternative hole transport materials PEDOT/Spiro-OMeTAD mixtures, CuSCN or PTAA [poly(triaryl amine)]. Experimental results determine layer thickness constraints and validate optical simulations, which subsequently allow a realistic estimate of the respective tandem device performances. As a result, PTAA represents the most promising replacement for Spiro-OMeTAD, with a projected increase of the optimum tandem device efficiency for the architecture used herein by 2.9\% relative to 26.5\% absolute. The results also reveal general guidelines for further performance gains of the technology.}, language = {en} } @phdthesis{Pfalz2024, author = {Pfalz, Gregor}, title = {Advancing knowledge of Arctic lake system dynamics: A data-driven perspective on spatiotemporal patterns}, doi = {10.25932/publishup-63655}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-636554}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 136}, year = {2024}, abstract = {Ecosystems play a pivotal role in addressing climate change but are also highly susceptible to drastic environmental changes. Investigating their historical dynamics can enhance our understanding of how they might respond to unprecedented future environmental shifts. With Arctic lakes currently under substantial pressure from climate change, lessons from the past can guide our understanding of potential disruptions to these lakes. However, individual lake systems are multifaceted and complex. Traditional isolated lake studies often fail to provide a global perspective because localized nuances (individual lake parameters, catchment areas, and lake histories) can overshadow broader conclusions. In light of these complexities, a more nuanced approach is essential to analyze lake systems in a global context. A key to addressing this challenge lies in the data-driven analysis of sedimentological records from various northern lake systems. This dissertation emphasizes lake systems in the northern Eurasian region, particularly in Russia (n=59). For this doctoral thesis, we collected sedimentological data from various sources, which required a standardized framework for further analysis. Therefore, we designed a conceptual model for integrating and standardizing heterogeneous multi-proxy data into a relational database management system (PostgreSQL).
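(Editorial note: a minimal sketch of such a relational layout with illustrative, hypothetical table and column names; the thesis' actual PostgreSQL schema is richer. SQLite is used here only to keep the example self-contained.)

import sqlite3

# illustrative schema: lakes, sediment cores, and depth-resolved proxy
# measurements linked by foreign keys (names are hypothetical)
conn = sqlite3.connect(":memory:")
conn.executescript("""
CREATE TABLE lake (
    lake_id   INTEGER PRIMARY KEY,
    name      TEXT,
    latitude  REAL,
    longitude REAL
);
CREATE TABLE core (
    core_id INTEGER PRIMARY KEY,
    lake_id INTEGER REFERENCES lake(lake_id)
);
CREATE TABLE measurement (
    core_id  INTEGER REFERENCES core(core_id),
    depth_cm REAL,
    proxy    TEXT,   -- e.g. 'TOC', 'grain_size'
    value    REAL,
    unit     TEXT
);
""")
conn.execute("INSERT INTO lake VALUES (1, 'Hypothetical Lake', 72.5, 126.0)")
conn.execute("INSERT INTO core VALUES (1, 1)")
conn.execute("INSERT INTO measurement VALUES (1, 12.0, 'TOC', 1.8, 'percent')")
for row in conn.execute("""SELECT l.name, m.proxy, m.value
                           FROM measurement m
                           JOIN core c ON c.core_id = m.core_id
                           JOIN lake l ON l.lake_id = c.lake_id"""):
    print(row)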
Creating a database from the collected data enabled comparative numerical analyses between spatially separated lakes as well as between different proxies. When analyzing numerous lakes, establishing a common frame of reference was crucial. We achieved this by converting proxy values from depth dependency to age dependency. This required consistent age calculations across all lakes and proxies using a single age-depth modeling software package. Recognizing the broader implications and potential pitfalls of this, we developed the LANDO approach ("Linked Age and Depth Modelling"). LANDO is an innovative integration of multiple age-depth modeling software packages into a singular, cohesive platform (Jupyter Notebook). Beyond its ability to aggregate data from five renowned age-depth modeling software packages, LANDO uniquely empowers users to filter out implausible model outcomes using robust geoscientific data. Our method is not only novel but also significantly enhances the accuracy and reliability of lake analyses. Considering the preceding steps, this doctoral thesis further examines the relationship between carbon in sediments and temperature over the last 21,000 years. Initially, we hypothesized a positive correlation between carbon accumulation in lakes and modelled paleotemperature. Our homogenized dataset from heterogeneous lakes confirmed this association, even if the highest temperatures throughout our observation period do not correlate with the highest carbon values. We assume that rapid warming events contribute more to high accumulation, while sustained warming leads to carbon outgassing. Considering the current high concentration of carbon in the atmosphere and rising temperatures, ongoing climate change could cause northern lake systems to contribute to a further increase in atmospheric carbon (positive feedback loop). While our findings underscore the reliability of both our standardized data and the LANDO method, expanding our dataset might offer even greater assurance in our conclusions.}, language = {en} } @phdthesis{Ayzel2021, author = {Ayzel, Georgy}, title = {Advancing radar-based precipitation nowcasting}, doi = {10.25932/publishup-50426}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504267}, school = {Universit{\"a}t Potsdam}, pages = {xx, 68}, year = {2021}, abstract = {Precipitation forecasting has an important place in everyday life - during the day we may have a dozen brief conversations about the likelihood that it will rain this evening or at the weekend. Should you take an umbrella for a walk? Or should you invite your friends for a barbecue? It will certainly depend on what your weather application shows. While for years people were guided by the precipitation forecasts issued for a particular region or city several times a day, the widespread availability of weather radars allowed us to obtain forecasts at a much higher spatiotemporal resolution of minutes in time and hundreds of meters in space. Hence, radar-based precipitation nowcasting, that is, very-short-range forecasting (typically up to 1-3 h), has become an essential technique, also in various professional application contexts, e.g., early warning, sewage control, or agriculture. There are two major components comprising a system for precipitation nowcasting: radar-based precipitation estimates, and models to extrapolate that precipitation to the imminent future.
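(Editorial note: to make the tracking-plus-extrapolation idea concrete before the model descriptions below, here is a strongly simplified sketch using OpenCV's Farneback optical flow and constant-vector backward warping; it is illustrative only and not the actual rainymotion implementation.)

import numpy as np
import cv2

def persistence_nowcast(prev_frame, curr_frame, lead_steps):
    # prev_frame, curr_frame: consecutive radar frames as uint8 arrays;
    # returns lead_steps extrapolated frames assuming constant motion
    flow = cv2.calcOpticalFlowFarneback(prev_frame, curr_frame, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    h, w = curr_frame.shape
    xs, ys = np.meshgrid(np.arange(w), np.arange(h))
    nowcasts = []
    for step in range(1, lead_steps + 1):
        # backward scheme: sample each pixel from where the rain came from
        map_x = (xs - flow[..., 0] * step).astype(np.float32)
        map_y = (ys - flow[..., 1] * step).astype(np.float32)
        nowcasts.append(cv2.remap(curr_frame, map_x, map_y,
                                  interpolation=cv2.INTER_LINEAR))
    return nowcasts

# hypothetical 5-min radar frames; 12 steps correspond to a 1 h nowcast
f0 = np.zeros((64, 64), np.uint8)
f0[20:28, 10:18] = 40
f1 = np.roll(f0, 2, axis=1)  # a cell moving 2 pixels per time step
frames = persistence_nowcast(f0, f1, lead_steps=12)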
While acknowledging the fundamental importance of radar-based precipitation retrieval for precipitation nowcasts, this thesis focuses only on the model development: the establishment of open and competitive benchmark models, the investigation of the potential of deep learning, and the development of procedures for nowcast error diagnosis and isolation that can guide model development. The present landscape of computational models for precipitation nowcasting still struggles with the availability of open software implementations that could serve as benchmarks for measuring progress. Focusing on this gap, we have developed and extensively benchmarked a stack of models based on different optical flow algorithms for the tracking step and a set of parsimonious extrapolation procedures based on image warping and advection. We demonstrate that these models provide skillful predictions comparable with or even superior to state-of-the-art operational software. We distribute the corresponding set of models as a software library, rainymotion, which is written in the Python programming language and openly available at GitHub (https://github.com/hydrogo/rainymotion). That way, the library acts as a tool for providing fast, open, and transparent solutions that could serve as a benchmark for further model development and hypothesis testing. One of the promising directions for model development is to challenge the potential of deep learning - a subfield of machine learning that refers to artificial neural networks with deep architectures, which may consist of many computational layers. Deep learning showed promising results in many fields of computer science, such as image and speech recognition, or natural language processing, where it started to dramatically outperform reference methods. The high benefit of using "big data" for training is among the main reasons for that. Hence, the emerging interest in deep learning in atmospheric sciences is driven by and concurrent with the increasing availability of data - both observational and model-based. The large archives of weather radar data provide a solid basis for investigation of deep learning potential in precipitation nowcasting: one year of national 5-min composites for Germany comprises around 85 billion data points. To this aim, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. RainNet was trained to predict continuous precipitation intensities at a lead time of 5 min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900 km x 900 km and has a resolution of 1 km in space and 5 min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In these experiments, RainNet was applied recursively in order to achieve lead times of up to 1 h. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the previously developed rainymotion library. RainNet significantly outperformed the benchmark models at all lead times up to 60 min for the routine verification metrics mean absolute error (MAE) and critical success index (CSI) at intensity thresholds of 0.125, 1, and 5 mm/h. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15 mm/h).
The limited ability of RainNet to predict high rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5 min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16 km and below. Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5 min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5 min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times. Hence, we propose several options to address this issue in prospective research on model development for precipitation nowcasting, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance. The model development together with the verification experiments for both conventional and deep learning model predictions also revealed the need to better understand the source of forecast errors. Understanding the dominant sources of error in specific situations should help in guiding further model improvement. The total error of a precipitation nowcast consists of an error in the predicted location of a precipitation feature and an error in the change of precipitation intensity over lead time. So far, verification measures have not allowed the location error to be isolated, making it difficult to specifically improve nowcast models with regard to location prediction. To fill this gap, we introduced a framework to directly quantify the location error. To that end, we detect and track scale-invariant precipitation features (corners) in radar images. We then consider these observed tracks as the true reference in order to evaluate the performance (or, inversely, the error) of any model that aims to predict the future location of a precipitation feature. Hence, the location error of a forecast at any lead time ahead of the forecast time corresponds to the Euclidean distance between the observed and the predicted feature location at the corresponding lead time. Based on this framework, we carried out a benchmarking case study using one year's worth of weather radar composites of the DWD. We evaluated the performance of four extrapolation models, two of which are based on the linear extrapolation of corner motion, while the remaining two are based on the Dense Inverse Search (DIS) method: motion vectors obtained from DIS are used to predict feature locations by linear and Semi-Lagrangian extrapolation. For all competing models, the mean location error exceeds a distance of 5 km after 60 min, and 10 km after 110 min. At least 25\% of all forecasts exceed an error of 5 km after 50 min, and of 10 km after 90 min. Even for the best models in our experiment, at least 5\% of the forecasts will have a location error of more than 10 km after 45 min.
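(Editorial note: a minimal sketch of this location-error measure on hypothetical track data; the thesis' framework detects and tracks corners in real radar composites.)

import numpy as np

def location_error(observed, lead_steps):
    # observed: (T, 2) array of feature positions in km, 5-min spacing;
    # extrapolate linearly from the first two positions and return the
    # Euclidean distance to the observed track at each lead time
    velocity = observed[1] - observed[0]          # km per time step
    errors = []
    for k in range(1, lead_steps + 1):
        predicted = observed[1] + velocity * k    # constant-motion forecast
        errors.append(np.linalg.norm(predicted - observed[1 + k]))
    return np.array(errors)

# hypothetical curving storm track
t = np.arange(14)
track = np.column_stack([2.0 * t, 0.05 * t**2])
print(location_error(track, lead_steps=12))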
When we relate such errors to application scenarios that are typically suggested for precipitation nowcasting, e.g., early warning, it becomes obvious that location errors matter: the order of magnitude of these errors is about the same as the typical extent of a convective cell. Hence, the uncertainty of precipitation nowcasts at such length scales - just as a result of locational errors - can be substantial already at lead times of less than 1 h. Being able to quantify the location error should hence guide any model development that is targeted towards its minimization. To that aim, we also consider the high potential of using deep learning architectures specific to the assimilation of sequential (track) data. Last but not least, the thesis demonstrates the benefits of a general movement towards open science for model development in the field of precipitation nowcasting. All the presented models and frameworks are distributed as open repositories, thus enhancing transparency and reproducibility of the methodological approach. Furthermore, they are readily available to be used for further research studies, as well as for practical applications.}, language = {en} } @phdthesis{Engels2004, author = {Engels, Eva}, title = {Adverb placement : an optimality theoretic approach}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-2453}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Adverb positioning is guided by syntactic, semantic, and pragmatic considerations and is subject to cross-linguistic as well as language-specific variation. The goal of the thesis is to identify the factors that determine adverb placement in general (Part I) as well as in constructions in which the adverb's sister constituent is deprived of its phonetic material by movement or ellipsis (gap constructions, Part II) and to provide an Optimality Theoretic approach to the contrasts in the effects of these factors on the distribution of adverbs in English, French, and German. In Optimality Theory (Prince \& Smolensky 1993), grammaticality is defined as optimal satisfaction of a hierarchy of violable constraints: for a given input, a set of output candidates are produced out of which that candidate is selected as grammatical output which optimally satisfies the constraint hierarchy. Since grammaticality crucially relies on the hierarchic relations of the constraints, cross-linguistic variation can be traced back to differences in the language-specific constraint rankings. Part I shows how diverse phenomena of adverb placement can be captured by corresponding constraints and their relative rankings: - contrasts in the linearization of adverbs and verbs/auxiliaries in English and French - verb placement in German and the filling of the prefield position - placement of focus-sensitive adverbs - fronting of topical arguments and adverbs Part II extends the analysis to a particular phenomenon of adverb positioning: the avoidance of adverb attachment to a phonetically empty constituent (gap). English and French are similar in that the acceptability of pre-gap adverb placement depends on the type of adverb, its scope, and the syntactic construction (English: wh-movement vs. topicalization / VP Fronting / VP Ellipsis, inverted vs. non-inverted clauses; French: CLLD vs. Cleft, simple vs. periphrastic tense). Yet, the two languages differ in which strategies a specific type of adverb may pursue to escape placement in front of a certain type of gap. 
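(Editorial note: as an illustration of the evaluation logic in Optimality Theory described above, with invented candidates and constraints rather than the thesis' actual analysis: once the constraints are ranked, the winning candidate is the one whose violation profile is lexicographically smallest.)

# toy Optimality Theory evaluation: constraints are ordered by rank,
# candidates carry violation counts per constraint (invented data)
ranking = ["C_HIGH", "C_MID", "C_LOW"]
candidates = {
    "adverb before verb": {"C_HIGH": 0, "C_MID": 2, "C_LOW": 0},
    "adverb after verb":  {"C_HIGH": 0, "C_MID": 1, "C_LOW": 3},
    "adverb fronted":     {"C_HIGH": 1, "C_MID": 0, "C_LOW": 0},
}

def optimal(cands, ranking):
    # compare violation profiles lexicographically along the ranking,
    # implementing strict domination of higher-ranked constraints
    return min(cands, key=lambda c: tuple(cands[c][r] for r in ranking))

print(optimal(candidates, ranking))  # -> 'adverb after verb'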
In contrast to English and French, placement of an adverb in front of a gap never gives rise to ungrammaticality in German. Rather, word ordering has to obey the syntactic, semantic, and pragmatic principles discussed in Part I; whether or not it results in adverb attachment to a phonetically empty constituent seems to be irrelevant: though constraints are active in every language, the emergence of a visible effect of their requirements in a given language depends on their relative ranking. The complex interaction of the diverse factors as well as their divergent effects on adverb placement in the various languages are accounted for by the universal constraints and their language-specific hierarchic relations in the OT framework.}, subject = {Adverb}, language = {en} } @phdthesis{Schmidt2014, author = {Schmidt, Lukas}, title = {Aerosols and boundary layer structure over Arctic sea ice based on airborne lidar and dropsonde measurements}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-75076}, school = {Universit{\"a}t Potsdam}, pages = {vii, 98, xiii}, year = {2014}, abstract = {The atmosphere over the Arctic Ocean is strongly influenced by the distribution of sea ice and open water. Leads in the sea ice produce strong convective fluxes of sensible and latent heat and release aerosol particles into the atmosphere. They increase the occurrence of clouds and modify the structure and characteristics of the atmospheric boundary layer (ABL) and thereby influence the Arctic climate. In the course of this study, aircraft measurements were performed over the western Arctic Ocean as part of the campaign PAMARCMIP 2012 of the Alfred Wegener Institute for Polar and Marine Research (AWI). Backscatter from aerosols and clouds within the lower troposphere and the ABL were measured with the nadir-pointing Airborne Mobile Aerosol Lidar (AMALi), and dropsondes were launched to obtain profiles of meteorological variables. Furthermore, in situ measurements of aerosol properties, meteorological variables and turbulence were part of the campaign. The measurements covered a broad range of atmospheric and sea ice conditions. In this thesis, properties of the ABL over Arctic sea ice with a focus on the influence of open leads are studied based on the data from the PAMARCMIP campaign. The height of the ABL is determined by different methods that are applied to dropsonde and AMALi backscatter profiles. ABL heights are compared for different flights representing different conditions of the atmosphere and of sea ice and open water influence. The different criteria for ABL height that are applied show large variation in terms of agreement among each other, depending on the characteristics of the ABL and its history. It is shown that ABL height determination from lidar backscatter by methods commonly used under mid-latitude conditions is applicable to the Arctic ABL only under certain conditions. Aerosol or clouds within the ABL are needed as a tracer for ABL height detection from backscatter. Hence an aerosol source close to the surface is necessary, which is typically present under the influence of open water and therefore convective conditions. However, it is not always possible to distinguish residual layers from the actual ABL. Stable boundary layers are generally difficult to detect. To illustrate the complexity of the Arctic ABL and processes therein, four case studies are analyzed, each of which represents a snapshot of the interplay between atmosphere and underlying sea ice or water surface.
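(Editorial note: one common criterion of this kind is the gradient method, sketched minimally below; the thesis compares several criteria, including dropsonde-based ones. It locates the ABL top at the strongest decrease of backscatter with altitude, which is why an aerosol tracer is required.)

import numpy as np

def abl_height_gradient(altitude, backscatter):
    # gradient method: ABL top where the backscatter profile
    # decreases most sharply with altitude (needs aerosol as tracer)
    gradient = np.gradient(backscatter, altitude)
    return altitude[np.argmin(gradient)]

# hypothetical profile: well-mixed aerosol below 400 m, clean air above
z = np.linspace(0.0, 1500.0, 151)
beta = 1.0 / (1.0 + np.exp((z - 400.0) / 50.0)) + 0.05
print(abl_height_gradient(z, beta))  # close to 400 m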
Influences of leads and open water on the aerosol and clouds within the ABL are identified and discussed. Leads are observed to cause the formation of fog and cloud layers within the ABL by humidity emission. Furthermore, they decrease the stability and increase the height of the ABL, and consequently facilitate entrainment of air and aerosol layers from the free troposphere.}, language = {en} } @phdthesis{Timme2023, author = {Timme, Sinika}, title = {Affective responses during exercise and situated exercise-related decision-making}, doi = {10.25932/publishup-61432}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614323}, school = {Universit{\"a}t Potsdam}, pages = {V, 123}, year = {2023}, abstract = {The field of exercise psychology has established robust evidence on the health benefits of physical activity. However, interventions to promote sustained exercise behavior have often proven ineffective. This dissertation addresses challenges in the field, particularly the neglect of situated and affective processes in understanding and changing exercise behavior. Dual process models, considering both rational and affective processes, have gained recognition. The Affective Reflective Theory of Physical Inactivity and Exercise (ART) is a notable model in this context, positing that situated processes in the moment of choice influence exercise decisions and subsequent exercise behavior. The dissertation identifies current challenges within exercise psychology and proposes methodological and theoretical advancements. It emphasizes the importance of momentary affective states and situated processes, offering alternatives to self-reported measures and advocating for a more comprehensive modeling of individual variability. The focus is on the affective processes during exercise, theorized to reappear in momentary decision-making, shaping overall exercise behavior. The first publication introduces a new method using automated facial action analysis to measure variable affective responses during exercise. It explores how these behavioral indicators covary with self-reported measures of affective valence and perceived exertion. The second publication delves into situated processes at the moment of choice between exercise and non-exercise options, revealing that intraindividual factors play a crucial role in explaining exercise-related choices. The third publication presents an open-source research tool, the Decisional Preferences in Exercising Test (DPEX), designed to capture repeated situated decisions and predict exercise behavior based on past experiences. The findings challenge previous assumptions and provide insights into the complex interplay of affective responses, situated processes, and exercise choices. The dissertation underscores the need for individualized interventions that manipulate affective responses during exercise and calls for systematic testing to establish causal links to automatic affective processes and subsequent exercise behavior.
This dissertation highlights the necessity for methodological and conceptual refinements in understanding and promoting exercise behavior, ultimately contributing to the broader goal of combating increasing inactivity trends.}, language = {en} } @phdthesis{Busching2014, author = {Busching, Robert}, title = {Affektive und kognitive Desensibilisierung als Konsequenz von Mediengewaltkonsum : eine experimentelle Untersuchung auf Basis lerntheoretischer {\"U}berlegungen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71360}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Users of violent media often admit that they consume fictional violent media, yet at the same time claim that this does not influence their behavior outside the media context. They argue that they can easily distinguish between things learned in a fictional context and things learned in reality. In contrast to these claims, meta-analyses show medium-sized effects for the relationship between violent media consumption and aggressive behavior. These findings can only be explained if media users also apply violent learning experiences outside the media context. One process that links learning experiences within the media context to behavior in the real world is desensitization, which is often defined as a reduction of negative affect toward violence. Four experiments were conducted to investigate the desensitization process. The first hypothesis examined in this work was that the more frequently people consume violent media, the less negative affect they show toward images of real violence. However, it was assumed that this evaluation is restricted to depictions of real violence and is not found for images without violent content that elicit negative affect. The second hypothesis concerned affect during the consumption of media violence. Here it was assumed that especially people who enjoy violence in the media show less negative affect toward depictions of real violence. The final hypothesis dealt with cognitive desensitization and predicted that violent media consumption leads to a transfer of reactions that are normally shown toward violent stimuli to originally neutral stimuli. The first experiment (N = 57) examined whether the habitual use of violent media predicts self-reported affect (valence and arousal) toward depictions of real violence and non-violent depictions that elicit negative affect. Habitual use of violent media predicted less negative valence and less general arousal for both the violent and the non-violent images. The second experiment (N = 103) likewise examined the relationship between habitual violent media consumption and affective reactions toward images of real violence and negative affect-eliciting images. Affect while viewing violent media was added as a further predictor. Affect toward the images was additionally recorded with psychophysiological measures (valence: C. supercilii; arousal: skin conductance response).
As before, habitual violent media consumption predicted less self-reported arousal and less negative valence for both the violent and the negative non-violent images. The physiological measures replicated this result. However, a different pattern emerged for affect during the consumption of media violence. People who enjoyed violence in the media more strongly showed reduced responsivity toward violence on all four measures. Furthermore, for three of these four measures (self-reported valence, activity of the C. supercilii, and skin conductance response) this relationship was restricted to the violent images, with no or only a small effect on the negative but non-violent images. The third experiment (N = 73) examined affect while participants played a computer game. The game was programmed specifically for this experiment so that individual actions in the game could be related to the activity of the C. supercilii, the indicator of negative affect. The analysis of the C. supercilii showed that repeatedly performing aggressive game moves led to a decline in the negative affect accompanying the aggressive actions. Negative affect during violent game moves in turn predicted the affective reaction toward depictions of violent images, but not toward the negative images. The fourth experiment (N = 77) examined cognitive desensitization, namely the development of associations between neutral and aggressive cognitions. Participants played a first-person shooter in either a ship or a city level. The relationship between the neutral constructs (ship/city) and aggressive cognitions was measured with a lexical decision task. Playing the ship/city level led to shorter reaction times for aggressive words when they followed a ship or city prime, respectively. This showed that the neutral concepts contained in the game become linked to aggressive nodes. The results of these four experiments were discussed within a learning-theory framework for conceptualizing desensitization.}, language = {de} } @phdthesis{Raeling2016, author = {R{\"a}ling, Romy}, title = {Age of acquisition and semantic typicality effects}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-95943}, school = {Universit{\"a}t Potsdam}, pages = {x, 133}, year = {2016}, abstract = {Age of acquisition (AOA) is a psycholinguistic variable that significantly influences behavioural measures (response times and accuracy rates) in tasks that require lexical and semantic processing. Unlike the origin of semantic typicality (TYP), which is assumed to lie at the semantic level, the origin of AOA is controversially discussed. Different theories propose AOA effects to originate either at the semantic level or at the link between semantics and phonology (lemma level). The dissertation aims at investigating the influence of AOA and its interdependence with the semantic variable TYP on semantic processing in particular, in order to pinpoint the origin of AOA effects.
Therefore, three studies were conducted that considered the variables AOA and TYP in semantic processing tasks (category verifications and animacy decisions) by means of behavioural and partly electrophysiological (ERP) data in different populations (healthy young and elderly participants as well as semantically impaired individuals with aphasia (IWA)). The behavioural and electrophysiological data of the three studies provide evidence for distinct processing levels of the variables AOA and TYP. The data further support previous assumptions on a semantic origin for TYP but question such an origin for AOA. The findings, however, support an origin of AOA effects at the transition between the word form (phonology) and the semantic level that can be captured at the behavioural but not at the electrophysiological level.}, language = {en} } @phdthesis{Schrauth2022, author = {Schrauth, Philipp}, title = {Agglomerations, air quality and urban transformation}, doi = {10.25932/publishup-58608}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-586087}, school = {Universit{\"a}t Potsdam}, pages = {253}, year = {2022}, abstract = {Die vorliegende Dissertation stellt empirische Untersuchungen {\"u}ber den Zusammenhang von st{\"a}dtischem Leben und dessen {\"o}konomischen Kosten, insbesondere f{\"u}r die Umwelt, an. Dabei werden zum einen bestehende Forschungsl{\"u}cken zum Einfluss von Bev{\"o}lkerungsdichte auf die Luftqualit{\"a}t geschlossen und zum anderen innovative Politikmaßnahmen im Verkehrsbereich untersucht, die Ballungsr{\"a}ume nachhaltiger gestalten sollen. Im Zentrum der Betrachtungen stehen Luftverschmutzung, Staus und Verkehrsunf{\"a}lle, die f{\"u}r Fragen der allgemeinen Wohlfahrt bedeutend sind und erhebliche Kostenfaktoren f{\"u}r urbanes Leben darstellen. Von ihnen ist ein betr{\"a}chtlicher Anteil der Weltbev{\"o}lkerung betroffen. W{\"a}hrend im Jahr 2018 bereits 55\% der Menschen weltweit in St{\"a}dten lebten, soll dieser Anteil bis zum Jahr 2050 ungef{\"a}hr 68\% betragen. Die vier in sich geschlossenen Kapitel dieser Arbeit lassen sich in zwei Abschnitte aufteilen: In den Kapiteln 2 und 3 werden neue kausale Erkenntnisse {\"u}ber das komplexe Zusammenspiel von st{\"a}dtischen Strukturen und Luftverschmutzung erbracht. Kapitel 4 und 5 untersuchen anschließend politische Maßnahmen zur F{\"o}rderung nicht-motorisierter Verkehrsmittel und deren Einfluss auf Luftqualit{\"a}t sowie Staugeschehen und Verkehrsunf{\"a}lle.}, language = {en} } @phdthesis{Lampert2009, author = {Lampert, Astrid}, title = {Airborne lidar observations of tropospheric arctic clouds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-41211}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Due to the unique environmental conditions and different feedback mechanisms, the Arctic region is especially sensitive to climate changes. The influence of clouds on the radiation budget is substantial, but difficult to quantify and parameterize in models. Within the framework of this PhD project, elastic backscatter and depolarization lidar observations of Arctic clouds were performed during the international Arctic Study of Tropospheric Aerosol, Clouds and Radiation (ASTAR) from Svalbard in March and April 2007. 
Clouds were probed above the inaccessible Arctic Ocean with a combination of airborne instruments: The Airborne Mobile Aerosol Lidar (AMALi) of the Alfred Wegener Institute for Polar and Marine Research provided information on the vertical and horizontal extent of clouds along the flight track, optical properties (backscatter coefficient), and cloud thermodynamic phase. From the data obtained by the spectral albedometer (University of Mainz), the cloud phase and cloud optical thickness were deduced. Furthermore, in situ observations with the Polar Nephelometer, Cloud Particle Imager and Forward Scattering Spectrometer Probe (Laboratoire de M{\´e}t{\´e}orologie Physique, France) provided information on the microphysical properties, cloud particle size and shape, concentration, extinction, liquid and ice water content. In the thesis, a data set of four flights is analyzed and interpreted. The lidar observations served to detect atmospheric structures of interest, which were then probed by in situ technique. With this method, an optically subvisible ice cloud was characterized by the ensemble of instruments (10 April 2007). Radiative transfer simulations based on the lidar, radiation and in situ measurements allowed the calculation of the cloud forcing, amounting to -0.4 W m^-2. This slight surface cooling is negligible on a local scale. However, thin Arctic clouds have been reported more frequently in winter time, when the clouds' effect on longwave radiation (a surface warming of 2.8 W m^-2) is not balanced by the reduced shortwave radiation (surface cooling). Boundary layer mixed-phase clouds were analyzed for two days (8 and 9 April 2007). The typical structure consisting of a predominantly liquid water layer on cloud top and ice crystals below was confirmed by all instruments. The lidar observations were compared to European Centre for Medium-Range Weather Forecasts (ECMWF) meteorological analyses. A change of air masses along the flight track was evidenced in the airborne data by a small, completely glaciated cloud part within the mixed-phase cloud system. This indicates that the updraft necessary for the formation of new cloud droplets at cloud top is disturbed by the mixing processes. The measurements served to quantify the shortcomings of the ECMWF model in describing mixed-phase clouds. As the partitioning of cloud condensate into liquid and ice water is done by a diagnostic equation based on temperature, the cloud structures consisting of a liquid cloud top layer and ice below could not be reproduced correctly. A small amount of liquid water was calculated for the lowest (and warmest) part of the cloud only. Further, the liquid water content was underestimated by an order of magnitude compared to in situ observations. The airborne lidar observations of 9 April 2007 were compared to spaceborne lidar data from the satellite Cloud-Aerosol Lidar and Infrared Pathfinder Satellite Observations (CALIPSO). The systems agreed on the increase of cloud top height along the same flight track. However, during the time delay of 1 h between the lidar measurements, advection and cloud processing took place, and a detailed comparison of small-scale cloud structures was not possible. A double-layer cloud at an altitude of 4 km was observed with lidar at the west coast, in the direct vicinity of Svalbard (14 April 2007). The cloud system consisted of two geometrically thin liquid cloud layers (each 150 m thick) with ice below each layer. 
While the upper one was possibly formed by orographic lifting under the influence of westerly winds, or by the vertical wind shear shown by ECMWF analyses, the lower one might be the result of evaporating precipitation out of the upper layer. The existence of ice precipitation between the two layers supports the hypothesis that humidity released from evaporating precipitation was cooled and consequently condensed as it experienced the radiative cooling from the upper layer. In summary, a unique data set characterizing tropospheric Arctic clouds was collected with lidar, in situ and radiation instruments. The joint evaluation with meteorological analyses allowed detailed insight into cloud properties, cloud evolution processes and radiative effects.}, language = {en} } @phdthesis{NickeltCzycykowski2008, author = {Nickelt-Czycykowski, Iliya Peter}, title = {Aktive Regionen der Sonnenoberfl{\"a}che und ihre zeitliche Variation in zweidimensionaler Spektro-Polarimetrie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-25524}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Die Arbeit beschreibt die Analyse von Beobachtungen zweier Sonnenflecken in zweidimensionaler Spektro-Polarimetrie. Die Daten wurden mit dem Fabry-P{\´e}rot-Interferometer der Universit{\"a}t G{\"o}ttingen am Vakuum-Turm-Teleskop auf Teneriffa erfasst. Von der aktiven Region NOAA 9516 wurde der volle Stokes-Vektor des polarisierten Lichts in der Absorptionslinie bei 630,249 nm in Einzelaufnahmen beobachtet, und von der aktiven Region NOAA 9036 wurde bei 617,3 nm Wellenl{\"a}nge eine 90-min{\"u}tige Zeitserie des zirkular polarisierten Lichts aufgezeichnet. Aus den reduzierten Daten werden Ergebniswerte f{\"u}r Intensit{\"a}t, Geschwindigkeit in Beobachtungsrichtung, magnetische Feldst{\"a}rke sowie verschiedene weitere Plasmaparameter abgeleitet. Mehrere Ans{\"a}tze zur Inversion solarer Modellatmosph{\"a}ren werden angewendet und verglichen. Die teilweise erheblichen Fehlereinfl{\"u}sse werden ausf{\"u}hrlich diskutiert. Das Frequenzverhalten der Ergebnisse und Abh{\"a}ngigkeiten nach Ort und Zeit werden mit Hilfe der Fourier- und Wavelet-Transformation weiter analysiert. Als Resultat l{\"a}sst sich die Existenz eines hochfrequenten Bandes f{\"u}r Geschwindigkeitsoszillationen mit einer zentralen Periode von 75 Sekunden (13 mHz) best{\"a}tigen. In gr{\"o}ßeren photosph{\"a}rischen H{\"o}hen von etwa 500 km entstammt die Mehrheit der damit zusammenh{\"a}ngenden Schockwellen den dunklen Anteilen der Granulen, im Unterschied zu anderen Frequenzbereichen. Die 75-Sekunden-Oszillationen werden ebenfalls in der aktiven Region beobachtet, vor allem in der Lichtbr{\"u}cke. In den identifizierten B{\"a}ndern oszillatorischer Power der Geschwindigkeit sind in einer dunklen, penumbralen Struktur sowie in der Lichtbr{\"u}cke ausgepr{\"a}gte Strukturen erkennbar, die sich mit einer Horizontalgeschwindigkeit von 5-8 km/s in die ruhige Sonne bewegen. Diese zeigen einen deutlichen Anstieg der Power, vor allem im 5-Minuten-Band, und stehen m{\"o}glicherweise in Zusammenhang mit dem Ph{\"a}nomen der „Evershed-clouds". Eingeschr{\"a}nkt durch ein sehr geringes Signal-Rausch-Verh{\"a}ltnis und hohe Fehlereinfl{\"u}sse werden auch Magnetfeldvariationen mit einer Periode von sechs Minuten am {\"U}bergang von Umbra zu Penumbra in der N{\"a}he einer Lichtbr{\"u}cke beobachtet. 
Um die beschriebenen Resultate zu erzielen, wurden bestehende Visualisierungsverfahren der Frequenzanalyse verbessert oder neu entwickelt, insbesondere f{\"u}r Ergebnisse der Wavelet-Transformation.}, language = {de} } @phdthesis{Bojahr2015, author = {Bojahr, Juliane}, title = {Aktivierung des humanen S{\"u}ßgeschmacksrezeptors im zellbasierten Testsystem}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93331}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 174}, year = {2015}, abstract = {Zellbasierte heterologe Expressionssysteme bieten ein einfaches und schnelles Verfahren, um neue S{\"u}ßstoffe oder S{\"u}ßverst{\"a}rker zu finden. Unter Verwendung eines solchen Testsystems konnte ich in Zusammenarbeit mit der Symrise AG, Holzminden, und dem Institut f{\"u}r Pflanzenbiochemie in Halle/Saale die vietnamesische Pflanze Mycetia balansae als Quelle eines neuen S{\"u}ßstoffs identifizieren. Deren Hauptkomponenten, genannt Balansine, aktivieren spezifisch den humanen S{\"u}ßrezeptor. Chim{\"a}re Rezeptoren zeigten, dass die amino-terminalen Dom{\"a}nen der S{\"u}ßrezeptoruntereinheiten, welche einen Großteil der Liganden des S{\"u}ßrezeptors binden, f{\"u}r dessen Aktivierung durch Balansin A nicht notwendig sind. Voraussetzung f{\"u}r die Anwendung zellbasierter Testsysteme zum Auffinden neuer S{\"u}ßstoffe ist jedoch, dass s{\"u}ße Substanzen gesichert identifiziert werden, w{\"a}hrend nicht s{\"u}ße Substanzen zuverl{\"a}ssig keine Rezeptoraktivierung aufweisen. W{\"a}hrend in HEK293 TAS1R2 TAS1R3To Galpha15i3-Zellen S{\"u}ßrezeptoraktivierung gegen{\"u}ber nicht s{\"u}ß schmeckenden Substanzen beobachtet wurde, konnte mit den HEK293PEAKrapid Galpha15-Zellen ein zuverl{\"a}ssiges Testsystem identifiziert werden, welches den S{\"u}ßgeschmack der untersuchten Substanzen widerspiegelte. Es fanden sich keine Hinweise, dass akzessorische Proteine oder verwandte Rezeptoren des S{\"u}ßrezeptors das unterschiedliche Verhalten der Zellen verursachen. Es konnte gezeigt werden, dass die Verwendung unterschiedlicher G-Proteine die Signalamplituden des S{\"u}ßrezeptors beeinflusst, die Unterschiede zwischen den Zellsystemen jedoch nicht vollst{\"a}ndig erkl{\"a}rt. Keine der untersuchten Galpha-Proteinchim{\"a}ren spiegelte die intrinsische S{\"u}ße der Substanzen wider. Wenn auch nicht urs{\"a}chlich f{\"u}r die Diskrepanz zwischen S{\"u}ßrezeptoraktivierung in vitro und S{\"u}ßgeschmack in vivo, so weisen die Ergebnisse dieser Arbeit auf eine Interaktion der S{\"u}ßrezeptoruntereinheiten mit dem humanen Calcium-sensing Rezeptor hin. Vanillin und Ethylvanillin konnten als neue Agonisten des Calcium-sensing Rezeptors identifiziert werden. Wie die vorliegende Arbeit zeigt, k{\"o}nnen sich kleine Unterschiede im Zellhintergrund deutlich auf die Funktionsweise heterolog exprimierter Rezeptoren auswirken. Dies zeigt, wie wichtig die Wahl der Zellen f{\"u}r solche Screeningsysteme ist.}, language = {de} } @phdthesis{Hoehne2011, author = {H{\"o}hne, Janet}, title = {Aktivit{\"a}ts- und Herzfrequenz-Monitoring zur Erfassung der Bewegungszeit und der Bewegungsintensit{\"a}t im schulischen und außerschulischen Kontext von Grundsch{\"u}lern im Land Brandenburg}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57937}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {"Kinderwelt ist Bewegungswelt" (Schmidt, 1997, S. 156, zitiert nach Schmidt, Hartmann-Tews \& Brettschneider, 2003, S. 31). 
Das kindliche Bewegungsverhalten hat sich bereits im Grundschulalter ver{\"a}ndert, so dass sich Bewegungsaktivit{\"a}ten von Kindern erheblich unterscheiden und keineswegs mehr verallgemeinert werden k{\"o}nnen. Richtet man den Fokus auf die Frage „Wie bewegt sind unsere Kinder?", so scheint diese von den Medien bereits beantwortet zu sein, da dort von ansteigendem Bewegungsmangel der heutigen Kinder gegen{\"u}ber fr{\"u}heren Generationen berichtet wird. Wenn es in den Diskussionen um den Gesundheitszustand unserer Kinder geht, nimmt die k{\"o}rperlich-sportliche Aktivit{\"a}t eine entscheidende Rolle ein. Bewegungsmangel ist hierbei ein zentraler Begriff, der in der {\"O}ffentlichkeit diskutiert wird. Bei der Betrachtung der einzelnen Studien f{\"a}llt auf, dass deutliche Defizite in der Messung der k{\"o}rperlich-sportlichen Aktivit{\"a}t bestehen. Zentraler Kritikpunkt in den meisten Studien ist die subjektive Erfassung der k{\"o}rperlich-sportlichen Aktivit{\"a}t. Ein Großteil bisheriger Untersuchungen zum Bewegungsverhalten basiert auf Beobachtungen, Befragungen oder Bewegungstageb{\"u}chern. Diese liefern ausschließlich subjektive Einsch{\"a}tzungen der Kinder oder Eltern {\"u}ber die tats{\"a}chliche Bewegungszeit und -intensit{\"a}t. Das objektive Erfassen der Aktivit{\"a}t bzw. Inaktivit{\"a}t ist zwar seit einigen Jahren zentraler Gegenstand vieler Studien, dennoch gilt es, dieses noch sachkundiger zu l{\"o}sen, um subjektive und objektive Daten zu vergleichen. Um dem Bewegungsmangel der heutigen Kinder entgegenzuwirken, sind empirisch abgesicherte Erkenntnisse {\"u}ber die Bedingungsfaktoren und die Folgen des ver{\"a}nderten Bewegungsverhaltens dringend n{\"o}tig. Die Quer- und L{\"a}ngsschnittuntersuchung umfasst die Bereiche Anthropometrie, die Erfassung der k{\"o}rperlich-sportlichen Aktivit{\"a}t und die Herzfrequenzmessung {\"u}ber 24 h. F{\"u}r die Studie konnten 106 Jungen und M{\"a}dchen im Zeitraum von Januar 2007 bis April 2009 rekrutiert und untersucht werden. Die physiologischen Parameter wurden mit Hilfe des ACTIHEART-Messsystems aufgezeichnet und berechnet. Die Ergebnisse zur k{\"o}rperlich-sportlichen Aktivit{\"a}t wurden in die Untersuchungsabschnitte Schulzeit gesamt, Pause, Sportunterricht, Nachmittag und 24 h unterteilt. Durch das Messsystem werden die Bewegungsaktivit{\"a}t und die Herzfrequenz synchron aufgezeichnet. Das System nimmt die Beschleunigungswerte des K{\"o}rpers auf und speichert sie im frei w{\"a}hlbaren Zeitintervall, Short oder Long Term, in Form von „activity counts" ab. Das Messsystem berechnet weiterhin die Intensit{\"a}t k{\"o}rperlicher Aktivit{\"a}t.}, language = {de} } @phdthesis{Schmidt2003, author = {Schmidt, Peter Michael}, title = {Aktivit{\"a}tsmessung auf nukleins{\"a}uremodifizierten Oberfl{\"a}chen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000797}, school = {Universit{\"a}t Potsdam}, year = {2003}, abstract = {Im Bereich der medizinischen Diagnostik spielen DNA-Chips eine immer wichtigere Rolle. Dabei werden Glas- oder Silizium-Oberfl{\"a}chen mit Tausenden von einzelstr{\"a}ngigen DNA-Fragmenten, sog. Sonden, best{\"u}ckt, die mit den passenden DNA-Fragmenten in der zugef{\"u}gten Patientenprobe verschmelzen. Die Auswertung solcher Messungen liefert die Diagnose von Krankheiten wie z.B. Krebs oder Alzheimer oder den Nachweis pathogener Erreger. Durch fortschreitende Miniaturisierung dieser Messsysteme k{\"o}nnen bis zu 40.000 Genfragmente des Menschen in einer einzigen Messung analysiert werden. 
Neben den DNA-Fragmenten k{\"o}nnen Bio-Chips auch f{\"u}r andere biologische Komponenten wie Antik{\"o}rper und Proteine eingesetzt werden, wobei bei letzteren neben der Bindung auch die Aktivit{\"a}t ein wichtiger Diagnoseparameter ist. Am Fraunhofer-Institut f{\"u}r medizinische Technik und am Lehrstuhl f{\"u}r Analytische Biochemie der Universit{\"a}t Potsdam wurden im Rahmen einer Doktorarbeit Methoden entwickelt, die es erm{\"o}glichen, auf nukleins{\"a}uremodifizierten Sensoroberfl{\"a}chen die Aktivit{\"a}t von Proteinen zu messen. Es wurden Nukleins{\"a}uren auf Oberfl{\"a}chen optischer Sensoren verankert. Diese fungierten als Rezeptor f{\"u}r die Proteine sowie auch als Substrat f{\"u}r Restriktionsenzyme, die Nukleins{\"a}uren schneiden, und Polymerasen, die Nukleins{\"a}uren synthetisieren und verl{\"a}ngern k{\"o}nnen. Ihre Anwendung fand diese Messmethode in der Messung der Aktivit{\"a}t des Proteins Telomerase, das in 90\% aller Tumore erh{\"o}hte Aktivit{\"a}t gegen{\"u}ber gesunden Zellen aufweist. Die Vorteile dieses neuen Assays gegen{\"u}ber {\"a}lteren Methoden liegen im Verzicht auf radioaktiv markierte Komponenten und in einer deutlich verk{\"u}rzten Analysezeit. Die Arbeit schließt mit einem funktionsf{\"a}higen Nachweis der Telomeraseaktivit{\"a}t im Zellextrakt von gesunden und kranken Zellen. Der direkte Einfluss von Hemmstoffen auf die Aktivit{\"a}t konnte sichtbar gemacht werden; die Methode steht daher f{\"u}r die Entwicklung neuer Tumor-Diagnostika und Therapeutika zur Verf{\"u}gung.}, language = {de} } @phdthesis{Jaeger2009, author = {J{\"a}ger, Reingard}, title = {Aktuelle Entwicklungen des Hochschulmarketing in Deutschland : am Beispiel der Region Berlin/Brandenburg}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-45783}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {1. Problemstellung und Relevanz des Themas
Die deutsche Hochschullandschaft hat in den letzten Jahren zahlreiche Ver{\"a}nderungen bew{\"a}ltigen m{\"u}ssen und steht weiterhin großen Herausforderungen gegen{\"u}ber, durch welche sich zunehmend wettbewerbs{\"a}hnliche Merkmale in diesem Sektor verfestigen:
• Umstellung auf international vergleichbare Studieng{\"a}nge
• Neuregelung der Studienplatzvergabe
• Einf{\"u}hrung von Studiengeb{\"u}hren in einigen Bundesl{\"a}ndern
• Leistungsindikatoren zur Verteilung der staatlichen Haushaltsmittel
• Demographischer Wandel
Eine Bildungseinrichtung besitzt mehrere Anspruchsgruppen: die Studierenden, welche Bildungsleistungen nachfragen, den Staat, der f{\"u}r die Leistungen zahlt, die {\"O}ffentlichkeit, die an Grundlagenforschungen interessiert ist, und schließlich die Wirtschaft, die Absolventen rekrutiert (vgl. Berthold, C. 2001, S. 431). Die Hochschulen befinden sich untereinander verst{\"a}rkt im Wettbewerb um qualifizierte (und ggf. zahlungswillige) Studierende, um finanzielle Mittel vom Staat oder aus der Privatwirtschaft und um renommierte Wissenschaftler. Hochschulen m{\"u}ssen sich nun den ver{\"a}nderten Bedingungen anpassen, um auch weiterhin im nationalen und internationalen Wettbewerb {\"u}berlebensf{\"a}hig zu bleiben. Grunds{\"a}tzlich kann man sich hierbei an in der Privatwirtschaft erfolgreich eingesetzten Marketinginstrumenten orientieren.
2. Zielsetzung und Aufbau der Arbeit
Nach einer Analyse der oben genannten Rahmenbedingungen wird im ersten Teil dieser Arbeit gezeigt, welche Erkenntnisse aus dem Marketing auf Hochschulen {\"u}bertragen werden k{\"o}nnen. 
Dabei werden sowohl strategische Fragen beleuchtet als auch die Instrumente des Marketing-Mix vorgestellt. In einer anschließenden Untersuchung wurden Faktoren bestimmt, welche sich positiv auf den Entwicklungsstand von Marketingaktivit{\"a}ten an Hochschulen auswirken. Dabei konnten - beispielhaft f{\"u}r die Region Berlin/Brandenburg - sechs verschiedene Hochschultypen identifiziert werden. Diese weisen, in Abh{\"a}ngigkeit von den Eigenschaften der jeweiligen Institutionen, einen unterschiedlichen Entwicklungsstand oder ein anderes Verst{\"a}ndnis von Hochschulmarketing auf. Aufgrund dessen erscheinen f{\"u}r sie jeweils andere Marketingstrategien empfehlenswert. Die gr{\"o}ßte Rolle f{\"u}r den differenzierten Status quo im Hochschulmarketing an Berliner und Brandenburger Hochschulen spielt die St{\"a}rke des {\"a}ußeren Drucks, unter dem sich die Hochschule befindet, um ihre Auslastung und die notwendige finanzielle Ausstattung sicherzustellen. Ferner unterscheiden sich die Hochschulleitungen erheblich in ihrem Engagement und der Bereitschaft, diesen Herausforderungen mit Marketinginstrumenten zu begegnen. Trotz der gestiegenen Anzahl von Beitr{\"a}gen zur Notwendigkeit der Einf{\"u}hrung von {\"o}konomischen {\"U}berlegungen auch im Hochschulmanagement gibt es viele Kritiker, die ein Ende der Freiheit f{\"u}r Forschung und Lehre prophezeien, wenn der Marketing-Gedanke verst{\"a}rkt auch an Bildungseinrichtungen Einzug h{\"a}lt. Unumstritten ist, dass Managementans{\"a}tze aus der privaten Wirtschaft nicht ohne weiteres auf eine Hochschule adaptiert werden k{\"o}nnen. Wahrscheinlich besteht jedoch die gr{\"o}ßere Gefahr f{\"u}r Freiheit und Erfolg von Forschung und Lehre in der Missachtung dieser aktuellen Tendenzen (vgl. Tutt 2006, S. 171)!}, language = {de} } @phdthesis{Gidion2018, author = {Gidion, Gunnar}, title = {Akustische Resonatoren zur Analyse und Kontrolle von schwingungsf{\"a}higen Systemen am Beispiel von Streichinstrumenten und Dielektrischen Elastomeraktoren}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411772}, school = {Universit{\"a}t Potsdam}, pages = {190}, year = {2018}, abstract = {Die Klangeigenschaften von Musikinstrumenten werden durch das Zusammenwirken der auf ihnen anregbaren akustischen Schwingungsmoden bestimmt, welche sich wiederum aus der geometrischen Struktur des Resonators in Kombination mit den verwendeten Materialien ergeben. In dieser Arbeit wurde das Schwingungsverhalten von Streichinstrumenten durch den Einsatz minimal-invasiver piezoelektrischer Polymerfilmsensoren untersucht. Die studierten Kopplungsph{\"a}nomene umfassen den sogenannten Wolfton und Schwingungstilger, die zu dessen Abschw{\"a}chung verwendet werden, sowie die gegenseitige Beeinflussung von Bogen und Instrument beim Spielvorgang. An Dielektrischen Elastomeraktormembranen wurde dagegen der Einfluss der elastischen Eigenschaften des Membranmaterials auf das akustische und elektromechanische Schwingungsverhalten gezeigt. Die Dissertation gliedert sich in drei Teile, deren wesentliche Ergebnisse im Folgenden zusammengefasst werden. In Teil I wurde die Funktionsweise eines abstimmbaren Schwingungstilgers zur D{\"a}mpfung von Wolft{\"o}nen auf Streichinstrumenten untersucht. Durch Abstimmung der Resonanzfrequenz des Schwingungstilgers auf die Wolftonfrequenz kann ein Teil der Saitenschwingungen absorbiert werden, so dass die zu starke Anregung der Korpusresonanz vermieden wird, die den Wolfton verursacht. 
Der Schwingungstilger besteht aus einem „Wolft{\"o}ter", einem Massest{\"u}ck, welches auf der Nachl{\"a}nge der betroffenen Saite (zwischen Steg und Saitenhalter) installiert wird. Hier wurde gezeigt, wie die Resonanzen dieses Schwingungstilgers von der Masse des Wolft{\"o}ters und von dessen Position auf der Nachl{\"a}nge abh{\"a}ngen. Aber auch die Geometrie des Wolft{\"o}ters stellte sich als ausschlaggebend heraus, insbesondere bei einem nicht-rotationssymmetrischen Wolft{\"o}ter: In diesem Fall entsteht - basierend auf den zu erwartenden nicht-harmonischen Moden einer massebelasteten Saite - eine zus{\"a}tzliche Mode, die von der Polarisationsrichtung der Saitenschwingung abh{\"a}ngt. Teil II der Dissertation befasst sich mit Elastomermembranen, die als Basis von Dielektrischen Elastomeraktoren dienen, und die wegen der Membranspannung auch akustische Resonanzen aufweisen. Die Ansprache von Elastomeraktoren h{\"a}ngt unter anderem von der Geschwindigkeit der elektrischen Anregung ab. Die damit zusammenh{\"a}ngenden viskoelastischen Eigenschaften der hier verwendeten Elastomere, Silikon und Acrylat, wurden einerseits in einer frequenzabh{\"a}ngigen dynamisch-mechanischen Analyse des Elastomers erfasst, andererseits auch optisch an vollst{\"a}ndigen Aktoren selbst gemessen. Die h{\"o}here Viskosit{\"a}t des Acrylats, das bei tieferen Frequenzen h{\"o}here Aktuationsdehnungen als das Silikon zeigt, f{\"u}hrt zu einer Verminderung der Dehnungen bei h{\"o}heren Frequenzen, so dass {\"u}ber etwa 40 Hertz mit Silikon gr{\"o}ßere Aktuationsdehnungen erreicht werden. Mit den untersuchten Aktoren konnte die Gitterkonstante weicher optischer Beugungsgitter kontrolliert werden, die als zus{\"a}tzlicher Film auf der Membran installiert wurden. {\"U}ber eine Messung der akustischen Resonanzfrequenz von Elastomermembranen aus Acrylat in Abh{\"a}ngigkeit von ihrer Vorstreckung konnte in Verbindung mit einer Modellierung des hyperelastischen Verhaltens des Elastomers (Ogden-Modell) der Schermodul bestimmt werden. Schließlich wird in Teil III die Untersuchung von Geigen und ihrer Streichanregung mit Hilfe minimal-invasiver piezoelektrischer Polymerfilme geschildert. Es konnten am Bogen und am Steg von Geigen - unter den beiden F{\"u}ßen des Stegs - jeweils zwei Filmsensoren installiert werden. Mit den beiden Sensoren am Steg wurden Frequenzg{\"a}nge von Geigen gemessen, welche eine Bestimmung der frequenzabh{\"a}ngigen Stegbewegung erlaubten. Diese Methode erm{\"o}glicht damit auch eine umfassende Charakterisierung der Signaturmoden in Bezug auf die Stegdynamik. Die Ergebnisse der komplement{\"a}ren Methoden von Impulsanregung und nat{\"u}rlichem Spielen der Geigen konnten dank der Sensoren verglichen werden. F{\"u}r die Nutzung der Sensoren am Bogen - insbesondere f{\"u}r eine Messung des Bogendrucks - wurde eine Kalibrierung des Bogen-Sensor-Systems mit Hilfe einer Materialpr{\"u}fmaschine durchgef{\"u}hrt. Bei einer Messung w{\"a}hrend des nat{\"u}rlichen Spielens wurde mit den Sensoren am Bogen die {\"U}bertragung der Saitenschwingung auf den Bogen festgestellt. Dabei konnten außerdem longitudinale Bogenhaarresonanzen identifiziert werden, die von der Position der Saite auf dem Bogen abh{\"a}ngen. Aus der Analyse dieses Ph{\"a}nomens konnte die longitudinale Wellengeschwindigkeit der Bogenhaare bestimmt werden, die eine wichtige Gr{\"o}ße f{\"u}r die Kopplung zwischen Saite und Bogen ist. 
Mit Hilfe des Systems aus Sensoren an Bogen und Steg werden auf Grundlage der vorliegenden Arbeit Studien an Streichinstrumenten vorgeschlagen, in denen die Bespielbarkeit der Instrumente zu den jeweils angeregten Steg- und Bogenschwingungen in Beziehung gesetzt werden kann. Damit k{\"o}nnte nicht zuletzt auch die bisher nicht vollst{\"a}ndig gekl{\"a}rte Rolle des Bogens f{\"u}r Klang und Bespielbarkeit besser beurteilt werden.}, language = {de} } @phdthesis{Gruendel2007, author = {Gr{\"u}ndel, Sindy}, title = {Akuteffekte des polyphenolreichen unl{\"o}slichen Carobballaststoffes auf Parameter des Metabolischen Syndroms bei gesunden Erwachsenen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-14899}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Das Metabolische Syndrom stellt eine Kombination verschiedener metabolischer Anomalien in einem Individuum dar. Starkes {\"U}bergewicht gilt als maßgebende Gr{\"o}ße in der Genese des Syndroms, welches mit einem enormen Risiko f{\"u}r kardiovaskul{\"a}re Erkrankungen einhergeht. Um die stark steigende Pr{\"a}valenz des Metabolischen Syndroms einzud{\"a}mmen, sind dringend Konzepte f{\"u}r die Behandlung, vor allem jedoch f{\"u}r die Pr{\"a}vention von {\"U}bergewicht erforderlich. Einen wichtigen Beitrag leisten diesbez{\"u}glich Ballaststoffe in der Ern{\"a}hrung. Sie tragen auf unterschiedlichen Wegen zur Gewichtskontrolle bei und beeinflussen zudem verschiedene mit dem Metabolischen Syndrom assoziierte Blutparameter. Ebenso werden protektive Effekte von Polyphenolen, welche zur Gruppe der sekund{\"a}ren Pflanzenstoffe z{\"a}hlen, beschrieben. Diese wirken u. a. auf den Glukose- sowie den Insulinhaushalt und greifen dar{\"u}ber hinaus in die Regulation der Fettverbrennung sowie des Energieverbrauches ein. Die Kombination beider Substanzgruppen verspricht bedeutendes gesundheitsf{\"o}rderndes Potential; dieses wurde bislang jedoch kaum untersucht. Carobballaststoff ist ein polyphenolreicher und vorwiegend unl{\"o}slicher Extrakt der Frucht des Johannisbrotbaumes (Ceratonia siliqua L.). Bislang publizierte Studien zur physiologischen Wirksamkeit dieses Ballaststoffpr{\"a}parates weisen sowohl beim Tier als auch beim Menschen bemerkenswerte hypocholesterin{\"a}mische Eigenschaften nach. Inwiefern sich der Verzehr des Carobballaststoffes ebenso auf die Entwicklung von {\"U}bergewicht sowie andere Messgr{\"o}ßen des Metabolischen Syndroms auswirkt, ist allerdings nicht bekannt. Die Zielstellung der Promotionsarbeit bestand darin, die postprandialen Wirkungen des Carobballaststoffverzehrs mit Hilfe einer Humanstudie aufzuzeigen. In die randomisierten, einfach verblindeten Untersuchungen im Cross-over-Design wurden 20 gesunde Erwachsene im Alter zwischen 22 und 62 Jahren eingeschlossen. Unter Verwendung variierender Begleitmahlzeiten wurden die postprandialen Effekte verschiedener Mengen des Carobballaststoffes untersucht. Hierbei standen die Ver{\"a}nderungen der Plasmakonzentrationen von Glukose, Triglyceriden (TG), totalem und acyliertem Ghrelin sowie der Serumkonzentrationen von Insulin und nicht-veresterten Fetts{\"a}uren (NEFA) im Mittelpunkt der Betrachtungen. Der Verzehr des Carobballaststoffes in Kombination mit 200 ml Wasser und 50 g Glukose erh{\"o}hte die postprandialen Glukose- und Insulinkonzentrationen gegen{\"u}ber der Glukosel{\"o}sung ohne Ballaststoffzusatz. 
In Kombination mit 400 ml einer Fl{\"u}ssigmahlzeit verzehrt, senkte Carobballaststoff die postprandialen TG-, NEFA- und Ghrelin-Antworten (acyliert). Die Untersuchung des respiratorischen Quotienten nach Zusatz von Carobballaststoff zur Fl{\"u}ssigmahlzeit mittels indirekter Respirationskalorimetrie bekr{\"a}ftigte die bereits bekannten Effekte auf den Lipidmetabolismus und wies zudem eine Steigerung der Fettverwertung unter Verminderung der Glukoseoxidation nach. Wurde Carobballaststoff schließlich in Lebensmittel eingebracht, sanken nach dem Verzehr dieser Lebensmittel erneut die postprandialen Konzentrationen an TG und NEFA. Gleichzeitig erh{\"o}hten sich die Glukose-, Insulin- sowie Ghrelin-Antworten (acyliert). Carobballaststoff l{\"o}st in Abh{\"a}ngigkeit von der jeweils verzehrten Begleitmatrix unterschiedliche Effekte aus. Das Pr{\"a}parat weist beachtliche Wirkungen auf die Blutlipide sowie den Energieverbrauch auf, hat indes ung{\"u}nstige Wirkungen auf die Blutglukose, sofern es in Kombination mit einer ver{\"a}nderten N{\"a}hrstoffmatrix aufgenommen wird. Carobballaststoff besitzt starkes gesundheitsf{\"o}rderndes Potential; jedoch sind weitere Studien notwendig, um seine Wirkungen sowie deren Voraussetzungen besser zu verstehen. Ferner sollten Untersuchungen {\"u}ber einen l{\"a}ngeren Zeitraum vorgenommen werden, um die langfristige Relevanz der gewonnenen Ergebnisse darzulegen. Danach stellt die Anreicherung spezieller Lebensmittel mit Carobballaststoff einen geeigneten Weg dar, um von den vielversprechenden protektiven Wirkungen des Pr{\"a}parates zu profitieren.}, language = {de} } @phdthesis{Kegeles2018, author = {Kegeles, Alexander}, title = {Algebraic foundation of Group Field Theory}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421014}, school = {Universit{\"a}t Potsdam}, pages = {124}, year = {2018}, abstract = {In this thesis we provide a construction of the operator framework starting from the functional formulation of group field theory (GFT). We define operator algebras on Hilbert spaces whose expectation values in specific states provide correlation functions of the functional formulation. Our construction allows us to give a direct relation between the ingredients of the functional GFT and its operator formulation in a perturbative regime. Using this construction we provide an example of GFT states that cannot be formulated as states in a Fock space and lead to mathematically inequivalent representations of the operator algebra. We show that such inequivalent representations can be grouped together by their symmetry properties and sometimes break the left translation symmetry of the GFT action. We interpret these groups of inequivalent representations as phases of GFT, similar to the classification of phases that we use in QFTs on space-time.}, language = {en} } @phdthesis{Lindauer2014, author = {Lindauer, T. Marius}, title = {Algorithm selection, scheduling and configuration of Boolean constraint solvers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-71260}, school = {Universit{\"a}t Potsdam}, pages = {ii, 130}, year = {2014}, abstract = {Boolean constraint solving technology has made tremendous progress over the last decade, leading to industrial-strength solvers, for example, in the areas of answer set programming (ASP), the constraint satisfaction problem (CSP), propositional satisfiability (SAT) and satisfiability of quantified Boolean formulas (QBF). 
However, in all these areas, there exist multiple solving strategies that work well on different applications; no strategy dominates all other strategies. Therefore, no individual solver shows robust state-of-the-art performance in all kinds of applications. Additionally, the question arises how to choose a well-performing solving strategy for a given application; this is a challenging question even for solver and domain experts. One way to address this issue is the use of portfolio solvers, that is, a set of different solvers or solver configurations. We present three new automatic portfolio methods: (i) automatic construction of parallel portfolio solvers (ACPP) via algorithm configuration, (ii) solving the \$NP\$-hard problem of finding effective algorithm schedules with Answer Set Programming (aspeed), and (iii) a flexible algorithm selection framework (claspfolio2) allowing for fair comparison of different selection approaches. All three methods show improved performance and robustness in comparison to individual solvers on heterogeneous instance sets from many different applications. Since parallel solvers are important to effectively solve hard problems on parallel computation systems (e.g., multi-core processors), we extend all three approaches to be effectively applicable in parallel settings. We conducted extensive experimental studies on different instance sets from ASP, CSP, MAXSAT, Operations Research (OR), SAT and QBF that indicate an improvement over the state of the art in solving heterogeneous instance sets. Last but not least, from our experimental studies, we deduce practical advice regarding the question of when to apply which of our methods.}, language = {en} } @phdthesis{Gomolka2011, author = {Gomolka, Johannes}, title = {Algorithmic Trading}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-125-7}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51009}, school = {Universit{\"a}t Potsdam}, pages = {383}, year = {2011}, abstract = {Die Elektronisierung der Finanzm{\"a}rkte ist in den letzten Jahren weit vorangeschritten. Praktisch jede B{\"o}rse verf{\"u}gt {\"u}ber ein elektronisches Handelssystem. In diesem Kontext beschreibt der Begriff Algorithmic Trading ein Ph{\"a}nomen, bei dem Computerprogramme den Menschen im Wertpapierhandel ersetzen. Sie helfen dabei, Investmententscheidungen zu treffen oder Transaktionen durchzuf{\"u}hren. Algorithmic Trading selbst ist dabei nur eine unter vielen Innovationen, welche die Entwicklung des B{\"o}rsenhandels gepr{\"a}gt haben. Hier sind z.B. die Erfindung der Telegraphie, des Telefons, des FAX oder der elektronischen Wertpapierabwicklung zu nennen. Die Frage ist heute nicht mehr, ob Computerprogramme im B{\"o}rsenhandel eingesetzt werden, sondern wo die Grenze zwischen vollautomatischem B{\"o}rsenhandel (durch Computer) und manuellem B{\"o}rsenhandel (von Menschen) verl{\"a}uft. Bei der Erforschung von Algorithmic Trading wird die Wissenschaft mit dem Problem konfrontiert, dass keinerlei Informationen {\"u}ber diese Computerprogramme zug{\"a}nglich sind. Die Idee dieser Dissertation bestand darin, dieses Problem zu umgehen und Informationen {\"u}ber Algorithmic Trading indirekt aus der Analyse von (Fonds-)Renditen zu extrahieren. Johannes Gomolka untersucht daher die Forschungsfrage, ob sich Aussagen {\"u}ber computergesteuerten Wertpapierhandel (kurz: Algorithmic Trading) aus der Analyse von (Fonds-)Renditen ziehen lassen. 
Zur Beantwortung dieser Forschungsfrage formuliert der Autor eine neue Definition von Algorithmic Trading und unterscheidet mit Buy-Side und Sell-Side Algorithmic Trading zwei grundlegende Funktionen der Computerprogramme (die Entscheidungs- und die Transaktionsunterst{\"u}tzung). F{\"u}r seine empirische Untersuchung greift Gomolka auf das Multifaktorenmodell zur Style-Analyse von Fung und Hsieh (1997) zur{\"u}ck. Mit Hilfe dieses Modells ist es m{\"o}glich, die Zeitreihen von Fondsrenditen in interpretierbare Grundbestandteile zu zerlegen und den einzelnen Regressionsfaktoren eine inhaltliche Bedeutung zuzuordnen. Die Ergebnisse dieser Dissertation zeigen, dass man mit Hilfe der Style-Analyse Aussagen {\"u}ber Algorithmic Trading aus der Analyse von (Fonds-)Renditen machen kann. Die Aussagen sind jedoch nicht technischer Natur, sondern auf die Analyse von Handelsstrategien (Investment-Styles) begrenzt.}, language = {de} } @phdthesis{Kupke2008, author = {Kupke, S{\"o}ren}, title = {Allianzf{\"a}higkeit}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-27309}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Ein empirisch belegtes Ph{\"a}nomen ist die steigende Anzahl strategischer Allianzen von Unternehmen, speziell in dynamischen Wettbewerbsumgebungen. {\"U}ber strategische Allianzen versuchen Unternehmen, auch in einer dynamischen Wettbewerbsumwelt unter hoher Unsicherheit Wettbewerbsvorteile aufzubauen. Dabei belegen verschiedene Forschungsarbeiten, dass strategische Allianzen h{\"a}ufig scheitern, und weisen {\"u}bereinstimmend relativ niedrige Erfolgsquoten von 50\% nach. Harbison und Pek{\´a}r belegen zwischen Unternehmen eine hohe Varianz und identifizieren, dass einige Unternehmen bei 87\% ihrer strategischen Allianzen erfolgreich sind, w{\"a}hrend andere Unternehmen nur eine Erfolgsrate von 37\% aufweisen. Die Fragestellung, warum Unternehmen in dieser Hinsicht so signifikant differieren, bildet die grundlegende Motivation f{\"u}r die vorliegende Arbeit. Die Ursache f{\"u}r die hohe Varianz der Erfolgsrate von strategischen Allianzen zwischen Unternehmen ist in den internen Prozessen und Strukturen der Unternehmen zu vermuten. Diese unternehmensinternen Prozesse und Strukturen werden mit dem Begriff Allianzf{\"a}higkeit bezeichnet.
1. Forschungsfrage: Aus welchen Prozessen und Strukturen besteht Allianzf{\"a}higkeit in Unternehmen?
2. Forschungsfrage: Wie entwickelt sich Allianzf{\"a}higkeit in Unternehmen?
Die vermutete Kausalit{\"a}t zwischen Allianzf{\"a}higkeit und der Erfolgsrate bei strategischen Allianzen verweist auf einen post-positivistischen Charakter der Untersuchung. Es wird auf wissenschaftliche Literatur, aber auch auf Beitr{\"a}ge von Unternehmensberatungen, Nichtregierungsorganisationen und andere Quellen zur{\"u}ckgegriffen, wie es als Forschungsansatz im Kontext strategischer Allianzen vorgeschlagen wird. Zun{\"a}chst werden im zweiten Kapitel die beiden beobachtbaren Ph{\"a}nomene, eine steigende Dynamik der Wettbewerbsumwelt vieler Unternehmen und die zunehmende Anzahl strategischer Allianzen, die Interdependenzen aufweisen, unter Einbeziehung der Literatur charakterisiert. Insbesondere die Motive von Unternehmen und deren Strategien zum Eingehen strategischer Allianzen werden erl{\"a}utert. Im folgenden dritten Kapitel werden die traditionellen Erkl{\"a}rungsans{\"a}tze des strategischen Managements anhand einer empirischen Literaturanalyse skizziert. 
Das Ergebnis der empirischen Literaturanalyse ist die Notwendigkeit eines neuen theoretischen Zugangs, der die gestiegene Dynamik der Wettbewerbsumwelt st{\"a}rker reflektiert und interne Prozesse von Unternehmen intensiver betrachtet. Mit den Beitr{\"a}gen von Teece et al. sowie Eisenhardt und Martin scheint sich der f{\"a}higkeitsorientierte Ansatz als neue Erkl{\"a}rungsperspektive zu etablieren. Der junge f{\"a}higkeitsorientierte Ansatz wird als geeignet identifiziert und im vierten Kapitel ausf{\"u}hrlich dargestellt sowie in seinen aktuellen Entwicklungen erl{\"a}utert. Das vierte Kapitel bildet den theoretischen Rahmen f{\"u}r das folgende f{\"u}nfte Kapitel, das die Charakterisierung von Allianzf{\"a}higkeit auf der Grundlage eines Literaturreviews umfasst und direkt die Forschungsfragen anhand einer Modellentwicklung adressiert. Erkenntnisse aus anderen theoretischen Perspektiven werden in den f{\"a}higkeitsorientierten Ansatz {\"u}bertragen und in das Modell integriert - ein Vorgehen, das in der Tradition des strategischen Managements, insbesondere der Beitr{\"a}ge zum f{\"a}higkeitsorientierten Ansatz, steht. Im sechsten Kapitel wird das skizzierte Modell als Grundlage verwendet, um die Entwicklung von Allianzf{\"a}higkeit empirisch anhand einer Longitudinal-Fallstudie zu untersuchen. Die Analyse von Allianzf{\"a}higkeit sollte vor dem Hintergrund einer hohen Wettbewerbsdynamik unter der Bedingung von Hyperwettbewerb (Kriterium 1) in einer Branche erfolgen, bei der strategische Allianzen eine große Rolle spielen (Kriterium 2). Dar{\"u}ber hinaus soll die M{\"o}glichkeit bestehen, Zugang zu Dokumenten, Interviewpartnern sowie Datenbanken zu erhalten (Kriterium 3). Aufgrund dieser Kriterien wurde die B{\"o}rsenindustrie als Industrie und die Deutsche B{\"o}rse als Unternehmen ausgew{\"a}hlt. Das Ziel der Fallstudie ist die Darstellung der Entwicklung der strategischen Allianzen der Deutschen B{\"o}rse von 1997 bis 2007, um R{\"u}ckschl{\"u}sse auf die Allianzf{\"a}higkeit des Unternehmens ziehen zu k{\"o}nnen. Da die Entwicklung der Allianzf{\"a}higkeit im Vordergrund der Fallstudie steht, wird eine L{\"a}ngsschnittfallstudie mit chronologischem Aufbau verwendet. Dabei wurden drei verschiedene Quellen, Experteninterviews, Datenbanken sowie Dokumente, ausgewertet.}, language = {de} } @phdthesis{Schmidt2024, author = {Schmidt, Lena Katharina}, title = {Altered hydrological and sediment dynamics in high-alpine areas - Exploring the potential of machine-learning for estimating past and future changes}, doi = {10.25932/publishup-62330}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-623302}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 129}, year = {2024}, abstract = {Climate change fundamentally transforms glaciated high-alpine regions, with well-known cryospheric and hydrological implications, such as accelerating glacier retreat, transiently increased runoff, longer snow-free periods and more frequent and intense summer rainstorms. These changes affect the availability and transport of sediments in high alpine areas by altering the interaction and intensity of different erosion processes and catchment properties. Gaining insight into the future alterations in suspended sediment transport by high alpine streams is crucial, given its wide-ranging implications, e.g. for flood damage potential, flood hazard in downstream river reaches, hydropower production, riverine ecology and water quality. 
However, the current understanding of how climate change will impact suspended sediment dynamics in these high alpine regions is limited. For one, this is due to the scarcity of measurement time series that are long enough to e.g. infer trends. On the other hand, it is difficult - if not impossible - to develop process-based models, due to the complexity and multitude of processes involved in high alpine sediment dynamics. Therefore, knowledge has so far been confined to conceptual models (which do not facilitate deriving concrete timings or magnitudes for individual catchments) or qualitative estimates ('higher export in warmer years') that may not be able to capture decreases in sediment export. Recently, machine-learning approaches have gained in popularity for modeling sediment dynamics, since their black box nature tailors them to the problem at hand, i.e. relatively well-understood input and output data, linked by very complex processes. Therefore, the overarching aim of this thesis is to estimate sediment export from the high alpine {\"O}tztal valley in Tyrol, Austria, over decadal timescales in the past and future - i.e. timescales relevant to anthropogenic climate change. This is achieved by informing, extending, evaluating and applying a quantile regression forest (QRF) approach, i.e. a nonparametric, multivariate machine-learning technique based on random forest. The first study included in this thesis aimed to understand present sediment dynamics, i.e. in the period with available measurements (up to 15 years). To inform the modeling setup for the two subsequent studies, this study identified the most important predictors, areas within the catchments and time periods. To that end, water and sediment yields from three nested gauges in the upper {\"O}tztal, Vent, S{\"o}lden and Tumpen (98 to almost 800 km² catchment area, 930 to 3772 m a.s.l.) were analyzed for their distribution in space, their seasonality and spatial differences therein, and the relative importance of short-term events. The findings suggest that the areas situated above 2500 m a.s.l., containing glacier tongues and recently deglaciated areas, play a pivotal role in sediment generation across all sub-catchments. In contrast, precipitation events were relatively unimportant (on average, 21 \% of annual sediment yield was associated with precipitation events). Thus, the second and third studies focused on the Vent catchment and its sub-catchment above gauge Vernagt (11.4 and 98 km², 1891 to 3772 m a.s.l.), due to their higher share of areas above 2500 m. Additionally, they included discharge, precipitation and air temperature (as well as their antecedent conditions) as predictors. The second study aimed to estimate sediment export since the 1960s/70s at gauges Vent and Vernagt. This was facilitated by the availability of long records of the predictors, discharge, precipitation and air temperature, and shorter records (four and 15 years) of turbidity-derived sediment concentrations at the two gauges. The third study aimed to estimate future sediment export until 2100, by applying the QRF models developed in the second study to pre-existing precipitation and temperature projections (EURO-CORDEX) and discharge projections (physically-based hydroclimatological and snow model AMUNDSEN) for the three representative concentration pathways RCP2.6, RCP4.5 and RCP8.5. The combined results of the second and third studies show overall increasing sediment export in the past and decreasing export in the future. 
This suggests that peak sediment is underway or has already passed - unless precipitation changes unfold differently than represented in the projections or changes in the catchment erodibility prevail and override these trends. Despite the overall future decrease, very high sediment export is possible in response to precipitation events. This two-fold development has important implications for managing sediment, flood hazard and riverine ecology. This thesis shows that QRF can be a very useful tool to model sediment export in high-alpine areas. Several validations in the second study showed good performance of QRF and its superiority to traditional sediment rating curves - especially in periods that contained high sediment export events, which points to its ability to deal with threshold effects. A technical limitation of QRF is the inability to extrapolate beyond the range of values represented in the training data. We assessed the number and severity of such out-of-observation-range (OOOR) days in both studies, which showed that there were few OOOR days in the second study and that uncertainties associated with OOOR days were small before 2070 in the third study. As the pre-processed data and model code have been made publicly available, future studies can easily test further approaches or apply QRF to further catchments.}, language = {en} } @phdthesis{Steeples2016, author = {Steeples, Elliot}, title = {Amino acid-derived imidazolium salts: platform molecules for N-Heterocyclic carbene metal complexes and organosilica materials}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-101861}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2016}, abstract = {In the interest of producing functional catalysts from sustainable building-blocks, 1,3-dicarboxylate imidazolium salts derived from amino acids were successfully modified to be suitable as N-Heterocyclic carbene (NHC) ligands within metal complexes. Complexes of Ag(I), Pd(II), and Ir(I) were successfully produced using known procedures with ligands derived from glycine, alanine, β-alanine and phenylalanine. The complexes were characterized in the solid state using X-ray crystallography, which allowed for the steric and electronic comparison of these ligands to well-known NHC ligands within analogous metal complexes. The palladium complexes were tested as catalysts for aqueous-phase Suzuki-Miyaura cross-coupling. Water-solubility could be induced via ester hydrolysis of the N-bound groups in the presence of base. The mono-NHC-Pd complexes were seen to be highly active in the coupling of aryl bromides with phenylboronic acid; the active catalyst was determined to be mostly Pd(0) nanoparticles. Kinetic studies determined that the reaction proceeds quickly in the coupling of bromoacetophenone, for both pre-hydrolyzed and in-situ hydrolysis catalyst dissolution. The catalyst could also be recycled for an extra run by simply re-using the aqueous layer. The imidazolium salts were also used to produce organosilica hybrid materials. This was attempted via two methods: by post-grafting onto a commercial organosilica and by co-condensation of the corresponding organosilane. The co-condensation technique harbours potential for the production of solid-supported catalysts.}, language = {en} }