@phdthesis{Zauber2013, author = {Zauber, Henrik}, title = {A systems biology driven approach for analyzing lipid protein interactions in sterol biosynthesis mutants}, address = {Potsdam}, pages = {126 S.}, year = {2013}, language = {en} } @phdthesis{Andorf2011, author = {Andorf, Sandra}, title = {A systems biological approach towards the molecular basis of heterosis in Arabidopsis thaliana}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51173}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Heterosis is defined as the superiority in performance of heterozygous genotypes compared to their corresponding genetically different homozygous parents. This phenomenon is already known since the beginning of the last century and it has been widely used in plant breeding, but the underlying genetic and molecular mechanisms are not well understood. In this work, a systems biological approach based on molecular network structures is proposed to contribute to the understanding of heterosis. Hybrids are likely to contain additional regulatory possibilities compared to their homozygous parents and, therefore, they may be able to correctly respond to a higher number of environmental challenges, which leads to a higher adaptability and, thus, the heterosis phenomenon. In the network hypothesis for heterosis, presented in this work, more regulatory interactions are expected in the molecular networks of the hybrids compared to the homozygous parents. Partial correlations were used to assess this difference in the global interaction structure of regulatory networks between the hybrids and the homozygous genotypes. This network hypothesis for heterosis was tested on metabolite profiles as well as gene expression data of the two parental Arabidopsis thaliana accessions C24 and Col-0 and their reciprocal crosses. These plants are known to show a heterosis effect in their biomass phenotype. 
The hypothesis was confirmed for mid-parent and best-parent heterosis for either hybrid of our experimental metabolite as well as gene expression data. It was shown that this result is influenced by the used cutoffs during the analyses. Too strict filtering resulted in sets of metabolites and genes for which the network hypothesis for heterosis does not hold true for either hybrid regarding mid-parent as well as best-parent heterosis. In an over-representation analysis, the genes that show the largest heterosis effects according to our network hypothesis were compared to genes of heterotic quantitative trait loci (QTL) regions. Separately for either hybrid regarding mid-parent as well as best-parent heterosis, a significantly larger overlap between the resulting gene lists of the two different approaches towards biomass heterosis was detected than expected by chance. This suggests that each heterotic QTL region contains many genes influencing biomass heterosis in the early development of Arabidopsis thaliana. Furthermore, this integrative analysis led to a confinement and an increased confidence in the group of candidate genes for biomass heterosis in Arabidopsis thaliana identified by both approaches.}, language = {en} } @phdthesis{Stange2024, author = {Stange, Maike}, title = {A study on Coronin-A and Aip1 function in motility of Dictyostelium discoideum and on Aip1 interchangeability between Dictyostelium discoideum and Arabidopsis thaliana}, doi = {10.25932/publishup-62856}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-628569}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 168}, year = {2024}, abstract = {Actin is one of the most highly conserved proteins in eukaryotes and distinct actin-related proteins with filament-forming properties are even found in prokaryotes. Due to these commonalities, actin-modulating proteins of many species share similar structural properties and proposed functions. 
The polymerization and depolymerization of actin are critical processes for a cell as they can contribute to shape changes to adapt to its environment and to move and distribute nutrients and cellular components within the cell. However, to what extent functions of actin-binding proteins are conserved between distantly related species has only been addressed in a few cases. In this work, functions of Coronin-A (CorA) and Actin-interacting protein 1 (Aip1), two proteins involved in actin dynamics, were characterized. In addition, the interchangeability and function of Aip1 were investigated in two phylogenetically distant model organisms. The flowering plant Arabidopsis thaliana (encoding two homologs, AIP1-1 and AIP1-2) and the amoeba Dictyostelium discoideum (encoding one homolog, DdAip1) were chosen because the functions of their actin cytoskeletons may differ in many aspects. Functional analyses between species were conducted for AIP1 homologs as flowering plants do not harbor a CorA gene. In the first part of the study, the effect of four different mutation methods on the function of Coronin-A protein and the resulting phenotype in D. discoideum was revealed in two genetic knockouts, one RNAi knockdown and a sudden loss-of-function mutant created by chemical-induced dislocation (CID). The advantages and disadvantages of the different mutation methods on the motility, appearance and development of the amoebae were investigated, and the results showed that not all observed properties were affected with the same intensity. Remarkably, a new combination of Selection-Linked Integration and CID could be established. In the second and third parts of the thesis, the exchange of Aip1 between plant and amoeba was carried out. For A. thaliana, the two homologs (AIP1-1 and AIP1-2) were analyzed for functionality as well as in D. discoideum. In the Aip1-deficient amoeba, rescue with AIP1-1 was more effective than with AIP1-2. 
The main results in the plant showed that in the aip1-2 mutant background, reintroduced AIP1-2 displayed the most efficient rescue and A. thaliana AIP1-1 rescued better than DdAip1. The choice of the tagging site was important for the function of Aip1 as steric hindrance is a problem. The DdAip1 was less effective when tagged at the C-terminus, while the plant AIP1s showed mixed results depending on the tag position. In conclusion, the foreign proteins partially rescued phenotypes of mutant plants and mutant amoebae, despite the organisms only being very distantly related in evolutionary terms.}, language = {en} } @phdthesis{Herenz2014, author = {Herenz, Peter}, title = {A study of the absorption characteristics of gaseous galaxy halos in the local Universe}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70513}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Today, it is well known that galaxies like the Milky Way consist not only of stars but also of gas and dust. The galactic halo, a sphere of gas that surrounds the stellar disk of a galaxy, is especially interesting. It provides a wealth of information about in and outflowing gaseous material towards and away from galaxies and their hierarchical evolution. For the Milky Way, the so-called high-velocity clouds (HVCs), fast moving neutral gas complexes in the halo that can be traced by absorption-line measurements, are believed to play a crucial role in the overall matter cycle in our Galaxy. Over the last decades, the properties of these halo structures and their connection to the local circumgalactic and intergalactic medium (CGM and IGM, respectively) have been investigated in great detail by many different groups. So far it remains unclear, however, to what extent the results of these studies can be transferred to other galaxies in the local Universe. 
In this thesis, we study the absorption properties of Galactic HVCs and compare the HVC absorption characteristics with those of intervening QSO absorption-line systems at low redshift. The goal of this project is to improve our understanding of the spatial extent and physical conditions of gaseous galaxy halos in the local Universe. In the first part of the thesis we use HST /STIS ultraviolet spectra of more than 40 extragalactic background sources to statistically analyze the absorption properties of the HVCs in the Galactic halo. We determine fundamental absorption line parameters including covering fractions of different weakly/intermediately/highly ionized metals with a particular focus on SiII and MgII. Due to the similarity in the ionization properties of SiII and MgII, we are able to estimate the contribution of HVC-like halo structures to the cross section of intervening strong MgII absorbers at z = 0. Our study implies that only the most massive HVCs would be regarded as strong MgII absorbers, if the Milky Way halo would be seen as a QSO absorption line system from an exterior vantage point. Combining the observed absorption-cross section of Galactic HVCs with the well-known number density of intervening strong MgII absorbers at z = 0, we conclude that the contribution of infalling gas clouds (i.e., HVC analogs) in the halos of Milky Way-type galaxies to the cross section of strong MgII absorbers is 34\%. This result indicates that only about one third of the strong MgII absorption can be associated with HVC analogs around other galaxies, while the majority of the strong MgII systems possibly is related to galaxy outflows and winds. The second part of this thesis focuses on the properties of intervening metal absorbers at low redshift. The analysis of the frequency and physical conditions of intervening metal systems in QSO spectra and their relation to nearby galaxies offers new insights into the typical conditions of gaseous galaxy halos. 
One major aspect in our study was to regard intervening metal systems as possible HVC analogs. We perform a detailed analysis of absorption line properties and line statistics for 57 metal absorbers along 78 QSO sightlines using newly-obtained ultraviolet spectra obtained with HST/COS. We find clear evidence for a bimodal distribution in the HI column density in the absorbers, a trend that we interpret as a sign of two different classes of absorption systems (with HVC analogs at the high-column density end). With the help of the strong transitions of SiII λ1260, SiIII λ1206, and CIII λ977 we have set up Cloudy photoionization models to estimate the local ionization conditions, gas densities, and metallicities. We find that the intervening absorption systems studied by us have, on average, similar physical conditions as Galactic HVC absorbers, providing evidence that many of them represent HVC analogs in the vicinity of other galaxies. We therefore determine typical halo sizes for SiII, SiIII, and CIII for L = 0.01L∗ and L = 0.05L∗ galaxies. Based on the covering fractions of the different ions in the Galactic halo, we find that, for example, the typical halo size for SiIII is ∼ 160 kpc for L = 0.05L∗ galaxies. We test the plausibility of this result by searching for known galaxies close to the QSO sightlines and at similar redshifts as the absorbers. We find that more than 34\% of the measured SiIII absorbers have galaxies associated with them, with the majority of the absorbers indeed being at impact parameters ρ ≤160 kpc.}, language = {en} } @phdthesis{Sokolova2006, author = {Sokolova, Elena}, title = {A Study of large-scale Atmospheric Dynamics on the Basis of NCEP Data and AOGCM Simulations}, address = {Potsdam}, pages = {128 S. : graph. 
Darst.}, year = {2006}, language = {en} } @phdthesis{Buschmann2018, author = {Buschmann, Stefan}, title = {A software framework for GPU-based geo-temporal visualization techniques}, doi = {10.25932/publishup-44340}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-443406}, school = {Universit{\"a}t Potsdam}, pages = {viii, 99}, year = {2018}, abstract = {R{\"a}umlich-zeitliche Daten sind Daten, welche sowohl einen Raum- als auch einen Zeitbezug aufweisen. So k{\"o}nnen beispielsweise Zeitreihen von Geodaten, thematische Karten die sich {\"u}ber die Zeit ver{\"a}ndern, oder Bewegungsaufzeichnungen von sich bewegenden Objekten als r{\"a}umlich-zeitliche Daten aufgefasst werden. In der heutigen automatisierten Welt gibt es eine wachsende Anzahl von Datenquellen, die best{\"a}ndig r{\"a}umlich-zeitliche Daten generieren. Hierzu geh{\"o}ren beispielsweise Verkehrs{\"u}berwachungssysteme, die Bewegungsdaten von Menschen oder Fahrzeugen aufzeichnen, Fernerkundungssysteme, welche regelm{\"a}ßig unsere Umgebung scannen und digitale Abbilder wie z.B. Stadt- und Landschaftsmodelle erzeugen, sowie Sensornetzwerke in unterschiedlichsten Anwendungsgebieten, wie z.B. der Logistik, der Verhaltensforschung von Tieren, oder der Klimaforschung. Zur Analyse r{\"a}umlich-zeitlicher Daten werden neben der automatischen Analyse mittels statistischer Methoden und Data-Mining auch explorative Methoden angewendet, welche auf der interaktiven Visualisierung der Daten beruhen. Diese Methode der Analyse basiert darauf, dass Anwender in Form interaktiver Visualisierung die Daten explorieren k{\"o}nnen, wodurch die menschliche Wahrnehmung sowie das Wissen der User genutzt werden, um Muster zu erkennen und dadurch einen Einblick in die Daten zu erlangen. 
Diese Arbeit beschreibt ein Software-Framework f{\"u}r die Visualisierung r{\"a}umlich-zeitlicher Daten, welches GPU-basierte Techniken beinhaltet, um eine interaktive Visualisierung und Exploration großer r{\"a}umlich-zeitlicher Datens{\"a}tze zu erm{\"o}glichen. Die entwickelten Techniken umfassen Datenhaltung, Prozessierung und Rendering und erm{\"o}glichen es, große Datenmengen in Echtzeit zu prozessieren und zu visualisieren. Die Hauptbeitr{\"a}ge der Arbeit umfassen: - Konzept und Implementierung einer GPU-zentrierten Visualisierungspipeline. Die beschriebenen Techniken basieren auf dem Konzept einer GPU-zentrierten Visualisierungspipeline, in welcher alle Stufen -- Prozessierung,Mapping, Rendering -- auf der GPU ausgef{\"u}hrt werden. Bei diesem Konzept werden die r{\"a}umlich-zeitlichen Daten direkt im GPU-Speicher abgelegt. W{\"a}hrend des Rendering-Prozesses werden dann mittels Shader-Programmen die Daten prozessiert, gefiltert, ein Mapping auf visuelle Attribute vorgenommen, und schließlich die Geometrien f{\"u}r die Visualisierung erzeugt. Datenprozessierung, Filtering und Mapping k{\"o}nnen daher in Echtzeit ausgef{\"u}hrt werden. Dies erm{\"o}glicht es Usern, die Mapping-Parameter sowie den gesamten Visualisierungsprozess interaktiv zu steuern und zu kontrollieren. - Interaktive Visualisierung attributierter 3D-Trajektorien. Es wurde eine Visualisierungsmethode f{\"u}r die interaktive Exploration einer großen Anzahl von 3D Bewegungstrajektorien entwickelt. Die Trajektorien werden dabei innerhalb einer virtuellen geographischen Umgebung in Form von einfachen Geometrien, wie Linien, B{\"a}ndern, Kugeln oder R{\"o}hren dargestellt. Durch interaktives Mapping k{\"o}nnen Attributwerte der Trajektorien oder einzelner Messpunkte auf visuelle Eigenschaften abgebildet werden. Hierzu stehen Form, H{\"o}he, Gr{\"o}ße, Farbe, Textur, sowie Animation zur Verf{\"u}gung. 
Mithilfe dieses dynamischen Mappings wurden außerdem verschiedene Visualisierungsmethoden implementiert, wie z.B. eine Focus+Context-Visualisierung von Trajektorien mithilfe von interaktiven Dichtekarten, sowie einer Space-Time-Cube-Visualisierung zur Darstellung des zeitlichen Ablaufs einzelner Bewegungen. - Interaktive Visualisierung geographischer Netzwerke. Es wurde eine Visualisierungsmethode zur interaktiven Exploration geo-referenzierter Netzwerke entwickelt, welche die Visualisierung von Netzwerken mit einer großen Anzahl von Knoten und Kanten erm{\"o}glicht. Um die Analyse von Netzwerken verschiedener Gr{\"o}ßen und in unterschiedlichen Kontexten zu erm{\"o}glichen, stehen mehrere virtuelle geographische Umgebungen zur Verf{\"u}gung, wie bspw. ein virtueller 3D-Globus, als auch 2D-Karten mit unterschiedlichen geographischen Projektionen. Zur interaktiven Analyse dieser Netzwerke stehen interaktive Tools wie Filterung, Mapping und Selektion zur Verf{\"u}gung. Des weiteren wurden Visualisierungsmethoden f{\"u}r verschiedene Arten von Netzwerken, wie z.B. 3D-Netzwerke und zeitlich ver{\"a}nderliche Netzwerke, implementiert. Zur Demonstration des Konzeptes wurden interaktive Tools f{\"u}r zwei unterschiedliche Anwendungsf{\"a}lle entwickelt. Das erste beinhaltet die Visualisierung attributierter 3D-Trajektorien, welche die Bewegungen von Flugzeugen um einen Flughafen beschreiben. Es erm{\"o}glicht Nutzern, die Trajektorien von ankommenden und startenden Flugzeugen {\"u}ber den Zeitraum eines Monats interaktiv zu explorieren und zu analysieren. Durch Verwendung der interaktiven Visualisierungsmethoden f{\"u}r 3D-Trajektorien und interaktiven Dichtekarten k{\"o}nnen Einblicke in die Daten gewonnen werden, wie beispielsweise h{\"a}ufig genutzte Flugkorridore, typische sowie untypische Bewegungsmuster, oder ungew{\"o}hnliche Vorkommnisse wie Fehlanfl{\"u}ge. 
Der zweite Anwendungsfall beinhaltet die Visualisierung von Klimanetzwerken, welche geographische Netzwerke in der Klimaforschung darstellen. Klimanetzwerke repr{\"a}sentieren die Dynamiken im Klimasystem durch eine Netzwerkstruktur, die die statistischen Beziehungen zwischen Orten beschreibt. Das entwickelte Tool erm{\"o}glicht es Analysten, diese großen Netzwerke interaktiv zu explorieren und dadurch die Struktur des Netzwerks zu analysieren und mit den geographischen Daten in Beziehung zu setzen. Interaktive Filterung und Selektion erm{\"o}glichen es, Muster in den Daten zu identifizieren, und so bspw. Cluster in der Netzwerkstruktur oder Str{\"o}mungsmuster zu erkennen.}, language = {en} } @phdthesis{Chen2023, author = {Chen, Junchao}, title = {A self-adaptive resilient method for implementing and managing the high-reliability processing system}, doi = {10.25932/publishup-58313}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583139}, school = {Universit{\"a}t Potsdam}, pages = {XXIII, 167}, year = {2023}, abstract = {As a result of CMOS scaling, radiation-induced Single-Event Effects (SEEs) in electronic circuits became a critical reliability issue for modern Integrated Circuits (ICs) operating under harsh radiation conditions. SEEs can be triggered in combinational or sequential logic by the impact of high-energy particles, leading to destructive or non-destructive faults, resulting in data corruption or even system failure. Typically, the SEE mitigation methods are deployed statically in processing architectures based on the worst-case radiation conditions, which is most of the time unnecessary and results in a resource overhead. Moreover, the space radiation conditions are dynamically changing, especially during Solar Particle Events (SPEs). The intensity of space radiation can differ over five orders of magnitude within a few hours or days, resulting in several orders of magnitude fault probability variation in ICs during SPEs. 
This thesis introduces a comprehensive approach for designing a self-adaptive fault resilient multiprocessing system to overcome the static mitigation overhead issue. This work mainly addresses the following topics: (1) Design of on-chip radiation particle monitor for real-time radiation environment detection, (2) Investigation of space environment predictor, as support for solar particle events forecast, (3) Dynamic mode configuration in the resilient multiprocessing system. Therefore, according to detected and predicted in-flight space radiation conditions, the target system can be configured to use no mitigation or low-overhead mitigation during non-critical periods of time. The redundant resources can be used to improve system performance or save power. On the other hand, during increased radiation activity periods, such as SPEs, the mitigation methods can be dynamically configured appropriately depending on the real-time space radiation environment, resulting in higher system reliability. Thus, a dynamic trade-off in the target system between reliability, performance and power consumption in real-time can be achieved. All results of this work are evaluated in a highly reliable quad-core multiprocessing system that allows the self-adaptive setting of optimal radiation mitigation mechanisms during run-time. Proposed methods can serve as a basis for establishing a comprehensive self-adaptive resilient system design process. 
Successful implementation of the proposed design in the quad-core multiprocessor shows its application perspective also in the other designs.}, language = {en} } @phdthesis{Mussil2008, author = {Mussil, Stephan}, title = {A secret in spite of itself : recursive meaning in Henry James's 'The Figure in the Carpet'}, issn = {0028-6087}, year = {2008}, language = {en} } @phdthesis{Zhang2011, author = {Zhang, Zhuodong}, title = {A regional scale study of wind erosion in the Xilingele grassland based on computational fluid dynamics}, address = {Potsdam}, pages = {143 S.}, year = {2011}, language = {en} } @phdthesis{Mueller2012, author = {M{\"u}ller, J{\"u}rgen J.}, title = {A real-time in-memory discovery service}, address = {Potsdam}, pages = {XXV, 172 S.}, year = {2012}, language = {en} } @phdthesis{Marx2016, author = {Marx, Robert}, title = {A quantitative model of spatial correlations in parametric down conversion for investigating complementarity at a double slit}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2016}, language = {en} } @phdthesis{Gaetjen2023, author = {G{\"a}tjen, Dominic}, title = {A Pichia pastoris surface display system for the efficient screening of high-producing antibody clones}, school = {Universit{\"a}t Potsdam}, pages = {120}, year = {2023}, abstract = {Pichia pastoris (syn. Komagataella phaffi) is a distinguished expression system widely used in industrial production processes. Recent molecular research has focused on numerous approaches to increase recombinant protein yield in P. pastoris. For example, the design of expression vectors and synthetic genetic elements, gene copy number optimization, or co-expression of helper proteins (transcription factors, chaperones, etc.). However, high clonal variability of transformants and low screening throughput have hampered significant success. 
To enhance screening capacities, display-based methodologies inherit the potential for efficient isolation of producer clones via fluorescence-activated cell sorting (FACS). Therefore, this study focused on developing a novel clone selection method that is based on the non-covalent attachment of Fab fragments on the P. pastoris cell surface to be applicable for FACS. Initially, a P. pastoris display system was developed, which is a prerequisite for the surface capture of secreted Fabs. A Design of Experiments approach was applied to analyze the influence of various genetic elements on antibody fragment display. The combined P. pastoris formaldehyde dehydrogenase promoter (PFLD1), Saccharomyces cerevisiae invertase 2 signal peptide (ScSUC2), - agglutinin (ScSAG1) anchor protein, and the ARS of Kluyveromyces lactis (panARS) conferred highest display levels. Subsequently, eight single-chain variable fragments (scFv) specific for the constant part of the Fab heavy or light chain were individually displayed in P. pastoris. Among the tested scFvs, the anti-human CH1 IgG domain scFv allowed the most efficient Fab capture detected by flow cytometry. Irrespective of the Fab sequence, exogenously added as well as simultaneously secreted Fabs were successfully captured on the cell surface. Furthermore, Fab secretion capacities were shown to correlate to the level of surface-bound Fabs as demonstrated for characterized producer clones. Flow-sorted clones presenting high amounts of Fabs showed an increase in median Fab titers (factor of 21 to 49) compared to unsorted clones when screened in deep-well plates. For selected candidates, improved functional Fab yields of sorted cells vs. unsorted cells were confirmed in an upscaled shake flask production. Since the scFv capture matrix was encoded on an episomal plasmid with inherently unstable autonomously replicating sequences (ARS), efficient plasmid curing was observed after removing the selective pressure. 
Hence, sorted clones could be immediately used for production without the need to modify the expression host or vector. The resulting switchable display/secretion system provides a streamlined approach for the isolation of Fab producers and subsequent Fab production.}, language = {en} } @phdthesis{SanchezBarriga2010, author = {S{\´a}nchez-Barriga, Jaime}, title = {A photoemission study of quasiparticle excitations, electron-correlation effects and magnetization dynamics in thin magnetic systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-48499}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {This thesis is focused on the electronic, spin-dependent and dynamical properties of thin magnetic systems. Photoemission-related techniques are combined with synchrotron radiation to study the spin-dependent properties of these systems in the energy and time domains. In the first part of this thesis, the strength of electron correlation effects in the spin-dependent electronic structure of ferromagnetic bcc Fe(110) and hcp Co(0001) is investigated by means of spin- and angle-resolved photoemission spectroscopy. The experimental results are compared to theoretical calculations within the three-body scattering approximation and within the dynamical mean-field theory, together with one-step model calculations of the photoemission process. From this comparison it is demonstrated that the present state of the art many-body calculations, although improving the description of correlation effects in Fe and Co, give too small mass renormalizations and scattering rates thus demanding more refined many-body theories including nonlocal fluctuations. In the second part, it is shown in detail monitoring by photoelectron spectroscopy how graphene can be grown by chemical vapour deposition on the transition-metal surfaces Ni(111) and Co(0001) and intercalated by a monoatomic layer of Au. 
For both systems, a linear E(k) dispersion of massless Dirac fermions is observed in the graphene pi-band in the vicinity of the Fermi energy. Spin-resolved photoemission from the graphene pi-band shows that the ferromagnetic polarization of graphene/Ni(111) and graphene/Co(0001) is negligible and that graphene on Ni(111) is after intercalation of Au spin-orbit split by the Rashba effect. In the last part, a time-resolved x-ray magnetic circular dichroic-photoelectron emission microscopy study of a permalloy platelet comprising three cross-tie domain walls is presented. It is shown how a fast picosecond magnetic response in the precessional motion of the magnetization can be induced by means of a laser-excited photoswitch. From a comparision to micromagnetic calculations it is demonstrated that the relatively high precessional frequency observed in the experiments is directly linked to the nature of the vortex/antivortex dynamics and its response to the magnetic perturbation. This includes the time-dependent reversal of the vortex core polarization, a process which is beyond the limit of detection in the present experiments.}, language = {en} } @phdthesis{Schoenheit2011, author = {Sch{\"o}nheit, J{\"o}rg}, title = {A phagocyte-specific Irf8 gene enhancer establishes early conventional dendritic cell commitment}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-55482}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Haematopoietic development is a complex process that is strictly hierarchically organized. Here, the phagocyte lineages are a very heterogeneous cell compartment with specialized functions in innate immunity and induction of adaptive immune responses. Their generation from a common precursor must be tightly controlled. Interference within lineage formation programs for example by mutation or change in expression levels of transcription factors (TF) is causative to leukaemia. 
However, the molecular mechanisms driving specification into distinct phagocytes remain poorly understood. In the present study I identify the transcription factor Interferon Regulatory Factor 8 (IRF8) as the specification factor of dendritic cell (DC) commitment in early phagocyte precursors. Employing an IRF8 reporter mouse, I showed the distinct Irf8 expression in haematopoietic lineage diversification and isolated a novel bone marrow resident progenitor which selectively differentiates into CD8α+ conventional dendritic cells (cDCs) in vivo. This progenitor strictly depends on Irf8 expression to properly establish its transcriptional DC program while suppressing a lineage-inappropriate neutrophile program. Moreover, I demonstrated that Irf8 expression during this cDC commitment-step depends on a newly discovered myeloid-specific cis-enhancer which is controlled by the haematopoietic transcription factors PU.1 and RUNX1. Interference with their binding leads to abrogation of Irf8 expression, subsequently to disturbed cell fate decisions, demonstrating the importance of these factors for proper phagocyte cell development. Collectively, these data delineate a transcriptional program establishing cDC fate choice with IRF8 in its center.}, language = {en} } @phdthesis{D'Agata2014, author = {D'Agata, Valeria Costanza}, title = {A partire dalla Somaestetica di Shusterman}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72650}, school = {Universit{\"a}t Potsdam}, pages = {280}, year = {2014}, abstract = {Con la sua proposta di una Somaestetica, articolata fondamentalmente in analitica, pragmatica e pratica, Richard Shusterman intende in primo luogo fornire e creare una cornice metodologica, un orientamento unitario che sia in grado di rintracciare, ricostruire e portare a manifestazione - all'interno di eterogenee riflessioni teoriche e pratiche somatiche - la comune esigenza di ridare luce alla dimensione corporea come modo primario di essere nel mondo. 
Recuperando l'accezione baumgarteniana di Aesthetica come gnoseologia inferiore, arte dell'analogo della ragione, scienza della conoscenza sensibile, la somaestetica intende dare nuovo impulso alla pi{\`u} profonda radice di estetica e filosofia che coglie la vita nel suo processo di metamorfosi e rigenerazione continua, in quel respiro vitale che, per quanto possa diventare cosciente, non {\`e} mai totalmente afferrabile dalla ragione discorsiva, situandosi piuttosto in quello spazio primordiale in cui coscienza e corpo si coappartengono, in cui il soggetto non {\`e} ancora individualizzabile perch{\´e} fuso con l'ambiente, non {\`e} totalmente privatizzabile perch{\´e} intrinsecamente plasmato dal tessuto sociale cui egli stesso conferisce dinamicamente forma. A partire dunque dalla rivalutazione del concetto di Aisthesis la disciplina somaestetica mira ad una intensificazione di sensorialit{\`a}, percezione, emozione, commozione, rintracciando proprio nel Soma la fonte di quelle facolt{\`a} "inferiori" irriducibili a quelle puramente intellettuali, che permettono di accedere alle dimensioni qualitative dell'esperienza, di portare a manifestazione e far maturare l'essere umano come essere indivisibile che non si lascia incontrare da un pensiero che ne rinnega l'unitariet{\`a} in nome di fittizie e laceranti distinzioni dicotomiche. Nel corpo infatti si radicano in modo silente regole, convenzioni, norme e valori socioculturali che determinano e talvolta limitano la configurazione ed espressione di sensazioni, percezioni, cognizioni, pensieri, azioni, volizioni, disposizioni di un soggetto da sempre inserito in una Mitwelt (mondo comune), ed {\`e} allora proprio al corpo che bisogna rivolgersi per riconfigurare pi{\`u} autentiche modalit{\`a} di espressione del soggetto che crea equilibri dinamici per mantenere una relazione di coerenza con il pi{\`u} ampio contesto sociale, culturale, ambientale. 
L'apertura al confronto con eterogenee posizioni filosofiche e l'intrinseca multidisciplinariet{\`a} spiegano la centralit{\`a} nel contemporaneo dibattito estetologico internazionale della Somaestetica che, rivolgendosi tanto ad una formulazione teorica quanto ad una concreta applicazione pratica, intende rivalutare il soma come corporeit{\`a} intelligente, senziente, intenzionale e attiva, non riducibile all'accezione peccaminosa di caro (mero corpo fisico privo di vita e sensazione). Attraverso la riflessione e la pratica di tecniche di coscienza somatica si portano in primo piano i modi in cui il sempre pi{\`u} consapevole rapporto con la propria corporeit{\`a} come mediatamente esperita e immediatamente vissuta, sentita, offre occasioni autentiche di realizzazione progressiva di s{\´e} innanzitutto come persone, capaci di autocoltivazione, di riflessione cosciente sulle proprie abitudini incorporate, di ristrutturazione creativa di s{\´e}, di intensificata percezione e apprezzamento sensoriale sia nel concreto agire quotidiano, sia nella dimensione pi{\`u} propriamente estetologica di ricezione, fruizione e creazione artistica. L'indirizzo essenzialmente pragmatista della riflessione di Shusterman traccia cos{\`i} una concezione fondamentalmente relazionale dell'estetica in grado di porsi proprio nel movimento e nel rapporto continuamente diveniente di vera e propria trasformazione e passaggio tra le dimensioni fisiche, proprio-corporee, psichiche e spirituali del soggetto la cui interazione, ed il cui reciproco riversarsi le une nelle altre, pu{\`o} risultare profondamente arricchito attraverso una progressiva e sempre crescente consapevolizzazione della ricchezza della dimensione corporea in quanto intenzionale, percettiva, senziente, volitiva, tanto quanto vulnerabile, limitante, caduca, patica. 
Il presente lavoro intende ripercorrere ed approfondire alcuni dei principali referenti di Shusterman, focalizzandosi prevalentemente sulla radice pragmatista della sua proposta e sul confronto con il dibattito di area tedesca tra estetica, antropologia filosofica, neofenomenologia e antropologia medica, per riguadagnare una nozione di soma che proprio a partire dal contrasto, dall'impatto irriducibile con la potenza annullante delle situazioni limite, della crisi possa acquisire un pi{\`u} complesso e ricco valore armonizzante delle intrinseche e molteplici dimensioni che costituiscono il tessuto della soggettivit{\`a} incarnata. In particolare il primo capitolo (1. Somaestetica) chiarisce le radici essenzialmente pragmatiste della proposta shustermaniana e mostra come sia possibile destrutturare e dunque riconfigurare radicati modi di esperienza, rendendo coscienti abitudini e modi di vivere che si fissano a livello somatico in modo per lo pi{\`u} inavvertito. Il confronto con la nozione di Habitus, di cui Pierre Bourdieu mette brillantemente in luce l'invisibile e socialmente determinata matrice somatica, lascia scorgere come ogni manifestazione umana sia sostenuta dall'incorporazione di norme, credenze, valori che determinano e talvolta limitano l'espressione, lo sviluppo, persino le predisposizioni e le inclinazioni degli individui. 
Ed {\`e} proprio intervenendo a questo livello che si pu{\`o} restituire libert{\`a} alle scelte e aprirsi cos{\`i} alle dimensioni essenzialmente qualitative dell'esperienza che, nell'accezione deweyana {\`e} un insieme olistico unitario e coeso che fa da sfondo alle relazioni organismo-ambiente, un intreccio inestricabile di teoria e prassi, particolare e universale, psiche e soma, ragione ed emozione, percettivo e concettuale, insomma quell'immediata conoscenza corporea che struttura lo sfondo di manifestazione della coscienza.}, language = {it} } @phdthesis{TabaresJimenez2021, author = {Tabares Jimenez, Ximena del Carmen}, title = {A palaeoecological approach to savanna dynamics and shrub encroachment in Namibia}, doi = {10.25932/publishup-49281}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-492815}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2021}, abstract = {The spread of shrubs in Namibian savannas raises questions about the resilience of these ecosystems to global change. This makes it necessary to understand the past dynamics of the vegetation, since there is no consensus on whether shrub encroachment is a new phenomenon, nor on its main drivers. However, a lack of long-term vegetation datasets for the region and the scarcity of suitable palaeoecological archives, makes reconstructing past vegetation and land cover of the savannas a challenge. To help meet this challenge, this study addresses three main research questions: 1) is pollen analysis a suitable tool to reflect the vegetation change associated with shrub encroachment in savanna environments? 2) Does the current encroached landscape correspond to an alternative stable state of savanna vegetation? 3) To what extent do pollen-based quantitative vegetation reconstructions reflect changes in past land cover? 
The research focuses on north-central Namibia, where despite being the region most affected by shrub invasion, particularly since the 21st century, little is known about the dynamics of this phenomenon. Field-based vegetation data were compared with modern pollen data to assess their correspondence in terms of composition and diversity along precipitation and grazing intensity gradients. In addition, two sediment cores from Lake Otjikoto were analysed to reveal changes in vegetation composition that have occurred in the region over the past 170 years and their possible drivers. For this, a multiproxy approach (fossil pollen, sedimentary ancient DNA (sedaDNA), biomarkers, compound specific carbon (δ13C) and deuterium (δD) isotopes, bulk carbon isotopes (δ13Corg), grain size, geochemical properties) was applied at high taxonomic and temporal resolution. REVEALS modelling of the fossil pollen record from Lake Otjikoto was run to quantitatively reconstruct past vegetation cover. For this, we first made pollen productivity estimates (PPE) of the most relevant savanna taxa in the region using the extended R-value model and two pollen dispersal options (Gaussian plume model and Lagrangian stochastic model). The REVEALS-based vegetation reconstruction was then validated using remote sensing-based regional vegetation data. The results show that modern pollen reflects the composition of the vegetation well, but diversity less well. Interestingly, precipitation and grazing explain a significant amount of the compositional change in the pollen and vegetation spectra. The multiproxy record shows that a state change from open Combretum woodland to encroached Terminalia shrubland can occur over a century, and that the transition between states spans around 80 years and is characterized by a unique vegetation composition. This transition is supported by gradual environmental changes induced by management (i.e. 
broad-scale logging for the mining industry, selective grazing and reduced fire activity associated with intensified farming) and related land-use change. Derived environmental changes (i.e. reduced soil moisture, reduced grass cover, changes in species composition and competitiveness, reduced fire intensity) may have affected the resilience of Combretum open woodlands, making them more susceptible to change to an encroached state by stochastic events such as consecutive years of precipitation and drought, and by high concentrations of pCO2. We assume that the resulting encroached state was further stabilized by feedback mechanisms that favour the establishment and competitiveness of woody vegetation. The REVEALS-based quantitative estimates of plant taxa indicate the predominance of a semi-open landscape throughout the 20th century and a reduction in grass cover below 50\% since the 21st century associated with the spread of encroacher woody taxa. Cover estimates show a close match with regional vegetation data, providing support for the vegetation dynamics inferred from multiproxy analyses. Reasonable PPEs were made for all woody taxa, but not for Poaceae. In conclusion, pollen analysis is a suitable tool to reconstruct past vegetation dynamics in savannas. However, because pollen cannot identify grasses beyond family level, a multiproxy approach, particularly the use of sedaDNA, is required. I was able to separate stable encroached states from mere woodland phases, and could identify drivers and speculate about related feedbacks. In addition, the REVEALS-based quantitative vegetation reconstruction clearly reflects the magnitude of the changes in the vegetation cover that occurred during the last 130 years, despite the limitations of some PPEs. This research provides new insights into pollen-vegetation relationships in savannas and highlights the importance of multiproxy approaches when reconstructing past vegetation dynamics in semi-arid environments. 
It also provides the first time series with sufficient taxonomic resolution to show changes in vegetation composition during shrub encroachment, as well as the first quantitative reconstruction of past land cover in the region. These results help to identify the different stages in savanna dynamics and can be used to calibrate predictive models of vegetation change, which are highly relevant to land management.}, language = {en} } @phdthesis{Gebauer2008, author = {Gebauer, Denis}, title = {A novel view on the early stage of crystallization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-19818}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This thesis provides a novel view on the early stage of crystallization utilizing calcium carbonate as a model system. Calcium carbonate is of great economical, scientific and ecological importance, because it is a major part of water hardness, the most abundant Biomineral and forms huge amounts of geological sediments thus binding large amounts of carbon dioxide. The primary experiments base on the evolution of supersaturation via slow addition of dilute calcium chloride solution into dilute carbonate buffer. The time-dependent measurement of the Ca2+ potential and concurrent pH = constant titration facilitate the calculation of the amount of calcium and carbonate ions bound in pre-nucleation stage clusters, which have never been detected experimentally so far, and in the new phase after nucleation, respectively. Analytical Ultracentrifugation independently proves the existence of pre-nucleation stage clusters, and shows that the clusters forming at pH = 9.00 have a proximately time-averaged size of altogether 70 calcium and carbonate ions. Both experiments show that pre-nucleation stage cluster formation can be described by means of equilibrium thermodynamics. 
Effectively, the cluster formation equilibrium is physico-chemically characterized by means of a multiple-binding equilibrium of calcium ions to a 'lattice' of carbonate ions. The evaluation gives GIBBS standard energy for the formation of calcium/carbonate ion pairs in clusters, which exhibits a maximal value of approximately 17.2 kJ mol^-1 at pH = 9.75 and relates to a minimal binding strength in clusters at this pH-value. Nucleated calcium carbonate particles are amorphous at first and subsequently become crystalline. At high binding strength in clusters, only calcite (the thermodynamically stable polymorph) is finally obtained, while with decreasing binding strength in clusters, vaterite (the thermodynamically least stable polymorph) and presumably aragonite (the thermodynamically intermediate stable polymorph) are obtained additionally. Concurrently, two different solubility products of nucleated amorphous calcium carbonate (ACC) are detected at low binding strength and high binding strength in clusters (ACC I 3.1EE-8 M^2, ACC II 3.8EE-8 M^2), respectively, indicating the precipitation of at least two different ACC species, while the clusters provide the precursor species of ACC. It is proximate that ACC I may relate to calcitic ACC -i.e. ACC exhibiting short range order similar to the long range order of calcite and that ACC II may relate to vateritic ACC, which will subsequently transform into the particular crystalline polymorph as discussed in the literature, respectively. Detailed analysis of nucleated particles forming at minimal binding strength in clusters (pH = 9.75) by means of SEM, TEM, WAXS and light microscopy shows that predominantly vaterite with traces of calcite forms. The crystalline particles of early stages are composed of nano-crystallites of approximately 5 to 10 nm size, respectively, which are aligned in high mutual order as in mesocrystals. 
The analyses of precipitation at pH = 9.75 in presence of additives -polyacrylic acid (pAA) as a model compound for scale inhibitors and peptides exhibiting calcium carbonate binding affinity as model compounds for crystal modifiers- shows that ACC I and ACC II are precipitated in parallel: pAA stabilizes ACC II particles against crystallization leading to their dissolution for the benefit of crystals that form from ACC I and exclusively calcite is finally obtained. Concurrently, the peptide additives analogously inhibit the formation of calcite and exclusively vaterite is finally obtained in case of one of the peptide additives. These findings show that classical nucleation theory is hardly applicable for the nucleation of calcium carbonate. The metastable system is stabilized remarkably due to cluster formation, while clusters forming by means of equilibrium thermodynamics are the nucleation relevant species and not ions. Most likely, the concept of cluster formation is a common phenomenon occurring during the precipitation of hardly soluble compounds as qualitatively shown for calcium oxalate and calcium phosphate. This finding is important for the fundamental understanding of crystallization and nucleation-inhibition and modification by additives with impact on materials of huge scientific and industrial importance as well as for better understanding of the mass transport in crystallization. It can provide a novel basis for simulation and modelling approaches. 
New mechanisms of scale formation in Bio- and Geomineralization and also in scale inhibition on the basis of the newly reported reaction channel need to be considered.}, language = {en} } @phdthesis{Wang2013, author = {Wang, Ting}, title = {A novel R2R3 MYB-like transcription factor regulates ABA mediated stress response and leaf growth in Arabidopsis}, address = {Potsdam}, pages = {102 S.}, year = {2013}, language = {en} } @phdthesis{RudolphMohr2013, author = {Rudolph-Mohr, Nicole}, title = {A novel non-invasive optical method for quantitative visualization of pH and oxygen dynamics in soils}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66993}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In soils and sediments there is a strong coupling between local biogeochemical processes and the distribution of water, electron acceptors, acids and nutrients. Both sides are closely related and affect each other from small scale to larger scales. Soil structures such as aggregates, roots, layers or macropores enhance the patchiness of these distributions. At the same time it is difficult to access the spatial distribution and temporal dynamics of these parameters. Noninvasive imaging techniques with high spatial and temporal resolution overcome these limitations. And new non-invasive techniques are needed to study the dynamic interaction of plant roots with the surrounding soil, but also the complex physical and chemical processes in structured soils. In this study we developed an efficient non-destructive in-situ method to determine biogeochemical parameters relevant to plant roots growing in soil. This is a quantitative fluorescence imaging method suitable for visualizing the spatial and temporal pH changes around roots. We adapted the fluorescence imaging set-up and coupled it with neutron radiography to study simultaneously root growth, oxygen depletion by respiration activity and root water uptake.
The combined set up was subsequently applied to a structured soil system to map the patchy structure of oxic and anoxic zones induced by a chemical oxygen consumption reaction for spatially varying water contents. Moreover, results from a similar fluorescence imaging technique for nitrate detection were complemented by a numerical modeling study where we used imaging data, aiming to simulate biodegradation under anaerobic, nitrate reducing conditions.}, language = {en} } @phdthesis{Rackwitz2016, author = {Rackwitz, Jenny}, title = {A novel approach to study low-energy electron-induced damage to DNA oligonucleotides}, school = {Universit{\"a}t Potsdam}, pages = {137}, year = {2016}, language = {en} } @phdthesis{Pellizzer2016, author = {Pellizzer, Tommaso}, title = {A novel approach to identify plastidic factors for plastome genome incompatibility and evidence for the central involvement of the chloroplast in leaf shaping}, school = {Universit{\"a}t Potsdam}, pages = {136}, year = {2016}, language = {en} } @phdthesis{Falter2016, author = {Falter, Daniela}, title = {A novel approach for large-scale flood risk assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90239}, school = {Universit{\"a}t Potsdam}, pages = {95}, year = {2016}, abstract = {In the past, floods were basically managed by flood control mechanisms. The focus was set on the reduction of flood hazard. The potential consequences were of minor interest. Nowadays river flooding is increasingly seen from the risk perspective, including possible consequences. Moreover, the large-scale picture of flood risk became increasingly important for disaster management planning, national risk developments and the (re-) insurance industry. Therefore, it is widely accepted that risk-orientated flood management approaches at the basin-scale are needed. However, large-scale flood risk assessment methods for areas of several 10,000 km² are still in early stages.
Traditional flood risk assessments are performed reach wise, assuming constant probabilities for the entire reach or basin. This might be helpful on a local basis, but where large-scale patterns are important this approach is of limited use. Assuming a T-year flood (e.g. 100 years) for the entire river network is unrealistic and would lead to an overestimation of flood risk at the large scale. Due to the lack of damage data, additionally, the probability of peak discharge or rainfall is usually used as proxy for damage probability to derive flood risk. With a continuous and long term simulation of the entire flood risk chain, the spatial variability of probabilities could be considered and flood risk could be directly derived from damage data in a consistent way. The objective of this study is the development and application of a full flood risk chain, appropriate for the large scale and based on long term and continuous simulation. The novel approach of 'derived flood risk based on continuous simulations' is introduced, where the synthetic discharge time series is used as input into flood impact models and flood risk is directly derived from the resulting synthetic damage time series. The bottleneck at this scale is the hydrodynamic simulation. To find suitable hydrodynamic approaches for the large-scale a benchmark study with simplified 2D hydrodynamic models was performed. A raster-based approach with inertia formulation and a relatively high resolution of 100 m in combination with a fast 1D channel routing model was chosen. To investigate the suitability of the continuous simulation of a full flood risk chain for the large scale, all model parts were integrated into a new framework, the Regional Flood Model (RFM). RFM consists of the hydrological model SWIM, a 1D hydrodynamic river network model, a 2D raster based inundation model and the flood loss model FELMOps+r.
Subsequently, the model chain was applied to the Elbe catchment, one of the largest catchments in Germany. For the proof-of-concept, a continuous simulation was performed for the period of 1990-2003. Results were evaluated / validated as far as possible with available observed data in this period. Although each model part introduced its own uncertainties, results and runtime were generally found to be adequate for the purpose of continuous simulation at the large catchment scale. Finally, RFM was applied to a meso-scale catchment in the east of Germany to firstly perform a flood risk assessment with the novel approach of 'derived flood risk assessment based on continuous simulations'. Therefore, RFM was driven by long term synthetic meteorological input data generated by a weather generator. Thereby, a virtual time series of climate data of 100 x 100 years was generated and served as input to RFM providing subsequent 100 x 100 years of spatially consistent river discharge series, inundation patterns and damage values. On this basis, flood risk curves and expected annual damage could be derived directly from damage data, providing a large-scale picture of flood risk. In contrast to traditional flood risk analysis, where homogenous return periods are assumed for the entire basin, the presented approach provides a coherent large-scale picture of flood risk. The spatial variability of occurrence probability is respected. Additionally, data and methods are consistent. Catchment and floodplain processes are represented in a holistic way. Antecedent catchment conditions are implicitly taken into account, as well as physical processes like storage effects, flood attenuation or channel-floodplain interactions and related damage influencing effects. Finally, the simulation of a virtual period of 100 x 100 years and consequently large data set on flood loss events enabled the calculation of flood risk directly from damage distributions.
Problems associated with the transfer of probabilities in rainfall or peak runoff to probabilities in damage, as often used in traditional approaches, are bypassed. RFM and the 'derived flood risk approach based on continuous simulations' has the potential to provide flood risk statements for national planning, re-insurance aspects or other questions where spatially consistent, large-scale assessments are required.}, language = {en} } @phdthesis{Oskinova2012, author = {Oskinova, Lida}, title = {A new understanding of the structured winds from massive stars : the implications for their X-ray emission, mass-loss diagnostics, and feedback}, address = {Potsdam}, year = {2012}, language = {en} } @phdthesis{Eisinger2007, author = {Eisinger, Dirk}, title = {A new management model for developing small scale contingency plans of rabies}, address = {Potsdam}, pages = {115 S. : graph. Darst.}, year = {2007}, language = {en} } @phdthesis{DereseYenesewMidiwoetal.2003, author = {Derese, Solomon and Yenesew, Abiy and Midiwo, Jacob O. and Heydenreich, Matthias and Peter, Martin G.}, title = {A new isoflavone from stem bark of Millettia dura}, issn = {1011-3924}, year = {2003}, language = {en} } @phdthesis{Ghasemzadeh2005, author = {Ghasemzadeh, Mohammad}, title = {A new algorithm for the quantified satisfiability problem, based on zero-suppressed binary decision diagrams and memoization}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-6378}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {Quantified Boolean formulas (QBFs) play an important role in theoretical computer science. QBF extends propositional logic in such a way that many advanced forms of reasoning can be easily formulated and evaluated. In this dissertation we present our ZQSAT, which is an algorithm for evaluating quantified Boolean formulas. ZQSAT is based on ZBDD: Zero-Suppressed Binary Decision Diagram , which is a variant of BDD, and an adopted version of the DPLL algorithm. 
It has been implemented in C using the CUDD: Colorado University Decision Diagram package. The capability of ZBDDs in storing sets of subsets efficiently enabled us to store the clauses of a QBF very compactly and let us embed the notion of memoization to the DPLL algorithm. These points led us to implement the search algorithm in such a way that we could store and reuse the results of all previously solved subformulas with little overhead. ZQSAT can solve some sets of standard QBF benchmark problems (known to be hard for DPLL based algorithms) faster than the best existing solvers. In addition to prenex-CNF, ZQSAT accepts prenex-NNF formulas. We show and prove how this capability can be exponentially beneficial.}, subject = {Bin{\"a}res Entscheidungsdiagramm}, language = {en} } @phdthesis{Thiede2019, author = {Thiede, Tobias}, title = {A multiscale analysis of additively manufactured lattice structures}, doi = {10.25932/publishup-47041}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-470418}, school = {Universit{\"a}t Potsdam}, pages = {xi, 97, LIII}, year = {2019}, abstract = {Additive Manufacturing (AM) in terms of laser powder-bed fusion (L-PBF) offers new prospects regarding the design of parts and enables therefore the production of lattice structures. These lattice structures shall be implemented in various industrial applications (e.g. gas turbines) for reasons of material savings or cooling channels. However, internal defects, residual stress, and structural deviations from the nominal geometry are unavoidable. In this work, the structural integrity of lattice structures manufactured by means of L-PBF was non-destructively investigated on a multiscale approach. A workflow for quantitative 3D powder analysis in terms of particle size, particle shape, particle porosity, inter-particle distance and packing density was established.
Synchrotron computed tomography (CT) was used to correlate the packing density with the particle size and particle shape. It was also observed that at least about 50\% of the powder porosity was released during production of the struts. Struts are the component of lattice structures and were investigated by means of laboratory CT. The focus was on the influence of the build angle on part porosity and surface quality. The surface topography analysis was advanced by the quantitative characterisation of re-entrant surface features. This characterisation was compared with conventional surface parameters showing their complementary information, but also the need for AM specific surface parameters. The mechanical behaviour of the lattice structure was investigated with in-situ CT under compression and successive digital volume correlation (DVC). The deformation was found to be knot-dominated, and therefore the lattice folds unit cell layer wise. The residual stress was determined experimentally for the first time in such lattice structures. Neutron diffraction was used for the non-destructive 3D stress investigation. The principal stress directions and values were determined in dependence of the number of measured directions. While a significant uni-axial stress state was found in the strut, a more hydrostatic stress state was found in the knot. 
In both cases, strut and knot, seven directions were at least needed to find reliable principal stress directions.}, language = {en} } @phdthesis{Zass2021, author = {Zass, Alexander}, title = {A multifaceted study of marked Gibbs point processes}, doi = {10.25932/publishup-51277}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-512775}, school = {Universit{\"a}t Potsdam}, pages = {vii, 104}, year = {2021}, abstract = {This thesis focuses on the study of marked Gibbs point processes, in particular presenting some results on their existence and uniqueness, with ideas and techniques drawn from different areas of statistical mechanics: the entropy method from large deviations theory, cluster expansion and the Kirkwood--Salsburg equations, the Dobrushin contraction principle and disagreement percolation. We first present an existence result for infinite-volume marked Gibbs point processes. More precisely, we use the so-called entropy method (and large-deviation tools) to construct marked Gibbs point processes in R^d under quite general assumptions. In particular, the random marks belong to a general normed space S and are not bounded. Moreover, we allow for interaction functionals that may be unbounded and whose range is finite but random. The entropy method relies on showing that a family of finite-volume Gibbs point processes belongs to sequentially compact entropy level sets, and is therefore tight. We then present infinite-dimensional Langevin diffusions, that we put in interaction via a Gibbsian description. In this setting, we are able to adapt the general result above to show the existence of the associated infinite-volume measure. We also study its correlation functions via cluster expansion techniques, and obtain the uniqueness of the Gibbs process for all inverse temperatures β and activities z below a certain threshold. 
This method relies on first showing that the correlation functions of the process satisfy a so-called Ruelle bound, and then using it to solve a fixed point problem in an appropriate Banach space. The uniqueness domain we obtain consists then of the model parameters z and β for which such a problem has exactly one solution. Finally, we explore further the question of uniqueness of infinite-volume Gibbs point processes on R^d, in the unmarked setting. We present, in the context of repulsive interactions with a hard-core component, a novel approach to uniqueness by applying the discrete Dobrushin criterion to the continuum framework. We first fix a discretisation parameter a>0 and then study the behaviour of the uniqueness domain as a goes to 0. With this technique we are able to obtain explicit thresholds for the parameters z and β, which we then compare to existing results coming from the different methods of cluster expansion and disagreement percolation. Throughout this thesis, we illustrate our theoretical results with various examples both from classical statistical mechanics and stochastic geometry.}, language = {en} } @phdthesis{Horn2017, author = {Horn, Juliane}, title = {A modelling framework for exploration of a multidimensional factor causing decline in honeybee health}, school = {Universit{\"a}t Potsdam}, pages = {221}, year = {2017}, language = {en} } @phdthesis{Merlo2007, author = {Merlo, Claudia}, title = {A model of protein folding kinetics}, address = {Potsdam}, pages = {89 S. : graph. Darst.}, year = {2007}, language = {en} } @phdthesis{Mauri2014, author = {Mauri, Marco}, title = {A model for sigma factor competition in bacterial cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-72098}, school = {Universit{\"a}t Potsdam}, pages = {167}, year = {2014}, abstract = {Bacteria respond to changing environmental conditions by switching the global pattern of expressed genes.
In response to specific environmental stresses the cell activates several stress-specific molecules such as sigma factors. They reversibly bind the RNA polymerase to form the so-called holoenzyme and direct it towards the appropriate stress response genes. In exponentially growing E. coli cells, the majority of the transcriptional activity is carried out by the housekeeping sigma factor, while stress responses are often under the control of alternative sigma factors. Different sigma factors compete for binding to a limited pool of RNA polymerase (RNAP) core enzymes, providing a mechanism for cross talk between genes or gene classes via the sharing of expression machinery. To quantitatively analyze the contribution of sigma factor competition to global changes in gene expression, we develop a thermodynamic model that describes binding between sigma factors and core RNAP at equilibrium, transcription, non-specific binding to DNA and the modulation of the availability of the molecular components. Association of housekeeping sigma factor to RNAP is generally favored by its abundance and higher binding affinity to the core. In order to promote transcription by alternative sigma subunits, the bacterial cell modulates the transcriptional efficiency in a reversible manner through several strategies such as anti-sigma factors, 6S RNA and generally any kind of transcriptional regulators (e.g. activators or inhibitors). By shifting the outcome of sigma factor competition for the core, these modulators bias the transcriptional program of the cell. The model is validated by comparison with in vitro competition experiments, with which excellent agreement is found. We observe that transcription is affected via the modulation of the concentrations of the different types of holoenzymes, so saturated promoters are only weakly affected by sigma factor competition. 
However, in case of overlapping promoters or promoters recognized by two types of sigma factors, we find that even saturated promoters are strongly affected. Active transcription effectively lowers the affinity between the sigma factor driving it and the core RNAP, resulting in complex cross talk effects and raising the question of how their in vitro measure is relevant in the cell. We also estimate that sigma factor competition is not strongly affected by non-specific binding of core RNAPs, sigma factors, and holoenzymes to DNA. Finally, we analyze the role of increased core RNAP availability upon the shut-down of ribosomal RNA transcription during stringent response. We find that passive up-regulation of alternative sigma-dependent transcription is not only possible, but also displays hypersensitivity based on the sigma factor competition. Our theoretical analysis thus provides support for a significant role of passive control during that global switch of the gene expression program and gives new insights into RNAP partitioning in the cell.}, language = {en} } @phdthesis{Kamprath2014, author = {Kamprath, Martin}, title = {A microfoundations perspective on foresight and business models}, pages = {224}, year = {2014}, language = {en} } @phdthesis{Gerling2022, author = {Gerling, Marten Tobias}, title = {A microfluidic system for high-precision image-based live cell sorting using dielectrophoretic forces}, doi = {10.25932/publishup-58742}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-587421}, school = {Universit{\"a}t Potsdam}, pages = {vii, 87, VI}, year = {2022}, abstract = {An important goal in biotechnology and (bio-) medical research is the isolation of single cells from a heterogeneous cell population. These specialised cells are of great interest for bioproduction, diagnostics, drug development, (cancer) therapy and research. To tackle emerging questions, an ever finer differentiation between target cells and non-target cells is required. 
This precise differentiation is a challenge for a growing number of available methods. Since the physiological properties of the cells are closely linked to their morphology, it is beneficial to include their appearance in the sorting decision. For established methods, this represents a non addressable parameter, requiring new methods for the identification and isolation of target cells. Consequently, a variety of new flow-based methods have been developed and presented in recent years utilising 2D imaging data to identify target cells within a sample. As these methods aim for high throughput, the devices developed typically require highly complex fluid handling techniques, making them expensive while offering limited image quality. In this work, a new continuous flow system for image-based cell sorting was developed that uses dielectrophoresis to precisely handle cells in a microchannel. Dielectrophoretic forces are exerted by inhomogeneous alternating electric fields on polarisable particles (here: cells). In the present system, the electric fields can be switched on and off precisely and quickly by a signal generator. In addition to the resulting simple and effective cell handling, the system is characterised by the outstanding quality of the image data generated and its compatibility with standard microscopes. These aspects result in low complexity, making it both affordable and user-friendly. With the developed cell sorting system, cells could be sorted reliably and efficiently according to their cytosolic staining as well as morphological properties at different optical magnifications. The achieved purity of the target cell population was up to 95\% and about 85\% of the sorted cells could be recovered from the system. Good agreement was achieved between the results obtained and theoretical considerations. The achieved throughput of the system was up to 12,000 cells per hour. Cell viability studies indicated a high biocompatibility of the system. 
The results presented demonstrate the potential of image-based cell sorting using dielectrophoresis. The outstanding image quality and highly precise yet gentle handling of the cells set the system apart from other technologies. This results in enormous potential for processing valuable and sensitive cell samples.}, language = {en} } @phdthesis{Kirschbaum2009, author = {Kirschbaum, Michael}, title = {A microfluidic approach for the initiation and investigation of surface-mediated signal transduction processes on a single-cell level}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-39576}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {For the elucidation of the dynamics of signal transduction processes that are induced by cellular interactions, defined events along the signal transduction cascade and subsequent activation steps have to be analyzed and then also correlated with each other. This cannot be achieved by ensemble measurements because averaging biological data ignores the variability in timing and response patterns of individual cells and leads to highly blurred results. Instead, only a multi-parameter analysis at a single-cell level is able to exploit the information that is crucially needed for deducing the signaling pathways involved. The aim of this work was to develop a process line that allows the initiation of cell-cell or cell-particle interactions while at the same time the induced cellular reactions can be analyzed at various stages along the signal transduction cascade and correlated with each other. As this approach requires the gentle management of individually addressable cells, a dielectrophoresis (DEP)-based microfluidic system was employed that provides the manipulation of microscale objects with very high spatiotemporal precision and without the need of contacting the cell membrane. The system offers a high potential for automation and parallelization. 
This is essential for achieving a high level of robustness and reproducibility, which are key requirements in order to qualify this approach for a biomedical application. As an example process for intercellular communication, T cell activation has been chosen. The activation of the single T cells was triggered by contacting them individually with microbeads that were coated with antibodies directed against specific cell surface proteins, like the T cell receptor-associated kinase CD3 and the costimulatory molecule CD28 (CD; cluster of differentiation). The stimulation of the cells with the functionalized beads led to a rapid rise of their cytosolic Ca2+ concentration which was analyzed by a dual-wavelength ratiometric fluorescence measurement of the Ca2+-sensitive dye Fura-2. After Ca2+ imaging, the cells were isolated individually from the microfluidic system and cultivated further. Cell division and expression of the marker molecule CD69 as a late activation event of great significance were analyzed the following day and correlated with the previously recorded Ca2+ traces for each individual cell. It turned out that the temporal profile of the Ca2+ traces between both activated and non-activated cells as well as dividing and non-dividing cells differed significantly. This shows that the pattern of Ca2+ signals in T cells can provide early information about a later reaction of the cell. As isolated cells are highly delicate objects, a precondition for these experiments was the successful adaptation of the system to maintain the vitality of single cells during and after manipulation. In this context, the influences of the microfluidic environment as well as the applied electric fields on the vitality of the cells and the cytosolic Ca2+ concentration as crucially important physiological parameters were thoroughly investigated. 
While a short-term DEP manipulation did not affect the vitality of the cells, they showed irregular Ca2+ transients upon exposure to the DEP field only. The rate and the strength of these Ca2+ signals depended on exposure time, electric field strength and field frequency. By minimizing their occurrence rate, experimental conditions were identified that caused the least interference with the physiology of the cell. The possibility to precisely control the exact time point of stimulus application, to simultaneously analyze short-term reactions and to correlate them with later events of the signal transduction cascade on the level of individual cells makes this approach unique among previously described applications and offers new possibilities to unravel the mechanisms underlying intercellular communication.}, language = {en} } @phdthesis{Wolter2010, author = {Wolter, Christian}, title = {A methodology for model-driven process security}, address = {Potsdam}, pages = {xv, 144 S. : graph. Darst.}, year = {2010}, language = {en} } @phdthesis{Andjelkovic2021, author = {Andjelkovic, Marko}, title = {A methodology for characterization, modeling and mitigation of single event transient effects in CMOS standard combinational cells}, doi = {10.25932/publishup-53484}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-534843}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 216}, year = {2021}, abstract = {With the downscaling of CMOS technologies, the radiation-induced Single Event Transient (SET) effects in combinational logic have become a critical reliability issue for modern integrated circuits (ICs) intended for operation under harsh radiation conditions. The SET pulses generated in combinational logic may propagate through the circuit and eventually result in soft errors. It has thus become an imperative to address the SET effects in the early phases of the radiation-hard IC design. 
In general, the soft error mitigation solutions should accommodate both static and dynamic measures to ensure the optimal utilization of available resources. An efficient soft-error-aware design should address synergistically three main aspects: (i) characterization and modeling of soft errors, (ii) multi-level soft error mitigation, and (iii) online soft error monitoring. Although significant results have been achieved, the effectiveness of SET characterization methods, accuracy of predictive SET models, and efficiency of SET mitigation measures are still critical issues. Therefore, this work addresses the following topics: (i) Characterization and modeling of SET effects in standard combinational cells, (ii) Static mitigation of SET effects in standard combinational cells, and (iii) Online particle detection, as a support for dynamic soft error mitigation. Since the standard digital libraries are widely used in the design of radiation-hard ICs, the characterization of SET effects in standard cells and the availability of accurate SET models for the Soft Error Rate (SER) evaluation are the main prerequisites for efficient radiation-hard design. This work introduces an approach for the SPICE-based standard cell characterization with the reduced number of simulations, improved SET models and optimized SET sensitivity database. It has been shown that the inherent similarities in the SET response of logic cells for different input levels can be utilized to reduce the number of required simulations. Based on characterization results, the fitting models for the SET sensitivity metrics (critical charge, generated SET pulse width and propagated SET pulse width) have been developed. The proposed models are based on the principle of superposition, and they express explicitly the dependence of the SET sensitivity of individual combinational cells on design, operating and irradiation parameters. 
In contrast to the state-of-the-art characterization methodologies which employ extensive look-up tables (LUTs) for storing the simulation results, this work proposes the use of LUTs for storing the fitting coefficients of the SET sensitivity models derived from the characterization results. In that way the amount of characterization data in the SET sensitivity database is reduced significantly. The initial step in enhancing the robustness of combinational logic is the application of gate-level mitigation techniques. As a result, significant improvement of the overall SER can be achieved with minimum area, delay and power overheads. For the SET mitigation in standard cells, it is essential to employ the techniques that do not require modifying the cell structure. This work introduces the use of decoupling cells for improving the robustness of standard combinational cells. By insertion of two decoupling cells at the output of a target cell, the critical charge of the cell's output node is increased and the attenuation of short SETs is enhanced. In comparison to the most common gate-level techniques (gate upsizing and gate duplication), the proposed approach provides better SET filtering. However, as there is no single gate-level mitigation technique with optimal performance, a combination of multiple techniques is required. This work introduces a comprehensive characterization of gate-level mitigation techniques aimed to quantify their impact on the SET robustness improvement, as well as introduced area, delay and power overhead per gate. By characterizing the gate-level mitigation techniques together with the standard cells, the required effort in subsequent SER analysis of a target design can be reduced. The characterization database of the hardened standard cells can be utilized as a guideline for selection of the most appropriate mitigation solution for a given design. 
As a support for dynamic soft error mitigation techniques, it is important to enable the online detection of energetic particles causing the soft errors. This allows activating the power-greedy fault-tolerant configurations based on N-modular redundancy only at the high radiation levels. To enable such a functionality, it is necessary to monitor both the particle flux and the variation of particle LET, as these two parameters contribute significantly to the system SER. In this work, a particle detection approach based on custom-sized pulse stretching inverters is proposed. Employing the pulse stretching inverters connected in parallel enables to measure the particle flux in terms of the number of detected SETs, while the particle LET variations can be estimated from the distribution of SET pulse widths. This approach requires a purely digital processing logic, in contrast to the standard detectors which require complex mixed-signal processing. Besides the possibility of LET monitoring, additional advantages of the proposed particle detector are low detection latency and power consumption, and immunity to error accumulation. The results achieved in this thesis can serve as a basis for establishment of an overall soft-error-aware database for a given digital library, and a comprehensive multi-level radiation-hard design flow that can be implemented with the standard IC design tools. The following step will be to evaluate the achieved results with the irradiation experiments.}, language = {en} } @phdthesis{Reike2017, author = {Reike, Dennis}, title = {A look behind perceptual performance in numerical cognition}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407821}, school = {Universit{\"a}t Potsdam}, pages = {vi, 136}, year = {2017}, abstract = {Recognizing, understanding, and responding to quantities are considerable skills for human beings. 
We can easily communicate quantities, and we are extremely efficient in adapting our behavior to numerically related tasks. One usual task is to compare quantities. We also use symbols like digits in numerically related tasks. To solve tasks including digits, we must rely on our previously learned internal number representations. This thesis elaborates on the process of number comparison with the use of noisy mental representations of numbers, the interaction of number and size representations and how we use mental number representations strategically. For this, three studies were carried out. In the first study, participants had to decide which of two presented digits was numerically larger. They had to respond with a saccade in the direction of the anticipated answer. Using only a small set of meaningfully interpretable parameters, a variant of random walk models is described that accounts for response time, error rate, and variance of response time for the full matrix of 72 digit pairs. In addition, the used random walk model predicts a numerical distance effect even for error response times and this effect clearly occurs in the observed data. In relation to corresponding correct answers error responses were systematically faster. However, different from standard assumptions often made in random walk models, this account required that the distributions of step sizes of the induced random walks be asymmetric to account for this asymmetry between correct and incorrect responses. Furthermore, the presented model provides a well-defined framework to investigate the nature and scale (e.g., linear vs. logarithmic) of the mapping of numerical magnitude onto its internal representation. In comparison of the fits of proposed models with linear and logarithmic mapping, the logarithmic mapping is suggested to be prioritized. Finally, we discuss how our findings can help interpret complex findings (e.g., conflicting speed vs. 
accuracy trends) in applied studies that use number comparison as a well-established diagnostic tool. Furthermore, a novel oculomotor effect is reported, namely the saccadic overschoot effect. The participants responded by saccadic eye movements and the amplitude of these saccadic responses decreases with numerical distance. For the second study, an experimental design was developed that allows us to apply the signal detection theory to a task where participants had to decide whether a presented digit was physically smaller or larger. A remaining question is whether the benefit in (numerical magnitude - physical size) congruent conditions is related to a better perception than in incongruent conditions. Alternatively, the number-size congruency effect is mediated by response biases due to number magnitude. The signal detection theory is a perfect tool to distinguish between these two alternatives. It describes two parameters, namely sensitivity and response bias. Changes in the sensitivity are related to the actual task performance due to real differences in perception processes whereas changes in the response bias simply reflect strategic implications as a stronger preparation (activation) of an anticipated answer. Our results clearly demonstrate that the number-size congruency effect cannot be reduced to mere response bias effects, and that genuine sensitivity gains for congruent number-size pairings contribute to the number-size congruency effect. Third, participants had to perform a SNARC task - deciding whether a presented digit was odd or even. Local transition probability of irrelevant attributes (magnitude) was varied while local transition probability of relevant attributes (parity) and global occurrence probability of each stimulus were kept constant. Participants were quite sensitive in recognizing the underlying local transition probability of irrelevant attributes. 
A gain in performance was observed for actual repetitions of the irrelevant attribute in relation to changes of the irrelevant attribute in high repetition conditions compared to low repetition conditions. One interpretation of these findings is that information about the irrelevant attribute (magnitude) in the previous trial is used as an informative precue, so that participants can prepare early processing stages in the current trial, with the corresponding benefits and costs typical of standard cueing studies. Finally, the results reported in this thesis are discussed in relation to recent studies in numerical cognition.}, language = {en} } @phdthesis{Scherfenberg2012, author = {Scherfenberg, Ivonne}, title = {A logic-based Framwork to enable Attribute Assurance for Digital Identities in Service-oriented Architectures and the Web}, address = {Potsdam}, pages = {126 S.}, year = {2012}, language = {en} } @phdthesis{Tattarini2022, author = {Tattarini, Giulia}, title = {A job is good, but is a good job healthier?}, doi = {10.25932/publishup-53672}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-536723}, school = {Universit{\"a}t Potsdam}, pages = {182}, year = {2022}, abstract = {What are the consequences of unemployment and precarious employment for individuals' health in Europe? What are the moderating factors that may offset (or increase) the health consequences of labor-market risks? How do the effects of these risks vary across different contexts, which differ in their institutional and cultural settings? Does gender, regarded as a social structure, play a role, and how? To answer these questions is the aim of my cumulative thesis. This study aims to advance our knowledge about the health consequences that unemployment and precariousness cause over the life course. 
In particular, I investigate how several moderating factors, such as gender, the family, and the broader cultural and institutional context, may offset or increase the impact of employment instability and insecurity on individual health. In my first paper, 'The buffering role of the family in the relationship between job loss and self-perceived health: Longitudinal results from Europe, 2004-2011', I and my co-authors measure the causal effect of job loss on health and the role of the family and welfare states (regimes) as moderating factors. Using EU-SILC longitudinal data (2004-2011), we estimate the probability of experiencing 'bad health' following a transition to unemployment by applying linear probability models and undertake separate analyses for men and women. Firstly, we measure whether changes in the independent variable 'job loss' lead to changes in the dependent variable 'self-rated health' for men and women separately. Then, by adding into the model different interaction terms, we measure the moderating effect of the family, both in terms of emotional and economic support, and how much it varies across different welfare regimes. As an identification strategy, we first implement static fixed-effect panel models, which control for time-varying observables and indirect health selection—i.e., constant unobserved heterogeneity. Secondly, to control for reverse causality and path dependency, we implement dynamic fixed-effect panel models, adding a lagged dependent variable to the model. We explore the role of the family by focusing on close ties within households: we consider the presence of a stable partner and his/her working status as a source of social and economic support. According to previous literature, having a partner should reduce the stress from adverse events, thanks to the symbolic and emotional dimensions that such a relationship entails, regardless of any economic benefits. 
Our results, however, suggest that benefits linked to the presence of a (female) partner also come from the financial stability that (s)he can provide in terms of a second income. Furthermore, we find partners' employment to be at least as important as the mere presence of the partner in reducing the negative effect of job loss on the individual's health by maintaining the household's standard of living and decreasing economic strain on the family. Our results are in line with previous research, which has highlighted that some people cope better than others with adverse life circumstances, and the support provided by the family is a crucial resource in that regard. We also reported an important interaction between the family and the welfare state in moderating the health consequences of unemployment, showing how the compensation effect of the family varies across welfare regimes. The family plays a decisive role in cushioning the adverse consequences of labor market risks in Southern and Eastern welfare states, characterized by less developed social protection systems and -especially the Southern - high level of familialism. The first paper also found important gender differences concerning job loss, family and welfare effects. Of particular interest is the evidence suggesting that health selection works differently for men and women, playing a more prominent role for women than for men in explaining the relationship between job loss and self-perceived health. The second paper, 'Gender roles and selection mechanisms across contexts: A comparative analysis of the relationship between unemployment, self-perceived health, and gender.' investigates more in-depth the gender differential in health driven by unemployment. Being a highly contested issue in literature, we aim to study whether men are more penalized than women or the other way around and the mechanisms that may explain the gender difference. 
To do that, we rely on two theoretical arguments: the availability of alternative roles and social selection. The first argument builds on the idea that men and women may compensate for the detrimental health consequences of unemployment through the commitment to 'alternative roles,' which can provide for the resources needed to fulfill people's socially constructed needs. Notably, the availability of alternative options depends on the different positions that men and women have in society. Further, we merge the availability of the 'alternative roles' argument with the health selection argument. We assume that health selection could be contingent on people's social position as defined by gender and, thus, explain the gender differential in the relationship between unemployment and health. Ill people might be less reluctant to fall or remain (i.e., self-select) in unemployment if they have alternative roles. In Western societies, women generally have more alternative roles than men and thus more discretion in their labor market attachment. Therefore, health selection should be stronger for them, explaining why unemployment is less of a menace for women than for their male counterparts. Finally, relying on the idea of different gender regimes, we extended these arguments to comparisons across contexts. For example, in contexts where being a caregiver is assumed to be women's traditional and primary roles and the primary breadwinner role is reserved for men, unemployment is less stigmatized, and taking up alternative roles is more socially accepted for women than for men (Hp.1). Accordingly, social (self)selection should be stronger for women than for men in traditional contexts, where, in the case of ill-health, the separation from work is eased by the availability of alternative roles (Hp.2). By focusing on contexts that are representative of different gender regimes, we implement a multiple-step comparative approach. 
Firstly, by using EU-SILC longitudinal data (2004-2015), our analysis tests gender roles and selection mechanisms for Sweden and Italy, representing radically different gender regimes, thus providing institutional and cultural variation. Then, we limit institutional heterogeneity by focusing on Germany and comparing East- and West-Germany and older and younger cohorts—for West-Germany (SOEP data 1995-2017). Next, to assess the differential impact of unemployment for men and women, we compared (unemployed and employed) men with (unemployed and employed) women. To do so, we calculate predicted probabilities and average marginal effect from two distinct random-effects probit models. Our first step is estimating random-effects models that assess the association between unemployment and self-perceived health, controlling for observable characteristics. In the second step, our fully adjusted model controls for both direct and indirect selection. We do this using dynamic correlated random-effects (CRE) models. Further, based on the fully adjusted model, we test our hypotheses on alternative roles (Hp.1) by comparing several contexts - models are estimated separately for each context. For this hypothesis, we pool men and women and include an interaction term between unemployment and gender, which has the advantage to allow for directly testing whether gender differences in the effect of unemployment exist and are statistically significant. Finally, we test the role of selection mechanisms (Hp.2), using the KHB method to compare coefficients across nested nonlinear models. Specifically, we test the role of selection for the relationship between unemployment and health by comparing the partially-adjusted and fully-adjusted models. To allow selection mechanisms to operate differently between genders, we estimate separate models for men and women. 
We found support for our first hypothesis—the context in which people are embedded structures the relationship between unemployment, health, and gender. We found no gendered effect of unemployment on health in the egalitarian context of Sweden. Conversely, in the traditional context of Italy, we observed substantive and statistically significant gender differences in the effect of unemployment on bad health, with women suffering less than men. We found the same pattern for comparing East and West Germany and younger and older cohorts in West Germany. On the contrary, our results did not support our theoretical argument on social selection. We found that in Sweden, women are more selected out of employment than men. In contrast, in Italy, health selection does not seem to be the primary mechanism behind the gender differential—Italian men and women seem to be selected out of employment to the same extent. Namely, we do not find any evidence that health selection is stronger for women in more traditional countries (Hp2), despite the fact that the institutional and the cultural context would offer them a more comprehensive range of 'alternative roles' relative to men. Moreover, our second hypothesis is also rejected in the second and third comparisons, where the cross-country heterogeneity is reduced to maximize cultural differences within the same institutional context. Further research that addresses selection into inactivity is needed to evaluate the interplay between selection and social roles across gender regimes. While the health consequences of unemployment have been on the research agenda for a pretty long time, the interest in precarious employment—defined as the linking of the vulnerable worker to work that is characterized by uncertainty and insecurity concerning pay, the stability of the work arrangement, limited access to social benefits, and statutory protections—has emerged only later. 
Since the 80s, scholars from different disciplines have raised concerns about the social consequences of de-standardization of employment relationships. However, while work has become undoubtedly more precarious, very little is known about its causal effect on individual health and the role of gender as a moderator. These questions are at the core of my third paper: 'Bad job, bad health? A longitudinal analysis of the interaction between precariousness, gender and self-perceived health in Germany'. Herein, I investigate the multidimensional nature of precarious employment and its causal effect on health, particularly focusing on gender differences. With this paper, I aim to overcome three major shortcomings of earlier studies: The first one regards the cross-sectional nature of data that prevents the authors from ruling out unobserved heterogeneity as a mechanism for the association between precarious employment and health. Indeed, several unmeasured individual characteristics—such as cognitive abilities—may confound the relationship between precarious work and health, leading to biased results. Secondly, only a few studies have directly addressed the role of gender in shaping the relationship. Moreover, available results on the gender differential are mixed and inconsistent: some found precarious employment to be more detrimental for women's health, while others found no gender differences or a stronger negative association for men. Finally, previous attempts at an empirical translation of the employment precariousness (EP) concept have not always been coherent with their theoretical framework. EP is usually assumed to be a multidimensional and continuous phenomenon; it is characterized by different dimensions of insecurity that may overlap in the same job and lead to different "degrees of precariousness." 
However, researchers have predominantly focused on one-dimensional indicators—e.g., temporary employment, subjective job insecurity—to measure EP and study the association with health. Besides the fact that this approach partially grasps the phenomenon's complexity, the major problem is the inconsistency of evidence that it has produced. Indeed, this line of inquiry generally reveals an ambiguous picture, with some studies finding substantial adverse effects of temporary over permanent employment, while others report only minor differences. To measure the (causal) effect of precarious work on self-rated health and its variation by gender, I focus on Germany and use four waves from SOEP data (2003, 2007, 2011, and 2015). Germany is a suitable context for my study. Indeed, since the 1980s, the labor market and welfare system have been restructured in many ways to increase the German economy's competitiveness in the global market. As a result, the (standard) employment relationship has been de-standardized: non-standard and atypical employment arrangements—i.e., part-time work, fixed-term contracts, mini-jobs, and work agencies—have increased over time while wages have lowered, even among workers with standard work. In addition, the power of unions has also fallen over the last three decades, leaving a large share of workers without collective protection. Because of this process of de-standardization, the link between wage employment and strong social rights has eroded, making workers more powerless and more vulnerable to labor market risks than in the past. EP refers to this uneven distribution of power in the employment relationship, which can be detrimental to workers' health. Indeed, by affecting individuals' access to power and other resources, EP puts precarious workers at risk of experiencing health shocks and influences their ability to gain and accumulate health advantages (Hp.1). 
Further, the focus on Germany allows me to investigate my second research question on the gender differential. Germany is usually regarded as a traditionalist gender regime: a context characterized by a configuration of roles. Here, being a caregiver is assumed to be women's primary role, whereas the primary breadwinner role is reserved for men. Although many signs of progress have been made over the last decades towards a greater equalization of opportunities and more egalitarianism, the breadwinner model has barely changed towards a modified version. Thus, women usually take on the double role of workers (the so-called secondary earner) and caregivers, and men still devote most of their time to paid work activities. Moreover, the overall upward trend towards more egalitarian gender ideologies has leveled off over the last decades, moving notably towards more traditional gender ideologies. In this setting, two alternative hypotheses are possible. Firstly, I assume that the negative relationship between EP and health is stronger for women than for men. This is because women are systematically more disadvantaged than men in the public and private spheres of life, having less access to formal and informal sources of power. These gender-related power asymmetries may interact with EP-related power asymmetries resulting in a stronger effect of EP on women's health than on men's health (Hp.2). An alternative way of looking at the gender differential is to consider the interaction that precariousness might have with men's and women's gender identities. According to this view, the negative relationship between EP and health is weaker for women than for men (Hp.2a). In a society with a gendered division of labor and a strong link between masculine identities and stable and well-rewarded job—i.e., a job that confers the role of primary family provider—a male worker with precarious employment might violate the traditional male gender role. 
Men in precarious jobs may be perceived, by themselves and by others, as possessing a socially undesirable characteristic, which conflicts with the stereotypical idea of themselves as the male breadwinner.
It supports the hypothesis that employment precariousness could be detrimental to workers' health. Further, my results showed the crucial role of unobserved heterogeneity in shaping the health consequences of precarious employment. This is particularly important as evidence accumulates, yet it is still mostly descriptive. Moreover, my results revealed a substantial difference among men and women in the relationship between EP and health: when EP increases, the risk of experiencing poor health increases much more for men than for women. This evidence falsifies previous theory according to whom the gender differential is contingent on the structurally disadvantaged position of women in western societies. In contrast, they seem to confirm the idea that men in precarious work could experience role conflict to a larger extent than women, as their self-standard is supposed to be the stereotypical breadwinner worker with a good and well-rewarded job. Finally, results from the multiple correspondence analysis contribute to the methodological debate on precariousness, showing that a multidimensional and continuous indicator can express a latent variable of EP. All in all, complementarities are revealed in the results of unemployment and employment precariousness, which have two implications: Policy-makers need to be aware that the total costs of unemployment and precariousness go far beyond the economic and material realm penetrating other fundamental life domains such as individual health. Moreover, they need to balance the trade-off between protecting adequately unemployed people and fostering high-quality employment in reaction to the highlighted market pressures. In this sense, the further development of a (universalistic) welfare state certainly helps mitigate the adverse health effects of unemployment and, therefore, the future costs of both individuals' health and welfare spending. 
In addition, the presence of a working partner is crucial for reducing the health consequences of employment instability. Therefore, policies aiming to increase female labor market participation should be promoted, especially in those contexts where the welfare state is less developed. Moreover, my results support the significance of taking account of a gender perspective in health research. The findings of the three articles show that job loss, unemployment, and precarious employment, in general, have adverse effects on men's health but less or absent consequences for women's health. Indeed, this suggests the importance of labor and health policies that consider and further distinguish the specific needs of the male and female labor force in Europe. Nevertheless, a further implication emerges: the health consequences of employment instability and de-standardization need to be investigated in light of the gender arrangements and the transforming gender relationships in specific cultural and institutional contexts. My results indeed seem to suggest that women's health advantage may be a transitory phenomenon, contingent on the predominant gendered institutional and cultural context. As the structural difference between men's and women's position in society is eroded, egalitarianism becomes the dominant normative status, so will probably be the gender difference in the health consequences of job loss and precariousness. Therefore, while gender equality in opportunities and roles is a desirable aspect for contemporary societies and a political goal that cannot be postponed further, this thesis raises a further and maybe more crucial question: What kind of equality should be pursued to provide men and women with both good life quality and equal chances in the public and private spheres? 
In this sense, I believe that social and labor policies aiming to reduce gender inequality in society should focus on improving women's integration into the labor market, implementing policies targeting men, and facilitating their involvement in the private sphere of life. Equal redistribution of social roles could activate a crucial transformation of gender roles and the cultural models that sustain and still legitimate gender inequality in Western societies.}, language = {en} } @phdthesis{Paschke2012, author = {Paschke, Marco}, title = {A highly resolved P-velocity image of the Messum igneous complex in Namibia obtained by waveform tomography}, address = {Potsdam}, pages = {117 S.}, year = {2012}, language = {en} } @phdthesis{Beyhl2017, author = {Beyhl, Thomas}, title = {A framework for incremental view graph maintenance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-405929}, school = {Universit{\"a}t Potsdam}, pages = {VII, 293}, year = {2017}, abstract = {Nowadays, graph data models are employed, when relationships between entities have to be stored and are in the scope of queries. For each entity, this graph data model locally stores relationships to adjacent entities. Users employ graph queries to query and modify these entities and relationships. These graph queries employ graph patterns to lookup all subgraphs in the graph data that satisfy certain graph structures. These subgraphs are called graph pattern matches. However, this graph pattern matching is NP-complete for subgraph isomorphism. Thus, graph queries can suffer a long response time, when the number of entities and relationships in the graph data or the graph patterns increases. One possibility to improve the graph query performance is to employ graph views that keep ready graph pattern matches for complex graph queries for later retrieval. 
However, these graph views must be maintained by means of an incremental graph pattern matching to keep them consistent with the graph data from which they are derived, when the graph data changes. This maintenance adds subgraphs that satisfy a graph pattern to the graph views and removes subgraphs that do not satisfy a graph pattern anymore from the graph views. Current approaches for incremental graph pattern matching employ Rete networks. Rete networks are discrimination networks that enumerate and maintain all graph pattern matches of certain graph queries by employing a network of condition tests, which implement partial graph patterns that together constitute the overall graph query. Each condition test stores all subgraphs that satisfy the partial graph pattern. Thus, Rete networks suffer high memory consumptions, because they store a large number of partial graph pattern matches. But, especially these partial graph pattern matches enable Rete networks to update the stored graph pattern matches efficiently, because the network maintenance exploits the already stored partial graph pattern matches to find new graph pattern matches. However, other kinds of discrimination networks exist that can perform better in time and space than Rete networks. Currently, these other kinds of networks are not used for incremental graph pattern matching. This thesis employs generalized discrimination networks for incremental graph pattern matching. These discrimination networks permit a generalized network structure of condition tests to enable users to steer the trade-off between memory consumption and execution time for the incremental graph pattern matching. For that purpose, this thesis contributes a modeling language for the effective definition of generalized discrimination networks. Furthermore, this thesis contributes an efficient and scalable incremental maintenance algorithm, which updates the (partial) graph pattern matches that are stored by each condition test. 
Moreover, this thesis provides a modeling evaluation, which shows that the proposed modeling language enables the effective modeling of generalized discrimination networks. Furthermore, this thesis provides a performance evaluation, which shows that a) the incremental maintenance algorithm scales, when the graph data becomes large, and b) the generalized discrimination network structures can outperform Rete network structures in time and space at the same time for incremental graph pattern matching.}, language = {en} } @phdthesis{Muksin2014, author = {Muksin, Umar}, title = {A fault-controlled geothermal system in Tarutung (North Sumatra, Indonesia)investigated by seismological analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72065}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The seismic structure (Vp, Vp/Vs, and Qp anomalies) contributes to the physical properties and the lithology of rocks and possible fluid distribution in the region. The Vp model images the geometry of the Tarutung and the Sarulla basins. Both basins have a depth of around 2.0 km. High Vp/Vs and high attenuation (low Qp) anomalies are observed along the Sarulla graben associated with a weak zone caused by volcanic activities along the graben. Low Vp/Vs and low conductivity anomalies are found in the west of the Tarutung basin. This anomaly is interpreted as dry, compact, and rigid granitic rock in the region as also found by geological observations. Low Vp, high Vp/Vs and low Qp anomalies are found at the east of the Tarutung basin which appear to be associated with the three big geothermal manifestations in Sipoholon, Hutabarat, and Panabungan area. These anomalies are connected with high Vp/Vs and low Qp anomalies below the Tarutung basin at depth of around 3 - 10 km. This suggests that these geothermal manifestations are fed by the same source of the hot fluid below the Tarutung basin. 
The hot fluids from below the Tarutung basin propagate to the more dilatational and more permeable zone in the northeast. Granite found in the west of the Tarutung basin could also be abundant underneath the basin at a certain depth so that it prevents the hot fluid to be transported directly to the Tarutung basin. High seismic attenuation and low Vp/Vs anomalies are found in the southwest of the Tarutung basin below the Martimbang volcano. These anomalies are associated with hot rock below the volcano without or with less amount of partial melting. There is no indication that the volcano controls the geothermal system around the Tarutung basin. The geothermal resources around the Tarutung basin is a fault-controlled system as a result of deep circulation of fluids. Outside of the basin, the seismicity delineation and the focal mechanism correlate with the shape and the characteristics of the strike-slip Sumatran fault. Within the Tarutung basin, the seismicity is distributed more broadly which coincides with the margin of the basin. An extensional duplex system in the Tarutung basin is derived from the seismicity and focal mechanism analysis which is also consistent with the geological observations. The vertical distribution of the seismicity suggests the presence of a negative flower structure within the Tarutung basin.}, language = {de} } @phdthesis{Albrecht2013, author = {Albrecht, Torsten}, title = {A dynamic memory of fracture processes in ice shelves}, address = {Potsdam}, pages = {167 S.}, year = {2013}, language = {en} } @phdthesis{Biewald2008, author = {Biewald, Anne}, title = {A dynamic life cycle model for Germany with unemployment uncertainty}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-33111}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {This work analyzes the saving and consumption behavior of agents faced with the possibility of unemployment in a dynamic and stochastic life cycle model. 
The intertemporal optimization is based on Dynamic Programming with a backward recursion algorithm. The implemented uncertainty is not based on income shocks as it is done in traditional life cycle models but uses Markov probabilities where the probability for the next employment status of the agent depends on the current status. The utility function used is a CRRA function (constant relative risk aversion), combined with a CES function (constant elasticity of substitution) and has several consumption goods, a subsistence level, money and a bequest function.}, language = {en} } @phdthesis{Kaersten1998, author = {K{\"a}rsten, Fatma}, title = {A duality for a compact group and a second cohomology of its dual}, pages = {76 S.}, year = {1998}, language = {en} } @phdthesis{Hanke2001, author = {Hanke, Timo}, title = {A direct approach to noncrossed product division algebras}, pages = {64 S.}, year = {2001}, language = {en} } @phdthesis{Klar2012, author = {Klar, Jochen}, title = {A detailed view of filaments and sheets of the warm-hot intergalactic medium}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-58038}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {In the context of cosmological structure formation sheets, filaments and eventually halos form due to gravitational instabilities. It is noteworthy, that at all times, the majority of the baryons in the universe does not reside in the dense halos but in the filaments and the sheets of the intergalactic medium. While at higher redshifts of z > 2, these baryons can be detected via the absorption of light (originating from more distant sources) by neutral hydrogen at temperatures of T ~ 10^4 K (the Lyman-alpha forest), at lower redshifts only about 20 \% can be found in this state. The remain (about 50 to 70 \% of the total baryons mass) is unaccounted for by observational means. 
Numerical simulations predict that these missing baryons could reside in the filaments and sheets of the cosmic web at high temperatures of T = 10^4.5 - 10^7 K, but only at low to intermediate densities, and constitute the warm-hot intergalactic medium (WHIM).
We start with a comprehensive study of the one-dimensional collapse of a sinusoidal density perturbation (pancake formation) and examine the influence of radiative cooling, heating due to an UV background, thermal conduction, and the effect of small-scale perturbations given by the cosmological power spectrum. We use a set of simulations, parametrized by the wave length of the initial perturbation L. For L ~ 2 Mpc/h the collapse leads to shock-confined structures. As a result of radiative cooling and of heating due to an UV background, a relatively cold and dense core forms. With increasing L the core becomes denser and more concentrated. Thermal conduction enhances this trend and may lead to an evaporation of the core at very large L ~ 30 Mpc/h. When extending our simulations into three dimensions, instead of a pancake structure, we obtain a configuration consisting of well-defined sheets, filaments, and a gaseous halo. For L > 4 Mpc/h filaments form, which are fully confined by an accretion shock. As with the one-dimensional pancakes, they exhibit an isothermal core. Thus, our results confirm a multiphase structure, which may generate particular spectral tracers. We find that, after its formation, the core becomes shielded against further infall of gas onto the filament, and its mass content decreases with time. In the vicinity of the halo, the filament's core can be attributed to the cold streams found in other studies. We show, that the basic structure of these cold streams exists from the very beginning of the collapse process. Further on, the cross section of the streams is constricted by the outwards moving accretion shock of the halo. Thermal conduction leads to a complete evaporation of the cold stream for L > 6 Mpc/h. This corresponds to halos with a total mass higher than M_halo = 10^13 M_sun, and predicts that in more massive halos star-formation can not be sustained by cold streams. 
Far away from the gaseous halo, the temperature gradients in the filament are not sufficiently strong for thermal conduction to be effective.}, language = {en} } @phdthesis{Luckow2009, author = {Luckow, Andr{\´e}}, title = {A dependable middleware for enhancing the fault tolerance of distributed computations in grid environments}, address = {Potsdam}, pages = {235 S.}, year = {2009}, language = {en} } @phdthesis{Krentz2019, author = {Krentz, Konrad-Felix}, title = {A Denial-of-Sleep-Resilient Medium Access Control Layer for IEEE 802.15.4 Networks}, doi = {10.25932/publishup-43930}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439301}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 187}, year = {2019}, abstract = {With the emergence of the Internet of things (IoT), plenty of battery-powered and energy-harvesting devices are being deployed to fulfill sensing and actuation tasks in a variety of application areas, such as smart homes, precision agriculture, smart cities, and industrial automation. In this context, a critical issue is that of denial-of-sleep attacks. Such attacks temporarily or permanently deprive battery-powered, energy-harvesting, or otherwise energy-constrained devices of entering energy-saving sleep modes, thereby draining their charge. At the very least, a successful denial-of-sleep attack causes a long outage of the victim device. Moreover, to put battery-powered devices back into operation, their batteries have to be replaced. This is tedious and may even be infeasible, e.g., if a battery-powered device is deployed at an inaccessible location. While the research community came up with numerous defenses against denial-of-sleep attacks, most present-day IoT protocols include no denial-of-sleep defenses at all, presumably due to a lack of awareness and unsolved integration problems. After all, despite there are many denial-of-sleep defenses, effective defenses against certain kinds of denial-of-sleep attacks are yet to be found. 
The overall contribution of this dissertation is to propose a denial-of-sleep-resilient medium access control (MAC) layer for IoT devices that communicate over IEEE 802.15.4 links. Internally, our MAC layer comprises two main components. The first main component is a denial-of-sleep-resilient protocol for establishing session keys among neighboring IEEE 802.15.4 nodes. The established session keys serve the dual purpose of implementing (i) basic wireless security and (ii) complementary denial-of-sleep defenses that belong to the second main component. The second main component is a denial-of-sleep-resilient MAC protocol. Notably, this MAC protocol not only incorporates novel denial-of-sleep defenses, but also state-of-the-art mechanisms for achieving low energy consumption, high throughput, and high delivery ratios. Altogether, our MAC layer resists, or at least greatly mitigates, all denial-of-sleep attacks against it we are aware of. Furthermore, our MAC layer is self-contained and thus can act as a drop-in replacement for IEEE 802.15.4-compliant MAC layers. In fact, we implemented our MAC layer in the Contiki-NG operating system, where it seamlessly integrates into an existing protocol stack.}, language = {en} } @phdthesis{Hakansson2017, author = {H{\aa}kansson, Nils}, title = {A Dark Matter line search using 3D-modeling of Cherenkov showers below 10 TeV with VERITAS}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397670}, school = {Universit{\"a}t Potsdam}, pages = {107, xxxvi}, year = {2017}, abstract = {Dark matter, DM, has not yet been directly observed, but it has a very solid theoretical basis. There are observations that provide indirect evidence, like galactic rotation curves that show that the galaxies are rotating too fast to keep their constituent parts, and galaxy clusters that bends the light coming from behind-lying galaxies more than expected with respect to the mass that can be calculated from what can be visibly seen. 
These observations, among many others, can be explained with theories that include DM. The missing piece is to detect something that can exclusively be explained by DM. Direct observation in a particle accelerator is one way and indirect detection using telescopes is another. This thesis is focused on the latter method. The Very Energetic Radiation Imaging Telescope Array System, V ERITAS, is a telescope array that detects Cherenkov radiation. Theory predicts that DM particles annihilate into, e.g., a γγ pair and create a distinctive energy spectrum when detected by such telescopes, e.i., a monoenergetic line at the same energy as the particle mass. This so called "smoking-gun" signature is sought with a sliding window line search within the sub-range ∼ 0.3 - 10 TeV of the VERITAS energy range, ∼ 0.01 - 30 TeV. Standard analysis within the VERITAS collaboration uses Hillas analysis and look-up tables, acquired by analysing particle simulations, to calculate the energy of the particle causing the Cherenkov shower. In this thesis, an improved analysis method has been used. Modelling each shower as a 3Dgaussian should increase the energy recreation quality. Five dwarf spheroidal galaxies were chosen as targets with a total of ∼ 224 hours of data. The targets were analysed individually and stacked. Particle simulations were based on two simulation packages, CARE and GrISU. Improvements have been made to the energy resolution and bias correction, up to a few percent each, in comparison to standard analysis. Nevertheless, no line with a relevant significance has been detected. The most promising line is at an energy of ∼ 422 GeV with an upper limit cross section of 8.10 · 10^-24 cm^3 s^-1 and a significance of ∼ 2.73 σ, before trials correction and ∼ 1.56 σ after. Upper limit cross sections have also been calculated for the γγ annihilation process and four other outcomes. 
The limits are in line with current limits using other methods, from ∼ 8.56 · 10^-26 - 6.61 · 10^-23 cm^3s^-1. Future larger telescope arrays, like the upcoming Cherenkov Telescope Array, CTA, will provide better results with the help of this analysis method.}, language = {en} }