@phdthesis{Krumpe2007, author = {Krumpe, Mirko}, title = {X-ray and optical properties of X-ray luminous active galactic nuclei}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-16993}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Giacconi et al. (1962) discovered a diffuse cosmic X-ray background with rocket experiments when they searched for lunar X-ray emission. Later satellite missions found a spectral peak in the cosmic X-ray background at ~30 keV. Imaging X-ray satellites such as ROSAT (1990-1999) were able to resolve up to 80\% of the background below 2 keV into single point sources, mainly active galaxies. The cosmic X-ray background is the integration of all accreting super-massive (several million solar masses) black holes in the centre of active galaxies over cosmic time. Synthesis models need further populations of X-ray absorbed active galaxy nuclei (AGN) in order to explain the cosmic X-ray background peak at ~30 keV. Current X-ray missions such as XMM-Newton and Chandra offer the possibility of studying these additional populations. This Ph.D. thesis studies the populations that dominate the X-ray sky. For this purpose the 120 ksec XMM-Newton Marano field survey, named for an earlier optical quasar survey in the southern hemisphere, is analysed. Based on the optical follow-up observations the X-ray sources are spectroscopically classified. Optical and X-ray properties of the different X-ray source populations are studied and differences are derived. The amount of absorption in the X-ray spectra of type II AGN, which are considered as a main contributor to the X-ray background at ~30 keV, is determined. In order to extend the sample size of the rare type II AGN, this study also includes objects from another survey, the XMM-Newton Serendipitous Medium Sample. In addition, the dependence of the absorption in type II AGN with redshift and X-ray luminosity is analysed. We detected 328 X-ray sources in the Marano field. 
140 sources were spectroscopically classified. We found 89 type I AGN, 36 type II AGN, 6 galaxies, and 9 stars. AGN, galaxies, and stars are clearly distinguishable by their optical and X-ray properties. Type I and II AGN do not separate clearly. They have a significant overlap in all studied properties. In a few cases the X-ray properties are in contradiction to the observed optical properties for type I and type II AGN. For example we find type II AGN that show evidence for optical absorption but are not absorbed in X-rays. Based on the additional use of near infra-red imaging (K-band), we were able to identify several of the rare type II AGN. The X-ray spectra of type II AGN from the XMM-Newton Marano field survey and the XMM-Newton Serendipitous Medium Sample were analysed. Since most of the sources have only ~40 X-ray counts in the XMM-Newton PN-detector, I carefully studied the fit results of simulated X-ray spectra as a function of fit statistic and binning method. The objects revealed only moderate absorption. In particular, I do not find any Compton-thick sources (absorbed by column densities of NH > 1.5 x 10^24 cm^-2). This gives evidence that type II AGN are not the main contributor of the X-ray background around 30 keV. Although bias effects may occur, type II AGN show no noticeable trend of the amount of absorption with redshift or X-ray luminosity.}, language = {en} } @phdthesis{Sojka2022, author = {S{\'o}jka, Pia}, title = {Writing travel, writing life}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-537-8}, issn = {2629-2548}, doi = {10.25932/publishup-55879}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-558799}, school = {Universit{\"a}t Potsdam}, pages = {319}, year = {2022}, abstract = {The book compares the texts of three Swiss authors: Ella Maillart, Annemarie Schwarzenbach and Nicolas Bouvier.
The focus is on their trip from Gen{\`e}ve to Kabul that Ella Maillart and Annemarie Schwarzenbach made together in 1939/1940 and Nicolas Bouvier 1953/1954 with the artist Thierry Vernet. The comparison shows the strong connection between the journey and life and between ars vivendi and travel literature. This book also gives an overview of and organises the numerous terms, genres, and categories that already exist to describe various travel texts and proposes the new term travelling narration. The travelling narration looks at the text from a narratological perspective that distinguishes the author, narrator, and protagonist within the narration. In the examination, ten motifs could be found to characterise the travelling narration: Culture, Crossing Borders, Freedom, Time and Space, the Aesthetics of Landscapes, Writing and Reading, the Self and/as the Other, Home, Religion and Spirituality as well as the Journey. The importance of each individual motif does not only apply in the 1930s or 1950s but also transmits important findings for living together today and in the future.}, language = {en} } @phdthesis{Priewe2006, author = {Priewe, Marc}, title = {Writing transit : refiguring national imaginaries in Chicana/o narratives}, series = {American Studies}, volume = {140}, journal = {American Studies}, publisher = {Winter}, address = {Heidelberg}, isbn = {978-3-8253-5215-8}, pages = {XII, 267 S. : Ill.}, year = {2006}, language = {en} } @phdthesis{Dahlsten2014, author = {Dahlsten, Ulf}, title = {World market governance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-70168}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Democratic capitalism or liberal democracy, as the successful marriage of convenience between market liberalism and democracy sometimes is called, is in trouble. The market economy system has become global and there is a growing mismatch with the territoriality of the nation-states. 
The functional global networks and inter-governmental order can no longer keep pace with the rapid development of the global market economy and regulatory capture is all too common. Concepts like de-globalization, self-regulation, and global government are floated in the debate. The alternatives are analysed and found to be improper, inadequate or plainly impossible. The proposed route is instead to accept that the global market economy has developed into an independent fundamental societal system that needs its own governance. The suggestion is World Market Governance based on the Rule of Law in order to shape the fitness environment for the global market economy and strengthen the nation-states so that they can regain the sovereignty to decide upon the social and cultural conditions in each country. Elements in the proposed Rule of Law are international legislation decided by an Assembly supported by a Council, and an independent Judiciary. Existing international organisations would function as executors. The need for broad sustained demand for regulations in the common interest is identified.}, language = {en} } @phdthesis{Heinzel2021, author = {Heinzel, Mirko Noa}, title = {World Bank staff and project implementation}, year = {2021}, language = {en} } @phdthesis{Muschalla2008, author = {Muschalla, Beate}, title = {Workplace-related anxieties and workplace phobia : a concept of domain-specific mental disorders}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-20048}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Background: Anxiety in the workplace is a special problem as workplaces are especially prone to provoke anxiety: There are social hierarchies, rivalries between colleagues, sanctioning through superiors, danger of accidents, failure, and worries of job security. Workplace phobia is a phobic anxiety reaction with symptoms of panic occurring when thinking of or approaching the workplace, and with clear tendency of avoidance. 
Objectives: What characterizes workplace-related anxieties and workplace phobia as domain-specific mental disorders in contrast to conventional anxiety disorders? Method: 230 patients from an inpatient psychosomatic rehabilitation center were interviewed with the (semi-)structured Mini-Work-Anxiety-Interview and the Mini International Neuropsychiatric Interview, concerning workplace-related anxieties and conventional mental disorders. Additionally, the patients filled in the self-rating questionnaires Job-Anxiety-Scale (JAS) and the Symptom Checklist (SCL-90-R) measuring job-related and general psychosomatic symptom load. Results: Workplace-related anxieties occurred together with conventional anxiety disorders in 35\% of the patients, but also alone in others (23\%). Workplace phobia could be found in 17\% of the interviewed, any diagnosis of workplace-related anxiety was stated in 58\%. Workplace phobic patients had significantly higher scores in job-anxiety than patients without workplace phobia. Patients with workplace phobia were significantly longer on sick leave in the past 12 months (23,5 weeks) than patients without workplace phobia (13,4 weeks). Different qualities of workplace-related anxieties lead with different frequencies to work participation disorders. Conclusion: Workplace phobia cannot be described by only assessing the general level of psychosomatic symptom load and conventional mental disorders. Workplace-related anxieties and workplace phobia have a clinical value of their own which is mainly defined by specific workplace-related symptom load and work-participation disorders. They require special therapeutic attention and treatment instead of a "sick leave" certification by the general health physician.
Workplace phobia should be named with a proper diagnosis according to ICD-10 chapter V, F 40.8: "workplace phobia".}, language = {en} } @phdthesis{Jaeger2015, author = {J{\"a}ger, Lena Ann}, title = {Working memory and prediction in human sentence parsing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82517}, school = {Universit{\"a}t Potsdam}, pages = {xi, 144}, year = {2015}, abstract = {This dissertation investigates the working memory mechanism subserving human sentence processing and its relative contribution to processing difficulty as compared to syntactic prediction. Within the last decades, evidence for a content-addressable memory system underlying human cognition in general has accumulated (e.g., Anderson et al., 2004). In sentence processing research, it has been proposed that this general content-addressable architecture is also used for language processing (e.g., McElree, 2000). Although there is a growing body of evidence from various kinds of linguistic dependencies that is consistent with a general content-addressable memory subserving sentence processing (e.g., McElree et al., 2003; Van Dyke, 2006), the case of reflexive-antecedent dependencies has challenged this view. It has been proposed that in the processing of reflexive-antecedent dependencies, a syntactic-structure based memory access is used rather than cue-based retrieval within a content-addressable framework (e.g., Sturt, 2003). Two eye-tracking experiments on Chinese reflexives were designed to tease apart accounts assuming a syntactic-structure based memory access mechanism from cue-based retrieval (implemented in ACT-R as proposed by Lewis and Vasishth (2005)). In both experiments, interference effects were observed from noun phrases which syntactically do not qualify as the reflexive's antecedent but match the animacy requirement the reflexive imposes on its antecedent. These results are interpreted as evidence against a purely syntactic-structure based memory access.
However, the exact pattern of effects observed in the data is only partially compatible with the Lewis and Vasishth cue-based parsing model. Therefore, an extension of the Lewis and Vasishth model is proposed. Two principles are added to the original model, namely 'cue confusion' and 'distractor prominence'. Although interference effects are generally interpreted in favor of a content-addressable memory architecture, an alternative explanation for interference effects in reflexive processing has been proposed which, crucially, might reconcile interference effects with a structure-based account. It has been argued that interference effects do not necessarily reflect cue-based retrieval interference in a content-addressable memory but might equally well be accounted for by interference effects which have already occurred at the moment of encoding the antecedent in memory (Dillon, 2011). Three experiments (eye-tracking and self-paced reading) on German reflexives and Swedish possessives were designed to tease apart cue-based retrieval interference from encoding interference. The results of all three experiments suggest that there is no evidence that encoding interference affects the retrieval of a reflexive's antecedent. Taken together, these findings suggest that the processing of reflexives can be explained with the same cue-based retrieval mechanism that has been invoked to explain syntactic dependency resolution in a range of other structures. This supports the view that the language processing system is located within a general cognitive architecture, with a general-purpose content-addressable working memory system operating on linguistic expressions. Finally, two experiments (self-paced reading and eye-tracking) using Chinese relative clauses were conducted to determine the relative contribution to sentence processing difficulty of working-memory processes as compared to syntactic prediction during incremental parsing. 
Chinese has the cross-linguistically rare property of being a language with subject-verb-object word order and pre-nominal relative clauses. This property leads to opposing predictions of expectation-based accounts and memory-based accounts with respect to the relative processing difficulty of subject vs. object relatives. Previous studies showed contradictory results, which has been attributed to different kinds of local ambiguities confounding the materials (Lin and Bever, 2011). The two experiments presented are the first to compare Chinese relative clauses in syntactically unambiguous contexts. The results of both experiments were consistent with the predictions of the expectation-based account of sentence processing but not with the memory-based account. From these findings, I conclude that any theory of human sentence processing needs to take into account the power of predictive processes unfolding in the human mind.}, language = {en} } @phdthesis{Teetz2022, author = {Teetz, Tim}, title = {Work design and leadership in lean production}, school = {Universit{\"a}t Potsdam}, pages = {138}, year = {2022}, language = {en} } @phdthesis{MarimonTarter2019, author = {Marimon Tarter, Mireia}, title = {Word segmentation in German-learning infants and German-speaking adults}, doi = {10.25932/publishup-43740}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437400}, school = {Universit{\"a}t Potsdam}, pages = {132}, year = {2019}, abstract = {There is evidence that infants start extracting words from fluent speech around 7.5 months of age (e.g., Jusczyk \& Aslin, 1995) and that they use at least two mechanisms to segment word forms from fluent speech: prosodic information (e.g., Jusczyk, Cutler \& Redanz, 1993) and statistical information (e.g., Saffran, Aslin \& Newport, 1996). However, how these two mechanisms interact and whether they change during development is still not fully understood.
The main aim of the present work is to understand in what way different cues to word segmentation are exploited by infants when learning the language in their environment, as well as to explore whether this ability is related to later language skills. In Chapter 3 we sought to determine the reliability of the method used in most of the experiments in the present thesis (the Headturn Preference Procedure), as well as to examine correlations and individual differences between infants' performance and later language outcomes. In Chapter 4 we investigated how German-speaking adults weigh statistical and prosodic information for word segmentation. We familiarized adults with an auditory string in which statistical and prosodic information indicated different word boundaries and obtained both behavioral and pupillometry responses. Then, we conducted further experiments to understand in what way different cues to word segmentation are exploited by 9-month-old German-learning infants (Chapter 5) and by 6-month-old German-learning infants (Chapter 6). In addition, we conducted follow-up questionnaires with the infants and obtained language outcomes at later stages of development. Our findings from this thesis revealed that (1) German-speaking adults show a strong weight of prosodic cues, at least for the materials used in this study and that (2) German-learning infants weigh these two kinds of cues differently depending on age and/or language experience. We observed that, unlike English-learning infants, 6-month-old infants relied more strongly on prosodic cues. Nine-month-olds do not show any preference for either of the cues in the word segmentation task. From the present results it remains unclear whether the ability to use prosodic cues to word segmentation relates to later language vocabulary.
We speculate that prosody provides infants with their first window into the specific acoustic regularities in the signal, which enables them to master the specific stress pattern of German rapidly. Our findings are a step forward in the understanding of an early impact of the native prosody compared to statistical learning in early word segmentation.}, language = {en} } @phdthesis{Pregla2024, author = {Pregla, Andreas}, title = {Word order variability in OV languages}, doi = {10.25932/publishup-64363}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-643636}, school = {Universit{\"a}t Potsdam}, pages = {xv, 265}, year = {2024}, abstract = {This thesis explores word order variability in verb-final languages. Verb-final languages have a reputation for a high amount of word order variability. However, that reputation amounts to an urban myth due to a lack of systematic investigation. This thesis provides such a systematic investigation by presenting original data from several verb-final languages with a focus on four Uralic ones: Estonian, Udmurt, Meadow Mari, and South S{\'a}mi. As with every urban myth, there is a kernel of truth in that many unrelated verb-final languages share a particular kind of word order variability, A-scrambling, in which the fronted elements do not receive a special information-structural role, such as topic or contrastive focus. That word order variability goes hand in hand with placing focussed phrases further to the right in the position directly in front of the verb. Variations on this pattern are exemplified by Uyghur, Standard Dargwa, Eastern Armenian, and three of the Uralic languages, Estonian, Udmurt, and Meadow Mari. So much for the kernel of truth, but the fourth Uralic language, South S{\'a}mi, is comparably rigid and does not feature this particular kind of word order variability. Further such comparably rigid, non-scrambling verb-final languages are Dutch, Afrikaans, Amharic, and Korean.
In contrast to scrambling languages, non-scrambling languages feature obligatory subject movement, causing word order rigidity next to other typical EPP effects. The EPP is a defining feature of South S{\'a}mi clause structure in general. South S{\'a}mi exhibits a one-of-a-kind alternation between SOV and SAuxOV order that is captured by the assumption of the EPP and obligatory movement of auxiliaries but not lexical verbs. Other languages that allow for SAuxOV order either lack an alternation because the auxiliary is obligatorily present (Macro-Sudan SAuxOVX languages), or feature an alternation between SVO and SAuxOV (Kru languages; V2 with underlying OV as a fringe case). In the SVO-SAuxOV languages, both auxiliaries and lexical verbs move. Hence, South S{\'a}mi shows that the textbook difference between the VO languages English and French, whether verb movement is restricted to auxiliaries, also extends to OV languages. SAuxOV languages are an outlier among OV languages in general but are united by the presence of the EPP. Word order variability is not restricted to the preverbal field in verb-final languages, as most of them feature postverbal elements (PVE). PVE challenge the notion of verb-finality in a language. Strictly verb-final languages without any clause-internal PVE are rare. This thesis charts the first structural and descriptive typology of PVE. Verb-final languages vary in the categories they allow as PVE. Allowing for non-oblique PVE is a pivotal threshold: when non-oblique PVE are allowed, PVE can be used for information-structural effects. Many areally and genetically unrelated languages only allow for given PVE but differ in whether the PVE are contrastive. In those languages, verb-finality is not at stake since verb-medial orders are marked. In contrast, the Uralic languages Estonian and Udmurt allow for any PVE, including information focus.
Verb-medial orders can be used in the same contexts as verb-final orders without semantic and pragmatic differences. As such, verb placement is subject to actual free variation. The underlying verb-finality of Estonian and Udmurt can only be inferred from a range of diagnostics indicating optional verb movement in both languages. In general, it is not possible to account for PVE with a uniform analysis: rightwards merge, leftward verb movement, and rightwards phrasal movement are required to capture the cross- and intralinguistic variation. Knowing that a language is verb-final does not allow one to draw conclusions about word order variability in that language. There are patterns of homogeneity, such as the word order variability driven by directly preverbal focus and the givenness of postverbal elements, but those are not brought about by verb-finality alone. Preverbal word order variability is restricted by the more abstract property of obligatory subject movement, whereas the determinant of postverbal word order variability has to be determined in the future.}, language = {en} } @phdthesis{DeCesare2021, author = {De Cesare, Ilaria}, title = {Word order variability and change in German infinitival complements}, doi = {10.25932/publishup-52735}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-527358}, school = {Universit{\"a}t Potsdam}, pages = {xii, 231}, year = {2021}, abstract = {The present work deals with the variation in the linearisation of German infinitival complements from a diachronic perspective. Based on the observation that in present-day German the position of infinitival complements is restricted by properties of the matrix verb (Haider, 2010, Wurmbrand, 2001), whereas this appears much more liberal in older stages of German (Demske, 2008, Mach{\'e} and Abraham, 2011, Demske, 2015), this dissertation investigates the emergence of those restrictions and the factors that have led to a reduced, yet still existing variability.
The study contrasts infinitival complements of two types of matrix verbs, namely raising and control verbs. In present-day German, these show different syntactic behaviour and opposite preferences as far as the position of the infinitive is concerned: while infinitival complements of raising verbs build a single clausal domain with the matrix verb and occur obligatorily intraposed, infinitive complements of control verbs can form clausal constituents and occur predominantly extraposed. This correlation is not attested in older stages of German, at least not until Early New High German. Drawing on diachronic corpus data, the present work provides a description of the changes in the linearisation of infinitival complements from Early New High German to present-day German which aims at finding out when the correlation between infinitive type and word order emerged and further examines their possible causes. The study shows that word order change in German infinitival complements is not a case of syntactic change in the narrow sense, but that the diachronic variation results from the interaction of different language-internal and language-external factors and that it reflects, on the one hand, the influence of language modality on the emerging standard language and, on the other hand, a process of specialisation.}, language = {en} } @phdthesis{Skopeteas2009, author = {Skopeteas, Stavros}, title = {Word order and information structure: empirical methods for linguistic fieldwork}, address = {Potsdam}, pages = {373 S.}, year = {2009}, language = {en} } @phdthesis{Ermeydan2014, author = {Ermeydan, Mahmut Ali}, title = {Wood cell wall modification with hydrophobic molecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71325}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Wood is used for many applications because of its excellent mechanical properties, relative abundance and as it is a renewable resource.
However, its wider utilization as an engineering material is limited because it swells and shrinks upon moisture changes and is susceptible to degradation by microorganisms and/or insects. Chemical modifications of wood have been shown to improve dimensional stability, water repellence and/or durability, thus increasing potential service-life of wood materials. However current treatments are limited because it is difficult to introduce and fix such modifications deep inside the tissue and cell wall. Within the scope of this thesis, novel chemical modification methods of wood cell walls were developed to improve both dimensional stability and water repellence of wood material. These methods were partly inspired by the heartwood formation in living trees, a process, that for some species results in an insertion of hydrophobic chemical substances into the cell walls of already dead wood cells, In the first part of this thesis a chemistry to modify wood cell walls was used, which was inspired by the natural process of heartwood formation. Commercially available hydrophobic flavonoid molecules were effectively inserted in the cell walls of spruce, a softwood species with low natural durability, after a tosylation treatment to obtain "artificial heartwood". Flavonoid inserted cell walls show a reduced moisture absorption, resulting in better dimensional stability, water repellency and increased hardness. This approach was quite different compared to established modifications which mainly address hydroxyl groups of cell wall polymers with hydrophilic substances. In the second part of the work in-situ styrene polymerization inside the tosylated cell walls was studied. It is known that there is a weak adhesion between hydrophobic polymers and hydrophilic cell wall components. 
The hydrophobic styrene monomers were inserted into the tosylated wood cell walls for further polymerization to form polystyrene in the cell walls, which increased the dimensional stability of the bulk wood material and reduced water uptake of the cell walls considerably when compared to controls. In the third part of the work, grafting of another hydrophobic and also biodegradable polymer, poly(ɛ-caprolactone) in the wood cell walls by ring opening polymerization of ɛ-caprolactone was studied at mild temperatures. Results indicated that polycaprolactone attached into the cell walls, caused permanent swelling of the cell walls up to 5\%. Dimensional stability of the bulk wood material increased 40\% and water absorption reduced more than 35\%. A fully biodegradable and hydrophobized wood material was obtained with this method which reduces disposal problem of the modified wood materials and has improved properties to extend the material's service-life. Starting from a bio-inspired approach which showed great promise as an alternative to standard cell wall modifications we showed the possibility of inserting hydrophobic molecules in the cell walls and supported this fact with in-situ styrene and ɛ-caprolactone polymerization into the cell walls. 
It was shown in this thesis that despite the extensive knowledge and long history of using wood as a material there is still room for novel chemical modifications which could have a high impact on improving wood properties.}, language = {en} } @phdthesis{Arf2019, author = {Arf, Shelan Ali}, title = {Women's everyday reality of social insecurity}, doi = {10.25932/publishup-43433}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-434333}, school = {Universit{\"a}t Potsdam}, pages = {334}, year = {2019}, abstract = {Since 1980 Iraq passed through various wars and conflicts including Iraq-Iran war, Saddam Hussein's the Anfals and Halabja campaigns against the Kurds and the killing campaigns against Shiite in 1986, Saddam Hussein's invasion of Kuwait in August 1990, the Gulf war in 1990, Iraq war in 2003 and the fall of Saddam, the conflicts and chaos in the transmission of power after the death of Saddam, and the war against ISIS . All these wars left severe impacts in most households in Iraq; on women and children in particular. The consequences of such long wars could be observed in all sectors including economic, social, cultural and religious sectors. The social structure, norms and attitudes are intensely affected. Many women specifically divorced women found them-selves in challenging different difficulties such as social as well as economic situations. Thus the divorced women in Iraqi Kurdistan are the focus of this research. Considering the fact that there is very few empirical researches on this topic, a constructivist grounded theory methodology (CGT) is viewed as reliable in order to come up with a comprehensive picture about the everyday life of divorced women in Iraqi Kurdistan. Data collected in Sulaimani city in Iraqi Kurdistan. The work of Kathy Charmaz was chosen to be the main methodological context of the research and the main data collection method was individual intensive narrative interviews with divorced women. 
Women generally and divorced women specifically in Iraqi Kurdistan are living in a patriarchal society that is passing through many changes due to the above mentioned wars among many other factors. This research is trying to study the everyday life of divorced women in such situations and the forms of social insecurity they are experiencing. The social institutions starting from the family as a very significant institution for women to the governmental and non-governmental institutions that are working to support women, and the coping strategies, are in focus in this research. The main research argument is that the family is playing ambivalent roles in divorced women's life. For instance, on one side families are revealed to be an essential source of security to most respondents, on the other side families posed also many threats and restrictions on those women. This argument is supported by what is called by Suad Joseph "the paradox of support and suppression". Another important finding is that the state institutions (laws, constitutions, Offices of combating violence against woman and family) are supporting women somehow and offering them protection from the insecurities but it is clear that the existence of the laws does not stop the violence against women in Iraqi Kurdistan, as explained by Pateman, because the laws/the contract is a sexual-social contract that upholds the sex rights of males and grants them more privileges than females. The political instability and tribal social norms also play a major role in influencing the rule of law. It is noteworthy that analyzing the interviews in this research showed that, in spite of the fact that divorced women are living in insecurities and facing difficulties, most of the respondents try to find coping strategies to tackle difficult situations and to deal with the violence they face; these strategies are bargaining, sometimes compromising or resisting …etc.
Different theories were used to explain these coping strategies, such as bargaining with patriarchy, following Kandiyoti, who stated that women living under certain restraints struggle to find ways and strategies to enhance their situations. The research findings also revealed that the western liberal feminist view of agency is limited; this agrees with Saba Mahmood and what she explained about Muslim women's agency. For my respondents, who are divorced women, their agency reveals itself in different ways, in resisting or compromising with or even obeying the power of male relatives, and the normative system in the society. Agency also explains the behavior of women contacting formal state institutions in cases of violence, like the police or the Offices of Combating Violence against Women and Family.}, language = {en} } @phdthesis{Wittenberg2016, author = {Wittenberg, Eva}, title = {With Light Verb Constructions from Syntax to Concepts}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-329-9}, issn = {2190-4545}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82361}, school = {Universit{\"a}t Potsdam}, pages = {ii, 139}, year = {2016}, abstract = {This dissertation uses a common grammatical phenomenon, light verb constructions (LVCs) in English and German, to investigate how syntax-semantics mapping defaults influence the relationships between language processing, representation and conceptualization. LVCs are analyzed as a phenomenon of mismatch in the argument structure. The processing implications of this mismatch are experimentally investigated, using ERPs and a dual task. Data from these experiments point to an increase in working memory. Representational questions are investigated using structural priming. Data from this study suggest that while the syntax of LVCs is not different from other structures', the semantics and mapping are represented differently. 
This hypothesis is tested with a new categorization paradigm, which reveals that the conceptual structure that LVC evoke differ in interesting, and predictable, ways from non-mismatching structures'.}, language = {en} } @phdthesis{Afifi2023, author = {Afifi, Haitham}, title = {Wireless In-Network Processing for Multimedia Applications}, doi = {10.25932/publishup-60437}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604371}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 233}, year = {2023}, abstract = {With the recent growth of sensors, cloud computing handles the data processing of many applications. Processing some of this data on the cloud raises, however, many concerns regarding, e.g., privacy, latency, or single points of failure. Alternatively, thanks to the development of embedded systems, smart wireless devices can share their computation capacity, creating a local wireless cloud for in-network processing. In this context, the processing of an application is divided into smaller jobs so that a device can run one or more jobs. The contribution of this thesis to this scenario is divided into three parts. In part one, I focus on wireless aspects, such as power control and interference management, for deciding which jobs to run on which node and how to route data between nodes. Hence, I formulate optimization problems and develop heuristic and meta-heuristic algorithms to allocate wireless and computation resources. Additionally, to deal with multiple applications competing for these resources, I develop a reinforcement learning (RL) admission controller to decide which application should be admitted. Next, I look into acoustic applications to improve wireless throughput by using microphone clock synchronization to synchronize wireless transmissions. In the second part, I jointly work with colleagues from the acoustic processing field to optimize both network and application (i.e., acoustic) qualities. 
My contribution focuses on the network part, where I study the relation between acoustic and network qualities when selecting a subset of microphones for collecting audio data or selecting a subset of optional jobs for processing these data; too many microphones or too many jobs can lessen quality by unnecessary delays. Hence, I develop RL solutions to select the subset of microphones under network constraints when the speaker is moving while still providing good acoustic quality. Furthermore, I show that autonomous vehicles carrying microphones improve the acoustic qualities of different applications. Accordingly, I develop RL solutions (single and multi-agent ones) for controlling these vehicles. In the third part, I close the gap between theory and practice. I describe the features of my open-source framework used as a proof of concept for wireless in-network processing. Next, I demonstrate how to run some algorithms developed by colleagues from acoustic processing using my framework. I also use the framework for studying in-network delays (wireless and processing) using different distributions of jobs and network topologies.}, language = {en} } @phdthesis{Siegmund2022, author = {Siegmund, Nicole}, title = {Wind driven soil particle uptake: Quantifying drivers of wind erosion across the particle size spectrum}, doi = {10.25932/publishup-57489}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-574897}, school = {Universit{\"a}t Potsdam}, pages = {ix, 56}, year = {2022}, abstract = {Among the multitude of geomorphological processes, aeolian shaping processes are of special character. Pedogenic dust is one of the most important sources of atmospheric aerosols and therefore regarded as a key player for atmospheric processes. Soil dust emissions, being complex in composition and properties, influence atmospheric processes and air quality and have impacts on other ecosystems. 
This is because even though their immediate impact can be considered low (exceptions exist), their constant and large-scale force makes them a powerful player in the earth system. In this dissertation, we unravel a novel scientific understanding of this complex system based on a holistic dataset acquired during a series of field experiments on arable land in La Pampa, Argentina. The field experiments as well as the generated data provide information about topography, various soil parameters, the atmospheric dynamics in the very lower atmosphere (4m height) as well as measurements regarding aeolian particle movement across a wide range of particle size classes from 0.2μm up to coarse sand. The investigations focus on three topics: (a) the effects of low-scale landscape structures on aeolian transport processes of the coarse particle fraction, (b) the horizontal and vertical fluxes of the very fine particles and (c) the impact of wind gusts on particle emissions. Among other considerations presented in this thesis, it could in particular be shown that, even though the small-scale topology does have a clear impact on erosion and deposition patterns, also physical soil parameters need to be taken into account for a robust statistical modelling of the latter. Furthermore, specifically the vertical fluxes of particulate matter have different characteristics for the particle size classes. Finally, a novel statistical measure was introduced to quantify the impact of wind gusts on the particle uptake and applied to the provided data set. The aforementioned measure shows significantly increased particle concentrations during points in time defined as gust events. 
With its holistic approach, this thesis further contributes to the fundamental understanding of how atmosphere and pedosphere are intertwined and affect each other.}, language = {en} } @phdthesis{Loll1997, author = {Loll, Renate}, title = {Wilson loop and discrete lattice methods in quantum gauge and gravitational theories}, pages = {getr. Z{\"a}hl.}, year = {1997}, language = {en} } @phdthesis{Meyn2010, author = {Meyn, Andrea}, title = {Wildfire-climate relationships : conceptual model and regional-scale analyses}, address = {Potsdam}, pages = {148 S.}, year = {2010}, language = {en} } @phdthesis{Haarmann2022, author = {Haarmann, Stephan}, title = {WICKR: A Joint Semantics for Flexible Processes and Data}, doi = {10.25932/publishup-54613}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-546137}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 191}, year = {2022}, abstract = {Knowledge-intensive business processes are flexible and data-driven. Therefore, traditional process modeling languages do not meet their requirements: These languages focus on highly structured processes in which data plays a minor role. As a result, process-oriented information systems fail to assist knowledge workers on executing their processes. We propose a novel case management approach that combines flexible activity-centric processes with data models, and we provide a joint semantics using colored Petri nets. The approach is suited to model, verify, and enact knowledge-intensive processes and can aid the development of information systems that support knowledge work. Knowledge-intensive processes are human-centered, multi-variant, and data-driven. Typical domains include healthcare, insurances, and law. The processes cannot be fully modeled, since the underlying knowledge is too vast and changes too quickly. Thus, models for knowledge-intensive processes are necessarily underspecified. In fact, a case emerges gradually as knowledge workers make informed decisions. 
Knowledge work imposes special requirements on modeling and managing respective processes. They include flexibility during design and execution, ad-hoc adaption to unforeseen situations, and the integration of behavior and data. However, the predominantly used process modeling languages (e.g., BPMN) are unsuited for this task. Therefore, novel modeling languages have been proposed. Many of them focus on activities' data requirements and declarative constraints rather than imperative control flow. Fragment-Based Case Management, for example, combines activity-centric imperative process fragments with declarative data requirements. At runtime, fragments can be combined dynamically, and new ones can be added. Yet, no integrated semantics for flexible activity-centric process models and data models exists. In this thesis, Wickr, a novel case modeling approach extending fragment-based Case Management, is presented. It supports batch processing of data, sharing data among cases, and a full-fledged data model with associations and multiplicity constraints. We develop a translational semantics for Wickr targeting (colored) Petri nets. The semantics assert that a case adheres to the constraints in both the process fragments and the data models. Among other things, multiplicity constraints must not be violated. Furthermore, the semantics are extended to multiple cases that operate on shared data. Wickr shows that the data structure may reflect process behavior and vice versa. Based on its semantics, prototypes for executing and verifying case models showcase the feasibility of Wickr. 
Its applicability to knowledge-intensive and to data-centric processes is evaluated using well-known requirements from related work.}, language = {en} } @phdthesis{Kroll2012, author = {Kroll, Alexander}, title = {Why public managers use performance information : concepts, theory, and empirical analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59795}, school = {Universit{\"a}t Potsdam}, year = {2012}, abstract = {Diese Dissertation untersucht die Verwendung von Performance-Informationen („Kennzahlen") durch F{\"u}hrungskr{\"a}fte in der {\"o}ffentlichen Verwaltung. Unter „Verwendung" wird dabei die zweckorientierte Nutzung der Daten verstanden, um zu steuern, zu lernen und {\"o}ffentliche Leistungen zu verbessern. Die zentrale Frage der Arbeit lautet: Wie k{\"o}nnen Unterschiede bei der Verwendung von Performance-Informationen erkl{\"a}rt werden? Um diese Frage zu beantworten, wird die bereits existierende Literatur strukturiert ausgewertet. Forschungsl{\"u}cken werden aufgezeigt und eigene Ans{\"a}tze vorgestellt, wie diese geschlossen werden k{\"o}nnen. Der erste Teil der Dissertation untersucht den Einfluss von Manager-bezogenen Faktoren auf die Nutzung von Performance-Daten, die bislang in der Forschung noch keine Ber{\"u}cksichtigung gefunden haben. Der zweite Abschnitt testet ein modifiziertes Modell aus der Psychologie, das auf der Annahme basiert, dass die Verwendung von Performance-Informationen ein bewusstes und durchdachtes Verhalten darstellt. Der dritte Teil untersucht, inwieweit sich die Erkl{\"a}rungen f{\"u}r die Nutzung von Performance-Informationen unterscheiden, wenn wir diese nicht nur als Kennzahlen definieren, sondern ebenfalls andere Quellen von „unsystematischem" Feedback ber{\"u}cksichtigen. Die empirischen Ergebnisse der Arbeit basieren auf einer Umfrage aus dem Jahr 2011. 
Im Rahmen dieses Surveys habe ich die mittleren Manager (Amtsleiter und Fachbereichsleiter) aus acht ausgew{\"a}hlten Bereichen aller kreisfreien St{\"a}dte in Deutschland befragt (n=954). Zur Auswertung der Daten wurden die Verfahren Faktorenanalyse, Multiple Regressionsanalyse und Strukturgleichungsmodellierung eingesetzt. Meine Forschung f{\"o}rderte unter anderem vier Erkenntnisse zu Tage, die durch {\"a}hnliche Befunde der verschiedenen Teile der Dissertation abgesichert sind: 1) Die Verwendung von Performance-Daten kann als bewusstes Verhalten von F{\"u}hrungskr{\"a}ften modelliert werden, das durch deren Einstellung sowie durch die Einstellung ihres sozialen Umfeldes bestimmt wird. 2) H{\"a}ufige Nutzer von Performance-Informationen zeigen {\"u}berraschenderweise keine generelle Pr{\"a}ferenz f{\"u}r das abstrakt-analytische Verarbeiten von Informationen. Stattdessen bevorzugen sie, Informationen durch pers{\"o}nliche Interaktionen aufzunehmen. 3) Manager, die sich fr{\"u}h im Rahmen der Ermittlung von Performance-Informationen engagieren, nutzen diese sp{\"a}ter auch h{\"a}ufiger, um Entscheidungen zu treffen. 4) Performance-Berichte sind nur eine Informationsquelle unter vielen. Verwaltungsmanager pr{\"a}ferieren verbales Feedback von Insidern sowie das Feedback von wichtigen externen Stakeholdern gegen{\"u}ber systematischen Performance-Daten. 
Die Dissertation erkl{\"a}rt diese Befunde theoretisch und verdeutlicht deren Implikationen f{\"u}r Theorie und Praxis.}, language = {en} } @phdthesis{Verch2023, author = {Verch, Ronald}, title = {Whole-body electrical muscle stimulation superimposed walking as training tool in the management of type 2 diabetes mellitus}, doi = {10.25932/publishup-63424}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-634240}, school = {Universit{\"a}t Potsdam}, pages = {IX, 78}, year = {2023}, abstract = {Background: The worldwide prevalence of diabetes has been increasing in recent years, with a projected prevalence of 700 million patients by 2045, leading to economic burdens on societies. Type 2 diabetes mellitus (T2DM), representing more than 95\% of all diabetes cases, is a multifactorial metabolic disorder characterized by insulin resistance leading to an imbalance between insulin requirements and supply. Overweight and obesity are the main risk factors for developing type 2 diabetes mellitus. The lifestyle modification of following a healthy diet and physical activity are the primary successful treatment and prevention methods for type 2 diabetes mellitus. Problems may exist with patients not achieving recommended levels of physical activity. Electrical muscle stimulation (EMS) is an increasingly popular training method and has become in the focus of research in recent years. It involves the external application of an electric field to muscles, which can lead to muscle contraction. Positive effects of EMS training have been found in healthy individuals as well as in various patient groups. New EMS devices offer a wide range of mobile applications for whole-body electrical muscle stimulation (WB-EMS) training, e.g., the intensification of dynamic low-intensity endurance exercises through WB-EMS. This dissertation project aims to investigate whether WB-EMS is suitable for intensifying low-intensive dynamic exercises such as walking and Nordic walking. 
Methods: Two independent studies were conducted. The first study aimed to investigate the reliability of exercise parameters during the 10-meter Incremental Shuttle Walk Test (10MISWT) using superimposed WB-EMS (research question 1, sub-question a) and the difference in exercise intensity compared to conventional walking (CON-W, research question 1, sub-question b). The second study aimed to compare differences in exercise parameters between superimposed WB-EMS (WB-EMS-W) and conventional walking (CON-W), as well as between superimposed WB-EMS (WB-EMS-NW) and conventional Nordic walking (CON-NW) on a treadmill (research question 2). Both studies took place in participant groups of healthy, moderately active men aged 35-70 years. During all measurements, the Easy Motion Skin® WB-EMS low frequency stimulation device with adjustable intensities for eight muscle groups was used. The current intensity was individually adjusted for each participant at each trial to ensure safety, avoiding pain and muscle cramps. In study 1, thirteen individuals were included for each sub question. A randomized cross-over design with three measurement appointments used was to avoid confounding factors such as delayed onset muscle soreness. The 10MISWT was performed until the participants no longer met the criteria of the test and recording five outcome measures: peak oxygen uptake (VO2peak), relative VO2peak (rel.VO2peak), maximum walk distance (MWD), blood lactate concentration, and the rate of perceived exertion (RPE). Eleven participants were included in study 2. A randomized cross-over design in a study with four measurement appointments was used to avoid confounding factors. A treadmill test protocol at constant velocity (6.5 m/s) was developed to compare exercise intensities. Oxygen uptake (VO2), relative VO2 (rel.VO2) blood lactate, and the RPE were used as outcome variables. 
Test-retest reliability between measurements was determined using a compilation of absolute and relative measures of reliability. Outcome measures in study 2 were studied using multifactorial analyses of variances. Results: Reliability analysis showed good reliability for VO2peak, rel.VO2peak, MWD and RPE with no statistically significant difference for WB-EMS-W during 10WISWT. However, differences compared to conventional walking in outcome variables were not found. The analysis of the treadmill tests showed significant effects for the factors CON/WB-EMS and W/NW for the outcome variables VO2, rel.VO2 and lactate, with both factors leading to higher results. However, the difference in VO2 and relative VO2 is within the range of biological variability of ± 12\%. The factor combination EMS∗W/NW is statistically non-significant for all three variables. WB-EMS resulted in the higher RPE values, RPE differences for W/NW and EMS∗W/NW were not significant. Discussion: The present project found good reliability for measuring VO2peak, rel. VO2peak, MWD and RPE during 10MISWT during WB-EMS-W, confirming prior research of the test. The test appears technically limited rather than physiologically in healthy, moderately active men. However, it is unsuitable for investigating differences in exercise intensities using WB-EMS-W compared to CON-W due to different perceptions of current intensity between exercise and rest. A treadmill test with constant walking speed was conducted to adjust individual maximum tolerable current intensity for the second part of the project. The treadmill test showed a significant increase in metabolic demands during WB-EMS-W and WB-EMS-NW by an increased VO2 and blood lactate concentration. However, the clinical relevance of these findings remains debatable. The study also found that WB-EMS superimposed exercises are perceived as more strenuous than conventional exercise. 
While in parts comparable studies lead to higher results for VO2, our results are in line with those of other studies using the same frequency. Due to the minor clinical relevance the use of WB-EMS as exercise intensification tool during walking and Nordic walking is limited. High device cost should be considered. Habituation to WB-EMS could increase current intensity tolerance and VO2 and make it a meaningful method in the treatment of T2DM. Recent figures show that WB-EMS is used in obese people to achieve health and weight goals. The supposed benefit should be further investigated scientifically.}, language = {en} } @phdthesis{Jacob2022, author = {Jacob, Karen}, title = {Who are the Bene Israel from India?}, doi = {10.25932/publishup-55450}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-554508}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2022}, abstract = {This study explores the identity of the Bene Israel caste from India and its assimilation into Israeli society. The large immigration from India to Israel started in the early 1950s and continued until the early 1970s. Initially, these immigrants struggled hard as they faced many problems such as the language barrier, cultural differences, a new climate, geographical isolation, and racial discrimination. This analysis focuses on the three major aspects of the integration process involving the Bene Israel: economic, socio-cultural and political. The study covers the period from the early fifties to the present. I will focus on the origin of the Bene Israel, which has evolved after their immigration to Israel; from a Hindu-Muslim lifestyle and customs they integrated into the Jewish life of Israel. Despite its ethnographic nature, this study has theological implications as it is an encounter between Jewish monotheism and Indian polytheism. All the western scholars who researched the Bene Israel community felt impelled to rely on information received by community members themselves. 
No written historical evidence recorded Bene Israel culture and origin. Only during the nineteenth century onwards, after the intrusion of western Jewish missionaries, were Jewish books translated into Marathi . Missionary activities among the Bene Israel served as a catalyst for the Bene Israel themselves to investigate their historical past . Haeem Samuel Kehimkar (1830-1908), a Bene Israel teacher, wrote notes on the history of the Bene Israel in India in Marathi in 1897. Brenda Ness wrote in her dissertation: The results [of the missionary activities] are several works about the community in English and Marathi by Bene-Israel authors which have appeared during the last century. These are, for the most part, not documented; they consist of much theorizing on accepted tradition and tend to be apologetic in nature. There can be no philosophical explanation or rational justification for an entire community to leave their motherland India, and enter into a process of annihilation of its own free will. I see this as a social and cultural suicide. In craving for a better future in Israel, the Indian Bene Israel community pays an enormously heavy price as a people that are today discarded by the East and disowned by the West: because they chose to become something that they never were and never could be. As it is written, "know where you came from, and where you are going." A community with an ancient history from a spiritual culture has completely lost its identity and self-esteem. In concluding this dissertation, I realize the dilemma with which I have confronted the members of the Bene Israel community which I have reviewed after strenuous and constant self-examination. I chose to evolve the diversifications of the younger generations urges towards acceptance, and wish to clarify my intricate analysis of this controversial community. 
The complexity of living in a Jewish State, where citizens cannot fulfill their basic desires, like matrimony, forced an entire community to conceal their true identity and perjure themselves to blend in, for the sake of national integration. Although scholars accepted their new claims, the skepticism of the rabbinate authorities prevails, and they refuse to marry them to this day, suspecting they are an Indian caste.}, language = {en} } @phdthesis{Itonaga2009, author = {Itonaga, Naomi}, title = {White storks (Ciconia ciconia) of Eastern Germany: age-dependent breeding ability, and age- and density-dependent effects on dispersal behavior}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-39052}, school = {Universit{\"a}t Potsdam}, year = {2009}, abstract = {Dispersal behavior plays an important role for the geographical distribution and population structure of any given species. Individual's fitness, reproductive and competitive ability, and dispersal behavior can be determined by the age of the individual. Age-dependent as well as density-dependent dispersal patterns are common in many bird species. In this thesis, I first present age-dependent breeding ability and natal site fidelity in white storks (Ciconia ciconia); migratory birds breeding in large parts of Europe. I predicted that both the proportion of breeding birds and natal site fidelity increase with the age. After the seventies of the last century, following a steep population decline, a recovery of the white stork population has been observed in many regions in Europe. Increasing population density in the white stork population in Eastern Germany especially after 1983 allowed examining density- as well as age-dependent breeding dispersal patterns. Therefore second, I present whether: young birds show more often and longer breeding dispersal than old birds, and frequency of dispersal events increase with the population density increase, especially in the young storks. 
Third, I present age- and density-dependent dispersal direction preferences in the give population. I asked whether and how the major spring migration direction interacts with dispersal directions of white storks: in different age, and under different population densities. The proportion of breeding individuals increased in the first 22 years of life and then decreased suggesting, the senescent decay in aging storks. Young storks were more faithful to their natal sites than old storks probably due to their innate migratory direction and distance. Young storks dispersed more frequently than old storks in general, but not for longer distance. Proportion of dispersing individuals increased significantly with increasing population densities indicating, density- dependent dispersal behavior in white storks. Moreover, the finding of a significant interaction effects between the age of dispersing birds and year (1980-2006) suggesting, older birds dispersed more from their previous nest sites over time due to increased competition. Both young and old storks dispersed along their spring migration direction; however, directional preferences were different in young storks and old storks. Young storks tended to settle down before reaching their previous nest sites (leading to the south-eastward dispersal) while old birds tended to keep migrating along the migration direction after reaching their previous nest sites (leading to the north-westward dispersal). Cues triggering dispersal events may be age-dependent. Changes in the dispersal direction over time were observed. Dispersal direction became obscured during the second half of the observation period (1993-2006). Increase in competition may affect dispersal behavior in storks. I discuss the potential role of: age for the observed age-dependent dispersal behavior, and competition for the density dependent dispersal behavior. This Ph.D. 
thesis contributes significantly to the understanding of population structure and geographical distribution of white storks. Moreover, the presented age- and density (competition)-dependent dispersal behavior helps in understanding the underpinning mechanisms of dispersal behavior in bird species.}, language = {en} } @phdthesis{Knoll2013, author = {Knoll, Lisa Joanna}, title = {When the hedgehog kisses the frog : a functional and structural investigation of syntactic processing in the developing brain}, series = {MPI series in human cognitive and brain sciences}, volume = {150}, journal = {MPI series in human cognitive and brain sciences}, publisher = {MPI}, address = {Leipzig}, isbn = {978-3-941504-34-9}, pages = {157 S.}, year = {2013}, language = {en} } @phdthesis{SchmidtKassow2007, author = {Schmidt-Kassow, Maren}, title = {What's beat got to do with it? The influence of meter on syntactic processing: ERP evidence from healthy and patient populations}, series = {MPI series in human cognitive and brain sciences}, volume = {89}, journal = {MPI series in human cognitive and brain sciences}, publisher = {MPI for Human Cognitive and Brain Sciences}, address = {Leipzig}, isbn = {978-3-936816-63-1}, pages = {iii, 190 S. : Ill., graph. Darst.}, year = {2007}, language = {en} } @phdthesis{Lorey2014, author = {Lorey, Johannes}, title = {What's in a query : analyzing, predicting, and managing linked data access}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The term Linked Data refers to connected information sources comprising structured data about a wide range of topics and for a multitude of applications. In recent years, the conceptional and technical foundations of Linked Data have been formalized and refined. 
To this end, well-known technologies have been established, such as the Resource Description Framework (RDF) as a Linked Data model or the SPARQL Protocol and RDF Query Language (SPARQL) for retrieving this information. Whereas most research has been conducted in the area of generating and publishing Linked Data, this thesis presents novel approaches for improved management. In particular, we illustrate new methods for analyzing and processing SPARQL queries. Here, we present two algorithms suitable for identifying structural relationships between these queries. Both algorithms are applied to a large number of real-world requests to evaluate the performance of the approaches and the quality of their results. Based on this, we introduce different strategies enabling optimized access of Linked Data sources. We demonstrate how the presented approach facilitates effective utilization of SPARQL endpoints by prefetching results relevant for multiple subsequent requests. Furthermore, we contribute a set of metrics for determining technical characteristics of such knowledge bases. To this end, we devise practical heuristics and validate them through thorough analysis of real-world data sources. We discuss the findings and evaluate their impact on utilizing the endpoints. Moreover, we detail the adoption of a scalable infrastructure for improving Linked Data discovery and consumption. As we outline in an exemplary use case, this platform is eligible both for processing and provisioning the corresponding information.}, language = {en} } @phdthesis{Urbach2012, author = {Urbach, Tina}, title = {What makes or breaks proactivity at work : how personal motives affect the evaluation of improvement suggestions}, address = {Potsdam}, pages = {226 S.}, year = {2012}, language = {en} } @phdthesis{Scheithauer2019, author = {Scheithauer, Linda}, title = {What have you been doing besides studying? 
Evaluators' competence attributions on extracurricular activities in application documents}, pages = {213}, year = {2019}, language = {en} } @phdthesis{Maraun2006, author = {Maraun, Douglas}, title = {What can we learn from climate data? : Methods for fluctuation, time/scale and phase analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-9047}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {Since Galileo Galilei invented the first thermometer, researchers have tried to understand the complex dynamics of ocean and atmosphere by means of scientific methods. They observe nature and formulate theories about the climate system. Since some decades powerful computers are capable to simulate the past and future evolution of climate. Time series analysis tries to link the observed data to the computer models: Using statistical methods, one estimates characteristic properties of the underlying climatological processes that in turn can enter the models. The quality of an estimation is evaluated by means of error bars and significance testing. On the one hand, such a test should be capable to detect interesting features, i.e. be sensitive. On the other hand, it should be robust and sort out false positive results, i.e. be specific. This thesis mainly aims to contribute to methodological questions of time series analysis with a focus on sensitivity and specificity and to apply the investigated methods to recent climatological problems. First, the inference of long-range correlations by means of Detrended Fluctuation Analysis (DFA) is studied. It is argued that power-law scaling of the fluctuation function and thus long-memory may not be assumed a priori but have to be established. This requires to investigate the local slopes of the fluctuation function. The variability characteristic for stochastic processes is accounted for by calculating empirical confidence regions. 
The comparison of a long-memory with a short-memory model shows that the inference of long-range correlations from a finite amount of data by means of DFA is not specific. When aiming to infer short memory by means of DFA, a local slope larger than {$\alpha=0.5$} for large scales does not necessarily imply long-memory. Also, a finite scaling of the autocorrelation function is shifted to larger scales in the fluctuation function. It turns out that long-range correlations cannot be concluded unambiguously from the DFA results for the Prague temperature data set. In the second part of the thesis, an equivalence class of nonstationary Gaussian stochastic processes is defined in the wavelet domain. These processes are characterized by means of wavelet multipliers and exhibit well defined time dependent spectral properties; they allow one to generate realizations of any nonstationary Gaussian process. The dependency of the realizations on the wavelets used for the generation is studied, bias and variance of the wavelet sample spectrum are calculated. To overcome the difficulties of multiple testing, an areawise significance test is developed and compared to the conventional pointwise test in terms of sensitivity and specificity. Applications to climatological and hydrological questions are presented. The thesis at hand mainly aims to contribute to methodological questions of time series analysis and to apply the investigated methods to recent climatological problems. In the last part, the coupling between El Nino/Southern Oscillation (ENSO) and the Indian Monsoon on inter-annual time scales is studied by means of Hilbert transformation and a curvature defined phase. This method allows one to investigate the relation of two oscillating systems with respect to their phases, independently of their amplitudes. The performance of the technique is evaluated using a toy model. 
From the data, distinct epochs are identified, especially two intervals of phase coherence, 1886-1908 and 1964-1980, confirming earlier findings from a new point of view. A significance test of high specificity corroborates these results. Also so far unknown periods of coupling invisible to linear methods are detected. These findings suggest that the decreasing correlation during the last decades might be partly inherent to the ENSO/Monsoon system. Finally, a possible interpretation of how volcanic radiative forcing could cause the coupling is outlined.}, subject = {Spektralanalyse }, language = {en} } @phdthesis{CamaraMattosMartins2011, author = {Camara Mattos Martins, Marina}, title = {What are the downstream targets of trehalose-6-phosphate signalling in plants?}, address = {Potsdam}, pages = {164 S.}, year = {2011}, language = {en} } @phdthesis{FigueroaCampos2022, author = {Figueroa Campos, Gustavo Adolfo}, title = {Wet-coffee processing production wastes}, doi = {10.25932/publishup-55882}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-558828}, school = {Universit{\"a}t Potsdam}, pages = {X, 159}, year = {2022}, abstract = {Countries processing raw coffee beans are burdened with low economical incomes to fight the serious environmental problems caused by the by-products and wastewater that is generated during the wet-coffee processing. The aim of this work was to develop alternative methods of improving the waste by-product quality and thus making the process economically more attractive with valorization options that can be brought to the coffee producers. The type of processing influences not only the constitution of green coffee but also of by-products and wastewater. Therefore, coffee bean samples as well as by-products and wastewater collected at different production steps of were analyzed. Results show that the composition of wastewater is dependent on how much and how often the wastewater is recycled in the processing. 
Considering the coffee beans, results indicate that the proteins might be affected during processing and a positive effect of the fermentation on the solubility and accessibility of proteins seems to be probable. The steps of coffee processing influence the different constituents of green coffee beans which, during roasting, give rise to aroma compounds and express the characteristics of roasted coffee beans. Knowing that this group of compounds is involved in the Maillard reaction during roasting, this possibility could be utilized for the coffee producers to improve the quality of green coffee beans and finally the coffee cup quality. The valorization of coffee wastes through modification to activated carbon has been considered as a low-cost option creating an adsorbent with prospective to compete with commercial carbons. Activation protocol using spent coffee and parchment was developed and prepared to assess their adsorption capacity for organic compounds. Spent coffee grounds and parchment proved to have similar adsorption efficiency to commercial activated carbon. The results of this study document a significant information originating from the processing of the de-pulped to green coffee beans. Furthermore, it showed that coffee parchment and spent coffee grounds can be valorized as low-cost option to produce activated carbons. Further work needs to be directed to the optimization of the activation methods to improve the quality of the materials produced and the viability of applying such experiments in-situ to bring the coffee producer further valorization opportunities with environmental perspectives. 
Coffee producers would profit in establishing appropriate simple technologies to improve green coffee quality, re-use coffee by-products, and wastewater valorization.}, language = {en} } @phdthesis{Fuchs2013, author = {Fuchs, Sven}, title = {Well-log based determination of rock thermal conductivity in the North German Basin}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-67801}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {In sedimentary basins, rock thermal conductivity can vary both laterally and vertically, thus altering the basin's thermal structure locally and regionally. Knowledge of the thermal conductivity of geological formations and its spatial variations is essential, not only for quantifying basin evolution and hydrocarbon maturation processes, but also for understanding geothermal conditions in a geological setting. In conjunction with the temperature gradient, thermal conductivity represents the basic input parameter for the determination of the heat-flow density; which, in turn, is applied as a major input parameter in thermal modeling at different scales. Drill-core samples, which are necessary to determine thermal properties by laboratory measurements, are rarely available and often limited to previously explored reservoir formations. Thus, thermal conductivities of Mesozoic rocks in the North German Basin (NGB) are largely unknown. In contrast, geophysical borehole measurements are often available for the entire drilled sequence. Therefore, prediction equations to determine thermal conductivity based on well-log data are desirable. 
In this study rock thermal conductivity was investigated on different scales by (1) providing thermal-conductivity measurements on Mesozoic rocks, (2) evaluating and improving commonly applied mixing models which were used to estimate matrix and pore-filled rock thermal conductivities, and (3) developing new well-log based equations to predict thermal conductivity in boreholes without core control. Laboratory measurements are performed on sedimentary rock of major geothermal reservoirs in the Northeast German Basin (NEGB) (Aalenian, Rhaethian-Liassic, Stuttgart Fm., and Middle Buntsandstein). Samples are obtained from eight deep geothermal wells that approach depths of up to 2,500 m. Bulk thermal conductivities of Mesozoic sandstones range between 2.1 and 3.9 W/(m∙K), while matrix thermal conductivity ranges between 3.4 and 7.4 W/(m∙K). Local heat flow for the Stralsund location averages 76 mW/m², which is in good agreement with values reported previously for the NEGB. For the first time, in-situ bulk thermal conductivity is indirectly calculated for entire borehole profiles in the NEGB using the determined surface heat flow and measured temperature data. Average bulk thermal conductivity, derived for geological formations within the Mesozoic section, ranges between 1.5 and 3.1 W/(m∙K). The measurement of both dry- and water-saturated thermal conductivities allows further evaluation of different two-component mixing models which are often applied in geothermal calculations (e.g., arithmetic mean, geometric mean, harmonic mean, Hashin-Shtrikman mean, and effective-medium theory mean). It is found that the geometric-mean model shows the best correlation between calculated and measured bulk thermal conductivity. However, by applying new model-dependent correction equations, the quality of fit could be significantly improved and the error diffusion of each model reduced. 
The 'corrected' geometric mean provides the most satisfying results and constitutes a universally applicable model for sedimentary rocks. Furthermore, lithotype-specific and model-independent conversion equations are developed permitting a calculation of water-saturated thermal conductivity from dry-measured thermal conductivity and porosity within an error range of 5 to 10\%. The limited availability of core samples and the expensive core-based laboratory measurements make it worthwhile to use petrophysical well logs to determine thermal conductivity for sedimentary rocks. The approach followed in this study is based on the detailed analyses of the relationships between thermal conductivity of rock-forming minerals, which are most abundant in sedimentary rocks, and the properties measured by standard logging tools. By using multivariate statistics separately for clastic, carbonate and evaporite rocks, the findings from these analyses allow the development of prediction equations from large artificial data sets that predict matrix thermal conductivity within an error of 4 to 11\%. These equations are validated successfully on a comprehensive subsurface data set from the NGB. In comparison to the application of earlier published approaches formation-dependent developed for certain areas, the new developed equations show a significant error reduction of up to 50\%. These results are used to infer rock thermal conductivity for entire borehole profiles. By inversion of corrected in-situ thermal-conductivity profiles, temperature profiles are calculated and compared to measured high-precision temperature logs. The resulting uncertainty in temperature prediction averages < 5\%, which reveals the excellent temperature prediction capabilities using the presented approach. 
In conclusion, data and methods are provided to achieve a much more detailed parameterization of thermal models.}, language = {en} } @phdthesis{Pingel2021, author = {Pingel, Ruta}, title = {Well-being effects of proactivity through the lens of self-determination theory}, school = {Universit{\"a}t Potsdam}, pages = {106}, year = {2021}, abstract = {In modern times of evolving globalization and continuous technological developments, organizations are required to respond to ever-changing demands. Therefore, to be successful in today's highly uncertain environments, organizations need employees to actively search for opportunities, anticipate challenges, and act ahead. In other words, employee proactivity in the workplace represents a highly valuable resource in today's organizations. Empirical studies conducted as part of this thesis advance the research on the outcomes of proactivity from the individual perspective. The main contribution of this thesis pertains to revealing several important individual and contextual conditions under which engaging in proactivity will have negative and positive effects on employees' well-being and their consequent behaviours, as well as shedding light on the unique psychological mechanisms through which these effects unfold. From a practical standpoint, this research underscores the importance of creating work environments that support employees' autonomous motivation for proactivity and urges organizations and managers to be mindful about the pressures they place on employees to be proactive at work. 
Besides, this thesis stimulates research efforts aimed at further extending our knowledge of when and how individual proactive behaviours at work will do more good than harm for those who enact them.}, language = {en} } @phdthesis{Mofina2012, author = {Mofina, Sabine}, title = {Weighing the Role of the Hypoxia Inducible Factor-1a in Tumor and Stroma for tumorigenesis}, address = {Potsdam}, pages = {IX, 132 S.}, year = {2012}, language = {en} } @phdthesis{LedererKessler2008, author = {Lederer, Markus and Kessler, Oliver}, title = {Weber, S., (Hrsg.) Globalization and the European Political Economy; New York, Univ., 2001}, year = {2008}, language = {en} } @phdthesis{Klimke2018, author = {Klimke, Jan}, title = {Web-based provisioning and application of large-scale virtual 3D city models}, doi = {10.25932/publishup-42805}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-428053}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 141}, year = {2018}, abstract = {Virtual 3D city models represent and integrate a variety of spatial data and georeferenced data related to urban areas. With the help of improved remote-sensing technology, official 3D cadastral data, open data or geodata crowdsourcing, the quantity and availability of such data are constantly expanding and its quality is ever improving for many major cities and metropolitan regions. There are numerous fields of applications for such data, including city planning and development, environmental analysis and simulation, disaster and risk management, navigation systems, and interactive city maps. The dissemination and the interactive use of virtual 3D city models represent key technical functionality required by nearly all corresponding systems, services, and applications. The size and complexity of virtual 3D city models, their management, their handling, and especially their visualization represent challenging tasks. 
For example, mobile applications can hardly handle these models due to their massive data volume and data heterogeneity. Therefore, the efficient usage of all computational resources (e.g., storage, processing power, main memory, and graphics hardware, etc.) is a key requirement for software engineering in this field. Common approaches are based on complex clients that require the 3D model data (e.g., 3D meshes and 2D textures) to be transferred to them and that then render those received 3D models. However, these applications have to implement most stages of the visualization pipeline on client side. Thus, as high-quality 3D rendering processes strongly depend on locally available computer graphics resources, software engineering faces the challenge of building robust cross-platform client implementations. Web-based provisioning aims at providing a service-oriented software architecture that consists of tailored functional components for building web-based and mobile applications that manage and visualize virtual 3D city models. This thesis presents corresponding concepts and techniques for web-based provisioning of virtual 3D city models. In particular, it introduces services that allow us to efficiently build applications for virtual 3D city models based on a fine-grained service concept. The thesis covers five main areas: 1. A Service-Based Concept for Image-Based Provisioning of Virtual 3D City Models It creates a frame for a broad range of services related to the rendering and image-based dissemination of virtual 3D city models. 2. 3D Rendering Service for Virtual 3D City Models This service provides efficient, high-quality 3D rendering functionality for virtual 3D city models. In particular, it copes with requirements such as standardized data formats, massive model texturing, detailed 3D geometry, access to associated feature data, and non-assumed frame-to-frame coherence for parallel service requests. 
In addition, it supports thematic and artistic styling based on an expandable graphics effects library. 3. Layered Map Service for Virtual 3D City Models It generates a map-like representation of virtual 3D city models using an oblique view. It provides high visual quality, fast initial loading times, simple map-based interaction and feature data access. Based on a configurable client framework, mobile and web-based applications for virtual 3D city models can be created easily. 4. Video Service for Virtual 3D City Models It creates and synthesizes videos from virtual 3D city models. Without requiring client-side 3D rendering capabilities, users can create camera paths by a map-based user interface, configure scene contents, styling, image overlays, text overlays, and their transitions. The service significantly reduces the manual effort typically required to produce such videos. The videos can automatically be updated when the underlying data changes. 5. Service-Based Camera Interaction It supports task-based 3D camera interactions, which can be integrated seamlessly into service-based visualization applications. It is demonstrated how to build such web-based interactive applications for virtual 3D city models using this camera service. These contributions provide a framework for design, implementation, and deployment of future web-based applications, systems, and services for virtual 3D city models. The approach shows how to decompose the complex, monolithic functionality of current 3D geovisualization systems into independently designed, implemented, and operated service- oriented units. 
In that sense, this thesis also contributes to microservice architectures for 3D geovisualization systems—a key challenge of today's IT systems engineering to build scalable IT solutions.}, language = {en} } @phdthesis{Leeratanavalee2002, author = {Leeratanavalee, Sorasak}, title = {Weak Hypersubstitutions}, pages = {105 S.}, year = {2002}, language = {en} } @phdthesis{Hayn2010, author = {Hayn, Michael}, title = {Wavelet analysis and spline modeling of geophysical data on the sphere}, address = {Potsdam}, pages = {95 S. : graph. Darst.}, year = {2010}, language = {en} } @phdthesis{Solopow2019, author = {Solopow, Sergej}, title = {Wavelength dependent demagnetization dynamics in Co2MnGa Heusler-alloy}, doi = {10.25932/publishup-42786}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427860}, school = {Universit{\"a}t Potsdam}, pages = {91}, year = {2019}, abstract = {In dieser Arbeit haben wir ultraschnelle Entmagnetisierung an einer Heusler-Legierung untersucht. Es handelt sich um ein Halbmetall, das sich in einer ferromagnetischen Phase befindet. Die Besonderheit dieses Materials besteht im Aufbau einer Bandstruktur. Diese bildet Zustandsdichten, in der die Majorit{\"a}tselektronen eine metallische B{\"a}nderbildung aufweisen und die Minorit{\"a}tselektronen eine Bandl{\"u}cke in der N{\"a}he des Fermi-Niveaus aufweisen, das dem Aufbau eines Halbleiters entspricht. Mit Hilfe der Pump-Probe-Experimente haben wir zeitaufgel{\"o}ste Messungen durchgef{\"u}hrt. F{\"u}r das Pumpen wurden ultrakurze Laserpulse mit einer Pulsdauer von 100 fs benutzt. Wir haben dabei zwei verschiedene Wellenl{\"a}ngen mit 400 nm und 1240 nm benutzt, um den Effekt der Prim{\"a}ranregung und der Bandl{\"u}cke in den Minorit{\"a}tszust{\"a}nden zu untersuchen. Dabei wurde zum ersten Mal OPA (Optical Parametrical Amplifier) f{\"u}r die Erzeugung der langwelligen Pulse an der FEMTOSPEX-Beamline getestet und erfolgreich bei den Experimenten verwendet. 
Wir haben Wellenl{\"a}ngen bedingte Unterschiede in der Entmagnetisierungszeit gemessen. Mit der Erh{\"o}hung der Photonenenergie ist der Prozess der Entmagnetisierung deutlich schneller als bei einer niedrigeren Photonenenergie. Wir verkn{\"u}pften diese Ergebnisse mit der Existenz der Energiel{\"u}cke f{\"u}r Minorit{\"a}tselektronen. Mit Hilfe lokaler Elliot-Yafet-Streuprozesse k{\"o}nnen die beobachteten Zeiten gut erkl{\"a}rt werden. Wir haben in dieser Arbeit auch eine neue Probe-Methode f{\"u}r die Magnetisierung angewandt und somit experimentell deren Effektivit{\"a}t, n{\"a}mlich XMCD in Refletiongeometry, best{\"a}tigen k{\"o}nnen. Statische Experimente liefern somit deutliche Indizien daf{\"u}r, dass eine magnetische von einer rein elektronischen Antwort des Systems getrennt werden kann. Unter der Voraussetzung, dass die Photonenenergie der R{\"o}ntgenstrahlung auf die L3 Kante des entsprechenden Elements eingestellt, ein geeigneter Einfallswinkel gew{\"a}hlt und die zirkulare Polarisation fixiert wird, ist es m{\"o}glich, diese Methode zur Analyse magnetischer und elektronischer Respons anzuwenden.}, language = {en} } @phdthesis{FortesMartin2023, author = {Fortes Mart{\´i}n, Rebeca}, title = {Water-in-oil microemulsions as soft-templates to mediate nanoparticle interfacial assembly into hybrid nanostructures}, doi = {10.25932/publishup-57180}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-571801}, school = {Universit{\"a}t Potsdam}, pages = {119}, year = {2023}, abstract = {Hybrid nanomaterials offer the combination of individual properties of different types of nanoparticles. Some strategies for the development of new nanostructures in larger scale rely on the self-assembly of nanoparticles as a bottom-up approach. The use of templates provides ordered assemblies in defined patterns. In a typical soft-template, nanoparticles and other surface-active agents are incorporated into non-miscible liquids. 
The resulting self-organized dispersions will mediate nanoparticle interactions to control the subsequent self-assembly. Especially interactions between nanoparticles of very different dispersibility and functionality can be directed at a liquid-liquid interface. In this project, water-in-oil microemulsions were formulated from quasi-ternary mixtures with Aerosol-OT as surfactant. Oleyl-capped superparamagnetic iron oxide and/or silver nanoparticles were incorporated in the continuous organic phase, while polyethyleneimine-stabilized gold nanoparticles were confined in the dispersed water droplets. Each type of nanoparticle can modulate the surfactant film and the inter-droplet interactions in diverse ways, and their combination causes synergistic effects. Interfacial assemblies of nanoparticles resulted after phase-separation. On one hand, from a biphasic Winsor type II system at low surfactant concentration, drop-casting of the upper phase afforded thin films of ordered nanoparticles in filament-like networks. Detailed characterization proved that this templated assembly over a surface is based on the controlled clustering of nanoparticles and the elongation of the microemulsion droplets. This process offers versatility to use different nanoparticle compositions by keeping the surface functionalization, in different solvents and over different surfaces. On the other hand, a magnetic heterocoagulate was formed at higher surfactant concentration, whose phase-transfer from oleic acid to water was possible with another auxiliary surfactant in ethanol-water mixture. When the original components were initially mixed under heating, defined oil-in-water, magnetic-responsive nanostructures were obtained, consisting on water-dispersible nanoparticle domains embedded by a matrix-shell of oil-dispersible nanoparticles. 
Herein, two different approaches were demonstrated to form diverse hybrid nanostructures from reverse microemulsions as self-organized dispersions of the same components. This shows that microemulsions are versatile soft-templates not only for the synthesis of nanoparticles, but also for their self-assembly, which suggest new approaches towards the production of new sophisticated nanomaterials in larger scale.}, language = {en} } @phdthesis{Burdack2014, author = {Burdack, Doreen}, title = {Water management policies and their impact on irrigated crop production in the Murray-Darling Basin, Australia}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-306-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72245}, school = {Universit{\"a}t Potsdam}, pages = {307}, year = {2014}, abstract = {The economic impact analysis contained in this book shows how irrigation farming is particularly susceptible when applying certain water management policies in the Australian Murray-Darling Basin, one of the world largest river basins and Australia's most fertile region. By comparing different pricing and non-pricing water management policies with the help of the Water Integrated Market Model, it is found that the impact of water demand reducing policies is most severe on crops that need to be intensively irrigated and are at the same time less water productive. 
A combination of increasingly frequent and severe droughts and the application of policies that decrease agricultural water demand, in the same region, will create a situation in which the highly water-dependent crops, rice and cotton, cannot be cultivated at all.}, language = {en} } @phdthesis{Munz2017, author = {Munz, Matthias}, title = {Water flow and heat transport modelling at the interface between river and aquifer}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404319}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 123}, year = {2017}, abstract = {The functioning of the surface water-groundwater interface as buffer, filter and reactive zone is important for water quality, ecological health and resilience of streams and riparian ecosystems. Solute and heat exchange across this interface is driven by the advection of water. Characterizing the flow conditions in the streambed is challenging as flow patterns are often complex and multidimensional, driven by surface hydraulic gradients and groundwater discharge. This thesis presents the results of an integrated approach of studies, ranging from the acquisition of field data, the development of analytical and numerical approaches to analyse vertical temperature profiles to the detailed, fully-integrated 3D numerical modelling of water and heat flux at the reach scale. All techniques were applied in order to characterize exchange flux between stream and groundwater, hyporheic flow paths and temperature patterns. The study was conducted at a reach-scale section of the lowland Selke River, characterized by distinctive pool riffle sequences and fluvial islands and gravel bars. Continuous time series of hydraulic heads and temperatures were measured at different depths in the river bank, the hyporheic zone and within the river. The analyses of the measured diurnal temperature variation in riverbed sediments provided detailed information about the exchange flux between river and groundwater. 
Beyond the one-dimensional vertical water flow in the riverbed sediment, hyporheic and parafluvial flow patterns were identified. Subsurface flow direction and magnitude around fluvial islands and gravel bars at the study site strongly depended on the position around the geomorphological structures and on the river stage. Horizontal water flux in the streambed substantially impacted temperature patterns in the streambed. At locations with substantial horizontal fluxes the penetration depths of daily temperature fluctuations was reduced in comparison to purely vertical exchange conditions. The calibrated and validated 3D fully-integrated model of reach-scale water and heat fluxes across the river-groundwater interface was able to accurately represent the real system. The magnitude and variations of the simulated temperatures matched the observed ones, with an average mean absolute error of 0.7 °C and an average Nash Sutcliffe Efficiency of 0.87. The simulation results showed that the water and heat exchange at the surface water-groundwater interface is highly variable in space and time with zones of daily temperature oscillations penetrating deep into the sediment and spots of daily constant temperature following the average groundwater temperature. The average hyporheic flow path temperature was found to strongly correlate with the flow path residence time (flow path length) and the temperature gradient between river and groundwater. Despite the complexity of these processes, the simulation results allowed the derivation of a general empirical relationship between the hyporheic residence times and temperature patterns. The presented results improve our understanding of the complex spatial and temporal dynamics of water flux and thermal processes within the shallow streambed. 
Understanding these links provides a general basis from which to assess hyporheic temperature conditions in river reaches.}, language = {en} } @phdthesis{Janetschek2016, author = {Janetschek, Hannah}, title = {Water development programs in India}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401337}, school = {Universit{\"a}t Potsdam}, pages = {279}, year = {2016}, abstract = {In the past decades, development cooperation (DC) led by conventional bi- and multilateral donors has been joined by a large number of small, private or public-private donors. This pluralism of actors raises questions as to whether or not these new donors are able to implement projects more or less effectively than their conventional counterparts. In contrast to their predecessors, the new donors have committed themselves to be more pragmatic, innovative and flexible in their development cooperation measures. However, they are also criticized for weakening the function of local civil society and have the reputation of being an intransparent and often controversial alternative to public services. With additional financial resources and their new approach to development, the new donors have been described in the literature as playing a controversial role in transforming development cooperation. This dissertation compares the effectiveness of initiatives by new and conventional donors with regard to the provision of public goods and services to the poor in the water and sanitation sector in India. India is an emerging country but it is experiencing high poverty rates and poor water supply in predominantly rural areas. It lends itself for analyzing this research theme as it is currently being confronted by a large number of actors and approaches that aim to find solutions for these challenges . 
In the theoretical framework of this dissertation, four governance configurations are derived from the interaction of varying actor types with regard to hierarchical and non-hierarchical steering of their interactions. These four governance configurations differ in decision-making responsibilities, accountability and delegation of tasks or direction of information flow. The assumption on actor relationships and steering is supplemented by possible alternative explanations in the empirical investigation, such as resource availability, the inheritance of structures and institutions from previous projects in a project context, gaining acceptance through beneficiaries (local legitimacy) as a door opener, and asymmetries of power in the project context. Case study evidence from seven projects reveals that the actors' relationship is important for successful project delivery. Additionally, the results show that there is a systematic difference between conventional and new donors. Projects led by conventional donors were consistently more successful, due to an actor relationship that placed the responsibility in the hands of the recipient actors and benefited from the trust and reputation of a long-term cooperation. The trust and reputation of conventional donors always went along with a back-up from federal level and trickled down as reputation also at local level implementation. Furthermore, charismatic leaders, as well as the acquired structures and institutions of predecessor projects, also proved to be a positive influencing factor for successful project implementation. Despite the mixed results of the seven case studies, central recommendations for action can be derived for the various actors involved in development cooperation. 
For example, new donors could fulfill a supplementary function with conventional donors by developing innovative project approaches through pilot studies and then implementing them as a supplement to the projects of conventional donors on the ground. In return, conventional donors would have to make room for the new donors by integrating their approaches into already existing programs in order to promote donor harmonization. It is also important to identify and occupy niches for activities and to promote harmonization among donors on state and federal sides. The empirical results demonstrate the need for a harmonization strategy of different donor types in order to prevent duplication, over-experimentation and the failure of development programs. A transformation to successful and sustainable development cooperation can only be achieved through more coordination processes and national self-responsibility.}, language = {en} } @phdthesis{Heiden2018, author = {Heiden, Sophia L.}, title = {Water at α-alumina surfaces}, doi = {10.25932/publishup-42636}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426366}, school = {Universit{\"a}t Potsdam}, pages = {124}, year = {2018}, abstract = {The (0001) surface of α-Al₂O₃ is the most stable surface cut under UHV conditions and was studied by many groups both theoretically and experimentally. Reaction barriers computed with GGA functionals are known to be underestimated. Based on an example reaction at the (0001) surface, this work seeks to improve this rate by applying a hybrid functional method and perturbation theory (LMP2) with an atomic orbital basis, rather than a plane wave basis. In addition to activation barriers, we calculate the stability and vibrational frequencies of water on the surface. Adsorption energies were compared to PW calculations and confirmed PBE+D2/PW stability results.
Especially the vibrational frequencies with the B3LYP hybrid functional that have been calculated for the (0001) surface are in good agreement with experimental findings. Concerning the barriers and the reaction rate constant, the expectations are fully met. It could be shown that recalculation of the transition state leads to an increased barrier, and a decreased rate constant when hybrid functionals or LMP2 are applied. Furthermore, the molecular beam scattering of water on the (0001) surface was studied. In a previous work by Hass the dissociation was studied by AIMD of molecularly adsorbed water, referring to an equilibrium situation. The experimental method of obtaining this is pinhole dosing. In contrast to this earlier work, the dissociation process of heavy water that is brought onto the surface from a molecular beam source was modeled in this work by periodic ab initio molecular dynamics simulations. This experimental method results in a non-equilibrium situation. The calculations with different surface and beam models allow us to understand the results of the non-equilibrium situation better. In contrast to a more equilibrium situation with pinhole dosing, this gives an increase in the dissociation probability, which could be explained and also understood mechanistically by those calculations. In this work, good progress was made in understanding the $(11\bar{2}0)$ surface of α-Al₂O₃ in contact with water in the low-coverage regime. This surface cut is the third most stable one under UHV conditions and has not been studied to a great extent yet. After optimization of the clean, defect free surface, the stability of different adsorbed species could be classified. One molecular minimum and several dissociated species could be detected. Starting from these, reaction rates for various surface reactions were evaluated.
A dissociation reaction was shown to be very fast because the molecular minimum is relatively unstable, whereas diffusion reactions cover a wider range from fast to slow. In general, the $(11\bar{2}0)$ surface appears to be much more reactive against water than the (0001) surface. In addition to reactivity, harmonic vibrational frequencies were determined for comparison with the findings of the experimental "Interfacial Molecular Spectroscopy" group from the Fritz Haber Institute in Berlin. Especially the vibrational frequencies of OD species could be assigned to vibrations from experimental SFG spectra with very good agreement. Also, lattice vibrations were studied in close collaboration with the experimental partners. They recorded SFG spectra at very low frequencies to get deep into the lattice vibration region. Correspondingly, a bigger slab model with greater expansion perpendicular to the surface was applied, considering more layers in the bulk. Also with the lattice vibrations we could obtain reasonably good agreement in terms of energy differences between the peaks.}, language = {en} } @phdthesis{Kucharzewski2024, author = {Kucharzewski, Tim}, title = {Wars and the world}, publisher = {Cambridge Scholars Publishing}, address = {Newcastle upon Tyne}, isbn = {978-1-0364-0374-4}, pages = {xxi, 509}, year = {2024}, abstract = {This book offers a descriptive analysis of the Soviet/Russian wars in Afghanistan, Chechnya, and Georgia, as well as an in-depth exploration of the ways in which these wars are framed in the collective consciousness created by global popular culture. Russian and Western modalities of remembrance have been, and remain, engaged in a world war that takes place (not exclusively, but intensively) on the level of popular culture. The action/reaction dynamic, confrontational narratives and othering between the two "camps" never ceased.
The Cold War, in many ways and contrary to the views of many others who hoped for the end of history, never really ended.}, language = {en} } @phdthesis{Walter2009, author = {Walter, Thomas}, title = {Volcano-tectonics: Volcano deformation, dynamics and structure affected by intrusions and earthquakes}, address = {Potsdam}, pages = {137 S.}, year = {2009}, language = {en} } @phdthesis{Salzer2017, author = {Salzer, Jacqueline T.}, title = {Volcano dome deformation processes analysed with high resolution InSAR and camera-based techniques}, school = {Universit{\"a}t Potsdam}, pages = {137}, year = {2017}, language = {en} } @phdthesis{Ruch2010, author = {Ruch, Jo{\"e}l}, title = {Volcano deformation analysis in the Lazufre area (central Andes) using geodetic and geological observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-47361}, school = {Universit{\"a}t Potsdam}, year = {2010}, abstract = {Large-scale volcanic deformation recently detected by radar interferometry (InSAR) provides new information and thus new scientific challenges for understanding volcano-tectonic activity and magmatic systems. The destabilization of such a system at depth noticeably affects the surrounding environment through magma injection, ground displacement and volcanic eruptions. To determine the spatiotemporal evolution of the Lazufre volcanic area located in the central Andes, we combined short-term ground displacement acquired by InSAR with long-term geological observations. Ground displacement was first detected using InSAR in 1997. By 2008, this displacement affected 1800 km2 of the surface, an area comparable in size to the deformation observed at caldera systems. The original displacement was followed in 2000 by a second, small-scale, neighbouring deformation located on the Lastarria volcano. We performed a detailed analysis of the volcanic structures at Lazufre and found relationships with the volcano deformations observed with InSAR. 
We infer that these observations are both likely to be the surface expression of a long-lived magmatic system evolving at depth. It is not yet clear whether Lazufre may trigger larger unrest or volcanic eruptions; however, the second deformation detected at Lastarria and the clear increase of the large-scale deformation rate make this an area of particular interest for closer continuous monitoring.}, language = {en} } @phdthesis{Zali2023, author = {Zali, Zahra}, title = {Volcanic tremor analysis based on advanced signal processing concepts including music information retrieval (MIR) strategies}, doi = {10.25932/publishup-61086}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-610866}, school = {Universit{\"a}t Potsdam}, pages = {viii, 95}, year = {2023}, abstract = {Volcanoes are one of the Earth's most dynamic zones and responsible for many changes in our planet. Volcano seismology aims to provide an understanding of the physical processes in volcanic systems and anticipate the style and timing of eruptions by analyzing the seismic records. Volcanic tremor signals are usually observed in the seismic records before or during volcanic eruptions. Their analysis contributes to evaluate the evolving volcanic activity and potentially predict eruptions. Years of continuous seismic monitoring now provide useful information for operational eruption forecasting. The continuously growing amount of seismic recordings, however, poses a challenge for analysis, information extraction, and interpretation, to support timely decision making during volcanic crises. Furthermore, the complexity of eruption processes and precursory activities makes the analysis challenging. A challenge in studying seismic signals of volcanic origin is the coexistence of transient signal swarms and long-lasting volcanic tremor signals. Separating transient events from volcanic tremors can, therefore, contribute to improving our understanding of the underlying physical processes. 
Some similar issues (data reduction, source separation, extraction, and classification) are addressed in the context of music information retrieval (MIR). The signal characteristics of acoustic and seismic recordings share a number of similarities. This thesis goes beyond classical signal analysis techniques usually employed in seismology by exploiting similarities of seismic and acoustic signals and building the information retrieval strategy on the expertise developed in the field of MIR. First, inspired by the idea of harmonic-percussive separation (HPS) in musical signal processing, I have developed a method to extract harmonic volcanic tremor signals and to detect transient events from seismic recordings. This provides a clean tremor signal suitable for tremor investigation along with a characteristic function suitable for earthquake detection. Second, using HPS algorithms, I have developed a noise reduction technique for seismic signals. This method is especially useful for denoising ocean bottom seismometers, which are highly contaminated by noise. The advantage of this method compared to other denoising techniques is that it doesn't introduce distortion to the broadband earthquake waveforms, which makes it reliable for different applications in passive seismological analysis. Third, to address the challenge of extracting information from high-dimensional data and investigating the complex eruptive phases, I have developed an advanced machine learning model that results in a comprehensive signal processing scheme for volcanic tremors. Using this method, seismic signatures of major eruptive phases can be automatically detected. This helps to provide a chronology of the volcanic system. Also, this model is capable of detecting weak precursory volcanic tremors prior to the eruption, which could be used as an indicator of imminent eruptive activity.
The extracted patterns of seismicity and their temporal variations finally provide an explanation for the transition mechanism between eruptive phases.}, language = {en} } @phdthesis{ArbabiBidgoli2003, author = {Arbabi-Bidgoli, Sepehr}, title = {Voids in the Large Scale Structure of the Universe}, pages = {108 S.}, year = {2003}, language = {en} } @phdthesis{Landes2005, author = {Landes, Nico}, title = {Vitamin E : elucidation of the mechanism of side chain degradation and gene regulatory functions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5222}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {For more than 80 years vitamin E has been in the focus of scientific research. Most of the progress concerning non-antioxidant functions, nevertheless, has only arisen from publications during the last decade. Most recently, the metabolic pathway of vitamin E has been almost completely elucidated. Vitamin E is metabolized by truncation of its side chain. The initial step of an omega-hydroxylation is carried out by cytochromes P450 (CYPs). This was evidenced by the inhibition of the metabolism of alpha-tocopherol by ketoconazole, an inhibitor of CYP3A expression, whereas rifampicin, an inducer of CYP3A expression, increased the metabolism of alpha-tocopherol. Although the degradation pathway is identical for all tocopherols and tocotrienols, there is a marked difference in the amount of the release of metabolites from the individual vitamin E forms in cell culture as well as in experimental animals and in humans. Recent findings not only proposed a CYP3A4-mediated degradation of vitamin E but also suggested an induction of the metabolizing enzymes by vitamin E itself. In order to investigate how vitamin E is able to influence the expression of metabolizing enzymes like CYP3A4, a pregnane X receptor (PXR)-based reporter gene assay was chosen.
PXR is a nuclear receptor which regulates the transcription of genes, e.g., CYP3A4, by binding to specific DNA response elements. And indeed, as shown here, vitamin E is able to influence the expression of CYP3A via PXR in an in vitro reporter gene assay. Tocotrienols showed the highest activity followed by delta- and alpha-tocopherol. An up-regulation of Cyp3a11 mRNA, the murine homolog of the human CYP3A4, could also be confirmed in an animal experiment. The PXR-mediated change in gene expression displayed the first evidence of a direct transcriptional activity of vitamin E. PXR regulates the expression of genes involved in xenobiotic detoxification, including oxidation, conjugation, and transport. CYP3A, e.g., is involved in the oxidative metabolism of numerous currently used drugs. This opens a discussion of possible side effects of vitamin E, but the extent to which supranutritional doses of vitamin E modulate these pathways in humans has yet to be determined. Additionally, as there is arising evidence that vitamin E's essentiality is more likely to be based on gene regulation than on antioxidant functions, it appeared necessary to further investigate the ability of vitamin E to influence gene expression. Mice were divided in three groups with diets (i) deficient in alpha-tocopherol, (ii) adequate in alpha-tocopherol supply and (iii) with a supranutritional dosage of alpha-tocopherol. After three months, half of each group was supplemented via a gastric tube with a supranutritional dosage of gamma-tocotrienol per day for 7 days. Livers were analyzed for vitamin E content and liver RNA was prepared for hybridization using cDNA array and oligonucleotide array technology. A significant change in gene expression was observed by alpha-tocopherol but not by gamma-tocotrienol and only using the oligonucleotide array but not using the cDNA array. 
The latter effect is most probably due to the limited number of genes represented on a cDNA array; the lacking gamma-tocotrienol effect is obviously caused by a rapid degradation, which might prevent bioefficacy of gamma-tocotrienol. Alpha-tocopherol changed the expression of various genes. The most striking observation was an up-regulation of genes, which code for proteins involved in synaptic transmitter release and calcium signal transduction. Synapsin, synaptotagmin, synaptophysin, synaptobrevin, RAB3A, complexin 1, Snap25, ionotropic glutamate receptors (alpha 2 and zeta 1) were shown to be up-regulated in the supranutritional group compared to the deficient group. The up-regulation of synaptic genes shown in this work is not only supported by the strong concentration of genes which all are involved in the process of vesicular transport of neurotransmitters, but was also confirmed by a recent publication. However, a confirmation by real time PCR in neuronal tissue like brain is now required to explain the effect of vitamin E on neurological functionality. The change in expression of genes coding for synaptic proteins by vitamin E is of principal interest thus far, since the only human disease directly originating from an inadequate vitamin E status is ataxia with isolated vitamin E deficiency.
Therefore, with the results of this work, an explanation for the observed neurological symptoms associated with vitamin E deficiency can be presented for the first time.}, subject = {Vitamin E}, language = {en} } @phdthesis{Schmiedchen2014, author = {Schmiedchen, Bettina}, title = {Vitamin D and its linkage between chronic kidney disease and cardiovascular integrity}, pages = {113}, year = {2014}, language = {en} } @phdthesis{Zona2024, author = {Zona, Carlotta Isabella}, title = {Visuo-linguistic integration for thematic-role assignment across speakers}, doi = {10.25932/publishup-63185}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-631857}, school = {Universit{\"a}t Potsdam}, pages = {147}, year = {2024}, abstract = {This dissertation examines the integration of incongruent visual-scene and morphological-case information ("cues") in building thematic-role representations of spoken relative clauses in German. Addressing the mutual influence of visual and linguistic processing, the Coordinated Interplay Account (CIA) describes a mechanism in two steps supporting visuo-linguistic integration (Knoeferle \& Crocker, 2006, Cog Sci). However, the outcomes and dynamics of integrating incongruent thematic-role representations from distinct sources have been investigated scarcely. Further, there is evidence that both second-language (L2) and older speakers may rely on non-syntactic cues relatively more than first-language (L1)/young speakers. Yet, the role of visual information for thematic-role comprehension has not been measured in L2 speakers, and only limitedly across the adult lifespan. Thematically unambiguous canonically ordered (subject-extracted) and noncanonically ordered (object-extracted) spoken relative clauses in German (see 1a-b) were presented in isolation and alongside visual scenes conveying either the same (congruent) or the opposite (incongruent) thematic relations as the sentence did. 1 a Das ist der Koch, der die Braut verfolgt. 
This is the.NOM cook who.NOM the.ACC bride follows This is the cook who is following the bride. b Das ist der Koch, den die Braut verfolgt. This is the.NOM cook whom.ACC the.NOM bride follows This is the cook whom the bride is following. The relative contribution of each cue to thematic-role representations was assessed with agent identification. Accuracy and latency data were collected post-sentence from a sample of L1 and L2 speakers (Zona \& Felser, 2023), and from a sample of L1 speakers from across the adult lifespan (Zona \& Reifegerste, under review). In addition, the moment-by-moment dynamics of thematic-role assignment were investigated with mouse tracking in a young L1 sample (Zona, under review). The following questions were addressed: (1) How do visual scenes influence thematic-role representations of canonical and noncanonical sentences? (2) How does reliance on visual-scene, case, and word-order cues vary in L1 and L2 speakers? (3) How does reliance on visual-scene, case, and word-order cues change across the lifespan? The results showed reliable effects of incongruence of visually and linguistically conveyed thematic relations on thematic-role representations. Incongruent (vs. congruent) scenes yielded slower and less accurate responses to agent-identification probes presented post-sentence. The recently inspected agent was considered as the most likely agent ~300ms after trial onset, and the convergence of visual scenes and word order enabled comprehenders to assign thematic roles predictively. L2 (vs. L1) participants relied more on word order overall. In response to noncanonical clauses presented with incongruent visual scenes, sensitivity to case predicted the size of incongruence effects better than L1-L2 grouping. These results suggest that the individual's ability to exploit specific cues might predict their weighting. 
Sensitivity to case was stable throughout the lifespan, while visual effects increased with increasing age and were modulated by individual interference-inhibition levels. Thus, age-related changes in comprehension may stem from stronger reliance on visually (vs. linguistically) conveyed meaning. These patterns represent evidence for a recent-role preference - i.e., a tendency to re-assign visually conveyed thematic roles to the same referents in temporally coordinated utterances. The findings (i) extend the generalizability of CIA predictions across stimuli, tasks, populations, and measures of interest, (ii) contribute to specifying the outcomes and mechanisms of detecting and indexing incongruent representations within the CIA, and (iii) speak to current efforts to understand the sources of variability in sentence comprehension.}, language = {en} } @phdthesis{Truemper2014, author = {Tr{\"u}mper, Jonas}, title = {Visualization techniques for the analysis of software behavior and related structures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72145}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Software maintenance encompasses any changes made to a software system after its initial deployment and is thereby one of the key phases in the typical software-engineering lifecycle. In software maintenance, we primarily need to understand structural and behavioral aspects, which are difficult to obtain, e.g., by code reading. Software analysis is therefore a vital tool for maintaining these systems: It provides - the preferably automated - means to extract and evaluate information from their artifacts such as software structure, runtime behavior, and related processes. However, such analysis typically results in massive raw data, so that even experienced engineers face difficulties directly examining, assessing, and understanding these data. 
Among other things, they require tools with which to explore the data if no clear question can be formulated beforehand. For this, software analysis and visualization provide its users with powerful interactive means. These enable the automation of tasks and, particularly, the acquisition of valuable and actionable insights into the raw data. For instance, one means for exploring runtime behavior is trace visualization. This thesis aims at extending and improving the tool set for visual software analysis by concentrating on several open challenges in the fields of dynamic and static analysis of software systems. This work develops a series of concepts and tools for the exploratory visualization of the respective data to support users in finding and retrieving information on the system artifacts concerned. This is a difficult task, due to the lack of appropriate visualization metaphors; in particular, the visualization of complex runtime behavior poses various questions and challenges of both a technical and conceptual nature. This work focuses on a set of visualization techniques for visually representing control-flow related aspects of software traces from shared-memory software systems: A trace-visualization concept based on icicle plots aids in understanding both single-threaded as well as multi-threaded runtime behavior on the function level. The concept's extensibility further allows the visualization and analysis of specific aspects of multi-threading such as synchronization, the correlation of such traces with data from static software analysis, and a comparison between traces. Moreover, complementary techniques for simultaneously analyzing system structures and the evolution of related attributes are proposed. 
These aim at facilitating long-term planning of software architecture and supporting management decisions in software projects by extensions to the circular-bundle-view technique: An extension to 3-dimensional space allows for the use of additional variables simultaneously; interaction techniques allow for the modification of structures in a visual manner. The concepts and techniques presented here are generic and, as such, can be applied beyond software analysis for the visualization of similarly structured data. The techniques' practicability is demonstrated by several qualitative studies using subject data from industry-scale software systems. The studies provide initial evidence that the techniques' application yields useful insights into the subject data and its interrelationships in several scenarios.}, language = {en} } @phdthesis{Bohnet2010, author = {Bohnet, Johannes}, title = {Visualization of Execution Traces and its Application to Software Maintenance}, address = {Potsdam}, pages = {150 S.}, year = {2010}, language = {en} } @phdthesis{Gandhi2022, author = {Gandhi, Nilima}, title = {Visionary leadership and job satisfaction}, doi = {10.25932/publishup-57269}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-572691}, school = {Universit{\"a}t Potsdam}, pages = {154}, year = {2022}, abstract = {Current business organizations want to be more efficient and constantly evolving to find ways to retain talent. It is well established that visionary leadership plays a vital role in organizational success and contributes to a better working environment. This study aims to determine the effect of visionary leadership on employees' perceived job satisfaction. Specifically, it investigates whether the mediators meaningfulness at work and commitment to the leader impact the relationship. I take support from job demand resource theory to explain the overarching model used in this study and broaden-and-build theory to leverage the use of mediators. 
To test the hypotheses, evidence was collected in a multi-source, time-lagged design field study of 95 leader-follower dyads. The data was collected in a three-wave study, each survey appearing after one month. Data on employee perception of visionary leadership was collected in T1, data for both mediators were collected in T2, and employee perception of job satisfaction was collected in T3. The findings display that meaningfulness at work and commitment to the leader play positive intervening roles (in the form of a chain) in the indirect influence of visionary leadership on employee perceptions regarding job satisfaction. This research offers contributions to literature and theory by first broadening the existing knowledge on the effects of visionary leadership on employees. Second, it contributes to the literature on constructs meaningfulness at work, commitment to the leader, and job satisfaction. Third, it sheds light on the mediation mechanism dealing with study variables in line with the proposed model. Fourth, it integrates two theories, job demand resource theory and broaden-and-build theory providing further evidence. Additionally, the study provides practical implications for business leaders and HR practitioners. Overall, my study discusses the potential of visionary leadership behavior to elevate employee outcomes. 
The study aligns with previous research and answers several calls for further research on visionary leadership, job satisfaction, and mediation mechanism with meaningfulness at work and commitment to the leader.}, language = {en} } @phdthesis{Marwecki2021, author = {Marwecki, Sebastian}, title = {Virtualizing physical space}, doi = {10.25932/publishup-52033}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-520332}, school = {Universit{\"a}t Potsdam}, pages = {xi, 128}, year = {2021}, abstract = {The true cost for virtual reality is not the hardware, but the physical space it requires, as a one-to-one mapping of physical space to virtual space allows for the most immersive way of navigating in virtual reality. Such "real-walking" requires physical space to be of the same size and the same shape of the virtual world represented. This generally prevents real-walking applications from running on any space that they were not designed for. To reduce virtual reality's demand for physical space, creators of such applications let users navigate virtual space by means of a treadmill, altered mappings of physical to virtual space, hand-held controllers, or gesture-based techniques. While all of these solutions succeed at reducing virtual reality's demand for physical space, none of them reach the same level of immersion that real-walking provides. Our approach is to virtualize physical space: instead of accessing physical space directly, we allow applications to express their need for space in an abstract way, which our software systems then map to the physical space available. We allow real-walking applications to run in spaces of different size, different shape, and in spaces containing different physical objects. We also allow users immersed in different virtual environments to share the same space. 
Our systems achieve this by using a tracking volume-independent representation of real-walking experiences — a graph structure that expresses the spatial and logical relationships between virtual locations, virtual elements contained within those locations, and user interactions with those elements. When run in a specific physical space, this graph representation is used to define a custom mapping of the elements of the virtual reality application and the physical space by parsing the graph using a constraint solver. To re-use space, our system splits virtual scenes and overlaps virtual geometry. The system derives this split by means of hierarchical clustering of our virtual objects as nodes of our bi-partite directed graph that represents the logical ordering of events of the experience. We let applications express their demands for physical space and use pre-emptive scheduling between applications to have them share space. We present several application examples enabled by our system. They all enable real-walking, despite being mapped to physical spaces of different size and shape, containing different physical objects or other users. We see substantial real-world impact in our systems. Today's commercial virtual reality applications are generally designed to be navigated using less immersive solutions, as this allows them to be operated on any tracking volume. While this is a commercial necessity for the developers, it misses out on the higher immersion offered by real-walking. We let developers overcome this hurdle by allowing experiences to bring real-walking to any tracking volume, thus potentially bringing real-walking to consumers. Die eigentlichen Kosten f{\"u}r Virtual Reality Anwendungen entstehen nicht prim{\"a}r durch die erforderliche Hardware, sondern durch die Nutzung von physischem Raum, da die eins-zu-eins Abbildung von physischem auf virtuellen Raum die immersivste Art von Navigation erm{\"o}glicht.
Dieses als „Real-Walking" bezeichnete Erlebnis erfordert hinsichtlich Gr{\"o}ße und Form eine Entsprechung von physischem Raum und virtueller Welt. Resultierend daraus k{\"o}nnen Real-Walking-Anwendungen nicht an Orten angewandt werden, f{\"u}r die sie nicht entwickelt wurden. Um den Bedarf an physischem Raum zu reduzieren, lassen Entwickler von Virtual Reality-Anwendungen ihre Nutzer auf verschiedene Arten navigieren, etwa mit Hilfe eines Laufbandes, verf{\"a}lschten Abbildungen von physischem zu virtuellem Raum, Handheld-Controllern oder gestenbasierten Techniken. All diese L{\"o}sungen reduzieren zwar den Bedarf an physischem Raum, erreichen jedoch nicht denselben Grad an Immersion, den Real-Walking bietet. Unser Ansatz zielt darauf, physischen Raum zu virtualisieren: Anstatt auf den physischen Raum direkt zuzugreifen, lassen wir Anwendungen ihren Raumbedarf auf abstrakte Weise formulieren, den unsere Softwaresysteme anschließend auf den verf{\"u}gbaren physischen Raum abbilden. Dadurch erm{\"o}glichen wir Real-Walking-Anwendungen R{\"a}ume mit unterschiedlichen Gr{\"o}ßen und Formen und R{\"a}ume, die unterschiedliche physische Objekte enthalten, zu nutzen. Wir erm{\"o}glichen auch die zeitgleiche Nutzung desselben Raums durch mehrere Nutzer verschiedener Real-Walking-Anwendungen. Unsere Systeme erreichen dieses Resultat durch eine Repr{\"a}sentation von Real-Walking-Erfahrungen, die unabh{\"a}ngig sind vom gegebenen Trackingvolumen - eine Graphenstruktur, die die r{\"a}umlichen und logischen Beziehungen zwischen virtuellen Orten, den virtuellen Elementen innerhalb dieser Orte, und Benutzerinteraktionen mit diesen Elementen, ausdr{\"u}ckt. Bei der Instanziierung der Anwendung in einem bestimmten physischen Raum wird diese Graphenstruktur und ein Constraint Solver verwendet, um eine individuelle Abbildung der virtuellen Elemente auf den physischen Raum zu erreichen. 
Zur mehrmaligen Verwendung des Raumes teilt unser System virtuelle Szenen und {\"u}berlagert virtuelle Geometrie. Das System leitet diese Aufteilung anhand eines hierarchischen Clusterings unserer virtuellen Objekte ab, die als Knoten unseres bi-partiten, gerichteten Graphen die logische Reihenfolge aller Ereignisse repr{\"a}sentieren. Wir verwenden pr{\"a}emptives Scheduling zwischen den Anwendungen f{\"u}r die zeitgleiche Nutzung von physischem Raum. Wir stellen mehrere Anwendungsbeispiele vor, die Real-Walking erm{\"o}glichen - in physischen R{\"a}umen mit unterschiedlicher Gr{\"o}ße und Form, die verschiedene physische Objekte oder weitere Nutzer enthalten. Wir sehen in unseren Systemen substantielles Potential. Heutige Virtual Reality-Anwendungen sind bisher zwar so konzipiert, dass sie auf einem beliebigen Trackingvolumen betrieben werden k{\"o}nnen, aber aus kommerzieller Notwendigkeit kein Real-Walking beinhalten. Damit entgeht Entwicklern die Gelegenheit eine h{\"o}here Immersion herzustellen. Indem wir es erm{\"o}glichen, Real-Walking auf jedes Trackingvolumen zu bringen, geben wir Entwicklern die M{\"o}glichkeit Real-Walking zu ihren Nutzern zu bringen.}, language = {en} } @phdthesis{Shigeyama2024, author = {Shigeyama, Jotaro}, title = {Virtual reality at 1:1 scale in small physical spaces}, doi = {10.25932/publishup-64900}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-649000}, school = {Universit{\"a}t Potsdam}, pages = {115}, year = {2024}, abstract = {Virtual Reality (VR) leads to the highest level of immersion if presented using a 1:1 mapping of virtual space to physical space—also known as real walking. The advent of inexpensive consumer virtual reality (VR) headsets, all capable of running inside-out position tracking, has brought VR to the home. However, many VR applications do not feature full real walking, but instead, feature a less immersive space-saving technique known as instant teleportation. 
Given that only 0.3\% of home users run their VR experiences in spaces more than 4m^2, the most likely explanation is the lack of the physical space required for meaningful use of real walking. In this thesis, we investigate how to overcome this hurdle. We demonstrate how to run 1:1-mapped VR experiences in small physical spaces and we explore the trade-off between space and immersion. (1) We start with a space limit of 15cm. We present DualPanto, a device that allows (blind) VR users to experience the virtual world from a 1:1 mapped bird's eye perspective—by leveraging haptics. (2) We then relax our space constraints to 50cm, which is what seated users (e.g., on an airplane or train ride) have at their disposal. We leverage the space to represent a standing user in 1:1 mapping, while only compressing the user's arm movement. We demonstrate our 4 prototype VirtualArms at the example of VR experiences limited to arm movement, such as boxing. (3) Finally, we relax our space constraints further to 3m^2 of walkable space, which is what 75\% of home users have access to. As well-established in the literature, we implement real walking with the help of portals, also known as "impossible spaces". While impossible spaces on such dramatic space constraints tend to degenerate into incomprehensible mazes (as demonstrated, for example, by "TraVRsal"), we propose plausibleSpaces: presenting meaningful virtual worlds by adapting various visual elements to impossible spaces. Our techniques push the boundary of spatially meaningful VR interaction in various small spaces. 
We see further future challenges for new design approaches to immersive VR experiences for the smallest physical spaces in our daily life.}, language = {en} } @phdthesis{Berg2013, author = {Berg, Gregor}, title = {Virtual prototypes for the model-based elicitation and validation of collaborative scenarios}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69729}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Requirements engineers have to elicit, document, and validate how stakeholders act and interact to achieve their common goals in collaborative scenarios. Only after gathering all information concerning who interacts with whom to do what and why, can a software system be designed and realized which supports the stakeholders to do their work. To capture and structure requirements of different (groups of) stakeholders, scenario-based approaches have been widely used and investigated. Still, the elicitation and validation of requirements covering collaborative scenarios remains complicated, since the required information is highly intertwined, fragmented, and distributed over several stakeholders. Hence, it can only be elicited and validated collaboratively. In times of globally distributed companies, scheduling and conducting workshops with groups of stakeholders is usually not feasible due to budget and time constraints. Talking to individual stakeholders, on the other hand, is feasible but leads to fragmented and incomplete stakeholder scenarios. Going back and forth between different individual stakeholders to resolve this fragmentation and explore uncovered alternatives is an error-prone, time-consuming, and expensive task for the requirements engineers. While formal modeling methods can be employed to automatically check and ensure consistency of stakeholder scenarios, such methods introduce additional overhead since their formal notations have to be explained in each interaction between stakeholders and requirements engineers. 
Tangible prototypes as they are used in other disciplines such as design, on the other hand, allow designers to feasibly validate and iterate concepts and requirements with stakeholders. This thesis proposes a model-based approach for prototyping formal behavioral specifications of stakeholders who are involved in collaborative scenarios. By simulating and animating such specifications in a remote domain-specific visualization, stakeholders can experience and validate the scenarios captured so far, i.e., how other stakeholders act and react. This interactive scenario simulation is referred to as a model-based virtual prototype. Moreover, through observing how stakeholders interact with a virtual prototype of their collaborative scenarios, formal behavioral specifications can be automatically derived which complete the otherwise fragmented scenarios. This, in turn, enables requirements engineers to elicit and validate collaborative scenarios in individual stakeholder sessions - decoupled, since stakeholders can participate remotely and are not forced to be available for a joint session at the same time. This thesis discusses and evaluates the feasibility, understandability, and modifiability of model-based virtual prototypes. Similarly to how physical prototypes are perceived, the presented approach brings behavioral models closer to being tangible for stakeholders and, moreover, combines the advantages of joint stakeholder sessions and decoupled sessions.}, language = {en} } @phdthesis{Elsaid2022, author = {Elsaid, Mohamed Esameldin Mohamed}, title = {Virtual machines live migration cost modeling and prediction}, doi = {10.25932/publishup-54001}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-540013}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 107}, year = {2022}, abstract = {Dynamic resource management is an essential requirement for private and public cloud computing environments. 
With dynamic resource management, the physical resources assignment to the cloud virtual resources depends on the actual need of the applications or the running services, which enhances the cloud physical resources utilization and reduces the offered services cost. In addition, the virtual resources can be moved across different physical resources in the cloud environment without an obvious impact on the running applications or services production. This means that the availability of the running services and applications in the cloud is independent of the hardware resources including the servers, switches and storage failures. This increases the reliability of using cloud services compared to the classical data-centers environments. In this thesis we briefly discuss the dynamic resource management topic and then deeply focus on live migration as the definition of the compute resource dynamic management. Live migration is a commonly used and an essential feature in cloud and virtual data-centers environments. Cloud computing load balance, power saving and fault tolerance features are all dependent on live migration to optimize the virtual and physical resources usage. As we will discuss in this thesis, live migration shows many benefits to cloud and virtual data-centers environments, however the cost of live migration cannot be ignored. Live migration cost includes the migration time, downtime, network overhead, power consumption increases and CPU overhead. IT admins run virtual machines live migrations without an idea about the migration cost. So, resources bottlenecks, higher migration cost and migration failures might happen. The first problem that we discuss in this thesis is how to model the cost of the virtual machines live migration. Secondly, we investigate how to make use of machine learning techniques to help the cloud admins get an estimation of this cost before initiating the migration for one or multiple virtual machines. 
Also, we discuss the optimal timing for a specific virtual machine before live migration to another server. Finally, we propose practical solutions that can be used by the cloud admins to be integrated with the cloud administration portals to answer the raised research questions above. Our research methodology to achieve the project objectives is to propose empirical models based on using VMware test-beds with different benchmarks tools. Then we make use of the machine learning techniques to propose a prediction approach for virtual machines live migration cost. Timing optimization for live migration is also proposed in this thesis based on using the cost prediction and data-centers network utilization prediction. Live migration with persistent memory clusters is also discussed at the end of the thesis. The cost prediction and timing optimization techniques proposed in this thesis could be practically integrated with VMware vSphere cluster portal such that the IT admins can now use the cost prediction feature and timing optimization option before proceeding with a virtual machine live migration. Testing results show that our proposed approach for VMs live migration cost prediction shows acceptable results with less than 20\% prediction error and can be easily implemented and integrated with VMware vSphere as an example of a commonly used resource management portal for virtual data-centers and private cloud environments. The results show that using our proposed VMs migration timing optimization technique also could save up to 51\% of the VMs migration time for memory intensive workloads and up to 27\% of the migration time for network intensive workloads. This timing optimization technique can be useful for network admins to save migration time while utilizing a higher network rate and higher probability of success. At the end of this thesis, we discuss the persistent memory technology as a new trend in servers memory technology. 
Persistent memory modes of operation and configurations are discussed in detail to explain how live migration works between servers with different memory configuration set up. Then, we build a VMware cluster with persistent memory inside server and also with DRAM only servers to show the live migration cost difference between the VMs with DRAM only versus the VMs with persistent memory inside.}, language = {en} } @phdthesis{Xiong2018, author = {Xiong, Tao}, title = {Vibrationally resolved absorption, emission, resonance Raman and photoelectron spectra of selected organic molecules, associated radicals and cations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418105}, school = {Universit{\"a}t Potsdam}, pages = {iv, 100}, year = {2018}, abstract = {Time-dependent correlation function based methods to study optical spectroscopy involving electronic transitions can be traced back to the work of Heller and coworkers. This intuitive methodology can be expected to be computationally efficient and is applied in the current work to study the vibronic absorption, emission, and resonance Raman spectra of selected organic molecules. Besides, the "non-standard" application of this approach to photoionization processes is also explored. The application section consists of four chapters as described below. In Chapter 4, the molar absorptivities and vibronic absorption/emission spectra of perylene and several of its N-substituted derivatives are investigated. By systematically varying the number and position of N atoms, it is shown that the presence of nitrogen heteroatoms has a negligible effect on the molecular structure and geometric distortions upon electronic transitions, while spectral properties are more sensitive: In particular the number of N atoms is important while their position is less decisive. Thus, N-substitution can be used to fine-tune the optical properties of perylene-based molecules. 
In Chapter 5, the same methods are applied to study the vibronic absorption/emission and resonance Raman spectra of a newly synthesized donor-acceptor type molecule. The simulated absorption/emission spectra agree fairly well with experimental data, with discrepancies being attributed to solvent effects. Possible modes which may dominate the fine-structure in the vibronic spectra are proposed by analyzing the correlation function with the aid of Raman and resonance Raman spectra. In the next two chapters, besides the above types of spectra, the methods are extended to study photoelectron spectra of several small diamondoid-related systems (molecules, radicals, and cations). Comparison of the photoelectron spectra with available experimental data suggests that the correlation function based approach can describe ionization processes reasonably well. Some of these systems, cationic species in particular, exhibit somewhat peculiar optical behavior, which presents them as possible candidates for functional devices. Correlation function based methods in a more general sense can be very versatile. In fact, besides the above radiative processes, formulas for non-radiative processes such as internal conversion have been derived in literature. Further implementation of the available methods is among our next goals.}, language = {en} } @phdthesis{Vazhappilly2008, author = {Vazhappilly, Tijo Joseph}, title = {Vibrationally enhanced associative photodesorption of H2 (D2) from Ru(0001) : quantum and classical approaches}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-19056}, school = {Universit{\"a}t Potsdam}, year = {2008}, abstract = {Nowadays, reactions on surfaces are attaining great scientific interest because of their diverse applications. Some well known examples are production of ammonia on metal surfaces for fertilizers and reduction of poisonous gases from automobiles using catalytic converters. 
More recently, also photoinduced reactions at surfaces, useful, \textit{e.g.}, for photocatalysis, were studied in detail. Often, very short laser pulses are used for this purpose. Some of these reactions are occurring on femtosecond (1 fs=\$10^{-15}\$ s) time scales since the motion of atoms (which leads to bond breaking and new bond formation) belongs to this time range. This thesis investigates the femtosecond laser induced associative photodesorption of hydrogen, H\$_2\$, and deuterium, D\$_2\$, from a ruthenium metal surface. Many interesting features of this reaction were explored by experimentalists: (i) a huge isotope effect in the desorption probability of H\$_2\$ and D\$_2\$, (ii) the desorption yield increases non-linearly with the applied visible (vis) laser fluence, and (iii) unequal energy partitioning to different degrees of freedom. These peculiarities are due to the fact that an ultrashort vis pulse creates hot electrons in the metal. These hot electrons then transfer energy to adsorbate vibrations which leads to desorption. In fact, adsorbate vibrations are strongly coupled to metal electrons, \textit{i.e.}, through non-adiabatic couplings. This means that, surfaces introduce additional channels for energy exchange which makes the control of surface reactions more difficult than the control of reactions in the gas phase. In fact, the quantum yield of surface photochemical reactions is often notoriously small. One of the goals of the present thesis is to suggest, on the basis of theoretical simulations, strategies to control/enhance the photodesorption yield of H\$_2\$ and D\$_2\$ from Ru(0001). For this purpose, we suggest a \textit{hybrid scheme} to control the reaction, where the adsorbate vibrations are initially excited by an infrared (IR) pulse, prior to the vis pulse. Both \textit{adiabatic} and \textit{non-adiabatic} representations for photoinduced desorption problems are employed here. 
The \textit{adiabatic} representation is realized within the classical picture using Molecular Dynamics (MD) with electronic frictions. In a quantum mechanical description, \textit{non-adiabatic} representations are employed within open-system density matrix theory. The time evolution of the desorption process is studied using a two-mode reduced dimensionality model with one vibrational coordinate and one translational coordinate of the adsorbate. The ground and excited electronic state potentials, and dipole function for the IR excitation are taken from first principles. The IR-driven vibrational excitation of adsorbate modes with moderate efficiency is achieved by (modified) \$\pi\$-pulses and/or optimal control theory. The fluence dependence of the desorption reaction is computed by including the electronic temperature of the metal calculated from the two-temperature model. Here, our theoretical results show a good agreement with experimental and previous theoretical findings. We then employed the IR+vis strategy in both models. Here, we found that vibrational excitation indeed promotes the desorption of hydrogen and deuterium. 
To summarize, we conclude that photocontrol of this surface reaction can be achieved by our IR+vis scheme.}, language = {en} } @phdthesis{Hoffmann2013, author = {Hoffmann, Holger}, title = {Vertical structures induced by embedded propeller moonlets in Saturn's rings}, address = {Potsdam}, pages = {99 S.}, year = {2013}, language = {en} } @phdthesis{RojasCarillo2012, author = {Rojas Carillo, Oscar Mario}, title = {Versatile uses of halogen-free Ionic Liquids for the formulation of non-aqueous microemulsion and synthesis of gold nanoparticles}, address = {Potsdam}, pages = {93, XXVI S.}, year = {2012}, language = {en} } @phdthesis{Kanehira2023, author = {Kanehira, Yuya}, title = {Versatile DNA origami based SERS substrates for spectroscopic applications}, pages = {115}, year = {2023}, language = {en} } @phdthesis{Giuri2023, author = {Giuri, Chiara}, title = {VERITAS Dark Matter search in dwarf spheroidal galaxies: an extended analysis}, doi = {10.25932/publishup-57586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-575869}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 227}, year = {2023}, abstract = {In the last century, several astronomical measurements have supported that a significant percentage (about 22\%) of the total mass of the Universe, on galactic and extragalactic scales, is composed of a mysterious "dark" matter (DM). DM does not interact with the electromagnetic force; in other words it does not reflect, absorb or emit light. It is possible that DM particles are weakly interacting massive particles (WIMPs) that can annihilate (or decay) into Standard Model (SM) particles, and modern very- high-energy (VHE; > 100 GeV) instruments such as imaging atmospheric Cherenkov telescopes (IACTs) can play an important role in constraining the main properties of such DM particles, by detecting these products. 
One of the most privileged targets in which to look for a DM signal are dwarf spheroidal galaxies (dSphs), as they are expected to be highly DM-dominated objects with a clean, gas-free environment. Some dSphs could be considered as extended sources, considering the angular resolution of IACTs; their angular resolution is adequate to detect extended emission from dSphs. For this reason, we performed an extended-source analysis, by taking into account in the unbinned maximum likelihood estimation both the energy and the angular extension dependency of observed events. The goal was to set more constrained upper limits on the velocity-averaged annihilation cross-section of WIMPs with VERITAS data. VERITAS is an array of four IACTs, able to detect {$\gamma$}-ray photons ranging between 100 GeV and 30 TeV. The results of this extended analysis were compared against the traditional spectral analysis. We found that a 2D analysis may lead to more constrained results, depending on the DM mass, channel, and source. Moreover, in this thesis, the results of a multi-instrument project are presented too. 
Its goal was to combine already published data of 20 dSphs from five different experiments, such as Fermi-LAT, MAGIC, H.E.S.S., VERITAS and HAWC, in order to set upper limits on the WIMP annihilation cross-section in the widest mass range ever reported.}, language = {en} } @phdthesis{Yang2017, author = {Yang, Lei}, title = {Verification of systemic mRNAs mobility and mobile functions}, school = {Universit{\"a}t Potsdam}, pages = {125}, year = {2017}, language = {en} } @phdthesis{Dyck2020, author = {Dyck, Johannes}, title = {Verification of graph transformation systems with k-inductive invariants}, doi = {10.25932/publishup-44274}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-442742}, school = {Universit{\"a}t Potsdam}, pages = {X, 364}, year = {2020}, abstract = {With rising complexity of today's software and hardware systems and the hypothesized increase in autonomous, intelligent, and self-* systems, developing correct systems remains an important challenge. Testing, although an important part of the development and maintenance process, cannot usually establish the definite correctness of a software or hardware system - especially when systems have arbitrarily large or infinite state spaces or an infinite number of initial states. This is where formal verification comes in: given a representation of the system in question in a formal framework, verification approaches and tools can be used to establish the system's adherence to its similarly formalized specification, and to complement testing. One such formal framework is the field of graphs and graph transformation systems. Both are powerful formalisms with well-established foundations and ongoing research that can be used to describe complex hardware or software systems with varying degrees of abstraction. 
Since their inception in the 1970s, graph transformation systems have continuously evolved; related research spans extensions of expressive power, graph algorithms, and their implementation, application scenarios, or verification approaches, to name just a few topics. This thesis focuses on a verification approach for graph transformation systems called k-inductive invariant checking, which is an extension of previous work on 1-inductive invariant checking. Instead of exhaustively computing a system's state space, which is a common approach in model checking, 1-inductive invariant checking symbolically analyzes graph transformation rules - i.e. system behavior - in order to draw conclusions with respect to the validity of graph constraints in the system's state space. The approach is based on an inductive argument: if a system's initial state satisfies a graph constraint and if all rules preserve that constraint's validity, we can conclude the constraint's validity in the system's entire state space - without having to compute it. However, inductive invariant checking also comes with a specific drawback: the locality of graph transformation rules leads to a lack of context information during the symbolic analysis of potential rule applications. This thesis argues that this lack of context can be partly addressed by using k-induction instead of 1-induction. A k-inductive invariant is a graph constraint whose validity in a path of k-1 rule applications implies its validity after any subsequent rule application - as opposed to a 1-inductive invariant where only one rule application is taken into account. Considering a path of transformations then accumulates more context of the graph rules' applications. As such, this thesis extends existing research and implementation on 1-inductive invariant checking for graph transformation systems to k-induction. 
In addition, it proposes a technique to perform the base case of the inductive argument in a symbolic fashion, which allows verification of systems with an infinite set of initial states. Both k-inductive invariant checking and its base case are described in formal terms. Based on that, this thesis formulates theorems and constructions to apply this general verification approach for typed graph transformation systems and nested graph constraints - and to formally prove the approach's correctness. Since unrestricted graph constraints may lead to non-termination or impracticably high execution times given a hypothetical implementation, this thesis also presents a restricted verification approach, which limits the form of graph transformation systems and graph constraints. It is formalized, proven correct, and its procedures terminate by construction. This restricted approach has been implemented in an automated tool and has been evaluated with respect to its applicability to test cases, its performance, and its degree of completeness.}, language = {en} } @phdthesis{Niemeyer2016, author = {Niemeyer, Bastian}, title = {Vegetation reconstruction and assessment of plant diversity at the treeline ecotone in northern Siberia}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2016}, language = {en} } @phdthesis{Zimmermann2017, author = {Zimmermann, Heike Hildegard}, title = {Vegetation changes and treeline dynamics in northern Siberia since the last interglacial revealed by sedimentary ancient DNA metabarcoding and organelle genome assembly of modern larches}, school = {Universit{\"a}t Potsdam}, pages = {138}, year = {2017}, language = {en} } @phdthesis{Tian2014, author = {Tian, Fang}, title = {Vegetation and environmental changes on millennial, centennial and decadal time-scales in central Mongolia and their driving forces}, school = {Universit{\"a}t Potsdam}, pages = {139}, year = {2014}, language = {en} } @phdthesis{Cao2014, author = {Cao, Xianyong}, title = 
{Vegetation and climate change in eastern continental Asia during the last 22 ka inferred from pollen data synthesis}, pages = {156}, year = {2014}, language = {en} } @phdthesis{Schlolaut2013, author = {Schlolaut, Gordon}, title = {Varve and event layer chronology of Lake Suigetsu (Japan) back to 40 kyr BP and contribution to the international consensus atmospheric radiocarbon calibration curve}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69096}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {The main intention of the PhD project was to create a varve chronology for the Suigetsu Varves 2006' (SG06) composite profile from Lake Suigetsu (Japan) by thin section microscopy. The chronology was not only to provide an age-scale for the various palaeo-environmental proxies analysed within the SG06 project, but also and foremost to contribute, in combination with the SG06 14C chronology, to the international atmospheric radiocarbon calibration curve (IntCal). The SG06 14C data are based on terrestrial leaf fossils and therefore record atmospheric 14C values directly, avoiding the corrections necessary for the reservoir ages of the marine datasets, which are currently used beyond the tree-ring limit in the IntCal09 dataset (Reimer et al., 2009). The SG06 project is a follow up of the SG93 project (Kitagawa \& van der Plicht, 2000), which aimed to produce an atmospheric calibration dataset, too, but suffered from incomplete core recovery and varve count uncertainties. For the SG06 project the complete Lake Suigetsu sediment sequence was recovered continuously, leaving the task to produce an improved varve count. Varve counting was carried out using a dual method approach utilizing thin section microscopy and micro X-Ray Fluorescence (µXRF). The latter was carried out by Dr. Michael Marshall in cooperation with the PhD candidate. The varve count covers 19 m of composite core, which corresponds to the time frame from ≈10 to ≈40 kyr BP. 
The count result showed that seasonal layers did not form in every year. Hence, the varve counts from either method were incomplete. This rather common problem in varve counting is usually solved by manual varve interpolation. But manual interpolation often suffers from subjectivity. Furthermore, sedimentation rate estimates (which are the basis for interpolation) are generally derived from neighbouring, well varved intervals. This assumes that the sedimentation rates in neighbouring intervals are identical to those in the incompletely varved section, which is not necessarily true. To overcome these problems a novel interpolation method was devised. It is computer based and automated (i.e. avoids subjectivity and ensures reproducibility) and derives the sedimentation rate estimate directly from the incompletely varved interval by statistically analysing distances between successive seasonal layers. Therefore, the interpolation approach is also suitable for sediments which do not contain well varved intervals. Another benefit of the novel method is that it provides objective interpolation error estimates. Interpolation results from the two counting methods were combined and the resulting chronology compared to the 14C chronology from Lake Suigetsu, calibrated with the tree-ring derived section of IntCal09 (which is considered accurate). The varve and 14C chronology showed a high degree of similarity, demonstrating that the novel interpolation method produces reliable results. In order to constrain the uncertainties of the varve chronology, especially the cumulative error estimates, U-Th dated speleothem data were used by linking the low frequency 14C signal of Lake Suigetsu and the speleothems, increasing the accuracy and precision of the Suigetsu calibration dataset. The resulting chronology also represents the age-scale for the various palaeo-environmental proxies analysed in the SG06 project. 
One proxy analysed within the PhD project was the distribution of event layers, which are often representatives of past floods or earthquakes. A detailed microfacies analysis revealed three different types of event layers, two of which are described here for the first time for the Suigetsu sediment. The types are: matrix supported layers produced as a result of subaqueous slope failures, turbidites produced as a result of landslides and turbidites produced as a result of flood events. The former two are likely to have been triggered by earthquakes. The vast majority of event layers was related to floods (362 out of 369), which allowed the construction of a respective chronology for the last 40 kyr. Flood frequencies were highly variable, reaching their greatest values during the global sea level low-stand of the Glacial, their lowest values during Heinrich Event 1. Typhoons affecting the region represent the most likely control on the flood frequency, especially during the Glacial. However, local, non-climatic controls are also suggested by the data. In summary, the work presented here expands and revises knowledge on the Lake Suigetsu sediment and enables the construction of a far more precise varve chronology. The 14C calibration dataset is the first such derived from lacustrine sediments to be included into the (next) IntCal dataset. References: Kitagawa \& van der Plicht, 2000, Radiocarbon, Vol 42(3), 370-381 Reimer et al., 2009, Radiocarbon, Vol 51(4), 1111-1150}, language = {en} } @phdthesis{Gholamrezaie2021, author = {Gholamrezaie, Ershad}, title = {Variations of lithospheric strength in different tectonic settings}, doi = {10.25932/publishup-51146}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-511467}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 147}, year = {2021}, abstract = {Rheology describes the flow of matter under the influence of stress, and - related to solids- it investigates how solids subjected to stresses deform. 
As the deformation of the Earth's outer layers, the lithosphere and the crust, is a major focus of rheological studies, rheology in the geosciences describes how strain evolves in rocks of variable composition and temperature under tectonic stresses. It is here where deformation processes shape the form of ocean basins and mountain belts that ultimately result from the complex interplay between lithospheric plate motion and the susceptibility of rocks to the influence of plate-tectonic forces. A rigorous study of the strength of the lithosphere and deformation phenomena thus requires in-depth studies of the rheological characteristics of the involved materials and the temporal framework of deformation processes. This dissertation aims at analyzing the influence of the physical configuration of the lithosphere on the present-day thermal field and the overall rheological characteristics of the lithosphere to better understand variable expressions in the formation of passive continental margins and the behavior of strike-slip fault zones. The main methodological approach chosen is to estimate the present-day thermal field and the strength of the lithosphere by 3-D numerical modeling. The distribution of rock properties is provided by 3-D structural models, which are used as the basis for the thermal and rheological modeling. The structural models are based on geophysical and geological data integration, additionally constrained by 3-D density modeling. More specifically, to decipher the thermal and rheological characteristics of the lithosphere in both oceanic and continental domains, sedimentary basins in the Sea of Marmara (continental transform setting), the SW African passive margin (old oceanic crust), and the Norwegian passive margin (young oceanic crust) were selected for this study. 
The Sea of Marmara, in northwestern Turkey, is located where the dextral North Anatolian Fault zone (NAFZ) accommodates the westward escape of the Anatolian Plate toward the Aegean. Geophysical observations indicate that the crust is heterogeneous beneath the Marmara basin, but a detailed characterization of the lateral crustal heterogeneities is presented for the first time in this study. Here, I use different gravity datasets and the general non-uniqueness in potential field modeling, to propose three possible end-member scenarios of crustal configuration. The models suggest that pronounced gravitational anomalies in the basin originate from significant density heterogeneities within the crust. The rheological modeling reveals that associated variations in lithospheric strength control the mechanical segmentation of the NAFZ. Importantly, a strong crust that is mechanically coupled to the upper mantle spatially correlates with aseismic patches where the fault bends and changes its strike in response to the presence of high-density lower crustal bodies. Between the bends, mechanically weaker crustal domains that are decoupled from the mantle are characterized by creep. For the passive margins of SW Africa and Norway, two previously published 3-D conductive and lithospheric-scale thermal models were analyzed. These 3-D models differentiate various sedimentary, crustal, and mantle units and integrate different geophysical data, such as seismic observations and the gravity field. Here, the rheological modeling suggests that the present-day lithospheric strength across the oceanic domain is ultimately affected by the age and past thermal and tectonic processes as well as the depth of the thermal lithosphere-asthenosphere boundary, while the configuration of the crystalline crust dominantly controls the rheological behavior of the lithosphere beneath the continental domains of both passive margins. 
The thermal and rheological models show that the variations of lithospheric strength are fundamentally influenced by the temperature distribution within the lithosphere. Moreover, as the composition of the lithosphere significantly influences the present-day thermal field, it therefore also affects the rheological characteristics of the lithosphere. Overall my studies add to our understanding of regional tectonic deformation processes and the long-term behavior of sedimentary basins; they confirm other analyses that have pointed out that crustal heterogeneities in the continents result in diverse lithospheric thermal characteristics, which in turn results in higher complexity and variations of rheological behavior compared to oceanic domains with a thinner, more homogeneous crust.}, language = {en} } @phdthesis{Czech2013, author = {Czech, Andreas}, title = {Variations in the tRNA pool of mammalian cells upon differentiation and oxidative stress}, address = {Potsdam}, pages = {98 S.}, year = {2013}, language = {en} } @phdthesis{Hanisch2011, author = {Hanisch, Florian}, title = {Variational problems on supermanifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59757}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {In this thesis, we discuss the formulation of variational problems on supermanifolds. Supermanifolds incorporate bosonic as well as fermionic degrees of freedom. Fermionic fields take values in the odd part of an appropriate Grassmann algebra and are thus showing an anticommutative behaviour. However, a systematic treatment of these Grassmann parameters requires a description of spaces as functors, e.g. from the category of Grassmann algberas into the category of sets (or topological spaces, manifolds). After an introduction to the general ideas of this approach, we use it to give a description of the resulting supermanifolds of fields/maps. 
We show that each map is uniquely characterized by a family of differential operators of appropriate order. Moreover, we demonstrate that each of these maps is uniquely characterized by its component fields, i.e. by the coefficients in a Taylor expansion w.r.t. the odd coordinates. In general, the component fields are only locally defined. We present a way to circumvent this limitation. In fact, by enlarging the supermanifold in question, we show that it is possible to work with globally defined components. We eventually use this formalism to study variational problems. More precisely, we study a super version of the geodesic and a generalization of harmonic maps to supermanifolds. Equations of motion are derived from an energy functional and we show how to decompose them into components. Finally, in special cases, we can prove the existence of critical points by reducing the problem to equations from ordinary geometric analysis. After solving these component equations, it is possible to show that their solutions give rise to critical points in the functor spaces of fields.}, language = {en} } @phdthesis{Lindinger2023, author = {Lindinger, Jakob}, title = {Variational inference for composite Gaussian process models}, doi = {10.25932/publishup-60444}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-604441}, school = {Universit{\"a}t Potsdam}, pages = {xi, 122}, year = {2023}, abstract = {Most machine learning methods provide only point estimates when being queried to predict on new data. This is problematic when the data is corrupted by noise, e.g. from imperfect measurements, or when the queried data point is very different to the data that the machine learning model has been trained with. Probabilistic modelling in machine learning naturally equips predictions with corresponding uncertainty estimates which allows a practitioner to incorporate information about measurement noise into the modelling process and to know when not to trust the predictions. 
A well-understood, flexible probabilistic framework is provided by Gaussian processes that are ideal as building blocks of probabilistic models. They lend themselves naturally to the problem of regression, i.e., being given a set of inputs and corresponding observations and then predicting likely observations for new unseen inputs, and can also be adapted to many more machine learning tasks. However, exactly inferring the optimal parameters of such a Gaussian process model (in a computationally tractable manner) is only possible for regression tasks in small data regimes. Otherwise, approximate inference methods are needed, the most prominent of which is variational inference. In this dissertation we study models that are composed of Gaussian processes embedded in other models in order to make those more flexible and/or probabilistic. The first example are deep Gaussian processes which can be thought of as a small network of Gaussian processes and which can be employed for flexible regression. The second model class that we study are Gaussian process state-space models. These can be used for time-series modelling, i.e., the task of being given a stream of data ordered by time and then predicting future observations. For both model classes the state-of-the-art approaches offer a trade-off between expressive models and computational properties (e.g. speed or convergence properties) and mostly employ variational inference. Our goal is to improve inference in both models by first getting a deep understanding of the existing methods and then, based on this, to design better inference methods. We achieve this by either exploring the existing trade-offs or by providing general improvements applicable to multiple methods. We first provide an extensive background, introducing Gaussian processes and their sparse (approximate and efficient) variants. 
We continue with a description of the models under consideration in this thesis, deep Gaussian processes and Gaussian process state-space models, including detailed derivations and a theoretical comparison of existing methods. Then we start analysing deep Gaussian processes more closely: Trading off the properties (good optimisation versus expressivity) of state-of-the-art methods in this field, we propose a new variational inference based approach. We then demonstrate experimentally that our new algorithm leads to better calibrated uncertainty estimates than existing methods. Next, we turn our attention to Gaussian process state-space models, where we closely analyse the theoretical properties of existing methods. The understanding gained in this process leads us to propose a new inference scheme for general Gaussian process state-space models that incorporates effects on multiple time scales. This method is more efficient than previous approaches for long time series and outperforms its comparison partners on data sets in which effects on multiple time scales (fast and slowly varying dynamics) are present. Finally, we propose a new inference approach for Gaussian process state-space models that trades off the properties of state-of-the-art methods in this field. By combining variational inference with another approximate inference method, the Laplace approximation, we design an efficient algorithm that outperforms its comparison partners since it achieves better calibrated uncertainties.}, language = {en} } @phdthesis{Aktas2023, author = {Aktas, Berfin}, title = {Variation in coreference patterns}, doi = {10.25932/publishup-59608}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-596086}, school = {Universit{\"a}t Potsdam}, pages = {xviii, 195}, year = {2023}, abstract = {This thesis explores the variation in coreference patterns across language modes (i.e., spoken and written) and text genres. 
The significance of research on variation in language use has been emphasized in a number of linguistic studies. For instance, Biber and Conrad [2009] state that "register/genre variation is a fundamental aspect of human language" and "Given the ubiquity of register/genre variation, an understanding of how linguistic features are used in patterned ways across text varieties is of central importance for both the description of particular languages and the development of cross-linguistic theories of language use."[p.23] We examine the variation across genres with the primary goal of contributing to the body of knowledge on the description of language use in English. On the computational side, we believe that incorporating linguistic knowledge into learning-based systems can boost the performance of automatic natural language processing systems, particularly for non-standard texts. Therefore, in addition to their descriptive value, the linguistic findings we provide in this study may prove to be helpful for improving the performance of automatic coreference resolution, which is essential for a good text understanding and beneficial for several downstream NLP applications, including machine translation and text summarization. In particular, we study a genre of texts that is formed of conversational interactions on the well-known social media platform Twitter. Two factors motivate us: First, Twitter conversations are realized in written form but resemble spoken communication [Scheffler, 2017], and therefore they form an atypical genre for the written mode. Second, while Twitter texts are a complicated genre for automatic coreference resolution, due to their widespread use in the digital sphere, at the same time they are highly relevant for applications that seek to extract information or sentiments from users' messages. Thus, we are interested in discovering more about the linguistic and computational aspects of coreference in Twitter conversations. 
We first created a corpus of such conversations for this purpose and annotated it for coreference. We are interested in not only the coreference patterns but the overall discourse behavior of Twitter conversations. To address this, in addition to the coreference relations, we also annotated the coherence relations on the corpus we compiled. The corpus is available online in a newly developed form that allows for separating the tweets from their annotations. This study consists of three empirical analyses where we independently apply corpus-based, psycholinguistic and computational approaches for the investigation of variation in coreference patterns in a complementary manner. (1) We first make a descriptive analysis of variation across genres through a corpus-based study. We investigate the linguistic aspects of nominal coreference in Twitter conversations and we determine how this genre relates to other text genres in spoken and written modes. In addition to the variation across genres, studying the differences in spoken-written modes has also been a focus of linguistic research since Woolbert [1922]. (2) In order to investigate whether the language mode alone has any effect on coreference patterns, we carry out a crowdsourced experiment and analyze the patterns in the same genre for both spoken and written modes. (3) Finally, we explore the potentials of domain adaptation of automatic coreference resolution (ACR) for the conversational Twitter data. 
In order to answer the question of how the genre of Twitter conversations relates to other genres in spoken and written modes with respect to coreference patterns, we employ a state-of-the-art neural ACR model [Lee et al., 2018] to examine whether ACR on Twitter conversations will benefit from mode-based separation in out-of-domain training data.}, language = {en} } @phdthesis{Beinrucker2015, author = {Beinrucker, Andre}, title = {Variable selection in high dimensional data analysis with applications}, school = {Universit{\"a}t Potsdam}, pages = {VII, 107}, year = {2015}, language = {en} } @phdthesis{Pregla2023, author = {Pregla, Dorothea}, title = {Variability in sentence processing performance in German people with aphasia and unimpaired German native speakers}, doi = {10.25932/publishup-61420}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-614201}, school = {Universit{\"a}t Potsdam}, pages = {171}, year = {2023}, abstract = {Individuals with aphasia vary in the speed and accuracy they perform sentence comprehension tasks. Previous results indicate that the performance patterns of individuals with aphasia vary between tasks (e.g., Caplan, DeDe, \& Michaud, 2006; Caplan, Michaud, \& Hufford, 2013a). Similarly, it has been found that the comprehension performance of individuals with aphasia varies between homogeneous test sentences within and between sessions (e.g., McNeil, Hageman, \& Matthews, 2005). These studies ascribed the variability in the performance of individuals with aphasia to random noise. This conclusion would be in line with an influential theory on sentence comprehension in aphasia, the resource reduction hypothesis (Caplan, 2012). However, previous studies did not directly compare variability in language-impaired and language-unimpaired adults. Thus, it is still unclear how the variability in sentence comprehension differs between individuals with and without aphasia. 
Furthermore, the previous studies were exclusively carried out in English. Therefore, the findings on variability in sentence processing in English still need to be replicated in a different language. This dissertation aims to give a systematic overview of the patterns of variability in sentence comprehension performance in aphasia in German and, based on this overview, to put the resource reduction hypothesis to the test. In order to reach the first aim, variability was considered on three different dimensions (persons, measures, and occasions) following the classification by Hultsch, Strauss, Hunter, and MacDonald (2011). At the dimension of persons, the thesis compared the performance of individuals with aphasia and language-unimpaired adults. At the dimension of measures, this work explored the performance across different sentence comprehension tasks (object manipulation, sentence-picture matching). Finally, at the dimension of occasions, this work compared the performance in each task between two test sessions. Several methods were combined to study variability to gain a large and diverse database. In addition to the offline comprehension tasks, the self-paced-listening paradigm and the visual world eye-tracking paradigm were used in this work. The findings are in line with the previous results. As in the previous studies, variability in sentence comprehension in individuals with aphasia emerged between test sessions and between tasks. Additionally, it was possible to characterize the variability further using hierarchical Bayesian models. For individuals with aphasia, it was shown that both between-task and between-session variability are unsystematic. In contrast to that, language-unimpaired individuals exhibited systematic differences between measures and between sessions. However, these systematic differences occurred only in the offline tasks. 
Hence, variability in sentence comprehension differed between language-impaired and language-unimpaired adults, and this difference could be narrowed down to the offline measures. Based on this overview of the patterns of variability, the resource reduction hypothesis was evaluated. According to the hypothesis, the variability in the performance of individuals with aphasia can be ascribed to random fluctuations in the resources available for sentence processing. Given that the performance of the individuals with aphasia varied unsystematically, the results support the resource reduction hypothesis. Furthermore, the thesis proposes that the differences in variability between language-impaired and language-unimpaired adults can also be explained by the resource reduction hypothesis. More specifically, it is suggested that the systematic changes in the performance of language-unimpaired adults are due to decreasing fluctuations in available processing resources. In parallel, the unsystematic variability in the performance of individuals with aphasia could be due to constant fluctuations in available processing resources. In conclusion, the systematic investigation of variability contributes to a better understanding of language processing in aphasia and thus enriches aphasia research.}, language = {en} } @phdthesis{Schleussner2013, author = {Schleussner, Carl-Friedrich}, title = {Variability and trend of the North Atlantic ocean circulation in past and future climate}, address = {Potsdam}, pages = {127 S.}, year = {2013}, language = {en} } @phdthesis{Hofmann2002, author = {Hofmann, Nadi}, title = {Value education of youth}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000926}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {Am Anfang und am Ende des Schuljahres 2000/2001 wurden die Wertepriorit{\"a}ten der Sch{\"u}ler/innen und Lehrer/innen an acht verschiedenen Schulen gemessen. 
Mit dieser Studie wurde das theoretische Modell einer universellen Struktur menschlicher Wertepriorit{\"a}ten (Schwartz, 1992) erneut best{\"a}tigt. Zu beiden Messzeitpunkten ergaben sich gleiche Geschlechtseffekte wie auch {\"a}hnlich hohe positive Zusammenh{\"a}nge zwischen Religiosit{\"a}t und Schulbindung. Die Sch{\"u}ler/innen der nicht-religi{\"o}sen Schulen gaben Hedonism als h{\"o}chsten, und Tradition als niedrigsten Wert an. In den religi{\"o}sen Schulen waren Benevolence und Self-Direction die meist gesch{\"a}tzten Werte, w{\"a}hrend Power die niedrigste Priorit{\"a}t besaß. Die Ver{\"a}nderung der Werte Conformity, Hedonism und Universalism ließ sich sowohl durch die Religiosit{\"a}t der Sch{\"u}ler/innen wie auch durch deren Schulzugeh{\"o}rigkeit vorhersagen. Die Ver{\"a}nderung von Power, Tradition, Benevolence und Achievement hingegen wurde prim{\"a}r durch Religiosit{\"a}t vorhergesagt. In drei der vier Schulen korrelierte die {\"A}hnlichkeit der Sch{\"u}ler und Lehrer positiv mit der Schulbindung der Sch{\"u}ler/innen. Die Sch{\"u}ler-Lehrer {\"A}hnlichkeit korrelierte {\"u}ber alle Schulen positiv mit Schulleistung.}, language = {en} } @phdthesis{Stadion2017, author = {Stadion, Mandy}, title = {Validation and Characterization of lfi202b and Zfp69, two Novel Disease Genes in Obesity-Associated Insulin Resistance}, school = {Universit{\"a}t Potsdam}, pages = {158}, year = {2017}, language = {en} } @phdthesis{Kumru2018, author = {Kumru, Baris}, title = {Utilization of graphitic carbon nitride in dispersed media}, doi = {10.25932/publishup-42733}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427339}, school = {Universit{\"a}t Potsdam}, pages = {III, 190}, year = {2018}, abstract = {Utilization of sunlight for energy harvesting has been foreseen as sustainable replacement for fossil fuels, which would also eliminate side effects arising from fossil fuel consumption such as drastic increase of CO2 in Earth atmosphere. 
Semiconductor materials can be implemented for energy harvesting, and design of ideal energy harvesting devices relies on effective semiconductor with low recombination rate, ease of processing, stability over long period, non-toxicity and synthesis from abundant sources. Aforementioned criteria have attracted broad interest for graphitic carbon nitride (g-CN) materials, metal-free semiconductor which can be synthesized from low cost and abundant precursors. Furthermore, physical properties such as band gap, surface area and absorption can be tuned. g-CN was investigated as heterogeneous catalyst, with diversified applications from water splitting to CO2 reduction and organic coupling reactions. However, low dispersibility of g-CN in water and organic solvents was an obstacle for future improvements. Tissue engineering aims to mimic natural tissues mechanically and biologically, so that synthetic materials can replace natural ones in future. Hydrogels are crosslinked networks with high water content, therefore are prime candidates for tissue engineering. However, the first requirement is synthesis of hydrogels with mechanical properties that are matching to natural tissues. Among different approaches for reinforcement, nanocomposite reinforcement is highly promising. This thesis aims to investigate aqueous and organic dispersions of g-CN materials. Aqueous g-CN dispersions were utilized for visible light induced hydrogel synthesis, where g-CN acts as reinforcer and photoinitiator. Varieties of methodologies were presented for enhancing g-CN dispersibility, from co-solvent method to prepolymer formation, and it was shown that hydrogels with diversified mechanical properties (from skin-like to cartilage-like) are accessible via g-CN utilization. One pot photografting method was introduced for functionalization of g-CN surface which provides functional groups towards enhanced dispersibility in aqueous and organic media. 
Grafting vinyl thiazole groups yields stable additive-free organodispersions of g-CN which are electrostatically stabilized with increased photophysical properties. Colloidal stability of organic systems provides transparent g-CN coatings and printing g-CN from commercial inkjet printers. Overall, application of g-CN in dispersed media is highly promising, and variety of materials can be accessible via utilization of g-CN and visible light with simple chemicals and synthetic conditions. g-CN in dispersed media will bridge emerging research areas from tissue engineering to energy harvesting in near future.}, language = {en} } @phdthesis{Vu2014, author = {Vu, Dinh Phuong}, title = {Using video study to investigate eighth-grade mathematics classrooms in Vietnam}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72464}, school = {Universit{\"a}t Potsdam}, pages = {273}, year = {2014}, abstract = {The International Project for the Evaluation of Educational Achievement (IEA) was formed in the 1950s (Postlethwaite, 1967). Since that time, the IEA has conducted many studies in the area of mathematics, such as the First International Mathematics Study (FIMS) in 1964, the Second International Mathematics Study (SIMS) in 1980-1982, and a series of studies beginning with the Third International Mathematics and Science Study (TIMSS) which has been conducted every 4 years since 1995. According to Stigler et al. (1999), in the FIMS and the SIMS, U.S. students achieved low scores in comparison with students in other countries (p. 1). The TIMSS 1995 "Videotape Classroom Study" was therefore a complement to the earlier studies conducted to learn "more about the instructional and cultural processes that are associated with achievement" (Stigler et al., 1999, p. 1). The TIMSS Videotape Classroom Study is known today as the TIMSS Video Study. 
From the findings of the TIMSS 1995 Video Study, Stigler and Hiebert (1999) likened teaching to "mountain ranges poking above the surface of the water," whereby they implied that we might see the mountaintops, but we do not see the hidden parts underneath these mountain ranges (pp. 73-78). By watching the videotaped lessons from Germany, Japan, and the United States again and again, they discovered that "the systems of teaching within each country look similar from lesson to lesson. At least, there are certain recurring features [or patterns] that typify many of the lessons within a country and distinguish the lessons among countries" (pp. 77-78). They also discovered that "teaching is a cultural activity," so the systems of teaching "must be understood in relation to the cultural beliefs and assumptions that surround them" (pp. 85, 88). From this viewpoint, one of the purposes of this dissertation was to study some cultural aspects of mathematics teaching and relate the results to mathematics teaching and learning in Vietnam. Another research purpose was to carry out a video study in Vietnam to find out the characteristics of Vietnamese mathematics teaching and compare these characteristics with those of other countries. 
In particular, this dissertation carried out the following research tasks: - Studying the characteristics of teaching and learning in different cultures and relating the results to mathematics teaching and learning in Vietnam - Introducing the TIMSS, the TIMSS Video Study and the advantages of using video study in investigating mathematics teaching and learning - Carrying out the video study in Vietnam to identify the image, scripts and patterns, and the lesson signature of eighth-grade mathematics teaching in Vietnam - Comparing some aspects of mathematics teaching in Vietnam and other countries and identifying the similarities and differences across countries - Studying the demands and challenges of innovating mathematics teaching methods in Vietnam - lessons from the video studies Hopefully, this dissertation will be a useful reference material for pre-service teachers at education universities to understand the nature of teaching and develop their teaching career.}, language = {en} } @phdthesis{Crisologo2019, author = {Crisologo, Irene}, title = {Using spaceborne radar platforms to enhance the homogeneity of weather radar calibration}, doi = {10.25932/publishup-44570}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-445704}, school = {Universit{\"a}t Potsdam}, pages = {xvii, 61}, year = {2019}, abstract = {Accurate weather observations are the keystone to many quantitative applications, such as precipitation monitoring and nowcasting, hydrological modelling and forecasting, climate studies, as well as understanding precipitation-driven natural hazards (i.e. floods, landslides, debris flow). Weather radars have been an increasingly popular tool since the 1940s to provide high spatial and temporal resolution precipitation data at the mesoscale, bridging the gap between synoptic and point scale observations. 
Yet, many institutions still struggle to tap the potential of the large archives of reflectivity, as there is still much to understand about factors that contribute to measurement errors, one of which is calibration. Calibration represents a substantial source of uncertainty in quantitative precipitation estimation (QPE). A miscalibration of a few dBZ can easily deteriorate the accuracy of precipitation estimates by an order of magnitude. Instances where rain cells carrying torrential rains are misidentified by the radar as moderate rain could mean the difference between a timely warning and a devastating flood. Since 2012, the Philippine Atmospheric, Geophysical, and Astronomical Services Administration (PAGASA) has been expanding the country's ground radar network. We had a first look into the dataset from one of the longest running radars (the Subic radar) after devastating week-long torrential rains and thunderstorms in August 2012 caused by the annual southwest monsoon and enhanced by the north-passing Typhoon Haikui. The analysis of the rainfall spatial distribution revealed the added value of radar-based QPE in comparison to interpolated rain gauge observations. However, when compared with local gauge measurements, severe miscalibration of the Subic radar was found. As a consequence, the radar-based QPE would have underestimated the rainfall amount by up to 60\% if they had not been adjusted by rain gauge observations—a technique that is not only affected by other uncertainties, but which is also not feasible in other regions of the country with very sparse rain gauge coverage. Relative calibration techniques, or the assessment of bias from the reflectivity of two radars, has been steadily gaining popularity. 
Previous studies have demonstrated that reflectivity observations from the Tropical Rainfall Measuring Mission (TRMM) and its successor, the Global Precipitation Measurement (GPM), are accurate enough to serve as a calibration reference for ground radars over low-to-mid-latitudes (± 35 deg for TRMM; ± 65 deg for GPM). Comparing spaceborne radars (SR) and ground radars (GR) requires cautious consideration of differences in measurement geometry and instrument specifications, as well as temporal coincidence. For this purpose, we implement a 3-D volume matching method developed by Schwaller and Morris (2011) and extended by Warren et al. (2018) to 5 years' worth of observations from the Subic radar. In this method, only the volumetric intersections of the SR and GR beams are considered. Calibration bias affects reflectivity observations homogeneously across the entire radar domain. Yet, other sources of systematic measurement errors are highly heterogeneous in space, and can either enhance or balance the bias introduced by miscalibration. In order to account for such heterogeneous errors, and thus isolate the calibration bias, we assign a quality index to each matching SR-GR volume, and thus compute the GR calibration bias as a quality-weighted average of reflectivity differences in any sample of matching SR-GR volumes. We exemplify the idea of quality-weighted averaging by using beam blockage fraction (BBF) as a quality variable. Quality-weighted averaging is able to increase the consistency of SR and GR observations by decreasing the standard deviation of the SR-GR differences, and thus increasing the precision of the bias estimates. To extend this framework further, the SR-GR quality-weighted bias estimation is applied to the neighboring Tagaytay radar, but this time focusing on path-integrated attenuation (PIA) as the source of uncertainty. Tagaytay is a C-band radar operating at a lower wavelength and is therefore more affected by attenuation. 
Applying the same method used for the Subic radar, a time series of calibration bias is also established for the Tagaytay radar. Tagaytay radar sits at a higher altitude than the Subic radar and is surrounded by a gentler terrain, so beam blockage is negligible, especially in the overlapping region. Conversely, Subic radar is largely affected by beam blockage in the overlapping region, but being an S-band radar, attenuation is considered negligible. These coincidentally independent uncertainty contributions of each radar in the region of overlap provide an ideal environment to experiment with the different scenarios of quality filtering when comparing reflectivities from the two ground radars. The standard deviation of the GR-GR differences already decreases if we consider either BBF or PIA to compute the quality index and thus the weights. However, combining them multiplicatively resulted in the largest decrease in standard deviation, suggesting that taking both factors into account increases the consistency between the matched samples. The overlap between the two radars and the instances of the SR passing over the two radars at the same time allows for verification of the SR-GR quality-weighted bias estimation method. In this regard, the consistency between the two ground radars is analyzed before and after bias correction is applied. For cases when all three radars are coincident during a significant rainfall event, the correction of GR reflectivities with calibration bias estimates from SR overpasses dramatically improves the consistency between the two ground radars which have shown incoherent observations before correction. We also show that for cases where adequate SR coverage is unavailable, interpolating the calibration biases using a moving average can be used to correct the GR observations for any point in time to some extent. 
By using the interpolated biases to correct GR observations, we demonstrate that bias correction reduces the absolute value of the mean difference in most cases, and therefore improves the consistency between the two ground radars. This thesis demonstrates that in general, taking into account systematic sources of uncertainty that are heterogeneous in space (e.g. BBF) and time (e.g. PIA) allows for a more consistent estimation of calibration bias, a homogeneous quantity. The bias still exhibits an unexpected variability in time, which hints that there are still other sources of errors that remain unexplored. Nevertheless, the increase in consistency between SR and GR as well as between the two ground radars, suggests that considering BBF and PIA in a weighted-averaging approach is a step in the right direction. Despite the ample room for improvement, the approach that combines volume matching between radars (either SR-GR or GR-GR) and quality-weighted comparison is readily available for application or further scrutiny. As a step towards reproducibility and transparency in atmospheric science, the 3D matching procedure and the analysis workflows as well as sample data are made available in public repositories. Open-source software such as Python and wradlib are used for all radar data processing in this thesis. 
This approach towards open science provides both research institutions and weather services with a valuable tool that can be applied to radar calibration, from monitoring to a posteriori correction of archived data.}, language = {en} } @phdthesis{Gerber1995, author = {Gerber, Stefan}, title = {Using software for fault detection in arithmetical circuits}, pages = {123 S.}, year = {1995}, language = {en} } @phdthesis{Schmitz2023, author = {Schmitz, Se{\'a}n}, title = {Using low-cost sensors to gather high resolution measurements of air quality in urban environments and inform mobility policy}, doi = {10.25932/publishup-60105}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-601053}, school = {Universit{\"a}t Potsdam}, pages = {180}, year = {2023}, abstract = {Air pollution has been a persistent global problem in the past several hundred years. While some industrialized nations have shown improvements in their air quality through stricter regulation, others have experienced declines as they rapidly industrialize. The WHO's 2021 update of their recommended air pollution limit values reflects the substantial impacts on human health of pollutants such as NO2 and O3, as recent epidemiological evidence suggests substantial long-term health impacts of air pollution even at low concentrations. Alongside developments in our understanding of air pollution's health impacts, the new technology of low-cost sensors (LCS) has been taken up by both academia and industry as a new method for measuring air pollution. Due primarily to their lower cost and smaller size, they can be used in a variety of different applications, including in the development of higher resolution measurement networks, in source identification, and in measurements of air pollution exposure. While significant efforts have been made to accurately calibrate LCS with reference instrumentation and various statistical models, accuracy and precision remain limited by variable sensor sensitivity. 
Furthermore, standard procedures for calibration still do not exist and most proprietary calibration algorithms are black-box, inaccessible to the public. This work seeks to expand the knowledge base on LCS in several different ways: 1) by developing an open-source calibration methodology; 2) by deploying LCS at high spatial resolution in urban environments to test their capability in measuring microscale changes in urban air pollution; 3) by connecting LCS deployments with the implementation of local mobility policies to provide policy advice on resultant changes in air quality. In a first step, it was found that LCS can be consistently calibrated with good performance against reference instrumentation using seven general steps: 1) assessing raw data distribution, 2) cleaning data, 3) flagging data, 4) model selection and tuning, 5) model validation, 6) exporting final predictions, and 7) calculating associated uncertainty. By emphasizing the need for consistent reporting of details at each step, most crucially on model selection, validation, and performance, this work pushed forward with the effort towards standardization of calibration methodologies. In addition, with the open-source publication of code and data for the seven-step methodology, advances were made towards reforming the largely black-box nature of LCS calibrations. With a transparent and reliable calibration methodology established, LCS were then deployed in various street canyons between 2017 and 2020. Using two types of LCS, metal oxide (MOS) and electrochemical (EC), their performance in capturing expected patterns of urban NO2 and O3 pollution was evaluated. Results showed that calibrated concentrations from MOS and EC sensors matched general diurnal patterns in NO2 and O3 pollution measured using reference instruments. 
While MOS proved to be unreliable for discerning differences among measured locations within the urban environment, the concentrations measured with calibrated EC sensors matched expectations from modelling studies on NO2 and O3 pollution distribution in street canyons. As such, it was concluded that LCS are appropriate for measuring urban air quality, including for assisting urban-scale air pollution model development, and can reveal new insights into air pollution in urban environments. To achieve the last goal of this work, two measurement campaigns were conducted in connection with the implementation of three mobility policies in Berlin. The first involved the construction of a pop-up bike lane on Kottbusser Damm in response to the COVID-19 pandemic, the second surrounded the temporary implementation of a community space on B{\"o}ckhstrasse, and the last was focused on the closure of a portion of Friedrichstrasse to all motorized traffic. In all cases, measurements of NO2 were collected before and after the measure was implemented to assess changes in air quality resultant from these policies. Results from the Kottbusser Damm experiment showed that the bike-lane reduced NO2 concentrations that cyclists were exposed to by 22 ± 19\%. On Friedrichstrasse, the street closure reduced NO2 concentrations to the level of the urban background without worsening the air quality on side streets. These valuable results were communicated swiftly to partners in the city administration responsible for evaluating the policies' success and future, highlighting the ability of LCS to provide policy-relevant results. As a new technology, much is still to be learned about LCS and their value to academic research in the atmospheric sciences. Nevertheless, this work has advanced the state of the art in several ways. First, it contributed a novel open-source calibration methodology that can be used by LCS end-users for various air pollutants. 
Second, it strengthened the evidence base on the reliability of LCS for measuring urban air quality, finding through novel deployments in street canyons that LCS can be used at high spatial resolution to understand microscale air pollution dynamics. Last, it is the first of its kind to connect LCS measurements directly with mobility policies to understand their influences on local air quality, resulting in policy-relevant findings valuable for decisionmakers. It serves as an example of the potential for LCS to expand our understanding of air pollution at various scales, as well as their ability to serve as valuable tools in transdisciplinary research.}, language = {en} } @phdthesis{Crawford2020, author = {Crawford, Michael Scott}, title = {Using individual-based modeling to understand grassland diversity and resilience in the Anthropocene}, doi = {10.25932/publishup-47941}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-479414}, school = {Universit{\"a}t Potsdam}, pages = {174}, year = {2020}, abstract = {The world's grassland systems are increasingly threatened by anthropogenic change. Susceptible to a variety of different stressors, from land-use intensification to climate change, understanding the mechanisms driving the maintenance of these systems' biodiversity and stability, and how these mechanisms may shift under human-mediated disturbance, is thus critical for successfully navigating the next century. Within this dissertation, I use an individual-based and spatially-explicit model of grassland community assembly (IBC-grass) to examine several processes, thought key to understanding their biodiversity and stability and how it changes under stress. In the first chapter of my thesis, I examine the conditions under which intraspecific trait variation influences the diversity of simulated grassland communities. 
In the second and third chapters of my thesis, I shift focus towards understanding how belowground herbivores influence the stability of these grassland systems to either a disturbance that results in increased, stochastic, plant mortality, or eutrophication. Intraspecific trait variation (ITV), or variation in trait values between individuals of the same species, is fundamental to the structure of ecological communities. However, because it has historically been difficult to incorporate into theoretical and statistical models, it has remained largely overlooked in community-level analyses. This reality is quickly shifting, however, as a consensus of research suggests that it may compose a sizeable proportion of the total variation within an ecological community and that it may play a critical role in determining if species coexist. Despite this increasing awareness that ITV matters, there is little consensus of the magnitude and direction of its influence. Therefore, to better understand how ITV changes the assembly of grassland communities, in the first chapter of my thesis, I incorporate it into an established, individual-based grassland community model, simulating both pairwise invasion experiments as well as the assembly of communities with varying initial diversities. By varying the amount of ITV in these species' functional traits, I examine the magnitude and direction of ITV's influence on pairwise invasibility and community coexistence. During pairwise invasion, ITV enables the weakest species to more frequently invade the competitively superior species, however, this influence does not generally scale to the community level. Indeed, unless the community has low alpha- and beta- diversity, there will be little effect of ITV in bolstering diversity. In these situations, since the trait axis is sparsely filled, the competitively inferior may suffer less competition and therefore ITV may buffer the persistence and abundance of these species for some time. 
In the second and third chapters of my thesis, I model how one of the most ubiquitous trophic interactions within grasslands, herbivory belowground, influences their diversity and stability. Until recently, the fundamental difficulty in studying a process within the soil has left belowground herbivory "out of sight, out of mind." This dilemma presents an opportunity for simulation models to explore how this understudied process may alter community dynamics. In the second chapter of my thesis, I implement belowground herbivory - represented by the weekly removal of plant biomass - into IBC-grass. Then, by introducing a pulse disturbance, modelled as the stochastic mortality of some percentage of the plant community, I observe how the presence of belowground herbivores influences the resistance and recovery of Shannon diversity in these communities. I find that high resource, low diversity, communities are significantly more destabilized by the presence of belowground herbivores after disturbance. Depending on the timing of the disturbance and whether the grassland's seed bank persists for more than one season, the impact of the disturbance - and subsequently the influence of the herbivores - can be greatly reduced. However, because human-mediated eutrophication increases the amount of resources in the soil, thus pressuring grassland systems, our results suggest that the influence of these herbivores may become more important over time. In the third chapter of my thesis, I delve further into understanding the mechanistic underpinnings of belowground herbivores on the diversity of grasslands by replicating an empirical mesocosm experiment that crosses the presence of herbivores above- and below-ground with eutrophication. I show that while aboveground herbivory, as predicted by theory and frequently observed in experiments, mitigates the impact of eutrophication on species diversity, belowground herbivores counterintuitively reduce biodiversity. 
Indeed, this influence positively interacts with the eutrophication process, amplifying its negative impact on diversity. I discovered the mechanism underlying this surprising pattern to be that, as the herbivores consume roots, they increase the proportion of root resources to root biomass. Because root competition is often symmetric, herbivory fails to mitigate any asymmetries in the plants' competitive dynamics. However, since the remaining roots have more abundant access to resources, the plants' competition shifts aboveground, towards asymmetric competition for light. This leads the community towards a low-diversity state, composed of mostly high-performance, large plant species. We further argue that this pattern will emerge unless the plants' root competition is asymmetric, in which case, like its counterpart aboveground, belowground herbivory may buffer diversity by reducing this asymmetry between the competitively superior and inferior plants. I conclude my dissertation by discussing the implications of my research on the state of the art in intraspecific trait variation and belowground herbivory, with emphasis on the necessity of more diverse theory development in the study of these fundamental interactions. My results suggest that the influence of these processes on the biodiversity and stability of grassland systems is underappreciated and multidimensional, and must be thoroughly explored if researchers wish to predict how the world's grasslands will respond to anthropogenic change. Further, should researchers myopically focus on understanding central ecological interactions through only mathematically tractable analyses, they may miss entire suites of potential coexistence mechanisms that can increase the coviability of species, potentially leading to coexistence over ecologically-significant timespans. 
Individual-based modelling, therefore, with its focus on individual interactions, will prove a critical tool in the coming decades for understanding how local interactions scale to larger contexts, and how these interactions shape ecological communities and further predicting how these systems will change under human-mediated stress.}, language = {en} } @phdthesis{Rigamonti2023, author = {Rigamonti, Lia}, title = {Use of digital media for remote instruction in exercise sciences education and research}, doi = {10.25932/publishup-58364}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-583642}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2023}, abstract = {To grant high-quality evidence-based research in the field of exercise sciences, it is often necessary for various institutions to collaborate over longer distances and internationally. Here, not only with regard to the recent COVID-19-pandemic, digital means provide new options for remote scientific exchanges. This thesis is meant to analyse and test digital opportunities to support the dissemination of knowledge and instruction of investigators about defined examination protocols in an international multi-center context. The project consisted of three studies. The first study, a questionnaire-based survey, aimed at learning about the opinions and preferences of digital learning or social media among students of sport science faculties in two universities each in Germany, the UK and Italy. Based on these findings, in a second study, an examination video of an ultrasound determination of the intima-media-thickness and diameter of an artery was distributed by a messenger app to doctors and nursing personnel as simulated investigators and efficacy of the test setting was analysed. 
Finally, a third study integrated the use of an augmented reality device for direct remote supervision of the same ultrasound examinations in a long-distance international setting with international experts from the fields of engineering and sports science and later remote supervision of augmented reality equipped physicians performing a given task. The first study with 229 participating students revealed a high preference for YouTube to receive video-based knowledge as well as a preference for using WhatsApp and Facebook for peer-to-peer contacts for learning purposes and to exchange and discuss knowledge. In the second study, video-based instructions sent by WhatsApp messenger showed high approval of the setup in both study groups, one with doctors familiar with the use of ultrasound technology as well as one with nursing staff who were not familiar with the device, with similar results in overall time of performance and the measurements of the femoral arteries. In the third and final study, experts from different continents were connected remotely to the examination site via an augmented reality device with good transmission quality. The remote supervision of doctors' examination produced a good interrater correlation. Experiences with the augmented reality-based setting were rated as highly positive by the participants. Potential benefits of this technique were seen in the fields of education, movement analysis, and supervision. Concluding, the findings of this thesis were able to suggest modern and addressee-centred digital solutions to enhance the understanding of given examination techniques of potential investigators in exercise science research projects. Head-mounted augmented reality devices have a special value and may be recommended for collaborative research projects with physical examination-based research questions. 
While the established setting should be further investigated in prospective clinical studies, digital competencies of future researchers should already be enhanced during the early stages of their education.}, language = {en} } @phdthesis{SvirejevaHopkins2004, author = {Svirejeva-Hopkins, Anastasia}, title = {Urbanised territories as a specific component of the global carbon cycle}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0001512}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Wir betrachten folgende Teile: die zus{\"a}tzlichen Kohlenstoff(C)-emissionen, welche aus der Umwandlung von nat{\"u}rlichem Umland durch Stadtwachstum resultieren, und die {\"A}nderung des C-Flusses durch 'urbanisierte' {\"O}kosysteme, soweit atmosph{\"a}risches C durch diese in umliegende nat{\"u}rliche {\"O}kosysteme entlang der Kette \“Atmosph{\"a}re -> Vegetation -> abgestorbene organische Substanzen\” gepumpt wird: d.h. C-Export; f{\"u}r den Zeitraum von 1980 bis 2050. Als Szenario nutzen wir Prognosen der regionalen Stadtbev{\"o}lkerung, welche durch ein 'Hybridmodell' generiert werden f{\"u}r acht Regionen. Alle Sch{\"a}tzungen der C-Fl{\"u}sse basieren auf zwei Modellen: das Regression Modell und das sogenannte G-Modell. Die Siedlungsfl{\"a}che, welche mit dem Wachstum der Stadtbev{\"o}lkerung zunimmt, wird in 'Gr{\"u}nfl{\"a}chen' (Parks, usw.), Geb{\"a}udefl{\"a}chen und informell st{\"a}dtisch genutzte Fl{\"a}chen (Slums, illegale Lagerpl{\"a}tze, usw.) unterteilt. Es werden j{\"a}hrlich die regionale und globale Dynamik der C-Emissionen und des C-Exports sowie die C-Gesamtbilanz berechnet. Dabei liefern beide Modelle qualitativ {\"a}hnliche Ergebnisse, jedoch gibt es einige quantitative Unterschiede. Im ersten Modell erreicht die globale Jahresemission f{\"u}r die Dekade 2020-2030 resultierend aus der Landnutzungs{\"a}nderung ein Maximum von 205 Mt/a. 
Die maximalen Beitr{\"a}ge zur globalen Emission werden durch China, die asiatische und die pazifische Region erbracht. Im zweiten Modell erh{\"o}ht sich die j{\"a}hrliche globale Emission von 1.12 GtC/a f{\"u}r 1980 auf 1.25 GtC/a f{\"u}r 2005 (1 Gt = 10^9 t). Danach beginnt eine Reduzierung. Vergleichen wir das Emissionmaximum mit der Emission durch Abholzung im Jahre 1980 (1.36 GtC/a), k{\"o}nnen wir konstatieren, daß die Urbanisierung damit in vergleichbarer Gr{\"o}sse zur Emission beitr{\"a}gt. Bezogen auf die globale Dynamik des j{\"a}hrlichen C-Exports durch Urbanisierung beobachten wir ein monotones Wachstum bis zum nahezu dreifachen Wert von 24 MtC/a f{\"u}r 1980 auf 66 MtC/a f{\"u}r 2050 im ersten Modell, bzw. im zweiten Modell von 249 MtC/a f{\"u}r 1980 auf 505 MtC/a f{\"u}r 2050. Damit ist im zweiten Fall die Transportleistung der Siedlungsgebiete mit dem C-Transport durch Fl{\"u}sse in die Ozeane (196 .. 537 MtC/a) vergleichbar. Bei der Absch{\"a}tzung der Gesamtbilanz finden wir, daß die Urbanisierung die Bilanz in Richtung zu einer 'Senke' verschiebt. Entsprechend dem zweiten Modell beginnt sich die C-Gesamtbilanz (nach ann{\"a}hernder Konstanz) ab dem Jahre 2000 mit einer fast konstanten Rate zu verringern. Wenn das Maximum im Jahre 2000 bei 905 MtC/a liegt, f{\"a}llt dieser Wert anschliessend bis zum Jahre 2050 auf 118 MtC/a. Bei Extrapolation dieser Dynamik in die Zukunft k{\"o}nnen wir annehmen, daß am Ende des 21. Jahrhunderts die \“urbane\” C-Gesamtbilanz Null bzw. negative Werte erreicht.}, language = {en} } @phdthesis{Numberger2019, author = {Numberger, Daniela}, title = {Urban wastewater and lakes as habitats for bacteria and potential vectors for pathogens}, doi = {10.25932/publishup-43709}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-437095}, school = {Universit{\"a}t Potsdam}, pages = {VI, 130}, year = {2019}, abstract = {Wasser ist lebensnotwendig und somit eine essentielle Ressource. 
Jedoch sind unsere S{\"u}ßwasser-Ressourcen begrenzt und ihre Erhaltung daher besonders wichtig. Verschmutzungen mit Chemikalien und Krankheitserregern, die mit einer wachsenden Bev{\"o}lkerung und Urbanisierung einhergehen, verschlechtern die Qualit{\"a}t unseres S{\"u}ßwassers. Außerdem kann Wasser als {\"U}bertragungsvektor f{\"u}r Krankheitserreger dienen und daher wasserb{\"u}rtige Krankheiten verursachen. Der Leibniz-Forschungsverbund INFECTIONS'21 untersuchte innerhalb der interdisziplin{\"a}ren Forschungsgruppe III - „Wasser", Gew{\"a}sser als zentralen Mittelpunkt f{\"u}r Krankheitserreger. Dabei konzentrierte man sich auf Clostridioides difficile sowie avi{\"a}re Influenza A-Viren, von denen angenommen wird, dass sie in die Gew{\"a}sser ausgeschieden werden. Ein weiteres Ziel bestand darin, die bakteriellen Gemeinschaften eines Kl{\"a}rwerkes der deutschen Hauptstadt Berlin zu charakterisieren, um anschließend eine Bewertung des potentiellen Gesundheitsrisikos geben zu k{\"o}nnen. Bakterielle Gemeinschaften des Roh- und Klarwassers aus dem Kl{\"a}rwerk unterschieden sich signifikant voneinander. Der Anteil an Darm-/F{\"a}kalbakterien war relativ niedrig und potentielle Darmpathogene wurden gr{\"o}ßtenteils aus dem Rohwasser entfernt. Ein potentielles Gesundheitsrisiko konnte allerdings von potentiell pathogenen Legionellen wie L. lytica festgestellt werden, deren relative Abundanz im Klarwasser h{\"o}her war als im Rohwasser. Es wurden außerdem drei C. difficile-Isolate aus dem Kl{\"a}rwerk-Rohwasser und einem st{\"a}dtischen Badesee in Berlin (Weisser See) gewonnen und sequenziert. Die beiden Isolate aus dem Kl{\"a}rwerk tragen keine Toxin-Gene, wohingegen das Isolat aus dem See Toxin-Gene besitzt. Alle drei Isolate sind sehr nah mit humanen St{\"a}mmen verwandt. Dies deutet auf ein potentielles, wenn auch sporadisches Gesundheitsrisiko hin. 
(Avi{\"a}re) Influenza A-Viren wurden in 38.8\% der untersuchten Sedimentproben mittels PCR detektiert, aber die Virusisolierung schlug fehl. Ein Experiment mit beimpften Wasser- und Sedimentproben zeigte, dass f{\"u}r die Isolierung aus Sedimentproben eine relativ hohe Viruskonzentration n{\"o}tig ist. In Wasserproben ist jedoch ein niedriger Titer an Influenza A-Viren ausreichend, um eine Infektion auszul{\"o}sen. Es konnte zudem auch festgestellt werden, dass sich „Madin-Darby Canine Kidney (MDCK)―-Zellkulturen im Gegensatz zu embryonierten H{\"u}hnereiern besser eignen, um Influenza A-Viren aus Sediment zu isolieren. Zusammenfassend l{\"a}sst sich sagen, dass diese Arbeit m{\"o}gliche Gesundheitsrisiken aufgedeckt hat, wie etwa durch Legionellen im untersuchten Berliner Kl{\"a}rwerk, deren relative Abundanz in gekl{\"a}rtem Abwasser h{\"o}her ist als im Rohwasser. Desweiteren wird indiziert, dass Abwasser und Gew{\"a}sser als Reservoir und Vektor f{\"u}r pathogene Organismen dienen k{\"o}nnen, selbst f{\"u}r nicht-typische Wasser-Pathogene wie C. difficile.}, language = {en} } @phdthesis{Hennig2022, author = {Hennig, Theresa}, title = {Uranium migration in the Opalinus Clay quantified on the host rock scale with reactive transport simulations}, doi = {10.25932/publishup-55270}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-552700}, school = {Universit{\"a}t Potsdam}, pages = {161}, year = {2022}, abstract = {Humankind and their environment need to be protected from the harmful effects of spent nuclear fuel, and therefore disposal in deep geological formations is favoured worldwide. Suitability of potential host rocks is evaluated, among others, by the retention capacity with respect to radionuclides. Safety assessments are based on the quantification of radionuclide migration lengths with numerical simulations as experiments cannot cover the required temporal (1 Ma) and spatial scales (>100 m). 
Aim of the present thesis is to assess the migration of uranium, a geochemically complex radionuclide, in the potential host rock Opalinus Clay. Radionuclide migration in clay formations is governed by diffusion due to their low permeability and retarded by sorption. Both processes highly depend on pore water geochemistry and mineralogy that vary between different facies. Diffusion is quantified with the single-component (SC) approach using one diffusion coefficient for all species and the process-based multi-component (MC) option. With this, each species is assigned its own diffusion coefficient and the interaction with the diffuse double layer is taken into account. Sorption is integrated via a bottom-up approach using mechanistic surface complexation models and cation exchange. Therefore, reactive transport simulations are conducted with the geochemical code PHREEQC to quantify uranium migration, i.e. diffusion and sorption, as a function of mineralogical and geochemical heterogeneities on the host rock scale. Sorption processes are facies dependent. Migration lengths vary between the Opalinus Clay facies by up to 10 m. Thereby, the geochemistry of the pore water, in particular the partial pressure of carbon dioxide (pCO2), is more decisive for the sorption capacity than the amount of clay minerals. Nevertheless, higher clay mineral quantities compensate geochemical variations. Consequently, sorption processes must be quantified as a function of pore water geochemistry in contact with the mineral assemblage. Uranium diffusion in the Opalinus Clay is facies independent. Speciation is dominated by aqueous ternary complexes of U(VI) with calcium and carbonate. Differences in the migration lengths between SC and MC diffusion are with +/-5 m negligible. Further, the application of the MC approach highly depends on the quality and availability of the underlying data. 
Therefore, diffusion processes can be adequately quantified with the SC approach using experimentally determined diffusion coefficients. The hydrogeological system governs pore water geochemistry within the formation rather than the mineralogy. Diffusive exchange with the adjacent aquifers established geochemical gradients over geological time scales that can enhance migration by up to 25 m. Consequently, uranium sorption processes must be quantified following the identified priority: pCO2 > hydrogeology > mineralogy. The presented research provides a workflow and orientation for other potential disposal sites with similar pore water geochemistry due to the identified mechanisms and dependencies. With a maximum migration length of 70 m, the retention capacity of the Opalinus Clay with respect to uranium is sufficient to fulfill the German legal minimum requirement of a thickness of at least 100 m.}, language = {en} } @phdthesis{Muehlenbruch2013, author = {M{\"u}hlenbruch, Kristin}, title = {Updating the german diabetes risk score - model extensions, validation and reclassification}, address = {Potsdam}, pages = {131 S.}, year = {2013}, language = {en} } @phdthesis{Vockenberg2006, author = {Vockenberg, Kerstin}, title = {Updating of representations in working memory}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-11767}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The limited capacity of working memory forces people to update its contents continuously. Two aspects of the updating process were investigated in the present experimental series. The first series concerned the question if it is possible to update several representations in parallel. Similar results were obtained for the updating of object features as well as for the updating of whole objects, participants were able to update representations in parallel. 
The second experimental series addressed the question if working memory representations which were replaced in an updating disappear directly or interfere with the new representations. Evidence for the existence of old representations was found under working memory conditions and under conditions exceeding working memory capacity. These results contradict the hypothesis that working memory contents are protected from proactive interference of long-term memory contents.}, subject = {Aktualisierung}, language = {en} } @phdthesis{Klier2016, author = {Klier, Dennis Tobias}, title = {Upconversion luminescence in Er-codoped NaYF4 nanoparticles}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98486}, school = {Universit{\"a}t Potsdam}, pages = {ix, 89}, year = {2016}, abstract = {In the context of an increasing population of aging people and a shift of medical paradigm towards an individualized medicine in health care, nanostructured lanthanides doped sodium yttrium fluoride (NaYF4) represents an exciting class of upconversion nanomaterials (UCNM) which are suitable to bring forward developments in biomedicine and -biodetection. Despite the fact that among various fluoride based upconversion (UC) phosphors lanthanide doped NaYF4 is one of the most studied upconversion nanomaterial, many open questions are still remaining concerning the interplay of the population routes of sensitizer and activator electronic states involved in different luminescence upconversion photophysics as well as the role of phonon coupling. The collective work aims to explore a detailed understanding of the upconversion mechanism in nanoscaled NaYF4 based materials co-doped with several lanthanides, e.g. Yb3+ and Er3+ as the "standard" type upconversion nanoparticles (UCNP) up to advanced UCNP with Gd3+ and Nd3+. 
Especially the impact of the crystal lattice structure as well as the resulting lattice phonons on the upconversion luminescence was investigated in detail based on different mixtures of cubic and hexagonal NaYF4 nanoscaled crystals. Three synthesis methods, depending on the attempt of the respective central spectroscopic questions, could be accomplished in the following work. NaYF4 based upconversion nanoparticles doped with several combinations of lanthanides (Yb3+, Er3+, Gd3+ and Nd3+) were synthesized successfully using a hydrothermal synthesis method under mild conditions as well as a co-precipitation and a high temperature co-precipitation technique. Structural information was gathered by means of X-ray diffraction (XRD), transmission electron microscopy (TEM), dynamic light scattering (DLS), Raman spectroscopy and inductively coupled plasma optical emission spectrometry (ICP-OES). The results were discussed in detail with relation to the spectroscopic results. A variable spectroscopic setup was developed for multi parameter upconversion luminescence studies at various temperatures from 4 K to 328 K. Especially, the study of the thermal behavior of upconversion luminescence as well as time resolved area normalized emission spectra were a prerequisite for the detailed understanding of intramolecular deactivation processes, structural changes upon annealing or Gd3+ concentration, and the role of phonon coupling for the upconversion efficiency. Subsequently it became possible to synthesize UCNP with tailored upconversion luminescence properties. In the end, the potential of UCNP for life science application should be enunciated in context of current needs and improvements of nanomaterial based optical sensors, whereas the "standard" UCNP design was attuned according to the special conditions in the biological matrix, in terms of a better biocompatibility due to a lower impact on biological tissue and higher penetrability for the excitation light. 
The first step into this direction was to use Nd3+ ions as a new sensitizer in tridoped NaYF4 based UCNP, whereas the achieved absolute and relative temperature sensitivity is comparable to other types of local temperature sensors in the literature.}, language = {en} } @phdthesis{Valade2023, author = {Valade, Aurelien Niels Valentin}, title = {Unveiling the Local Universe}, school = {Universit{\"a}t Potsdam}, pages = {X, 102}, year = {2023}, language = {en} } @phdthesis{Hutter2014, author = {Hutter, Anne}, title = {Unveiling the epoch of reionization by simulations and high-redshift galaxies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-76998}, school = {Universit{\"a}t Potsdam}, pages = {vi, 155}, year = {2014}, abstract = {The Epoch of Reionization marks after recombination the second major change in the ionization state of the universe, going from a neutral to an ionized state. It starts with the appearance of the first stars and galaxies; a fraction of high-energy photons emitted from galaxies permeate into the intergalactic medium (IGM) and gradually ionize the hydrogen, until the IGM is completely ionized at z~6 (Fan et al., 2006). While the progress of reionization is driven by galaxy evolution, it changes the ionization and thermal state of the IGM substantially and affects subsequent structure and galaxy formation by various feedback mechanisms. Understanding this interaction between reionization and galaxy formation is further impeded by a lack of understanding of the high-redshift galactic properties such as the dust distribution and the escape fraction of ionizing photons. Lyman Alpha Emitters (LAEs) represent a sample of high-redshift galaxies that are sensitive to all these galactic properties and the effects of reionization. 
In this thesis we aim to understand the progress of reionization by performing cosmological simulations, which allows us to investigate the limits of constraining reionization by high-redshift galaxies as LAEs, and examine how galactic properties and the ionization state of the IGM affect the visibility and observed quantities of LAEs and Lyman Break galaxies (LBGs). In the first part of this thesis we focus on performing radiative transfer calculations to simulate reionization. We have developed a mapping-sphere-scheme, which, starting from spherically averaged temperature and density fields, uses our 1D radiative transfer code and computes the effect of each source on the IGM temperature and ionization (HII, HeII, HeIII) profiles, which are subsequently mapped onto a grid. Furthermore we have updated the 3D Monte-Carlo radiative transfer pCRASH, enabling detailed reionization simulations which take individual source characteristics into account. In the second part of this thesis we perform a reionization simulation by post-processing a smoothed-particle hydrodynamical (SPH) simulation (GADGET-2) with 3D radiative transfer (pCRASH), where the ionizing sources are modelled according to the characteristics of the stellar populations in the hydrodynamical simulation. Following the ionization fractions of hydrogen (HI) and helium (HeII, HeIII), and temperature in our simulation, we find that reionization starts at z~11 and ends at z~6, and high density regions near sources are ionized earlier than low density regions far from sources. In the third part of this thesis we couple the cosmological SPH simulation and the radiative transfer simulations with a physically motivated, self-consistent model for LAEs, in order to understand the importance of the ionization state of the IGM, the escape fraction of ionizing photons from galaxies and dust in the interstellar medium (ISM) on the visibility of LAEs. 
Comparison of our model's results with the LAE Lyman Alpha (Lya) and UV luminosity functions at z~6.6 reveals a three-dimensional degeneracy between the ionization state of the IGM, the ionizing photon escape fraction and the ISM dust distribution, which implies that LAEs act not only as tracers of reionization but also of the ionizing photon escape fraction and of the ISM dust distribution. This degeneracy does not even break down when we compare simulated with observed clustering of LAEs at z~6.6. However, our results show that reionization has the largest impact on the amplitude of the LAE angular correlation functions, and its imprints are clearly distinguishable from those of properties on galactic scales. These results show that reionization cannot be constrained tightly by exclusively using LAE observations. Further observational constraints, e.g. tomographies of the redshifted hydrogen 21cm line, are required. In addition we also use our LAE model to probe the question when a galaxy is visible as a LAE or a LBG. Within our model galaxies above a critical stellar mass can produce enough luminosity to be visible as a LBG and/or a LAE. By finding an increasing duty cycle of LBGs with Lya emission as the UV magnitude or stellar mass of the galaxy rises, our model reveals that the brightest (and most massive) LBGs most often show Lya emission. Predicting the Lya equivalent width (Lya EW) distribution and the fraction of LBGs showing Lya emission at z~6.6, we reproduce the observational trend of the Lya EWs with UV magnitude. However, the Lya EWs of the UV brightest LBGs exceed observations and can only be reconciled by accounting for an increased Lya attenuation of massive galaxies, which implies that the observed Lya brightest LAEs do not necessarily coincide with the UV brightest galaxies. 
We have analysed the dependencies of LAE observables on the properties of the galactic and intergalactic medium and the LAE-LBG connection, and this enhances our understanding of the nature of LAEs.}, language = {en} } @phdthesis{Usadel2004, author = {Usadel, Bj{\"o}rn}, title = {Untersuchungen zur Biosynthese der pflanzlichen Zellwand = [Identification and characterization of genes involved in plant cell wall synthesis]}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-2947}, school = {Universit{\"a}t Potsdam}, year = {2004}, abstract = {Even though the structure of the plant cell wall is by and large quite well characterized, its synthesis and regulation remains largely obscure. However, it is accepted that the building blocks of the polysaccharidic part of the plant cell wall are nucleotide sugars. Thus to gain more insight into the cell wall biosynthesis, in the first part of this thesis, plant genes possibly involved in the nucleotide sugar interconversion pathway were identified using a bioinformatics approach and characterized in plants, mainly in Arabidopsis. For the computational identification profile hidden Markov models were extracted from the Pfam and TIGR databases. Mainly with these, plant genes were identified facilitating the "hmmer" program. Several gene families were identified and three were further characterized, the UDP-rhamnose synthase (RHM), UDP-glucuronic acid epimerase (GAE) and the myo-inositol oxygenase (MIOX) families. For the three-membered RHM family relatively ubiquitous expression was shown using various methods. For one of these genes, RHM2, T-DNA lines could be obtained. Moreover, the transcription of the whole family was downregulated facilitating an RNAi approach. In both cases an alteration of cell wall-typical polysaccharides and developmental changes could be shown. In the case of the rhm2 mutant these were restricted to the seed or the seed mucilage, whereas the RNAi plants showed profound changes in the whole plant. 
In the case of the six-membered GAE family, the gene expressed to the highest level (GAE6) was cloned, expressed heterologously and its function was characterized. Thus, it could be shown that GAE6 encodes an enzyme responsible for the conversion of UDP-glucuronic acid to UDP-galacturonic acid. However, a change in transcript level of various GAE family members achieved by T-DNA insertions (gae2, gae5, gae6), overexpression (GAE6) or an RNAi approach, targeting the whole family, did not reveal any robust changes in the cell wall. Contrary to the other two families the MIOX gene family had to be identified using a BLAST based approach due to the lack of enough suitable candidate genes for building a hidden Markov model. An initial bioinformatic characterization was performed which will lead to further insights into this pathway. In total it was possible to identify the two gene families which are involved in the synthesis of the two pectin backbone sugars galacturonic acid and rhamnose. Moreover with the identification of the MIOX genes a gene family important for the supply of nucleotide sugar precursors was identified. In a second part of this thesis publicly available microarray datasets were analyzed with respect to co-responsive behavior of transcripts on a global basis using nearly 10,000 genes. The data has been made available to the community in the form of a database providing additional statistical and visualization tools (http://csbdb.mpimp-golm.mpg.de). Using the framework of the database to identify nucleotide sugar converting genes indicated that co-response might be used for identification of novel genes involved in cell wall synthesis based on already known genes.}, subject = {Zellwand}, language = {en} }