@phdthesis{Oeztuerk2018, author = {{\"O}zt{\"u}rk, Ugur}, title = {Learning more to predict landslides}, doi = {10.25932/publishup-42643}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426439}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 104}, year = {2018}, abstract = {Landslides are frequent natural hazards in rugged terrain, when the resisting frictional force of the surface of rupture yields to the gravitational force. These forces are functions of geological and morphological factors, such as angle of internal friction, local slope gradient or curvature, which remain static over hundreds of years; whereas more dynamic triggering events, such as rainfall and earthquakes, compromise the force balance by temporarily reducing resisting forces or adding transient loads. This thesis investigates landslide distribution and orientation due to landslide triggers (e.g. rainfall) at different scales (6-4∙10^5 km^2) and aims to link rainfall movement with the landslide distribution. It additionally explores the local impacts of the extreme rainstorms on landsliding and the role of precursory stability conditions that could be induced by an earlier trigger, such as an earthquake. Extreme rainfall is a common landslide trigger. Although several studies assessed rainfall intensity and duration to study the distribution of thus triggered landslides, only a few case studies quantified spatial rainfall patterns (i.e. orographic effect). Quantifying the regional trajectories of extreme rainfall could aid predicting landslide prone regions in Japan. To this end, I combined a non-linear correlation metric, namely event synchronization, and radial statistics to assess the general pattern of extreme rainfall tracks over distances of hundreds of kilometers using satellite based rainfall estimates. 
Results showed that, although the increase in rainfall intensity and duration positively correlates with landslide occurrence, the trajectories of typhoons and frontal storms were insufficient to explain landslide distribution in Japan. Extreme rainfall trajectories inclined northwestwards and were concentrated along certain locations, such as coastlines of southern Japan, which was unnoticed in the landslide distribution of about 5000 rainfall-triggered landslides. These landslides seemed to respond to the mean annual rainfall rates. The above-mentioned findings suggest further investigation on a more local scale to better understand the mechanistic response of landscape to extreme rainfall in terms of landslides. In May 2016, intense rainfall struck southern Germany triggering high waters and landslides. The highest damage was reported in Braunsbach, which is located on the tributary-mouth fan formed by the Orlacher Bach. Orlacher Bach is a ~3 km long creek that drains a catchment of ~6 km^2. I visited this catchment in June 2016 and mapped 48 landslides along the creek. Such high landslide activity was not reported in the nearby catchments within ~3300 km^2, despite similar rainfall intensity and duration based on weather radar estimates. My hypothesis was that several landslides were triggered by rainfall-triggered flash floods that undercut hillslope toes along the Orlacher Bach. I found that morphometric features such as slope and curvature play an important role in landslide distribution on this micro scale study site (<10 km^2). In addition, the high number of landslides along the Orlacher Bach could also be boosted by accumulated damages on hillslopes due to karst weathering over longer time scales. Precursory damages on hillslopes could also be induced by past triggering events that affect landscape evolution, but this interaction is hard to assess independently from the latest trigger. 
For example, an earthquake might influence the evolution of a landscape decades long, besides its direct impacts, such as landslides that follow the earthquake. Here I studied the consequences of the 2016 Kumamoto Earthquake (MW 7.1) that triggered some 1500 landslides in an area of ~4000 km^2 in central Kyushu, Japan. Topography, i.e. local slope and curvature, both amplified and attenuated seismic waves, thus controlling the failure mechanism of those landslides (e.g. progressive). I found that topography fails in explaining the distribution and the preferred orientation of the landslides after the earthquake; instead the landslides were concentrated around the northeast of the rupture area and faced mostly normal to the rupture plane. This preferred location of the landslides was dominated mainly by the directivity effect of the strike-slip earthquake, which is the propagation of wave energy along the fault in the rupture direction; whereas amplitude variations of the seismic radiation altered the preferred orientation. I suspect that the earthquake directivity and the asymmetry of seismic radiation damaged hillslopes at those preferred locations increasing landslide susceptibility. Hence a future weak triggering event, e.g. scattered rainfall, could further trigger landslides at those damaged hillslopes.}, language = {en} } @phdthesis{Zur2018, author = {Zur, Avichai}, title = {פרדוקס 'הידיעה והבחירה' במשנת ר' צדוק הכהן מלובלין}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412201}, school = {Universit{\"a}t Potsdam}, pages = {371, xxxiii}, year = {2018}, abstract = {This research deals with R. Zadok's innovative writings. According to most scholars R. Zadok continues the doctrine of his rabbi R. 
Mordechai Yosef Leiner of Izbica and introduces existential fatalism: man has the freedom to act against the customary law, according to God's will which is revealed in his heart, even in his passions, which exceed the Halacha; however, man does not determine the will that is revealed in the root of his soul, but only uncovers it. Many expressions, in content and form, of this fatalism can be found in R. Zadok's writings; yet, he also introduces many remarks about human free choice and its creative power to establish and determine man's root and re-establish and influence the Divine worlds and this world. This research focuses on these passages whose centrality has been so far neglected by research. Hence, its renewed understanding of his doctrine. R. Zadok's position can be explained by means of the paradoxical perception, which claims the full strength of the two opposites and even a mutual influence between them, which creates a difficult yet fertile tension: as opposed to the a priori intellectual and formal process of fatalism where Divine Foreknowledge eliminates free choice, R. Zadok introduces fatality which identifies in reality itself the Divine foreknowledge and will that exist in all things. In R. Zadok's fatality, Divine foreknowledge does not eliminate free choice; on the contrary, without Divine foreknowledge, nothing has existence. Therefore, only the existence of Divine foreknowledge within free choice enables its true and indeed free fulfillment. R. Zadok's ontological perception is realized not only in the direct content of his words but also indirectly, in the method of his exegesis and the sense he gives to the concepts he discusses. Therefore the fatality is revealed in other areas in which there is a gap between the absolute dimension ('Yedi'ah') and the contingent dimension ('Bechirah'): the lie, imagination, evil, sin, suffering etc., indeed contingent compared with the absoluteness of truth, good, etc.; however, according to R. 
Zadok, God wants them as such - that is to say, they have an essential existence and actuality that are not absolute, but rather as such: as contingent, temporary and relative. However, these essential existence and actuality do not confirm them as they are but create a transformation within them. For example, evil does not become absolute good or remains evil but rather turns into 'Very Good' (Tov Me'od) which includes evil and according to Rabbi Zadok is greater than regular good. From this, rises also the opposite influence: that of free choice, or contingent Bechira in general, on Divine Foreknowledge, or absolute Yediah in general. According to R. Zadok, when the contingency and relativity of "Bechirah" received its essential existence and actuality, it has the power, so to speak, to add dynamism to the permanent Divine absoluteness of the 'Yedi'ah': the affliction (Nega; נגע) of sin or suffering turns by itself, by interchanging its letters, into pleasure (Oneg; ענג) which is greater than regular delight. Man has the power to influence the upper worlds by decreeing decrees or by canceling Divine decrees; he also has the power to influence the daily novelties of this world, by the novellae of the Oral Law and the sanctification of the new month which is capable of changing the movement of the zodiac. The human creativity of the novellae of the Oral Law is included in the Divine truth which is hidden in the Written Law and it only reveals and interprets the latter; but on the other hand, according to R. Zadok, the source of the dynamic vital novellae of the Oral Law is higher than the source of the permanent absoluteness of Written Law and they are those which create and determine the latter. R. 
Zadok introduces two main concepts for his paradox: On the ontological perception - Yichud Gamur, the Ultimate Unification of God, in which the contingent duality (between God and His creation, and the choice it enables) of the Lower Unification paradoxically exists with the absolute unification and Divine Foreknowledge of the Upper Unification. On the perception of man's existential situation - HaShoresh HaNe'elam, The Hidden Root: unlike Rabbi Mordechai Yosef, his rabbi, rabbi Zadok claims that man determines his destiny by his willful contingent actions - yet, simultaneously, like his rabbi, he also claims that the man's permanent root is determined by God and His Foreknowledge and it is what determines his actions for better or worse. But above these R. Zadok points to an additional Hidden Root which is higher than the permanent one: it is indeed an absolute root (Yediah) yet is determined and established by man's willful actions (Bechira), similar to the Divine creation of 'ex nihilo'.}, language = {mul} } @phdthesis{Zimmermann2018, author = {Zimmermann, Marc}, title = {Multifunctional patchy silica particles via microcontact printing}, doi = {10.25932/publishup-42773}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-427731}, school = {Universit{\"a}t Potsdam}, pages = {IX, 121, xiii}, year = {2018}, abstract = {This research addressed the question, if it is possible to simplify current microcontact printing systems for the production of anisotropic building blocks or patchy particles, by using common chemicals while still maintaining reproducibility, high precision and tunability of the Janus-balance Chapter 2 introduced the microcontact printing materials as well as their defined electrostatic interactions. In particular polydimethylsiloxane stamps, silica particles and high molecular weight polyethylenimine ink were mainly used in this research. 
All of these components are commercially available in large quantities and affordable, which gives this approach a huge potential for further up-scaling developments. The benefits of polymeric over molecular inks were described, including their flexible influence on the printing pressure. With this alteration of the µCP concept, a new method of solvent assisted particle release mechanism enabled the switch from two-dimensional surface modification to three-dimensional structure printing on colloidal silica particles, without changing printing parameters or starting materials. This effect opened the way to use the internal volume of the achieved patches for incorporation of nano additives, introducing additional physical properties into the patches without alteration of the surface chemistry. The success of this system and its achievable range was further investigated in chapter 3 by giving detailed information about patch geometry parameters including diameter, thickness and yield. For this purpose, silica particles in a size range between 1µm and 5µm were printed with different ink concentrations to change the Janus-balance of these single patched particles. A necessary intermediate step, consisting of air-plasma treatment, for the production of trivalent particles using "sandwich" printing was discovered and comparative studies concerning the patch geometry of single and double patched particles were conducted. Additionally, the usage of structured PDMS stamps during printing was described. These results demonstrate the excellent precision of this approach and open the pathway for even greater accuracy as further parameters can be finely tuned and investigated, e.g. humidity and temperature during stamp loading. The performance of these synthesized anisotropic colloids was further investigated in chapter 4, starting with behaviour studies in alcoholic and aqueous dispersions. 
Here, the stability of the applied patches was studied in a broad pH range, discovering a release mechanism by disabling the electrostatic bonding between particle surface and polyelectrolyte ink. Furthermore, the absence of strong attractive forces between divalent particles in water was investigated using XPS measurements. These results lead to the conclusion that the transfer of small PDMS oligomers onto the patch surface is shielding charges, preventing colloidal agglomeration. However, based on this knowledge, further patch modifications for particle self-assembly were introduced including physical approaches using magnetic nano additives, chemical patch functionalization with avidin-biotin or the light responsive cyclodextrin-arylazopyrazoles coupling as well as particle surface modification for the synthesis of highly amphiphilic colloids. The successful coupling, its efficiency, stability and behaviour in different solvents were evaluated to find a suitable coupling system for future assembly experiments. Based on these results the possibility of more sophisticated structures by colloidal self-assembly is given. Certain findings needed further analysis to understand their underlying mechanics, including the relatively broad patch diameter distribution and the decreasing patch thickness for smaller silica particles. Mathematical assumptions for both effects are introduced in chapter 5. First, they demonstrate the connection between the naturally occurring particle size distribution and the broadening of the patch diameter, indicating an even higher precision for this µCP approach. Second, explaining the increase of contact area between particle and ink surface due to higher particle packaging, leading to a decrease in printing pressure for smaller particles. 
These calculations ultimately lead to the development of a new mechanical microcontact printing approach, using centrifugal forces for high pressure control and excellent parallel alignment of printing substrates. First results with this device and the comparison with previously conducted by-hand experiments conclude this research. It furthermore displays the advantages of such a device for future applications using a mechanical printing approach, especially for accessing even smaller nano particles with great precision and excellent yield. In conclusion, this work demonstrates the successful adjustment of the µCP approach using commercially available and affordable silica particles and polyelectrolytes for high flexibility, reduced costs and higher scale-up value. Furthermore, it was possible to increase the modification potential by introducing three-dimensional patches for additional functionalization volume. While keeping a high colloidal stability, different coupling systems showed the self-assembly capabilities of this toolbox for anisotropic particles.}, language = {en} } @phdthesis{Zakarias2018, author = {Zakari{\'a}s, Lilla}, title = {Transfer effects after working memory training in post-stroke aphasia}, doi = {10.25932/publishup-42360}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-423600}, school = {Universit{\"a}t Potsdam}, pages = {178}, year = {2018}, abstract = {Background: Individuals with aphasia after stroke (IWA) often present with working memory (WM) deficits. Research investigating the relationship between WM and language abilities has led to the promising hypothesis that treatments of WM could lead to improvements in language, a phenomenon known as transfer. 
Although recent treatment protocols have been successful in improving WM, the evidence to date is scarce and the extent to which improvements in trained tasks of WM transfer to untrained memory tasks, spoken sentence comprehension, and functional communication is yet poorly understood. Aims: We aimed at (a) investigating whether WM can be improved through an adaptive n-back training in IWA (Study 1-3); (b) testing whether WM training leads to near transfer to unpracticed WM tasks (Study 1-3), and far transfer to spoken sentence comprehension (Study 1-3), functional communication (Study 2-3), and memory in daily life in IWA (Study 2-3); and (c) evaluating the methodological quality of existing WM treatments in IWA (Study 3). To address these goals, we conducted two empirical studies - a case-control study with Hungarian speaking IWA (Study 1) and a multiple baseline study with German speaking IWA (Study 2) - and a systematic review (Study 3). Methods: In Study 1 and 2 participants with chronic, post-stroke aphasia performed an adaptive, computerized n-back training. 'Adaptivity' was implemented by adjusting the tasks' difficulty level according to the participants' performance, ensuring that they always practiced at an optimal level of difficulty. To assess the specificity of transfer effects and to better understand the underlying mechanisms of transfer on spoken sentence comprehension, we included an outcome measure testing specific syntactic structures that have been proposed to involve WM processes (e.g., non-canonical structures with varying complexity). Results: We detected a mixed pattern of training and transfer effects across individuals: five participants out of six significantly improved in the n-back training. Our most important finding is that all six participants improved significantly in spoken sentence comprehension (i.e., far transfer effects). 
In addition, we also found far transfer to functional communication (in two participants out of three in Study 2) and everyday memory functioning (in all three participants in Study 2), and near transfer to unpracticed n-back tasks (in four participants out of six). Pooled data analysis of Study 1 and 2 showed a significant negative relationship between initial spoken sentence comprehension and the amount of improvement in this ability, suggesting that the more severe the participants' spoken sentence comprehension deficit was at the beginning of training, the more they improved after training. Taken together, we detected both near and far transfer effects in our studies, but the effects varied across participants. The systematic review evaluating the methodological quality of existing WM treatments in stroke IWA (Study 3) showed poor internal and external validity across the included 17 studies. Poor internal validity was mainly due to use of inappropriate design, lack of randomization of study phases, lack of blinding of participants and/or assessors, and insufficient sampling. Low external validity was mainly related to incomplete information on the setting, lack of use of appropriate analysis or justification for the suitability of the analysis procedure used, and lack of replication across participants and/or behaviors. Results in terms of WM, spoken sentence comprehension, and reading are promising, but further studies with more rigorous methodology and stronger experimental control are needed to determine the beneficial effects of WM intervention. Conclusions: Results of the empirical studies suggest that WM can be improved with a computerized and adaptive WM training, and improvements can lead to transfer effects to spoken sentence comprehension and functional communication in some individuals with chronic post-stroke aphasia. 
The fact that improvements were not specific to certain syntactic structures (i.e., non-canonical complex sentences) in spoken sentence comprehension suggests that WM is not involved in the online, automatic processing of syntactic information (i.e., parsing and interpretation), but plays a more general role in the later stage of spoken sentence comprehension (i.e., post-interpretive comprehension). The individual differences in treatment outcomes call for future research to clarify how far these results are generalizable to the population level of IWA. Future studies are needed to identify a few mechanisms that may generalize to at least a subpopulation of IWA as well as to investigate baseline non-linguistic cognitive and language abilities that may play a role in transfer effects and the maintenance of such effects. These may require larger yet homogeneous samples.}, language = {en} } @phdthesis{Xiong2018, author = {Xiong, Tao}, title = {Vibrationally resolved absorption, emission, resonance Raman and photoelectron spectra of selected organic molecules, associated radicals and cations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418105}, school = {Universit{\"a}t Potsdam}, pages = {iv, 100}, year = {2018}, abstract = {Time-dependent correlation function based methods to study optical spectroscopy involving electronic transitions can be traced back to the work of Heller and coworkers. This intuitive methodology can be expected to be computationally efficient and is applied in the current work to study the vibronic absorption, emission, and resonance Raman spectra of selected organic molecules. Besides, the "non-standard" application of this approach to photoionization processes is also explored. The application section consists of four chapters as described below. In Chapter 4, the molar absorptivities and vibronic absorption/emission spectra of perylene and several of its N-substituted derivatives are investigated. 
By systematically varying the number and position of N atoms, it is shown that the presence of nitrogen heteroatoms has a negligible effect on the molecular structure and geometric distortions upon electronic transitions, while spectral properties are more sensitive: In particular the number of N atoms is important while their position is less decisive. Thus, N-substitution can be used to fine-tune the optical properties of perylene-based molecules. In Chapter 5, the same methods are applied to study the vibronic absorption/emission and resonance Raman spectra of a newly synthesized donor-acceptor type molecule. The simulated absorption/emission spectra agree fairly well with experimental data, with discrepancies being attributed to solvent effects. Possible modes which may dominate the fine-structure in the vibronic spectra are proposed by analyzing the correlation function with the aid of Raman and resonance Raman spectra. In the next two chapters, besides the above types of spectra, the methods are extended to study photoelectron spectra of several small diamondoid-related systems (molecules, radicals, and cations). Comparison of the photoelectron spectra with available experimental data suggests that the correlation function based approach can describe ionization processes reasonably well. Some of these systems, cationic species in particular, exhibit somewhat peculiar optical behavior, which presents them as possible candidates for functional devices. Correlation function based methods in a more general sense can be very versatile. In fact, besides the above radiative processes, formulas for non-radiative processes such as internal conversion have been derived in literature. 
Further implementation of the available methods is among our next goals.}, language = {en} } @phdthesis{Wulff2018, author = {Wulff, Alexander}, title = {Essays in macroeconomics and financial market imperfections}, doi = {10.25932/publishup-42995}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429956}, school = {Universit{\"a}t Potsdam}, pages = {X, 142}, year = {2018}, abstract = {This dissertation consists of four self-contained papers that deal with the implications of financial market imperfections and heterogeneity. The analysis mainly relates to the class of incomplete-markets models but covers different research topics. The first paper deals with the distributional effects of financial integration for developing countries. Based on a simple heterogeneous-agent approach, it is shown that capital owners experience large welfare losses while only workers moderately gain due to higher wages. The large welfare losses for capital owners contrast with the small average welfare gains from representative-agent economies and indicate that a strong opposition against capital market opening has to be expected. The second paper considers the puzzling observation of capital flows from poor to rich countries and the accompanying changes in domestic economic development. Motivated by the mixed results from the literature, we employ an incomplete-markets model with different types of idiosyncratic risk and borrowing constraints. Based on different scenarios, we analyze under what conditions the presence of financial market imperfections contributes to explain the empirical findings and how the conditions may change with different model assumptions. The third paper deals with the interplay of incomplete information and financial market imperfections in an incomplete-markets economy. In particular, it analyzes the impact of incomplete information about idiosyncratic income shocks on aggregate saving. 
The results show that the effect of incomplete information is not only quantitatively substantial but also qualitatively ambiguous and varies with the influence of the income risk and the borrowing constraint. Finally, the fourth paper analyzes the influence of different types of fiscal rules on the response of key macroeconomic variables to a government spending shock. We find that a strong temporary increase in public debt contributes to stabilizing consumption and leisure in the first periods following the change in government spending, whereas a non-debt-intensive fiscal rule leads to a faster recovery of consumption, leisure, capital and output in later periods. Regarding optimal debt policy, we find that a debt-intensive fiscal rule leads to the largest aggregate welfare benefit and that the individual welfare gain is particularly high for wealth-poor agents.}, language = {en} } @phdthesis{Witt2018, author = {Witt, Tanja Ivonne}, title = {Camera Monitoring at volcanoes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-421073}, school = {Universit{\"a}t Potsdam}, pages = {viii, 140}, year = {2018}, abstract = {Basaltic fissure eruptions, such as on Hawai'i or on Iceland, are thought to be driven by the lateral propagation of feeder dikes and graben subsidence. Associated solid earth processes, such as deformation and structural development, are well studied by means of geophysical and geodetic technologies. The eruptions themselves, lava fountaining and venting dynamics, in turn, have been much less investigated due to hazardous access, local dimension, fast processes, and resulting poor data availability. This thesis provides a detailed quantitative understanding of the shape and dynamics of lava fountains and the morphological changes at their respective eruption sites. 
For this purpose, I apply image processing techniques, including drones and fixed installed cameras, to the sequence of frames of video records from two well-known fissure eruptions in Hawai'i and Iceland. This way I extract the dimensions of multiple lava fountains, visible in all frames. By putting these results together and considering the acquisition times of the frames I quantify the variations in height, width and eruption velocity of the lava fountains. Then I analyse these time-series in both time and frequency domains and investigate the similarities and correlations between adjacent lava fountains. Following this procedure, I am able to link the dynamics of the individual lava fountains to physical parameters of the magma transport in the feeder dyke of the fountains. The first case study in this thesis focuses on the March 2011 Pu'u'O'o eruption, Hawai'i, where a continuous pulsating behaviour at all eight lava fountains has been observed. The lava fountains, even those from different parts of the fissure that are closely connected, show a similar frequency content and eruption behaviour. The regular pattern in the heights of lava fountain suggests a controlling process within the magma feeder system like a hydraulic connection in the underlying dyke, affecting or even controlling the pulsating behaviour. The second case study addresses the 2014-2015 Holuhraun fissure eruption, Iceland. In this case, the feeder dyke is highlighted by the surface expressions of graben-like structures and fault systems. At the eruption site, the activity decreases from a continuous line of fire of ~60 vents to a limited number of lava fountains. This can be explained by preferred upwards magma movements through vertical structures of the pre-eruptive morphology. Seismic tremors during the eruption reveal vent opening at the surface and/or pressure changes in the feeder dyke. 
The evolving topography of the cinder cones during the eruption interacts with the lava fountain behaviour. Local variations in the lava fountain height and width are controlled by the conduit diameter, the depth of the lava pond and the shape of the crater. Modelling of the fountain heights shows that long-term eruption behaviour is controlled mainly by pressure changes in the feeder dyke. This research consists of six chapters with four papers, including two first author and two co-author papers. It establishes a new method to analyse lava fountain dynamics by video monitoring. The comparison with the seismicity, geomorphologic and structural expressions of fissure eruptions shows a complex relationship between focussed flow through dykes, the morphology of the cinder cones, and the lava fountain dynamics at the vents of a fissure eruption.}, language = {en} } @phdthesis{Westbury2018, author = {Westbury, Michael V.}, title = {Unraveling evolution through Next Generation Sequencing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-409981}, school = {Universit{\"a}t Potsdam}, pages = {129}, year = {2018}, abstract = {The sequencing of the human genome in the early 2000s led to an increased interest in cheap and fast sequencing technologies. This interest culminated in the advent of next generation sequencing (NGS). A number of different NGS platforms have arisen since then all promising to do the same thing, i.e. produce large amounts of genetic information for relatively low costs compared to more traditional methods such as Sanger sequencing. The capabilities of NGS meant that researchers were no longer bound to species for which a lot of previous work had already been done (e.g. model organisms and humans) enabling a shift in research towards more novel and diverse species of interest. This capability has greatly benefitted many fields within the biological sciences, one of which being the field of evolutionary biology. 
Researchers have begun to move away from the study of laboratory model organisms to wild, natural populations and species which has greatly expanded our knowledge of evolution. NGS boasts a number of benefits over more traditional sequencing approaches. The main benefit comes from the capability to generate information for drastically more loci for a fraction of the cost. This is hugely beneficial to the study of wild animals as, even when large numbers of individuals are unobtainable, the amount of data produced still allows for accurate, reliable population and species level results from a small selection of individuals. The use of NGS to study species for which little to no previous research has been carried out and the production of novel evolutionary information and reference datasets for the greater scientific community were the focuses of this thesis. Two studies in this thesis focused on producing novel mitochondrial genomes from shotgun sequencing data through iterative mapping, bypassing the need for a close relative to serve as a reference sequence. These mitochondrial genomes were then used to infer species level relationships through phylogenetic analyses. The first of these studies involved reconstructing a complete mitochondrial genome of the bat-eared fox (Otocyon megalotis). Phylogenetic analyses of the mitochondrial genome confidently placed the bat-eared fox as sister to the clade consisting of the raccoon dog and true foxes within the Canidae family. The next study also involved reconstructing a mitochondrial genome but in this case from the extinct Macrauchenia of South America. As this study utilised ancient DNA, it involved a lot of parameter testing, quality controls and strict thresholds to obtain a near complete mitochondrial genome devoid of contamination known to plague ancient DNA studies. 
Phylogenetic analyses confidently placed Macrauchenia as sister to all living representatives of Perissodactyla with a divergence time of ~66 million years ago. The third and final study of this thesis involved de novo assemblies of both nuclear and mitochondrial genomes from brown and striped hyena and focussed on demographic, genetic diversity and population genomic analyses within the brown hyena. Previous studies of the brown hyena hinted at very low levels of genomic diversity and, perhaps due to this, were unable to find any notable population structure across its range. By incorporating a large number of genetic loci, in the form of complete nuclear genomes, population structure within the brown hyena was uncovered. On top of this, genomic diversity levels were compared to a number of other species. Results showed the brown hyena to have the lowest genomic diversity out of all species included in the study which was perhaps caused by a continuous and ongoing decline in effective population size that started about one million years ago and dramatically accelerated towards the end of the Pleistocene. The studies within this thesis show the power NGS sequencing has and its utility within evolutionary biology. The most notable capabilities outlined in this thesis involve the study of species for which no reference data is available and in the production of large amounts of data, providing evolutionary answers at the species and population level that data produced using more traditional techniques simply could not.}, language = {en} } @phdthesis{Wendi2018, author = {Wendi, Dadiyorto}, title = {Recurrence Plots and Quantification Analysis of Flood Runoff Dynamics}, doi = {10.25932/publishup-43191}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-431915}, school = {Universit{\"a}t Potsdam}, pages = {114}, year = {2018}, abstract = {This paper introduces a novel measure to assess similarity between event hydrographs. 
It is based on Cross Recurrence Plots and Recurrence Quantification Analysis which have recently gained attention in a range of disciplines when dealing with complex systems. The method attempts to quantify the event runoff dynamics and is based on the time delay embedded phase space representation of discharge hydrographs. A phase space trajectory is reconstructed from the event hydrograph, and pairs of hydrographs are compared to each other based on the distance of their phase space trajectories. Time delay embedding allows considering the multi-dimensional relationships between different points in time within the event. Hence, the temporal succession of discharge values is taken into account, such as the impact of the initial conditions on the runoff event. We provide an introduction to Cross Recurrence Plots and discuss their parameterization. An application example based on flood time series demonstrates how the method can be used to measure the similarity or dissimilarity of events, and how it can be used to detect events with rare runoff dynamics. It is argued that this methods provides a more comprehensive approach to quantify hydrograph similarity compared to conventional hydrological signatures.}, language = {en} } @phdthesis{Weiss2018, author = {Weiß, Katharina}, title = {Three Essays on EFRAG}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415355}, school = {Universit{\"a}t Potsdam}, pages = {II, 180}, year = {2018}, abstract = {This cumulative doctoral thesis consists of three papers that deal with the role of one specific European accounting player in the international accounting standard-setting, namely the European Financial Reporting Advisory Group (EFRAG). The first paper examines whether and how EFRAG generally fulfills its role in articulating Europe's interests toward the International Accounting Standards Board (IASB). 
The qualitative data from the conducted interviews reveal that EFRAG influences the IASB's decision making at a very early stage, long before other constituents are officially asked to comment on the IASB's proposals. The second paper uses quantitative data and investigates the formal participation behavior of European constituents that seek to determine EFRAG's voice. More precisely, this paper analyzes the nature of the constituents' participation in EFRAG's due process in terms of representation (constituent groups and geographical distribution) and the drivers of their participation behavior. EFRAG's official decision making process is dominated by some specific constituent groups (such as preparers and the accounting profession) and by constituents from some specific countries (e.g. those with effective enforcement regimes). The third paper investigates in a first step who of the European constituents choose which lobbying channel (participation only at IASB, only at EFRAG, or at both institutions) and unveils in a second step possible reasons for their lobbying choices. The paper comprises quantitative and qualitative data. It reveals that English skills, time issues, the size of the constituent, and the country of origin are factors that can explain why the majority participates only in the IASB's due process.}, language = {en} }