@phdthesis{Zajnulina2015, author = {Zajnulina, Marina}, title = {Optical frequency comb generation in optical fibres}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-88776}, school = {Universit{\"a}t Potsdam}, pages = {xii, 103}, year = {2015}, abstract = {Optical frequency combs (OFC) constitute an array of phase-correlated equidistant spectral lines with nearly equal intensities over a broad spectral range. The adaptations of combs generated in mode-locked lasers proved to be highly efficient for the calibration of high-resolution (resolving power > 50000) astronomical spectrographs. The observation of different galaxy structures or the studies of the Milky Way are done using instruments in the low- and medium resolution range. To such instruments belong, for instance, the Multi Unit Spectroscopic Explorer (MUSE) being developed for the Very Large Telescope (VLT) of the European Southern Observatory (ESO) and the 4-metre Multi-Object Spectroscopic Telescope (4MOST) being in development for the ESO VISTA 4.1 m Telescope. The existing adaptations of OFC from mode-locked lasers are not resolvable by these instruments. Within this work, a fibre-based approach for generation of OFC specifically in the low- and medium resolution range is studied numerically. This approach consists of three optical fibres that are fed by two equally intense continuous-wave (CW) lasers. The first fibre is a conventional single-mode fibre, the second one is a suitably pumped amplifying Erbium-doped fibre with anomalous dispersion, and the third one is a low-dispersion highly nonlinear optical fibre. The evolution of a frequency comb in this system is governed by the following processes: as the two initial CW-laser waves with different frequencies propagate through the first fibre, they generate an initial comb via a cascade of four-wave mixing processes. 
The frequency components of the comb are phase-correlated with the original laser lines and have a frequency spacing that is equal to the initial laser frequency separation (LFS), i.e. the difference in the laser frequencies. In the time domain, a train of pre-compressed pulses with widths of a few pico-seconds arises out of the initial bichromatic deeply-modulated cosine-wave. These pulses undergo strong compression in the subsequent amplifying Erbium-doped fibre: sub-100 fs pulses with broad OFC spectra are formed. In the following low-dispersion highly nonlinear fibre, the OFC experience a further broadening and the intensity of the comb lines are fairly equalised. This approach was mathematically modelled by means of a Generalised Nonlinear Schr{\"o}dinger Equation (GNLS) that contains terms describing the nonlinear optical Kerr effect, the delayed Raman response, the pulse self-steepening, and the linear optical losses as well as the wavelength-dependent Erbium gain profile for the second fibre. The initial condition equation being a deeply-modulated cosine-wave mimics the radiation of the two initial CW lasers. The numerical studies are performed with the help of Matlab scripts that were specifically developed for the integration of the GNLS and the initial condition according to the proposed approach for the OFC generation. The scripts are based on the Fourth-Order Runge-Kutta in the Interaction Picture Method (RK4IP) in combination with the local error method. This work includes the studies and results on the length optimisation of the first and the second fibre depending on different values of the group-velocity dispersion of the first fibre. Such length optimisation studies are necessary because the OFC have the biggest possible broadband and exhibit a low level of noise exactly at the optimum lengths. 
Further, the optical pulse build-up in the first and the second fibre was studied by means of the numerical technique called Soliton Radiation Beat Analysis (SRBA). It was shown that a common soliton crystal state is formed in the first fibre for low laser input powers. The soliton crystal continuously dissolves into separated optical solitons as the input power increases. The pulse formation in the second fibre is critically dependent on the features of the pulses formed in the first fibre. I showed that, for low input powers, an adiabatic soliton compression delivering low-noise OFC occurs in the second fibre. At high input powers, the pulses in the first fibre have more complicated structures which leads to the pulse break-up in the second fibre with a subsequent degradation of the OFC noise performance. The pulse intensity noise studies that were performed within the framework of this thesis allow making statements about the noise performance of an OFC. They showed that the intensity noise of the whole system decreases with the increasing value of LFS.}, language = {en} } @phdthesis{Prokhorov2015, author = {Prokhorov, Boris E.}, title = {High-latitude coupling processes between thermospheric circulation and solar wind driven magnetospheric currents and plasma convection}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-92353}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2015}, abstract = {The high-latitudinal thermospheric processes driven by the solar wind and Interplanetary Magnetic Field (IMF) interaction with the Earth magnetosphere are highly variable parts of the complex dynamic plasma environment, which represent the coupled Magnetosphere - Ionosphere - Thermosphere (MIT) system. The solar wind and IMF interactions transfer energy to the MIT system via reconnection processes at the magnetopause. The Field Aligned Currents (FACs) constitute the energetic links between the magnetosphere and the Earth ionosphere. 
The MIT system depends on the highly variable solar wind conditions, in particular on changes of the strength and orientation of the IMF. In my thesis, I perform an investigation on the physical background of the complex MIT system using the global physical - numerical, three-dimensional, time-dependent and self-consistent Upper Atmosphere Model (UAM). This model describes the thermosphere, ionosphere, plasmasphere and inner magnetosphere as well as the electrodynamics of the coupled MIT system for the altitudinal range from 80 (60) km up to 15 Earth radii. In the present study, I developed and investigated several variants of the high-latitudinal electrodynamic coupling by including the IMF dependence of FACs into the UAM model. For testing, the various variants were applied to simulations of the coupled MIT system for different seasons, geomagnetic activities, various solar wind and IMF conditions. Additionally, these variants of the theoretical model with the IMF dependence were compared with global empirical models. The modelling results for the most important thermospheric parameters like neutral wind and mass density were compared with satellite measurements. The variants of the UAM model with IMF dependence show a good agreement with the satellite observations. In comparison with the empirical models, the improved variants of the UAM model reproduce more realistic meso-scale structures and dynamics of the coupled MIT system than the empirical models, in particular at high latitudes. The new configurations of the UAM model with IMF dependence contribute to the improvement of space weather prediction.}, language = {en} } @phdthesis{Papendiek2015, author = {Papendiek, Franka}, title = {Fodder legumes for Green Biorefineries}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-87198}, school = {Universit{\"a}t Potsdam}, pages = {XI, 111}, year = {2015}, abstract = {Peak oil is forcing our society to shift from fossil to renewable resources. 
However, such renewable resources are also scarce, and they too must be used in the most efficient and sustainable way possible. Biorefining is a concept that represents both resource efficiency and sustainability. This approach initiates a cascade use, which means food and feed production before material use, and an energy-related use at the end of the value-added chain. However, sustainability should already start in the fields, on the agricultural side, where the industrially-used biomass is produced. Therefore, the aim of my doctoral thesis is to analyse the sustainable feedstock supply for biorefineries. In contrast to most studies on biorefineries, I focus on the sustainable provision of feedstock and not on the bioengineering processing of whatever feedstock is available. Grasslands provide a high biomass potential. They are often inefficiently used, so a new utilisation concept based on the biorefining approach can increase the added value from grasslands. Fodder legumes from temporary and permanent grasslands were chosen for this study. Previous research shows that they are a promising feedstock for industrial uses, and their positive environmental impact is an important byproduct to promote sustainable agricultural production systems. Green Biorefineries are a class of biorefineries that use fresh green biomass, such as grasses or fodder legumes, as feedstock. After fractionation, an organic solution (press juice) forms; this is used for the production of organic acids, chemicals and extracts, as well as fertilisers. A fibre component (press cake) is also created to produce feed, biomaterials and biogas. This thesis examines a specific value chain, using alfalfa and clover/grass as feedstock and generating lactic acid and one type of cattle feed from it. The research question is if biomass production needs to be adapted for the utilisation of fodder legumes in the Green Biorefinery approach. 
I have attempted to give a holistic analysis of cultivation, processing and utilisation of two specific grassland crops. Field trials with alfalfa and clover/grass at different study sites were carried out to obtain information on biomass quality and quantity depending on the crop, study site and harvest time. The fresh biomass was fractionated with a screw press and the composition of press juices and cakes was analysed. Fermentation experiments took place to determine the usability of press juices for lactic acid production. The harvest time is not of high importance for the quality of press juices as a fermentation medium. For permanent grasslands, late cuts, often needed for reasons of nature conservation, are possible without a major influence on feedstock quality. The press cakes were silaged for feed-value determination. Following evidence that both intermediate products are suitable feedstocks in the Green Biorefinery approach, I developed a cost-benefit analysis, comparing different production scenarios on a farm. Two standard crop rotations for Brandenburg, producing either only market crops or market crops and fodder legumes for ruminant feed production, were compared to a system that uses the cultivated fodder legumes for the Green Biorefinery value chain instead of only feed production. Timely processing of the raw material is important to maintain quality for industrial uses, so on-site processing at the farm is assumed in the Green Biorefinery scenario. As a result, more added value stays in the rural area. Two farm sizes, common for many European regions, were chosen to examine the influence of scale. The cost side of farmers has also been analysed in detail to assess which farm characteristics make production of press juices for biochemical industries viable. Results show that for large farm sizes in particular, the potential profits are high. Additionally, the wider spectrum of marketable products generates new sources of income for farmers. 
The holistic analysis of the supply chain provides evidence that the cultivation processes for fodder legumes do not need to be adapted for use in Green Biorefineries. In fact, the new utilisation approach even widens the cultivation and processing spectrum and can increase economic viability of fodder legume production in conventional farming.}, language = {en} } @phdthesis{Cattania2015, author = {Cattania, Camilla}, title = {Improvement of aftershock models based on Coulomb stress changes and rate-and-state dependent friction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-87097}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 123}, year = {2015}, abstract = {Earthquake clustering has proven the most useful tool to forecast changes in seismicity rates in the short and medium term (hours to months), and efforts are currently being made to extend the scope of such models to operational earthquake forecasting. The overarching goal of the research presented in this thesis is to improve physics-based earthquake forecasts, with a focus on aftershock sequences. Physical models of triggered seismicity are based on the redistribution of stresses in the crust, coupled with the rate-and-state constitutive law proposed by Dieterich to calculate changes in seismicity rate. Models of this type are known as Coulomb rate-and-state (CRS) models. In spite of the success of the Coulomb hypothesis, CRS models typically performed poorly in comparison to statistical ones, and they have been underrepresented in the operational forecasting context. In this thesis, I address some of these issues, and in particular these questions: (1) How can we realistically model the uncertainties and heterogeneity of the mainshock stress field? (2) What is the effect of time dependent stresses in the postseismic phase on seismicity? I focus on two case studies from different tectonic settings: the Mw 9.0 Tohoku megathrust and the Mw 6.0 Parkfield strike slip earthquake. 
I study aleatoric uncertainties using a Monte Carlo method. I find that the existence of multiple receiver faults is the most important source of intrinsic stress heterogeneity, and CRS models perform better when this variability is taken into account. Epistemic uncertainties inherited from the slip models also have a significant impact on the forecast, and I find that an ensemble model based on several slip distributions outperforms most individual models. I address the role of postseismic stresses due to aseismic slip on the mainshock fault (afterslip) and to the redistribution of stresses by previous aftershocks (secondary triggering). I find that modeling secondary triggering improves model performance. The effect of afterslip is less clear, and difficult to assess for near-fault aftershocks due to the large uncertainties of the afterslip models. Off-fault events, on the other hand, are less sensitive to the details of the slip distribution: I find that following the Tohoku earthquake, afterslip promotes seismicity in the Fukushima region. To evaluate the performance of the improved CRS models in a pseudo-operational context, I submitted them for independent testing to a collaborative experiment carried out by CSEP for the 2010-2012 Canterbury sequence. Preliminary results indicate that physical models generally perform well compared to statistical ones, suggesting that CRS models may have a role to play in the future of operational forecasting. To facilitate efforts in this direction, and to enable future studies of earthquake triggering by time dependent processes, I have made the code open source. 
In the final part of this thesis I summarize the capabilities of the program and outline technical aspects regarding performance and parallelization strategies.}, language = {en} } @phdthesis{Hildebrandt2015, author = {Hildebrandt, Dominik}, title = {The HI Lyman-alpha opacity at redshift 2.7 < z < 3.6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78355}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 292}, year = {2015}, abstract = {Most of the baryonic matter in the Universe resides in a diffuse gaseous phase in-between galaxies consisting mostly of hydrogen and helium. This intergalactic medium (IGM) is distributed in large-scale filaments as part of the overall cosmic web. The luminous extragalactic objects that we can observe today, such as galaxies and quasars, are surrounded by the IGM in the most dense regions within the cosmic web. The radiation of these objects contributes to the so-called ultraviolet background (UVB) which keeps the IGM highly ionized ever since the epoch of reionization. Measuring the amount of absorption due to intergalactic neutral hydrogen (HI) against extragalactic background sources is a very useful tool to constrain the energy input of ionizing sources into the IGM. Observations suggest that the HI Lyman-alpha effective optical depth, τ_eff, decreases with decreasing redshift, which is primarily due to the expansion of the Universe. However, some studies find a smaller value of the effective optical depth than expected at the specific redshift z~3.2, possibly related to the complete reionization of helium in the IGM and a hardening of the UVB. The detection and possible cause of a decrease in τ_eff at z~3.2 is controversially debated in the literature and the observed features need further explanation. 
To better understand the properties of the mean absorption at high redshift and to provide an answer for whether the detection of a τ_eff feature is real we study 13 high-resolution, high signal-to-noise ratio quasar spectra observed with the Ultraviolet and Visual Echelle Spectrograph (UVES) at the Very Large Telescope (VLT). The redshift evolution of the effective optical depth, τ_eff(z), is measured in the redshift range 2.7≤z≤3.6. The influence of metal absorption features is removed by performing a comprehensive absorption-line-fitting procedure. In the first part of the thesis, a line-parameter analysis of the column density, N, and Doppler parameter, b, of ≈7500 individually fitted absorption lines is performed. The results are in good agreement with findings from previous surveys. The second (main) part of this thesis deals with the analysis of the redshift evolution of the effective optical depth. The τ_eff measurements vary around the empirical power law τ_eff(z)~(1+z)^(γ+1) with γ=2.09±0.52. The same analysis as for the observed spectra is performed on synthetic absorption spectra. From a comparison between observed and synthetic spectral data it can be inferred that the uncertainties of the τ_eff values are likely underestimated and that the scatter is probably caused by high-column-density absorbers with column densities in the range 15≤logN≤17. In the real Universe, such absorbers are rarely observed, however. Hence, the difference in τ_eff from different observational data sets and absorption studies is most likely caused by cosmic variance. If, alternatively, the disagreement between such data is a result of an too optimistic estimate of the (systematic) errors, it is also possible that all τ_eff measurements agree with a smooth evolution within the investigated redshift range. To explore in detail the different analysis techniques of previous studies an extensive literature comparison to the results of this work is presented in this thesis. 
Although a final explanation for the occurrence of the τ_eff deviation in different studies at z~3.2 cannot be given here, our study, which represents the most detailed line-fitting analysis of its kind performed at the investigated redshifts so far, represents another important benchmark for the characterization of the HI Ly-alpha effective optical depth at high redshift and its indicated unusual behavior at z~3.2.}, language = {en} } @phdthesis{Wettstein2015, author = {Wettstein, Christoph}, title = {Cytochrome c-DNA and cytochrome c-enzyme interactions for the construction of analytical signal chains}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78367}, school = {Universit{\"a}t Potsdam}, pages = {120}, year = {2015}, abstract = {Electron transfer (ET) reactions play a crucial role in the metabolic pathways of all organisms. In biotechnological approaches, the redox properties of the protein cytochrome c (cyt c), which acts as an electron shuttle in the respiratory chain, were utilized to engineer ET chains on electrode surfaces. With the help of the biopolymer DNA, the redox protein assembles into electro active multilayer (ML) systems, providing a biocompatible matrix for the entrapment of proteins. In this study the characteristics of the cyt c and DNA interaction were defined on the molecular level for the first time and the binding sites of DNA on cyt c were identified. Persistent cyt c/DNA complexes were formed in solution under the assembly conditions of ML architectures, i.e. pH 5.0 and low ionic strength. At pH 7.0, no agglomerates were formed, permitting the characterization of the complexes by NMR spectroscopy. Using transverse relaxation-optimized spectroscopy (TROSY)-heteronuclear single quantum coherence (HSQC) experiments, DNAs' binding sites on the protein were identified. In particular, negatively charged AA residues, which are known interaction sites in cyt c/protein binding were identified as the main contact points of cyt c and DNA. 
Moreover, the sophisticated task of arranging proteins on electrode surfaces to create functional ET chains was addressed. Therefore, two different enzyme types, the flavin dependent fructose dehydrogenase (FDH) and the pyrroloquinoline quinone dependent glucose dehydrogenase (PQQ-GDH), were tested as reaction partners of freely diffusing cyt c and cyt c immobilized on electrodes in mono- and MLs. The characterisation of the ET processes was performed by means of electrochemistry and the protein deposition was monitored by microgravimetric measurements. FDH and PQQ-GDH were found to be generally suitable for combination with the cyt c/DNA ML system, since both enzymes interact with cyt c in solution and in the immobilized state. The immobilization of FDH and cyt c was achieved with the enzyme on top of a cyt c monolayer electrode without the help of a polyelectrolyte. Combining FDH with the cyt c/DNA ML system did not succeed yet. However, the basic conditions for this protein-protein interaction were defined. PQQ-GDH was successfully coupled with the ML system, demonstrating that the cyt c/DNA ML system provides a suitable interface for enzymes and that the creation of signal chains, based on the idea of co-immobilized proteins is feasible. Future work may be directed to the investigation of cyt c/DNA interaction under the precise conditions of ML assembly. Therefore, solid state NMR or X-ray crystallography may be required. Based on the results of this study, the combination of FDH with the ML system should be addressed. 
Moreover, alternative types of enzymes may be tested as the catalytic component of the ML assembly, aiming at the development of innovative biosensor applications.}, language = {en} } @phdthesis{Rajasundaram2015, author = {Rajasundaram, Dhivyaa}, title = {Integrative analysis of heterogeneous plant cell wall related data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-77652}, school = {Universit{\"a}t Potsdam}, pages = {xii, 205}, year = {2015}, abstract = {Plant cell walls are complex structures that underpin plant growth and are widely exploited in diverse human activities, thus giving them a central importance in biology. Cell walls have been a prominent area of research for a long time, but the chemical complexity and diversity of cell walls not just between species, but also within plants, between cell-types, and between cell wall micro-domains pose several challenges. Progress accelerated several-fold in cell wall biology owing to advances in sequencing technology, aided soon thereafter by advances in omics and imaging technologies. This development provides additional perspectives of cell walls across a rapidly growing number of species, highlighting a myriad of architectures, compositions, and functions. Furthermore, rather than the component centric view, integrative analysis of the different cell wall components across system-levels helps to gain a more in-depth understanding of the structure and biosynthesis of the cell envelope and its interactions with the environment. To this end, in this work three case studies are detailed, all pertaining to the integrative analysis of heterogeneous cell wall related data arising from different system-levels and analytical techniques. A detailed account of multiblock methods is provided and in particular canonical correlation and regression methods of data integration are discussed. 
In the first integrative analysis, by employing canonical correlation analysis - a multivariate statistical technique to study the association between two datasets - novel insight into the relationship between glycans and phenotypic traits is gained. In addition, a sparse partial least squares regression approach that adapts Lasso penalization and allows for the selection of a subset of variables was employed. The second case study focuses on an integrative analysis of images obtained from different spectroscopic techniques. By employing yet another multiblock approach - multiple co-inertia analysis, the in situ biochemical composition of cell walls from different cell-types is studied thereby highlighting the common and complementary parts of the two hyperspectral imaging techniques. Finally, the third integrative analysis facilitates gene expression analysis of the Arabidopsis root transcriptome and translatome for the identification of cell wall related genes and compares expression patterns of cell wall synthesis genes. The computational analysis considered correlation and variation of expression across cell-types at both system-levels, and also provides insight into the degree of co-regulatory relationships that are preserved between the two processes. The integrative analysis of glycan data and phenotypic traits in cotton fibers using canonical methods led to the identification of specific polysaccharides which may play a major role during fiber development for the final fiber characteristics. Furthermore, this analysis provides a base for future studies on glycan arrays in case of developing cotton fibers. The integrative analysis of images from infrared and Raman spectroscopic approaches allowed the coupling of different analytical techniques to characterize complex biological material, thereby representing various facets of their chemical properties. 
Moreover, the results from the co-inertia analysis demonstrated that the study was well adapted as it is relevant for coupling data tables in a symmetric way. Several indicators are proposed to investigate how the global and block scores are related. In addition, studying the root cells of \textit{Arabidopsis thaliana} allowed positing a novel pipeline to systematically investigate and integrate the different levels of information available at the global and single-cell level. The conducted analysis also confirms that previously identified key transcriptional activators of secondary cell wall development display highly conserved patterns of transcription and translation across the investigated cell-types. Moreover, the biological processes that display conserved and divergent patterns based on the cell-type-specific expression and translation levels are identified.}, language = {en} } @phdthesis{Bittermann2015, author = {Bittermann, Klaus}, title = {Semi-empirical sea-level modelling}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-93881}, school = {Universit{\"a}t Potsdam}, pages = {v, 88}, year = {2015}, abstract = {Semi-empirical sea-level models (SEMs) exploit physically motivated empirical relationships between global sea level and certain drivers, in the following global mean temperature. This model class evolved as a supplement to process-based models (Rahmstorf (2007)) which were unable to fully represent all relevant processes. They thus failed to capture past sea-level change (Rahmstorf et al. (2012)) and were thought likely to underestimate future sea-level rise. Semi-empirical models were found to be a fast and useful tool for exploring the uncertainties in future sea-level rise, consistently giving significantly higher projections than process-based models. In the following different aspects of semi-empirical sea-level modelling have been studied. Models were first validated using various data sets of global sea level and temperature. 
SEMs were then used on the glacier contribution to sea level, and to infer past global temperature from sea-level data via inverse modelling. Periods studied encompass the instrumental period, covered by tide gauges (starting 1700 CE (Common Era) in Amsterdam) and satellites (first launched in 1992 CE), the era from 1000 BCE (before CE) to present, and the full length of the Holocene (using proxy data). Accordingly, different data, model formulations and implementations have been used. It could be shown in Bittermann et al. (2013) that SEMs correctly predict 20th century sea-level when calibrated with data until 1900 CE. SEMs also turned out to give better predictions than the Intergovernmental Panel on Climate Change (IPCC) 4th assessment report (AR4, IPCC (2007)) models, for the period from 1961-2003 CE. With the first multi-proxy reconstruction of global sea-level as input, an estimate of the human-induced component of modern sea-level change and projections of future sea-level rise were calculated (Kopp et al. (2016)). It turned out with 90\% confidence that more than 40\% of the observed 20th century sea-level rise is indeed anthropogenic. With the new semi-empirical and IPCC (2013) 5th assessment report (AR5) projections the gap between SEM and process-based model projections closes, giving higher credibility to both. Combining all scenarios, from strong mitigation to business as usual, a global sea-level rise of 28-131 cm relative to 2000 CE is projected with 90\% confidence. The decision for a low carbon pathway could halve the expected global sea-level rise by 2100 CE. Present day temperature and thus sea level are driven by the globally acting greenhouse-gas forcing. Unlike that, the Milankovich forcing, acting on Holocene timescales, results mainly in a northern-hemisphere temperature change. 
Therefore a semi-empirical model can be driven with northern-hemisphere temperatures, which makes it possible to model the main subcomponent of sea-level change over this period. It showed that an additional positive constant rate of the order of the estimated Antarctic sea-level contribution is then required to explain the sea-level evolution over the Holocene. Thus the global sea level, following the climatic optimum, can be interpreted as the sum of a temperature induced sea-level drop and a positive long-term contribution, likely an ongoing response to deglaciation coming from Antarctica.}, language = {en} } @phdthesis{Brachs2015, author = {Brachs, Maria}, title = {Genome wide expression analysis and metabolic mechanisms predicting body weight maintenance}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100767}, school = {Universit{\"a}t Potsdam}, pages = {106}, year = {2015}, abstract = {Obesity is a major health problem for many developing and industrial countries. Increasing rates reach almost 50 \% of the population in some countries and related metabolic diseases including cardiovascular events and T2DM are challenging the health systems. Adiposity, an increase in body fat mass, is a major hallmark of obesity. Adipose tissue is long known not only to store lipids but also to influence whole-body metabolism including food intake, energy expenditure and insulin sensitivity. Adipocytes can store lipids and thereby protect other tissue from lipotoxic damage. However, if the energy intake is higher than the energy expenditure over a sustained time period, adipose tissue will expand. This can lead to an impaired adipose tissue function resulting in higher levels of plasma lipids, which can affect other tissue like skeletal muscle, finally leading to metabolic complications. Several studies showed beneficial metabolic effects of weight reduction in obese subjects immediately after weight loss. 
However, weight regain is frequently observed along with potential negative effects on cardiovascular risk factors and a high intra-individual response. We performed a body weight maintenance study investigating the mechanisms of weight maintenance after intended WR. Therefore we used a low caloric diet followed by a 12-month life-style intervention. Comprehensive phenotyping including fat and muscle biopsies was conducted to investigate hormonal as well as metabolic influences on body weight regulation. In this study, we showed that weight reduction has numerous potentially beneficial effects on metabolic parameters. After 3-month WR subjects showed significant weight and fat mass reduction, lower TG levels as well as higher insulin sensitivity. Using RNA-Seq to analyse whole fat and muscle transcriptome a strong impact of weight reduction on adipose tissue gene expression was observed. Gene expression alterations over weight reduction included several cellular metabolic genes involved in lipid and glucose metabolism as well as insulin signalling and regulatory pathways. These changes were also associated with anthropometric parameters assigning body composition. Our data indicated that weight reduction leads to a decreased expression of several lipid catabolic as well as anabolic genes. Long-term body weight maintenance might be influenced by several parameters including hormones, metabolic intermediates as well as the transcriptional landscape of metabolic active tissues. Our data showed that genes involved in biosynthesis of unsaturated fatty acids might influence the BMI 18-month after a weight reduction phase. This was further supported by analysing metabolic parameters including RQ and FFA levels. We could show that subjects maintaining their lost body weight had a higher RQ and lower FFA levels, indicating increased metabolic flexibility in subjects. 
Using this transcriptomic approach we hypothesize that low expression levels of lipid synthetic genes in adipose tissue together with a higher mitochondrial activity in skeletal muscle tissue might be beneficial in terms of body weight maintenance.}, language = {en} } @phdthesis{Bettenbuehl2015, author = {Bettenb{\"u}hl, Mario}, title = {Microsaccades}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-122-6}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72622}, school = {Universit{\"a}t Potsdam}, pages = {iv, 126}, year = {2015}, abstract = {The first thing we do upon waking is open our eyes. Rotating them in our eye sockets, we scan our surroundings and collect the information into a picture in our head. Eye movements can be split into saccades and fixational eye movements, which occur when we attempt to fixate our gaze. The latter consists of microsaccades, drift and tremor. Before we even lift our eye lids, eye movements - such as saccades and microsaccades that let the eyes jump from one to another position - have partially been prepared in the brain stem. Saccades and microsaccades are often assumed to be generated by the same mechanisms. But how saccades and microsaccades can be classified according to shape has not yet been reported in a statistical manner. Research has put more effort into the investigations of microsaccades' properties and generation only since the last decade. Consequently, we are only beginning to understand the dynamic processes governing microsaccadic eye movements. Within this thesis, the dynamics governing the generation of microsaccades is assessed and the development of a model for the underlying processes. Eye movement trajectories from different experiments are used, recorded with a video-based eye tracking technique, and a novel method is proposed for the scale-invariant detection of saccades (events of large amplitude) and microsaccades (events of small amplitude). 
Using a time-frequency approach, the method is examined with different experiments and validated against simulated data. A shape model is suggested that allows for a simple estimation of saccade- and microsaccade related properties. For sequences of microsaccades, in this thesis a time-dynamic Markov model is proposed, with a memory horizon that changes over time and which can best describe sequences of microsaccades.}, language = {en} }