@phdthesis{Zuehlke2017, author = {Z{\"u}hlke, Martin}, title = {Elektrosprayionisation Ionenmobilit{\"a}tsspektrometrie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407452}, school = {Universit{\"a}t Potsdam}, pages = {viii, 113, XIV}, year = {2017}, abstract = {Die Elektrosprayionisation (ESI) ist eine der weitverbreitetsten Ionisationstechniken f{\"u}r fl{\"u}ssige Proben in der Massen- und Ionenmobilit{\"a}ts(IM)-Spektrometrie. Aufgrund ihrer schonenden Ionisierung wird ESI vorwiegend f{\"u}r empfindliche, komplexe Molek{\"u}le in der Biologie und Medizin eingesetzt. {\"U}berdies ist sie allerdings f{\"u}r ein sehr breites Spektrum an Substanzklassen anwendbar. Die IM-Spektrometrie wurde urspr{\"u}nglich zur Detektion gasf{\"o}rmiger Proben entwickelt, die haupts{\"a}chlich durch radioaktive Quellen ionisiert werden. Sie ist die einzige analytische Methode, bei der Isomere in Echtzeit getrennt und {\"u}ber ihre charakteristische IM direkt identifiziert werden k{\"o}nnen. ESI wurde in den 90er Jahren durch die Hill-Gruppe in die IM-Spektrometrie eingef{\"u}hrt. Die Kombination wird bisher jedoch nur von wenigen Gruppen verwendet und hat deshalb noch ein hohes Entwicklungspotential. Ein vielversprechendes Anwendungsfeld ist der Einsatz in der Hochleistungsfl{\"u}ssigkeitschromatographie (HPLC) zur mehrdimensionalen Trennung. Heutzutage ist die HPLC die Standardmethode zur Trennung komplexer Proben in der Routineanalytik. HPLC-Trennungsg{\"a}nge sind jedoch h{\"a}ufig langwierig und der Einsatz verschiedener Laufmittel, hoher Flussraten, von Puffern sowie Laufmittelgradienten stellt hohe Anforderungen an die Detektoren. Die ESI-IM-Spektrometrie wurde in einigen Studien bereits als HPLC-Detektor eingesetzt, war dort bisher jedoch auf Flussratensplitting oder geringe Flussraten des Laufmittels beschr{\"a}nkt. In dieser kumulativen Doktorarbeit konnte daher erstmals ein ESI IM-Spektrometer als HPLC-Detektor f{\"u}r den Flussratenbereich von 200-1500 μl/min entwickelt werden. Anhand von f{\"u}nf Publikationen wurden (1) {\"u}ber eine umfassende Charakterisierung die Eignung des Spektrometers als HPLC-Detektor festgestellt, (2) ausgew{\"a}hlte komplexe Trenng{\"a}nge pr{\"a}sentiert und (3) die Anwendung zum Reaktionsmonitoring und (4, 5) m{\"o}gliche Weiterentwicklungen gezeigt. Erfolgreich konnten mit dem selbst entwickelten ESI IM-Spektrometer typische HPLC-Bedingungen wie Wassergehalte im Laufmittel von bis zu 90\%, Pufferkonzentrationen von bis zu 10 mM sowie Nachweisgrenzen von bis zu 50 nM erreicht werden. Weiterhin wurde anhand der komplexen Trennungsg{\"a}nge (24 Pestizide/18 Aminos{\"a}uren) gezeigt, dass die HPLC und die IM-Spektrometrie eine hohe Orthogonalit{\"a}t besitzen. Eine effektive Peakkapazit{\"a}t von 240 wurde so realisiert. Auf der HPLC-S{\"a}ule koeluierende Substanzen konnten {\"u}ber die Driftzeit getrennt und {\"u}ber ihre IM identifiziert werden, sodass die Gesamttrennzeiten erheblich minimiert werden konnten. Die Anwendbarkeit des ESI IM-Spektrometers zur {\"U}berwachung chemischer Synthesen wurde anhand einer dreistufigen Reaktion demonstriert. Es konnten die wichtigsten Edukte, Zwischenprodukte und Produkte aller Stufen identifiziert werden. Eine quantitative Auswertung war sowohl {\"u}ber eine kurze HPLC-Vortrennung als auch durch die Entwicklung eines eigenen Kalibrierverfahrens, welches die Ladungskonkurrenz bei ESI ber{\"u}cksichtigt, ohne HPLC m{\"o}glich.
Im zweiten Teil der Arbeit werden zwei Weiterentwicklungen des Spektrometers pr{\"a}sentiert. Eine M{\"o}glichkeit ist die Reduzierung des Drucks in den intermedi{\"a}ren Bereich (300 - 1000 mbar) mit dem Ziel der Verringerung der ben{\"o}tigten Spannungen. Mithilfe von Streulichtbildern und Strom-Spannungs-Kurven wurde f{\"u}r geringe Dr{\"u}cke eine verminderte Freisetzung der Analyt-Ionen aus den Tropfen festgestellt. Die Verluste konnten jedoch {\"u}ber h{\"o}here elektrische Feldst{\"a}rken ausgeglichen werden, sodass gleiche Nachweisgrenzen bei 500 mbar und bei 1 bar erreicht wurden. Die zweite Weiterentwicklung ist ein neuartiges Ionentor mit Pulsschaltung, welches eine Verdopplung der Aufl{\"o}sung auf bis zu R > 100 bei gleicher Sensitivit{\"a}t erm{\"o}glichte. Eine denkbare Anwendung im Bereich der Peptidanalytik wurde mit beachtlichen Aufl{\"o}sungen der Peptide von R = 90 gezeigt.}, language = {de} } @phdthesis{Zuo2017, author = {Zuo, Zhe}, title = {From unstructured to structured: Context-based named entity mining from text}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412576}, school = {Universit{\"a}t Potsdam}, pages = {vii, 112}, year = {2017}, abstract = {With recent advances in the area of information extraction, automatically extracting structured information from a vast amount of unstructured textual data becomes an important task, since it is infeasible for humans to capture all this information manually. Named entities (e.g., persons, organizations, and locations), which are crucial components in texts, are usually the subjects of structured information from textual documents. Therefore, the task of named entity mining receives much attention. It consists of three major subtasks, which are named entity recognition, named entity linking, and relation extraction. These three tasks build up an entire pipeline of a named entity mining system, where each of them has its challenges and can be employed for further applications. As named entity recognition is a fundamental task in the natural language processing domain, studies on it have a long history, and many existing approaches produce reliable results. The task aims to extract mentions of named entities in text and identify their types. Named entity linking recently received much attention with the development of knowledge bases that contain rich information about entities. The goal is to disambiguate mentions of named entities and to link them to the corresponding entries in a knowledge base. Relation extraction, as the final step of named entity mining, is a highly challenging task, which is to extract semantic relations between named entities, e.g., the ownership relation between two companies. In this thesis, we review the state of the art of the named entity mining domain in detail, including valuable features, techniques, evaluation methodologies, and so on. Furthermore, we present two of our approaches that focus on the named entity linking and relation extraction tasks separately. To solve the named entity linking task, we propose the entity linking technique BEL, which operates on a textual range of relevant terms and aggregates decisions from an ensemble of simple classifiers. Each of the classifiers operates on a randomly sampled subset of the above range. In extensive experiments on hand-labeled and benchmark datasets, our approach outperformed state-of-the-art entity linking techniques, both in terms of quality and efficiency.
For the task of relation extraction, we focus on extracting a specific group of difficult relation types: business relations between companies. These relations can be used to gain valuable insight into the interactions between companies and perform complex analytics, such as predicting risk or valuing companies. Our semi-supervised strategy can extract business relations between companies based on only a few user-provided seed company pairs. By doing so, we also provide a solution for the problem of determining the direction of asymmetric relations, such as the ownership_of relation. We improve the reliability of the extraction process by using a holistic pattern identification method, which classifies the generated extraction patterns. Our experiments show that we can accurately and reliably extract new entity pairs occurring in the target relation by using as few as five labeled seed pairs.}, language = {en} } @phdthesis{Ziegler2017, author = {Ziegler, Moritz O.}, title = {The 3D in-situ stress field and its changes in geothermal reservoirs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403838}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 110, XV}, year = {2017}, abstract = {Information on the contemporary in-situ stress state of the earth's crust is essential for geotechnical applications and physics-based seismic hazard assessment. Yet, stress data records for a data point are incomplete and their availability is usually not dense enough to allow conclusive statements. This demands a thorough examination of the in-situ stress field, which is achieved by 3D geomechanical-numerical models. However, the models' spatial resolution is limited and the resulting local stress state is subject to large uncertainties that confine the significance of the findings. In addition, temporal variations of the in-situ stress field are naturally or anthropogenically induced. In my thesis I address these challenges in three manuscripts that investigate (1) the current crustal stress field orientation, (2) the 3D geomechanical-numerical modelling of the in-situ stress state, and (3) the phenomenon of injection-induced temporal stress tensor rotations. In the first manuscript I present the first comprehensive stress data compilation of Iceland with 495 data records. To this end, I analysed image logs from 57 boreholes in Iceland for indicators of the orientation of the maximum horizontal stress component. The study is the first stress survey from different kinds of stress indicators in a geologically very young and tectonically active area of an onshore spreading ridge. It reveals a distinct stress field with a depth-independent stress orientation even very close to the spreading centre. In the second manuscript I present a calibrated 3D geomechanical-numerical modelling approach of the in-situ stress state of the Bavarian Molasse Basin that investigates the regional (70x70x10km³) and local (10x10x10km³) stress state. To link these two models I develop a multi-stage modelling approach that provides a reliable and efficient method to derive initial and boundary conditions for the smaller-scale model from the larger-scale model. Furthermore, I quantify the uncertainties in the models' results, which are inherent to geomechanical-numerical modelling in general and the multi-stage approach in particular.
I show that the significance of the models' results is mainly reduced due to the uncertainties in the material properties and the low number of available stress magnitude data records for calibration. In the third manuscript I investigate the phenomenon of injection-induced temporal stress tensor rotation and its controlling factors. I conduct a sensitivity study with a 3D generic thermo-hydro-mechanical model. I show that the key control factors for the stress tensor rotation are the permeability as the decisive factor, the injection rate, and the initial differential stress. In particular, for enhanced geothermal systems with a low permeability, large rotations of the stress tensor are indicated. According to these findings, the estimation of the initial differential stress in a reservoir is possible provided the permeability is known and the angle of stress rotation is observed. I propose that the stress tensor rotations can be a key factor in terms of the potential for induced seismicity on pre-existing faults due to the reorientation of the stress field that changes the optimal orientation of faults.}, language = {en} } @phdthesis{Zieger2017, author = {Zieger, Tobias}, title = {Self-adaptive data quality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410573}, school = {Universit{\"a}t Potsdam}, pages = {vii, 125}, year = {2017}, abstract = {Carrying out business processes successfully is closely linked to the quality of the data inventory in an organization. Deficiencies in data quality lead to problems: Incorrect address data prevents (timely) shipments to customers. Erroneous orders lead to returns and thus to unnecessary effort. Wrong pricing causes companies to miss out on revenues or impairs customer satisfaction. If orders or customer records cannot be retrieved, complaint management takes longer. Due to erroneous inventories, too few or too many supplies might be reordered. A special problem with data quality and the reason for many of the issues mentioned above are duplicates in databases. Duplicates are different representations of the same real-world objects in a dataset. However, these representations differ from each other and are therefore hard for a computer to match. Moreover, the number of required comparisons to find those duplicates grows with the square of the dataset size. To cleanse the data, these duplicates must be detected and removed. Duplicate detection is a very laborious process. To achieve satisfactory results, appropriate software must be created and configured (similarity measures, partitioning keys, thresholds, etc.). Both require much manual effort and experience. This thesis addresses the automation of parameter selection for duplicate detection and presents several novel approaches that eliminate the need for human experience in parts of the duplicate detection process. A pre-processing step is introduced that analyzes the datasets in question and classifies their attributes semantically. Not only do these annotations help in understanding the respective datasets, but they also facilitate subsequent steps, for example, by selecting appropriate similarity measures or normalizing the data upfront. This approach works without schema information. Following that, we show a partitioning technique that strongly reduces the number of pair comparisons for the duplicate detection process. The approach automatically finds particularly suitable partitioning keys that simultaneously allow for effective and efficient duplicate retrieval.
By means of a user study, we demonstrate that this technique finds partitioning keys that outperform expert suggestions and additionally does not need manual configuration. Furthermore, this approach can be applied independently of the attribute types. To measure the success of a duplicate detection process and to execute the described partitioning approach, a gold standard is required that provides information about the actual duplicates in a training dataset. This thesis presents a technique that uses existing duplicate detection results and crowdsourcing to create a near-gold standard that can be used for the purposes above. Another part of the thesis describes and evaluates strategies for reducing these crowdsourcing costs and achieving a consensus with less effort.}, language = {en} } @phdthesis{Zhou2017, author = {Zhou, Bin}, title = {On the assessment of surface urban heat island}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404383}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 119}, year = {2017}, abstract = {Inwiefern St{\"a}dte unter den Megatrends der Urbanisierung und des Klimawandels nachhaltig gestaltet werden k{\"o}nnen, bleibt umstritten. Dies ist zum Teil auf unzureichende Kenntnisse der Mensch-Umwelt-Interaktionen zur{\"u}ckzuf{\"u}hren. Als die am vollst{\"a}ndigsten dokumentierte anthropogene Klimamodifikation ruft der Urbane Hitzeinsel-Effekt (UHI-Effekt) weltweit Sorgen hinsichtlich der Gesundheit der Bev{\"o}lkerung hervor. Dazu kommt noch ein immer h{\"a}ufigeres und intensiveres Auftreten von Hitzewellen, wodurch das Wohlbefinden der Stadtbewohner weiter beeintr{\"a}chtigt wird. Trotz eines deutlichen Anstiegs der Zahl der UHI-bezogenen Ver{\"o}ffentlichungen in den letzten Jahrzehnten haben die unterschiedlichen Definitionen von st{\"a}dtischen und l{\"a}ndlichen Gebieten in bisherigen Studien die allgemeine Vergleichbarkeit der Resultate stark erschwert. Dar{\"u}ber hinaus haben nur wenige Studien den UHI-Effekt und seine Einflussfaktoren anhand einer Kombination der Landnutzungsdaten und der thermischen Fernerkundung systematisch untersucht. Diese Arbeit stellt einen allgemeinen Rahmen zur Quantifizierung von UHI-Intensit{\"a}ten mittels eines automatisierten Algorithmus vor, wobei St{\"a}dte als Agglomerationen maximaler r{\"a}umlicher Kontinuit{\"a}t basierend auf Landnutzungsdaten identifiziert sowie deren l{\"a}ndliche Umfelder analog definiert werden. Durch Verkn{\"u}pfung der Landnutzungsdaten mit Landoberfl{\"a}chentemperaturen von Satelliten kann die UHI-Intensit{\"a}t robust und konsistent berechnet werden. Anhand dieser Innovation wurde nicht nur der Zusammenhang zwischen Stadtgr{\"o}ße und UHI-Intensit{\"a}t erneut untersucht, sondern auch die Auswirkungen der Stadtform auf die UHI-Intensit{\"a}t quantifiziert. Diese Arbeit leistet vielf{\"a}ltige Beitr{\"a}ge zum tieferen Verst{\"a}ndnis des UHI-Ph{\"a}nomens. Erstens wurde eine log-lineare Beziehung zwischen UHI-Intensit{\"a}t und Stadtgr{\"o}ße unter Ber{\"u}cksichtigung der 5.000 europ{\"a}ischen St{\"a}dte best{\"a}tigt. Werden kleinere St{\"a}dte auch ber{\"u}cksichtigt, ergibt sich eine log-logistische Beziehung. Zweitens besteht ein komplexes Zusammenspiel zwischen der Stadtform und der UHI-Intensit{\"a}t: Die Stadtgr{\"o}ße stellt den st{\"a}rksten Einfluss auf die UHI-Intensit{\"a}t dar, gefolgt von der fraktalen Dimension und der Anisometrie.
Allerdings zeigen ihre relativen Beitr{\"a}ge zur UHI-Intensit{\"a}t eine regionale Heterogenit{\"a}t, welche die Bedeutung r{\"a}umlicher Muster w{\"a}hrend der Umsetzung von UHI-Anpassungsmaßnahmen hervorhebt. Des Weiteren ergibt sich eine neue Saisonalit{\"a}t der UHI-Intensit{\"a}t f{\"u}r individuelle St{\"a}dte in Form von Hysteresekurven, die eine Phasenverschiebung zwischen den Zeitreihen der UHI-Intensit{\"a}t und der Hintergrundtemperatur andeutet. Diese Saisonalit{\"a}t wurde anhand von Luft- und Landoberfl{\"a}chentemperaturen untersucht, indem die Satellitenbeobachtung und die Modellierung der urbanen Grenzschicht mittels des UrbClim-Modells kombiniert wurden. Am Beispiel von London ist die Diskrepanz der Saisonalit{\"a}ten zwischen den beiden Temperaturen vor allem auf die mit der einfallenden Sonnenstrahlung verbundene Besonderheit der Landoberfl{\"a}chentemperatur zur{\"u}ckzuf{\"u}hren. Dar{\"u}ber hinaus spielt das regionale Klima eine wichtige Rolle bei der Entwicklung der UHI. Diese Arbeit ist eine der ersten Studien dieser Art, die eine systematische und statistische Untersuchung des UHI-Effektes erm{\"o}glicht. Die Ergebnisse sind von besonderer Bedeutung f{\"u}r die allgemeine r{\"a}umliche Planung und Regulierung auf Meso- und Makroebenen, damit sich Vorteile der rapiden Urbanisierung nutzbar machen und zeitgleich die folgende Hitzebelastung proaktiv vermindern lassen.}, language = {en} } @phdthesis{Wolf2017, author = {Wolf, Julia}, title = {Schadenserkennung in Beton durch {\"U}berwachung mit eingebetteten Ultraschallpr{\"u}fk{\"o}pfen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397363}, school = {Universit{\"a}t Potsdam}, pages = {ix, 142}, year = {2017}, abstract = {Die zerst{\"o}rungsfreien Pr{\"u}fungen von Bauwerken mit Hilfe von Ultraschallmessverfahren haben in den letzten Jahren an Bedeutung gewonnen. Durch Ultraschallmessungen k{\"o}nnen die Geometrien von Bauteilen bestimmt sowie von außen nicht sichtbare Fehler wie Delaminationen und Kiesnester erkannt werden. Mit neuartigen, in das Betonbauteil eingebetteten Ultraschallpr{\"u}fk{\"o}pfen sollen nun Bauwerke dauerhaft auf Ver{\"a}nderungen {\"u}berpr{\"u}ft werden. Dazu werden Ultraschallsignale direkt im Inneren eines Bauteils erzeugt, was die M{\"o}glichkeiten der herk{\"o}mmlichen Methoden der Bauwerks{\"u}berwachung wesentlich erweitert. Ein Ultraschallverfahren k{\"o}nnte mit eingebetteten Pr{\"u}fk{\"o}pfen ein Betonbauteil kontinuierlich integral {\"u}berwachen und damit auch stetig fortschreitende Gef{\"u}ge{\"a}nderungen, wie beispielsweise Mikrorisse, registrieren. Sicherheitsrelevante Bauteile, die nach dem Einbau f{\"u}r Messungen unzug{\"a}nglich oder mittels Ultraschall, beispielsweise durch zus{\"a}tzliche Beschichtungen der Oberfl{\"a}che, nicht pr{\"u}fbar sind, lassen sich mit eingebetteten Pr{\"u}fk{\"o}pfen {\"u}berwachen. An bereits vorhandenen Bauwerken k{\"o}nnen die Ultraschallpr{\"u}fk{\"o}pfe mithilfe von Bohrl{\"o}chern und speziellem Verpressm{\"o}rtel auch nachtr{\"a}glich in das Bauteil integriert werden. F{\"u}r Fertigbauteile bieten sich eingebettete Pr{\"u}fk{\"o}pfe zur Herstellungskontrolle sowie zur {\"U}berwachung der Baudurchf{\"u}hrung als Werkzeug der Qualit{\"a}tssicherung an. Auch die schnelle Schadensanalyse eines Bauwerks nach Naturkatastrophen, wie beispielsweise einem Erdbeben oder einer Flut, ist denkbar. 
Durch die gute Ankopplung erm{\"o}glichen diese neuartigen Pr{\"u}fk{\"o}pfe den Einsatz von empfindlichen Auswertungsmethoden wie der Kreuzkorrelation, der Coda-Wellen-Interferometrie oder der Amplitudenauswertung f{\"u}r die Signalanalyse. Bei regelm{\"a}ßigen Messungen k{\"o}nnen somit sich anbahnende Sch{\"a}den eines Bauwerks fr{\"u}hzeitig erkannt werden. Da die Sch{\"a}digung eines Bauwerks keine direkt messbare Gr{\"o}ße darstellt, erfordert eine eindeutige Schadenserkennung in der Regel die Messung mehrerer physikalischer Gr{\"o}ßen, die geeignet verkn{\"u}pft werden. Physikalische Gr{\"o}ßen k{\"o}nnen sein: Ultraschalllaufzeit, Amplitude des Ultraschallsignals und Umgebungstemperatur. Dazu m{\"u}ssen Korrelationen zwischen dem Zustand des Bauwerks, den Umgebungsbedingungen und den Parametern des gemessenen Ultraschallsignals untersucht werden. In dieser Arbeit werden die neuartigen Pr{\"u}fk{\"o}pfe vorgestellt. Es wird beschrieben, dass sie sich sowohl in bereits errichtete als auch in noch in der Konstruktion befindliche Betonbauwerke einbauen lassen. Experimentell wird gezeigt, dass die Pr{\"u}fk{\"o}pfe in mehreren Ebenen eingebettet sein k{\"o}nnen, da ihre Abstrahlcharakteristik im Beton nahezu ungerichtet ist. Die Mittenfrequenz von rund 62 kHz erm{\"o}glicht Abst{\"a}nde, je nach Betonart und SRV, von mindestens 3 m zwischen Pr{\"u}fk{\"o}pfen, die als Sender und Empf{\"a}nger arbeiten. Die Empfindlichkeit der eingebetteten Pr{\"u}fk{\"o}pfe gegen{\"u}ber Ver{\"a}nderungen im Beton wird anhand von zwei Laborexperimenten gezeigt, einem Drei-Punkt-Biegeversuch und einem Versuch zur Erzeugung von Frost-Tau-Wechsel-Sch{\"a}den. Die Ergebnisse werden mit anderen zerst{\"o}rungsfreien Pr{\"u}fverfahren verglichen. Es zeigt sich, dass die Pr{\"u}fk{\"o}pfe durch die Anwendung empfindlicher Auswertemethoden auftretende Risse im Beton detektieren, bevor diese eine Gefahr f{\"u}r das Bauwerk darstellen. Abschließend werden Beispiele f{\"u}r die Installation der neuartigen Ultraschallpr{\"u}fk{\"o}pfe in realen Bauteilen, zwei Br{\"u}cken und einem Fundament, gezeigt und basierend auf dort gewonnenen ersten Erfahrungen ein Konzept f{\"u}r die Umsetzung einer Langzeit{\"u}berwachung aufgestellt.}, language = {de} } @phdthesis{Wittenbecher2017, author = {Wittenbecher, Clemens}, title = {Linking whole-grain bread, coffee, and red meat to the risk of type 2 diabetes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404592}, school = {Universit{\"a}t Potsdam}, pages = {XII, 194, ii}, year = {2017}, abstract = {Background: Consumption of whole-grain bread, coffee, and red meat was consistently related to the risk of developing type 2 diabetes in prospective cohort studies, but the potentially underlying biological mechanisms are not well understood. Metabolomics profiles were shown to be sensitive to these dietary exposures, and at the same time to be informative with respect to the risk of type 2 diabetes. Moreover, graphical network models were demonstrated to reflect the biological processes underlying high-dimensional metabolomics profiles. Aim: The aim of this study was to infer hypotheses on the biological mechanisms that link consumption of whole-grain bread, coffee, and red meat, respectively, to the risk of developing type 2 diabetes. More specifically, the aim was to consider network models of amino acid and lipid profiles as potential mediators of these risk relations.
Study population: Analyses were conducted in the prospective EPIC-Potsdam cohort (n = 27,548), applying a nested case-cohort design (n = 2731, including 692 incident diabetes cases). Habitual diet was assessed with validated semiquantitative food-frequency questionnaires. Concentrations of 126 metabolites (acylcarnitines, phosphatidylcholines, sphingomyelins, amino acids) were determined in baseline serum samples. Incident type 2 diabetes cases were assessed and validated in an active follow-up procedure. The median follow-up time was 6.6 years. Analytical design: The methodological approach was conceptually based on counterfactual causal inference theory. Observations on the network-encoded conditional independence structure restricted the space of possible causal explanations of observed metabolomics-data patterns. Given basic directionality assumptions (diet affects metabolism; metabolism affects future diabetes incidence), adjustment for a subset of direct neighbours was sufficient to consistently estimate network-independent direct effects. Further model specification, however, was limited due to missing directionality information on the links between metabolites. Therefore, a multi-model approach was applied to infer the bounds of possible direct effects. All metabolite-exposure links and metabolite-outcome links, respectively, were classified into one of three categories: direct effect, ambiguous (some models indicated an effect, others not), and no effect. Cross-sectional and longitudinal relations were evaluated in multivariable-adjusted linear regression and Cox proportional hazard regression models, respectively. Models were comprehensively adjusted for age, sex, body mass index, prevalence of hypertension, dietary and lifestyle factors, and medication. Results: Consumption of whole-grain bread was related to lower levels of several lipid metabolites with saturated and monounsaturated fatty acids. Coffee was related to lower aromatic and branched-chain amino acids, and had potential effects on the fatty acid profile within lipid classes. Red meat was linked to lower glycine levels and was related to higher circulating concentrations of branched-chain amino acids. In addition, potential marked effects of red meat consumption on the fatty acid composition within the investigated lipid classes were identified. Moreover, potential beneficial and adverse direct effects of metabolites on type 2 diabetes risk were detected. Aromatic amino acids and lipid metabolites with even-chain saturated (C14-C18) and with specific polyunsaturated fatty acids had adverse effects on type 2 diabetes risk. Glycine, glutamine, and lipid metabolites with monounsaturated fatty acids and with other species of polyunsaturated fatty acids were classified as having direct beneficial effects on type 2 diabetes risk. Potential mediators of the diet-diabetes links were identified by graphically overlaying this information in network models. Mediation analyses revealed that effects on lipid metabolites could potentially explain about one fourth of the whole-grain bread effect on type 2 diabetes risk; and that effects of coffee and red meat consumption on amino acid and lipid profiles could potentially explain about two thirds of the altered type 2 diabetes risk linked to these dietary exposures. Conclusion: An algorithm was developed that is capable of integrating single external variables (continuous exposures, survival time) and high-dimensional metabolomics data in a joint graphical model.
Application to the EPIC-Potsdam cohort study revealed that the observed conditional independence patterns were consistent with the a priori mediation hypothesis: Early effects on lipid and amino acid metabolism had the potential to explain large parts of the link between three of the most widely discussed diabetes-related dietary exposures and the risk of developing type 2 diabetes.}, language = {en} } @phdthesis{Willersinn2017, author = {Willersinn, Jochen}, title = {Self-Assembly of double hydrophilic block copolymers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408578}, school = {Universit{\"a}t Potsdam}, pages = {119, clxxiv}, year = {2017}, abstract = {The motivation of this work was to investigate the self-assembly of a block copolymer species that had attracted little attention before, double hydrophilic block copolymers (DHBCs). DHBCs consist of two linear hydrophilic polymer blocks. The self-assembly of DHBCs towards suprastructures such as particles and vesicles is determined via a strong difference in hydrophilicity between the corresponding blocks, leading to a microphase separation due to immiscibility. The benefits of DHBCs and the corresponding particles and vesicles, such as biocompatibility, high permeability towards water and hydrophilic compounds, as well as the large number of possible functionalizations that can be addressed to the block copolymers, make the application of DHBC-based structures a viable choice in biomedicine. In order to assess a route towards self-assembled structures from DHBCs that display the potential to act as cargos for future applications, several block copolymers containing two hydrophilic polymer blocks were synthesized. Poly(ethylene oxide)-b-poly(N-vinylpyrrolidone) (PEO-b-PVP) and poly(ethylene oxide)-b-poly(N-vinylpyrrolidone-co-N-vinylimidazole) (PEO-b-P(VP-co-VIm)) block copolymers were synthesized via reversible deactivation radical polymerization (RDRP) techniques starting from a PEO macro chain transfer agent. The block copolymers displayed a concentration-dependent self-assembly behavior in water, which was determined via dynamic light scattering (DLS). It was possible to observe spherical particles via laser scanning confocal microscopy (LSCM) and cryogenic scanning electron microscopy (cryo SEM) in highly concentrated solutions of PEO-b-PVP. Furthermore, a crosslinking strategy with PEO-b-P(VP-co-VIm) was developed, applying a diiodo-derived crosslinker, diethylene glycol bis(2-iodoethyl) ether, to form quaternary amines at the VIm units. The formed crosslinked structures proved stable upon dilution and transfer into organic solvents. Moreover, self-assembly and crosslinking in DMF proved to be more advantageous, and the crosslinked structures could be successfully transferred to aqueous solution. The afforded spherical submicron particles could be visualized via LSCM, cryo SEM, and cryo TEM. Double hydrophilic pullulan-b-poly(acrylamide) block copolymers were synthesized via copper-catalyzed alkyne-azide cycloaddition (CuAAC) starting from suitable pullulan alkyne and azide-functionalized poly(N,N-dimethylacrylamide) (PDMA) and poly(N-ethylacrylamide) (PEA) homopolymers. The conjugation reaction was confirmed via SEC and 1H-NMR measurements. The self-assembly of the block copolymers was monitored with DLS and static light scattering (SLS) measurements, indicating the presence of hollow spherical structures. Cryo SEM measurements could confirm the presence of vesicular structures for Pull-b-PEA block copolymers.
Solutions of Pull-b-PDMA displayed particles in cryo SEM. Moreover, an end-group functionalization of Pull-b-PDMA with Rhodamine B allowed assessing the structure via LSCM, and hollow spherical structures were observed, indicating the presence of vesicles, too. An exemplary pathway towards a DHBC-based drug delivery vehicle was demonstrated with the block copolymer Pull-b-PVP. The block copolymer was synthesized via RAFT/MADIX techniques starting from a pullulan chain transfer agent. Pull-b-PVP displayed a concentration-dependent self-assembly in water with an efficiency superior to the PEO-b-PVP system, which could be observed via DLS. Cryo SEM and LSCM displayed the presence of spherical structures. In order to apply a reversible crosslinking strategy to the synthesized block copolymer, the pullulan block was selectively oxidized to dialdehydes with NaIO4. The oxidation of the block copolymer was confirmed via SEC and 1H-NMR measurements. The self-assembled and oxidized structures were subsequently crosslinked with cystamine dihydrochloride, a pH- and redox-responsive crosslinker, resulting in crosslinked vesicles, which were observed via cryo SEM. The vesicular structures of crosslinked Pull-b-PVP could be disassembled by acid treatment or the application of the redox agent tris(2-carboxyethyl)phosphine hydrochloride. The successful disassembly was monitored with DLS measurements. To conclude, self-assembled structures from DHBCs such as particles and vesicles display a strong potential to generate an impact on biomedicine and nanotechnologies. The variety of DHBC compositions and functionalities is very promising for future applications.}, language = {en} } @phdthesis{Wierzba2017, author = {Wierzba, Marta}, title = {Revisiting prosodic reconstruction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403152}, school = {Universit{\"a}t Potsdam}, pages = {vi, 224}, year = {2017}, abstract = {In this thesis, I develop a theoretical implementation of prosodic reconstruction and apply it to the empirical domain of German sentences in which part of a focus or contrastive topic is fronted. Prosodic reconstruction refers to the idea that sentences involving syntactic movement show prosodic parallels with corresponding simpler structures without movement. I propose to model this recurrent observation by ordering syntax-prosody mapping before copy deletion. In order to account for the partial fronting data, the idea is extended to the mapping between prosody and information structure. This assumption helps to explain why object-initial sentences containing a broad focus or broad contrastive topic show prosodic and interpretative restrictions similar to those of sentences with canonical word order. The empirical adequacy of the model is tested against a set of gradient acceptability judgments.}, language = {en} } @phdthesis{Weissenberger2017, author = {Weißenberger, Martin}, title = {Start-up subsidies for the unemployed - New evaluation approaches and insights}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406362}, school = {Universit{\"a}t Potsdam}, pages = {viii, 239}, year = {2017}, abstract = {Start-up incentives targeted at unemployed individuals have become an important tool of Active Labor Market Policy (ALMP) to fight unemployment in many countries in recent years.
In contrast to traditional ALMP instruments like training measures, wage subsidies, or job creation schemes, which are aimed at reintegrating unemployed individuals into dependent employment, start-up incentives are a fundamentally different approach to ALMP, in that they intend to encourage and help unemployed individuals to exit unemployment by entering self-employment and, thus, by creating their own jobs. In this sense, start-up incentives for unemployed individuals serve not only as employment and social policy to activate job seekers and combat unemployment but also as business policy to promote entrepreneurship. The corresponding empirical literature on this topic so far has been mainly focused on the individual labor market perspective, however. The main part of the thesis at hand examines the new start-up subsidy ("Gr{\"u}ndungszuschuss") in Germany and consists of four empirical analyses that extend the existing evidence on start-up incentives for unemployed individuals from multiple perspectives and in the following directions: First, it provides the first impact evaluation of the new start-up subsidy in Germany. The results indicate that participation in the new start-up subsidy has significant positive and persistent effects on both reintegration into the labor market and the income profiles of participants, in line with previous evidence on comparable German and international programs, which emphasizes the general potential of start-up incentives as part of the broader ALMP toolset. Furthermore, a novel sensitivity analysis of the applied propensity score matching approach integrates findings from entrepreneurship and labor market research about the key role of an individual's personality in the start-up decision, business performance, and general labor market outcomes into the impact evaluation of start-up incentives. The sensitivity analysis with regard to the inclusion and exclusion of usually unobserved personality variables reveals that differences in the estimated treatment effects are small in magnitude and mostly insignificant. Consequently, concerns about potential overestimation of treatment effects in previous evaluation studies of similar start-up incentives due to usually unobservable personality variables are less justified, as long as the set of observed control variables is sufficiently informative (Chapter 2). Second, the thesis expands our knowledge about the longer-term business performance and potential of subsidized businesses arising from the start-up subsidy program. In absolute terms, the analysis shows that a relatively high share of subsidized founders successfully survive in the market with their original businesses in the medium to long run. The subsidy also yields a "double dividend" to a certain extent in terms of additional job creation. Compared to "regular", i.e., non-subsidized new businesses founded by non-unemployed individuals in the same quarter, however, the economic and growth-related impulses set by participants of the subsidy program are only limited with regard to employment growth, innovation activity, or investment. Further investigations of possible reasons for these differences show that differential business growth paths of subsidized founders in the longer run seem to be mainly limited by stronger restrictions on access to capital and by unobserved factors, such as less growth-oriented business strategies and intentions, as well as lower (subjective) entrepreneurial persistence.
Taken together, the program has only limited potential as a business and entrepreneurship policy intended to induce innovation and economic growth (Chapters 3 and 4). Third, an empirical analysis at the level of German regional labor markets shows that there is high regional variation in subsidized start-up activity relative to overall new business formation. The positive correlation between regular start-up intensity and the share of all unemployed individuals who participate in the start-up subsidy program suggests that (nascent) unemployed founders also profit from the beneficial effects of regional entrepreneurship capital. Moreover, the analysis of potential deadweight and displacement effects from an aggregated regional perspective emphasizes that the start-up subsidy for unemployed individuals represents a market intervention into existing markets, which affects incumbents and potentially produces inefficiencies and market distortions. This macro perspective deserves more attention and research in the future (Chapter 5).}, language = {en} }