@phdthesis{Zuehlke2017, author = {Z{\"u}hlke, Martin}, title = {Elektrosprayionisation Ionenmobilit{\"a}tsspektrometrie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-407452}, school = {Universit{\"a}t Potsdam}, pages = {viii, 113, XIV}, year = {2017}, abstract = {Die Elektrosprayionisation (ESI) ist eine der weitverbreitetsten Ionisationstechniken f{\"u}r fl{\"u}ssige Proben in der Massen- und Ionenmobilit{\"a}ts(IM)-Spektrometrie. Aufgrund ihrer schonenden Ionisierung wird ESI vorwiegend f{\"u}r empfindliche, komplexe Molek{\"u}le in der Biologie und Medizin eingesetzt. {\"U}berdies ist sie allerdings f{\"u}r ein sehr breites Spektrum an Substanzklassen anwendbar. Die IM-Spektrometrie wurde urspr{\"u}nglich zur Detektion gasf{\"o}rmiger Proben entwickelt, die haupts{\"a}chlich durch radioaktive Quellen ionisiert werden. Sie ist die einzige analytische Methode, bei der Isomere in Echtzeit getrennt und {\"u}ber ihre charakteristische IM direkt identifiziert werden k{\"o}nnen. ESI wurde in den 1990er Jahren durch die Hill-Gruppe in die IM-Spektrometrie eingef{\"u}hrt. Die Kombination wird bisher jedoch nur von wenigen Gruppen verwendet und hat deshalb noch ein hohes Entwicklungspotential. Ein vielversprechendes Anwendungsfeld ist der Einsatz in der Hochleistungsfl{\"u}ssigkeitschromatographie (HPLC) zur mehrdimensionalen Trennung. Heutzutage ist die HPLC die Standardmethode zur Trennung komplexer Proben in der Routineanalytik. HPLC-Trennungsg{\"a}nge sind jedoch h{\"a}ufig langwierig, und der Einsatz verschiedener Laufmittel, hoher Flussraten, von Puffern sowie Laufmittelgradienten stellt hohe Anforderungen an die Detektoren. Die ESI-IM-Spektrometrie wurde in einigen Studien bereits als HPLC-Detektor eingesetzt, war dort bisher jedoch auf Flussratensplitting oder geringe Flussraten des Laufmittels beschr{\"a}nkt. In dieser kumulativen Doktorarbeit konnte daher erstmals ein ESI-IM-Spektrometer als HPLC-Detektor f{\"u}r den Flussratenbereich von 200-1500 μl/min entwickelt werden. Anhand von f{\"u}nf Publikationen wurden (1) {\"u}ber eine umfassende Charakterisierung die Eignung des Spektrometers als HPLC-Detektor festgestellt, (2) ausgew{\"a}hlte komplexe Trenng{\"a}nge pr{\"a}sentiert sowie (3) die Anwendung zum Reaktionsmonitoring und (4, 5) m{\"o}gliche Weiterentwicklungen gezeigt. Erfolgreich konnten mit dem selbst entwickelten ESI-IM-Spektrometer typische HPLC-Bedingungen wie Wassergehalte im Laufmittel von bis zu 90\%, Pufferkonzentrationen von bis zu 10 mM sowie Nachweisgrenzen von bis zu 50 nM erreicht werden. Weiterhin wurde anhand der komplexen Trennungsg{\"a}nge (24 Pestizide/18 Aminos{\"a}uren) gezeigt, dass die HPLC und die IM-Spektrometrie eine hohe Orthogonalit{\"a}t besitzen. Eine effektive Peakkapazit{\"a}t von 240 wurde so realisiert. Auf der HPLC-S{\"a}ule koeluierende Substanzen konnten {\"u}ber die Driftzeit getrennt und {\"u}ber ihre IM identifiziert werden, sodass die Gesamttrennzeiten erheblich minimiert werden konnten. Die Anwendbarkeit des ESI-IM-Spektrometers zur {\"U}berwachung chemischer Synthesen wurde anhand einer dreistufigen Reaktion demonstriert. Es konnten die wichtigsten Edukte, Zwischenprodukte und Produkte aller Stufen identifiziert werden. Eine quantitative Auswertung war sowohl {\"u}ber eine kurze HPLC-Vortrennung als auch durch die Entwicklung eines eigenen Kalibrierverfahrens, welches die Ladungskonkurrenz bei ESI ber{\"u}cksichtigt, ohne HPLC m{\"o}glich.
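For orientation, the drift-time-based identification of isomers mentioned above follows the standard low-field drift-tube IMS relations (a textbook sketch; the symbols are generic and not values from this thesis): the mobility K and its pressure- and temperature-reduced form K_0 are obtained from the measured drift time t_d as

\[ K = \frac{L^{2}}{t_{d}\,U}, \qquad K_{0} = K\,\frac{p}{p_{0}}\,\frac{T_{0}}{T}, \]

where L is the drift length, U the drift voltage, and p_0 = 1013 mbar, T_0 = 273.15 K are the reference conditions; the reduced mobility K_0 is the substance-characteristic quantity used for identification.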
Im zweiten Teil der Arbeit werden zwei Weiterentwicklungen des Spektrometers pr{\"a}sentiert. Eine M{\"o}glichkeit ist die Reduzierung des Drucks in den intermedi{\"a}ren Bereich (300 - 1000 mbar) mit dem Ziel der Verringerung der ben{\"o}tigten Spannungen. Mithilfe von Streulichtbildern und Strom-Spannungs-Kurven wurde f{\"u}r geringe Dr{\"u}cke eine verminderte Freisetzung der Analyt-Ionen aus den Tropfen festgestellt. Die Verluste konnten jedoch {\"u}ber h{\"o}here elektrische Feldst{\"a}rken ausgeglichen werden, sodass gleiche Nachweisgrenzen bei 500 mbar und bei 1 bar erreicht wurden. Die zweite Weiterentwicklung ist ein neuartiges Ionentor mit Pulsschaltung, welches eine Verdopplung der Aufl{\"o}sung auf bis zu R > 100 bei gleicher Sensitivit{\"a}t erm{\"o}glichte. Eine denkbare Anwendung im Bereich der Peptidanalytik wurde mit beachtlichen Aufl{\"o}sungen der Peptide von R = 90 gezeigt.}, language = {de} } @phdthesis{Zuo2017, author = {Zuo, Zhe}, title = {From unstructured to structured: Context-based named entity mining from text}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412576}, school = {Universit{\"a}t Potsdam}, pages = {vii, 112}, year = {2017}, abstract = {With recent advances in the area of information extraction, automatically extracting structured information from a vast amount of unstructured textual data has become an important task, since it is infeasible for humans to capture all this information manually. Named entities (e.g., persons, organizations, and locations), which are crucial components of texts, are usually the subjects of structured information extracted from textual documents. Therefore, the task of named entity mining receives much attention. It consists of three major subtasks: named entity recognition, named entity linking, and relation extraction. These three tasks build up the entire pipeline of a named entity mining system, where each of them has its own challenges and can be employed for further applications. As a fundamental task in the natural language processing domain, studies on named entity recognition have a long history, and many existing approaches produce reliable results. The task aims to extract mentions of named entities in text and to identify their types. Named entity linking has recently received much attention with the development of knowledge bases that contain rich information about entities. The goal is to disambiguate mentions of named entities and to link them to the corresponding entries in a knowledge base. Relation extraction, as the final step of named entity mining, is a highly challenging task that aims to extract semantic relations between named entities, e.g., the ownership relation between two companies. In this thesis, we review the state of the art of the named entity mining domain in detail, including valuable features, techniques, and evaluation methodologies. Furthermore, we present two of our approaches that focus on the named entity linking and relation extraction tasks separately. To solve the named entity linking task, we propose the entity linking technique BEL, which operates on a textual range of relevant terms and aggregates decisions from an ensemble of simple classifiers. Each of the classifiers operates on a randomly sampled subset of the above range. In extensive experiments on hand-labeled and benchmark datasets, our approach outperformed state-of-the-art entity linking techniques in terms of both quality and efficiency.
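The ensemble idea behind BEL as described above can be illustrated with a small sketch (Python; the scoring function and all names here are hypothetical placeholders, not the published implementation):

    import random
    from collections import Counter

    def link_entity(mention, context_terms, candidates, score,
                    n_classifiers=25, subset_size=10):
        # Each simple classifier sees only a random subset of the terms
        # around the mention and votes for one knowledge-base candidate.
        votes = Counter()
        for _ in range(n_classifiers):
            subset = random.sample(context_terms,
                                   min(subset_size, len(context_terms)))
            best = max(candidates, key=lambda cand: score(mention, subset, cand))
            votes[best] += 1
        # The aggregated majority vote disambiguates the mention.
        return votes.most_common(1)[0][0]

Aggregating many weak, randomly perturbed decisions of this kind tends to be more robust than a single classifier operating on the full context.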
For the task of relation extraction, we focus on extracting a specific group of difficult relation types: business relations between companies. These relations can be used to gain valuable insight into the interactions between companies and to perform complex analytics, such as predicting risk or valuing companies. Our semi-supervised strategy can extract business relations between companies based on only a few user-provided seed company pairs. By doing so, we also provide a solution for the problem of determining the direction of asymmetric relations, such as the ownership_of relation. We improve the reliability of the extraction process by using a holistic pattern identification method, which classifies the generated extraction patterns. Our experiments show that we can accurately and reliably extract new entity pairs occurring in the target relation by using as few as five labeled seed pairs.}, language = {en} } @phdthesis{Ziegler2017, author = {Ziegler, Moritz O.}, title = {The 3D in-situ stress field and its changes in geothermal reservoirs}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403838}, school = {Universit{\"a}t Potsdam}, pages = {VIII, 110, XV}, year = {2017}, abstract = {Information on the contemporary in-situ stress state of the Earth's crust is essential for geotechnical applications and physics-based seismic hazard assessment. Yet, stress data records for a data point are incomplete, and their availability is usually not dense enough to allow conclusive statements. This demands a thorough examination of the in-situ stress field, which is achieved by 3D geomechanical-numerical models. However, the models' spatial resolution is limited, and the resulting local stress state is subject to large uncertainties that limit the significance of the findings. In addition, temporal variations of the in-situ stress field are naturally or anthropogenically induced. In my thesis I address these challenges in three manuscripts that investigate (1) the current crustal stress field orientation, (2) the 3D geomechanical-numerical modelling of the in-situ stress state, and (3) the phenomenon of injection-induced temporal stress tensor rotations. In the first manuscript I present the first comprehensive stress data compilation for Iceland, with 495 data records. To this end, I analysed image logs from 57 boreholes in Iceland for indicators of the orientation of the maximum horizontal stress component. The study is the first stress survey based on different kinds of stress indicators in a geologically very young and tectonically active area of an onshore spreading ridge. It reveals a distinct stress field with a depth-independent stress orientation even very close to the spreading centre. In the second manuscript I present a calibrated 3D geomechanical-numerical modelling approach for the in-situ stress state of the Bavarian Molasse Basin that investigates the regional (70 x 70 x 10 km³) and local (10 x 10 x 10 km³) stress state. To link these two models I develop a multi-stage modelling approach that provides a reliable and efficient method to derive initial and boundary conditions for the smaller-scale model from the larger-scale model. Furthermore, I quantify the uncertainties in the model results, which are inherent to geomechanical-numerical modelling in general and to the multi-stage approach in particular.
I show that the significance of the model results is mainly reduced by the uncertainties in the material properties and by the low number of available stress magnitude data records for calibration. In the third manuscript I investigate the phenomenon of injection-induced temporal stress tensor rotation and its controlling factors. I conduct a sensitivity study with a 3D generic thermo-hydro-mechanical model. I show that the key control factors for the stress tensor rotation are the permeability as the decisive factor, the injection rate, and the initial differential stress. Large rotations of the stress tensor are indicated in particular for enhanced geothermal systems with low permeability. According to these findings, the estimation of the initial differential stress in a reservoir is possible provided that the permeability is known and the angle of stress rotation is observed. I propose that stress tensor rotations can be a key factor for the potential of induced seismicity on pre-existing faults, since the reorientation of the stress field changes the optimal orientation of faults.}, language = {en} } @phdthesis{Zieger2017, author = {Zieger, Tobias}, title = {Self-adaptive data quality}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410573}, school = {Universit{\"a}t Potsdam}, pages = {vii, 125}, year = {2017}, abstract = {Carrying out business processes successfully is closely linked to the quality of the data inventory in an organization. Deficiencies in data quality lead to problems: Incorrect address data prevents (timely) shipments to customers. Erroneous orders lead to returns and thus to unnecessary effort. Wrong pricing causes companies to miss out on revenues or impairs customer satisfaction. If orders or customer records cannot be retrieved, complaint management takes longer. Due to erroneous inventories, too few or too many supplies might be reordered. A special problem with data quality, and the reason for many of the issues mentioned above, are duplicates in databases. Duplicates are different representations of the same real-world objects in a dataset. However, these representations differ from each other and are for that reason hard for a computer to match. Moreover, the number of comparisons required to find those duplicates grows with the square of the dataset size. To cleanse the data, these duplicates must be detected and removed. Duplicate detection is a very laborious process. To achieve satisfactory results, appropriate software must be created and configured (similarity measures, partitioning keys, thresholds, etc.). Both require much manual effort and experience. This thesis addresses the automation of parameter selection for duplicate detection and presents several novel approaches that eliminate the need for human experience in parts of the duplicate detection process. A pre-processing step is introduced that analyzes the datasets in question and classifies their attributes semantically. Not only do these annotations help in understanding the respective datasets, but they also facilitate subsequent steps, for example, by selecting appropriate similarity measures or normalizing the data upfront. This approach works without schema information. Following that, we present a partitioning technique that strongly reduces the number of pair comparisons for the duplicate detection process. The approach automatically finds particularly suitable partitioning keys that simultaneously allow for effective and efficient duplicate retrieval.
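The quadratic growth of the comparison space and the effect of a partitioning key, both mentioned above, can be made concrete with a short sketch (Python; illustrative only, not the key-selection algorithm of the thesis):

    from collections import defaultdict

    def pair_count(n):
        # exhaustive duplicate detection compares every pair of records once
        return n * (n - 1) // 2

    def blocked_pair_count(records, key):
        # with a partitioning key, only records sharing a key value are compared
        blocks = defaultdict(int)
        for record in records:
            blocks[key(record)] += 1
        return sum(pair_count(size) for size in blocks.values())

For one million records, the exhaustive approach needs about 5 * 10^11 comparisons; splitting the data into, say, 1000 balanced partitions reduces this by roughly three orders of magnitude.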
By means of a user study, we demonstrate that this technique finds partitioning keys that outperform expert suggestions and that it additionally does not require manual configuration. Furthermore, this approach can be applied independently of the attribute types. To measure the success of a duplicate detection process and to execute the described partitioning approach, a gold standard is required that provides information about the actual duplicates in a training dataset. This thesis presents a technique that uses existing duplicate detection results and crowdsourcing to create a near gold standard that can be used for the purposes above. Another part of the thesis describes and evaluates strategies for reducing these crowdsourcing costs and for achieving a consensus with less effort.}, language = {en} } @phdthesis{Zhou2017, author = {Zhou, Bin}, title = {On the assessment of surface urban heat island}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404383}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 119}, year = {2017}, abstract = {Inwiefern St{\"a}dte unter den Megatrends der Urbanisierung und des Klimawandels nachhaltig gestaltet werden k{\"o}nnen, bleibt umstritten. Dies ist zum Teil auf unzureichende Kenntnisse der Mensch-Umwelt-Interaktionen zur{\"u}ckzuf{\"u}hren. Als die am vollst{\"a}ndigsten dokumentierte anthropogene Klimamodifikation ruft der Urbane-Hitzeinsel-Effekt (UHI-Effekt) weltweit Sorgen hinsichtlich der Gesundheit der Bev{\"o}lkerung hervor. Dazu kommt noch ein immer h{\"a}ufigeres und intensiveres Auftreten von Hitzewellen, wodurch das Wohlbefinden der Stadtbewohner weiter beeintr{\"a}chtigt wird. Trotz eines deutlichen Anstiegs der Zahl der UHI-bezogenen Ver{\"o}ffentlichungen in den letzten Jahrzehnten haben die unterschiedlichen Definitionen von st{\"a}dtischen und l{\"a}ndlichen Gebieten in bisherigen Studien die allgemeine Vergleichbarkeit der Resultate stark erschwert. Dar{\"u}ber hinaus haben nur wenige Studien den UHI-Effekt und seine Einflussfaktoren anhand einer Kombination von Landnutzungsdaten und thermischer Fernerkundung systematisch untersucht. Diese Arbeit stellt einen allgemeinen Rahmen zur Quantifizierung von UHI-Intensit{\"a}ten mittels eines automatisierten Algorithmus vor, wobei St{\"a}dte als Agglomerationen maximaler r{\"a}umlicher Kontinuit{\"a}t basierend auf Landnutzungsdaten identifiziert sowie deren l{\"a}ndliche Umfelder analog definiert werden. Durch Verkn{\"u}pfung der Landnutzungsdaten mit Landoberfl{\"a}chentemperaturen von Satelliten kann die UHI-Intensit{\"a}t robust und konsistent berechnet werden. Anhand dieser Innovation wurde nicht nur der Zusammenhang zwischen Stadtgr{\"o}ße und UHI-Intensit{\"a}t erneut untersucht, sondern es wurden auch die Auswirkungen der Stadtform auf die UHI-Intensit{\"a}t quantifiziert. Diese Arbeit leistet vielf{\"a}ltige Beitr{\"a}ge zum tieferen Verst{\"a}ndnis des UHI-Ph{\"a}nomens. Erstens wurde eine log-lineare Beziehung zwischen UHI-Intensit{\"a}t und Stadtgr{\"o}ße unter Ber{\"u}cksichtigung der 5.000 europ{\"a}ischen St{\"a}dte best{\"a}tigt. Werden kleinere St{\"a}dte auch ber{\"u}cksichtigt, ergibt sich eine log-logistische Beziehung. Zweitens besteht ein komplexes Zusammenspiel zwischen der Stadtform und der UHI-Intensit{\"a}t: Die Stadtgr{\"o}ße stellt den st{\"a}rksten Einfluss auf die UHI-Intensit{\"a}t dar, gefolgt von der fraktalen Dimension und der Anisometrie.
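The two functional forms referred to above can be written out as follows (a sketch; a, b, c, k and A_0 are generic coefficients, not fitted values from the thesis): for large cities, a log-linear relation between UHI intensity and city size A,

\[ \Delta T_{\mathrm{UHI}} = a + b\,\log A, \]

and, once small cities are included, a logistic dependence on the logarithm of city size (log-logistic form),

\[ \Delta T_{\mathrm{UHI}}(A) = \frac{c}{1 + (A/A_{0})^{-k}}, \]

which is approximately log-linear in its central range and saturates at both ends of the size spectrum.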
Allerdings zeigen ihre relativen Beitr{\"a}ge zur UHI-Intensit{\"a}t eine regionale Heterogenit{\"a}t, welche die Bedeutung r{\"a}umlicher Muster w{\"a}hrend der Umsetzung von UHI-Anpassungsmaßnahmen hervorhebt. Des Weiteren ergibt sich eine neue Saisonalit{\"a}t der UHI-Intensit{\"a}t f{\"u}r individuelle St{\"a}dte in Form von Hysteresekurven, die eine Phasenverschiebung zwischen den Zeitreihen der UHI-Intensit{\"a}t und der Hintergrundtemperatur andeutet. Diese Saisonalit{\"a}t wurde anhand von Luft- und Landoberfl{\"a}chentemperaturen untersucht, indem die Satellitenbeobachtung und die Modellierung der urbanen Grenzschicht mittels des UrbClim-Modells kombiniert wurden. Am Beispiel von London ist die Diskrepanz der Saisonalit{\"a}ten zwischen den beiden Temperaturen vor allem auf die mit der einfallenden Sonnenstrahlung verbundene Besonderheit der Landoberfl{\"a}chentemperatur zur{\"u}ckzuf{\"u}hren. Dar{\"u}ber hinaus spielt das regionale Klima eine wichtige Rolle bei der Entwicklung der UHI. Diese Arbeit ist eine der ersten Studien dieser Art, die eine systematische und statistische Untersuchung des UHI-Effektes erm{\"o}glicht. Die Ergebnisse sind von besonderer Bedeutung f{\"u}r die allgemeine r{\"a}umliche Planung und Regulierung auf Meso- und Makroebenen, damit sich Vorteile der rapiden Urbanisierung nutzbar machen und zeitgleich die folgende Hitzebelastung proaktiv vermindern lassen.}, language = {en} } @phdthesis{Wolf2017, author = {Wolf, Julia}, title = {Schadenserkennung in Beton durch {\"U}berwachung mit eingebetteten Ultraschallpr{\"u}fk{\"o}pfen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397363}, school = {Universit{\"a}t Potsdam}, pages = {ix, 142}, year = {2017}, abstract = {Die zerst{\"o}rungsfreien Pr{\"u}fungen von Bauwerken mit Hilfe von Ultraschallmessverfahren haben in den letzten Jahren an Bedeutung gewonnen. Durch Ultraschallmessungen k{\"o}nnen die Geometrien von Bauteilen bestimmt sowie von außen nicht sichtbare Fehler wie Delaminationen und Kiesnester erkannt werden. Mit neuartigen, in das Betonbauteil eingebetteten Ultraschallpr{\"u}fk{\"o}pfen sollen nun Bauwerke dauerhaft auf Ver{\"a}nderungen {\"u}berpr{\"u}ft werden. Dazu werden Ultraschallsignale direkt im Inneren eines Bauteils erzeugt, was die M{\"o}glichkeiten der herk{\"o}mmlichen Methoden der Bauwerks{\"u}berwachung wesentlich erweitert. Ein Ultraschallverfahren k{\"o}nnte mit eingebetteten Pr{\"u}fk{\"o}pfen ein Betonbauteil kontinuierlich integral {\"u}berwachen und damit auch stetig fortschreitende Gef{\"u}ge{\"a}nderungen, wie beispielsweise Mikrorisse, registrieren. Sicherheitsrelevante Bauteile, die nach dem Einbau f{\"u}r Messungen unzug{\"a}nglich oder mittels Ultraschall, beispielsweise durch zus{\"a}tzliche Beschichtungen der Oberfl{\"a}che, nicht pr{\"u}fbar sind, lassen sich mit eingebetteten Pr{\"u}fk{\"o}pfen {\"u}berwachen. An bereits vorhandenen Bauwerken k{\"o}nnen die Ultraschallpr{\"u}fk{\"o}pfe mithilfe von Bohrl{\"o}chern und speziellem Verpressm{\"o}rtel auch nachtr{\"a}glich in das Bauteil integriert werden. F{\"u}r Fertigbauteile bieten sich eingebettete Pr{\"u}fk{\"o}pfe zur Herstellungskontrolle sowie zur {\"U}berwachung der Baudurchf{\"u}hrung als Werkzeug der Qualit{\"a}tssicherung an. Auch die schnelle Schadensanalyse eines Bauwerks nach Naturkatastrophen, wie beispielsweise einem Erdbeben oder einer Flut, ist denkbar. 
Durch die gute Ankopplung erm{\"o}glichen diese neuartigen Pr{\"u}fk{\"o}pfe den Einsatz von empfindlichen Auswertungsmethoden wie der Kreuzkorrelation, der Coda-Wellen-Interferometrie oder der Amplitudenauswertung f{\"u}r die Signalanalyse. Bei regelm{\"a}ßigen Messungen k{\"o}nnen sich anbahnende Sch{\"a}den eines Bauwerks somit fr{\"u}hzeitig erkannt werden. Da die Sch{\"a}digung eines Bauwerks keine direkt messbare Gr{\"o}ße darstellt, erfordert eine eindeutige Schadenserkennung in der Regel die Messung mehrerer physikalischer Gr{\"o}ßen, die geeignet verkn{\"u}pft werden. Physikalische Gr{\"o}ßen k{\"o}nnen sein: Ultraschalllaufzeit, Amplitude des Ultraschallsignals und Umgebungstemperatur. Dazu m{\"u}ssen Korrelationen zwischen dem Zustand des Bauwerks, den Umgebungsbedingungen und den Parametern des gemessenen Ultraschallsignals untersucht werden. In dieser Arbeit werden die neuartigen Pr{\"u}fk{\"o}pfe vorgestellt. Es wird beschrieben, dass sie sich sowohl in bereits errichtete als auch in im Bau befindliche Betonbauwerke einbauen lassen. Experimentell wird gezeigt, dass die Pr{\"u}fk{\"o}pfe in mehreren Ebenen eingebettet sein k{\"o}nnen, da ihre Abstrahlcharakteristik im Beton nahezu ungerichtet ist. Die Mittenfrequenz von rund 62 kHz erm{\"o}glicht, je nach Betonart und SRV, Abst{\"a}nde von mindestens 3 m zwischen Pr{\"u}fk{\"o}pfen, die als Sender und Empf{\"a}nger arbeiten. Die Empfindlichkeit der eingebetteten Pr{\"u}fk{\"o}pfe gegen{\"u}ber Ver{\"a}nderungen im Beton wird anhand von zwei Laborexperimenten gezeigt, einem Drei-Punkt-Biegeversuch und einem Versuch zur Erzeugung von Frost-Tau-Wechsel-Sch{\"a}den. Die Ergebnisse werden mit anderen zerst{\"o}rungsfreien Pr{\"u}fverfahren verglichen. Es zeigt sich, dass die Pr{\"u}fk{\"o}pfe durch die Anwendung empfindlicher Auswertemethoden auftretende Risse im Beton detektieren, bevor diese eine Gefahr f{\"u}r das Bauwerk darstellen. Abschließend werden Beispiele f{\"u}r die Installation der neuartigen Ultraschallpr{\"u}fk{\"o}pfe in realen Bauteilen, zwei Br{\"u}cken und einem Fundament, gezeigt und basierend auf den dort gewonnenen ersten Erfahrungen ein Konzept f{\"u}r die Umsetzung einer Langzeit{\"u}berwachung aufgestellt.}, language = {de} } @phdthesis{Wittenbecher2017, author = {Wittenbecher, Clemens}, title = {Linking whole-grain bread, coffee, and red meat to the risk of type 2 diabetes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-404592}, school = {Universit{\"a}t Potsdam}, pages = {XII, 194, ii}, year = {2017}, abstract = {Background: Consumption of whole-grain bread, coffee, and red meat was consistently related to the risk of developing type 2 diabetes in prospective cohort studies, but the potentially underlying biological mechanisms are not well understood. Metabolomics profiles were shown to be sensitive to these dietary exposures and, at the same time, to be informative with respect to the risk of type 2 diabetes. Moreover, graphical network models were demonstrated to reflect the biological processes underlying high-dimensional metabolomics profiles. Aim: The aim of this study was to infer hypotheses on the biological mechanisms that link consumption of whole-grain bread, coffee, and red meat, respectively, to the risk of developing type 2 diabetes. More specifically, network models of amino acid and lipid profiles were considered as potential mediators of these risk relations.
Study population: Analyses were conducted in the prospective EPIC-Potsdam cohort (n = 27,548), applying a nested case-cohort design (n = 2731, including 692 incident diabetes cases). Habitual diet was assessed with validated semiquantitative food-frequency questionnaires. Concentrations of 126 metabolites (acylcarnitines, phosphatidylcholines, sphingomyelins, amino acids) were determined in baseline serum samples. Incident type 2 diabetes cases were assessed and validated in an active follow-up procedure. The median follow-up time was 6.6 years. Analytical design: The methodological approach was conceptually based on counterfactual causal inference theory. Observations on the network-encoded conditional independence structure restricted the space of possible causal explanations of observed metabolomics-data patterns. Given basic directionality assumptions (diet affects metabolism; metabolism affects future diabetes incidence), adjustment for a subset of direct neighbours was sufficient to consistently estimate network-independent direct effects. Further model specification, however, was limited due to missing directionality information on the links between metabolites. Therefore, a multi-model approach was applied to infer the bounds of possible direct effects. All metabolite-exposure links and metabolite-outcome links, respectively, were classified into one of three categories: direct effect, ambiguous (some models indicated an effect, others did not), and no effect. Cross-sectional and longitudinal relations were evaluated in multivariable-adjusted linear regression and Cox proportional hazard regression models, respectively. Models were comprehensively adjusted for age, sex, body mass index, prevalence of hypertension, dietary and lifestyle factors, and medication. Results: Consumption of whole-grain bread was related to lower levels of several lipid metabolites with saturated and monounsaturated fatty acids. Coffee was related to lower aromatic and branched-chain amino acids, and had potential effects on the fatty acid profile within lipid classes. Red meat was linked to lower glycine levels and was related to higher circulating concentrations of branched-chain amino acids. In addition, potential marked effects of red meat consumption on the fatty acid composition within the investigated lipid classes were identified. Moreover, potential beneficial and adverse direct effects of metabolites on type 2 diabetes risk were detected. Aromatic amino acids and lipid metabolites with even-chain saturated (C14-C18) and with specific polyunsaturated fatty acids had adverse effects on type 2 diabetes risk. Glycine, glutamine, and lipid metabolites with monounsaturated fatty acids and with other species of polyunsaturated fatty acids were classified as having direct beneficial effects on type 2 diabetes risk. Potential mediators of the diet-diabetes links were identified by graphically overlaying this information in network models. Mediation analyses revealed that effects on lipid metabolites could potentially explain about one fourth of the whole-grain bread effect on type 2 diabetes risk; and that effects of coffee and red meat consumption on amino acid and lipid profiles could potentially explain about two thirds of the altered type 2 diabetes risk linked to these dietary exposures. Conclusion: An algorithm was developed that is capable of integrating single external variables (continuous exposures, survival time) and high-dimensional metabolomics data in a joint graphical model.
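As a rough illustration of the survival model named above, a generic Cox proportional hazards fit on synthetic data might look as follows (Python with the lifelines library; the data and column names are invented, and the study's actual covariate set is far larger):

    import numpy as np
    import pandas as pd
    from lifelines import CoxPHFitter

    rng = np.random.default_rng(0)
    n = 200
    x = rng.normal(size=n)                       # standardized metabolite level
    t = rng.exponential(scale=np.exp(-0.5 * x))  # higher x, shorter time to event
    df = pd.DataFrame({"time": np.minimum(t, 6.6),      # censor at median follow-up
                       "event": (t < 6.6).astype(int),  # 1 = incident case observed
                       "metabolite": x})

    cph = CoxPHFitter()
    cph.fit(df, duration_col="time", event_col="event")
    cph.print_summary()  # hazard ratio per unit of the metabolite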
Application to the EPIC-Potsdam cohort study revealed that the observed conditional independence patterns were consistent with the a priori mediation hypothesis: Early effects on lipid and amino acid metabolism had the potential to explain large parts of the link between three of the most widely discussed diabetes-related dietary exposures and the risk of developing type 2 diabetes.}, language = {en} } @phdthesis{Willersinn2017, author = {Willersinn, Jochen}, title = {Self-Assembly of double hydrophilic block copolymers}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-408578}, school = {Universit{\"a}t Potsdam}, pages = {119, clxxiv}, year = {2017}, abstract = {The motivation of this work was to investigate the self-assembly of a block copolymer species that had attracted little attention before: double hydrophilic block copolymers (DHBCs). DHBCs consist of two linear hydrophilic polymer blocks. The self-assembly of DHBCs towards suprastructures such as particles and vesicles is driven by a strong difference in hydrophilicity between the corresponding blocks, leading to a microphase separation due to immiscibility. The benefits of DHBCs and the corresponding particles and vesicles, such as biocompatibility, high permeability towards water and hydrophilic compounds, as well as the large number of possible functionalizations of the block copolymers, make the application of DHBC-based structures a viable choice in biomedicine. In order to establish a route towards self-assembled structures from DHBCs that display the potential to act as cargos for future applications, several block copolymers containing two hydrophilic polymer blocks were synthesized. Poly(ethylene oxide)-b-poly(N-vinylpyrrolidone) (PEO-b-PVP) and poly(ethylene oxide)-b-poly(N-vinylpyrrolidone-co-N-vinylimidazole) (PEO-b-P(VP-co-VIm)) block copolymers were synthesized via reversible deactivation radical polymerization (RDRP) techniques starting from a PEO macro chain transfer agent. The block copolymers displayed a concentration-dependent self-assembly behavior in water, which was determined via dynamic light scattering (DLS). It was possible to observe spherical particles via laser scanning confocal microscopy (LSCM) and cryogenic scanning electron microscopy (cryo SEM) in highly concentrated solutions of PEO-b-PVP. Furthermore, a crosslinking strategy for PEO-b-P(VP-co-VIm) was developed, applying the diiodo crosslinker diethylene glycol bis(2-iodoethyl) ether to form quaternary amines at the VIm units. The formed crosslinked structures proved stable upon dilution and transfer into organic solvents. Moreover, self-assembly and crosslinking in DMF proved to be more advantageous, and the crosslinked structures could be successfully transferred to aqueous solution. The obtained spherical submicron particles could be visualized via LSCM, cryo SEM and cryo TEM. Double hydrophilic pullulan-b-poly(acrylamide) block copolymers were synthesized via copper-catalyzed alkyne-azide cycloaddition (CuAAC), starting from a suitable pullulan alkyne and azide-functionalized poly(N,N-dimethylacrylamide) (PDMA) and poly(N-ethylacrylamide) (PEA) homopolymers. The conjugation reaction was confirmed via SEC and 1H-NMR measurements. The self-assembly of the block copolymers was monitored with DLS and static light scattering (SLS) measurements, indicating the presence of hollow spherical structures. Cryo SEM measurements could confirm the presence of vesicular structures for Pull-b-PEA block copolymers.
Solutions of Pull-b-PDMA displayed particles in cryo SEM. Moreover, an end group functionalization of Pull-b-PDMA with Rhodamine B allowed the structures to be assessed via LSCM; hollow spherical structures were observed, indicating the presence of vesicles, too. An exemplary pathway towards a DHBC-based drug delivery vehicle was demonstrated with the block copolymer Pull-b-PVP. The block copolymer was synthesized via RAFT/MADIX techniques starting from a pullulan chain transfer agent. Pull-b-PVP displayed a concentration-dependent self-assembly in water with an efficiency superior to the PEO-b-PVP system, which could be observed via DLS. Cryo SEM and LSCM displayed the presence of spherical structures. In order to apply a reversible crosslinking strategy to the synthesized block copolymer, the pullulan block was selectively oxidized to dialdehydes with NaIO4. The oxidation of the block copolymer was confirmed via SEC and 1H-NMR measurements. The self-assembled and oxidized structures were subsequently crosslinked with cystamine dihydrochloride, a pH- and redox-responsive crosslinker, resulting in crosslinked vesicles, which were observed via cryo SEM. The vesicular structures of crosslinked Pull-b-PVP could be disassembled by acid treatment or the application of the redox agent tris(2-carboxyethyl)phosphine hydrochloride. The successful disassembly was monitored with DLS measurements. To conclude, self-assembled structures from DHBCs such as particles and vesicles display a strong potential to generate an impact on biomedicine and nanotechnologies. The variety of DHBC compositions and functionalities is a very promising feature for future applications.}, language = {en} } @phdthesis{Wierzba2017, author = {Wierzba, Marta}, title = {Revisiting prosodic reconstruction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403152}, school = {Universit{\"a}t Potsdam}, pages = {vi, 224}, year = {2017}, abstract = {In this thesis, I develop a theoretical implementation of prosodic reconstruction and apply it to the empirical domain of German sentences in which part of a focus or contrastive topic is fronted. Prosodic reconstruction refers to the idea that sentences involving syntactic movement show prosodic parallels with corresponding simpler structures without movement. I propose to model this recurrent observation by ordering syntax-prosody mapping before copy deletion. In order to account for the partial fronting data, the idea is extended to the mapping between prosody and information structure. This assumption helps to explain why object-initial sentences containing a broad focus or broad contrastive topic show prosodic and interpretative restrictions similar to those of sentences with canonical word order. The empirical adequacy of the model is tested against a set of gradient acceptability judgments.}, language = {en} } @phdthesis{Weissenberger2017, author = {Weißenberger, Martin}, title = {Start-up subsidies for the unemployed - New evaluation approaches and insights}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-406362}, school = {Universit{\"a}t Potsdam}, pages = {viii, 239}, year = {2017}, abstract = {Start-up incentives targeted at unemployed individuals have become an important tool of the Active Labor Market Policy (ALMP) to fight unemployment in many countries in recent years.
In contrast to traditional ALMP instruments like training measures, wage subsidies, or job creation schemes, which are aimed at reintegrating unemployed individuals into dependent employment, start-up incentives are a fundamentally different approach to ALMP, in that they intend to encourage and help unemployed individuals to exit unemployment by entering self-employment and, thus, by creating their own jobs. In this sense, start-up incentives for unemployed individuals serve not only as employment and social policy to activate job seekers and combat unemployment but also as business policy to promote entrepreneurship. The corresponding empirical literature on this topic so far has been mainly focused on the individual labor market perspective, however. The main part of the thesis at hand examines the new start-up subsidy ("Gr{\"u}ndungszuschuss") in Germany and consists of four empirical analyses that extend the existing evidence on start-up incentives for unemployed individuals from multiple perspectives and in the following directions: First, it provides the first impact evaluation of the new start-up subsidy in Germany. The results indicate that participation in the new start-up subsidy has significant positive and persistent effects on both reintegration into the labor market and the income profiles of participants, in line with previous evidence on comparable German and international programs, which emphasizes the general potential of start-up incentives as part of the broader ALMP toolset. Furthermore, a new innovative sensitivity analysis of the applied propensity score matching approach integrates findings from entrepreneurship and labor market research about the key role of an individual's personality in the start-up decision, business performance, and general labor market outcomes into the impact evaluation of start-up incentives. The sensitivity analysis with regard to the inclusion and exclusion of usually unobserved personality variables reveals that differences in the estimated treatment effects are small in magnitude and mostly insignificant. Consequently, concerns about potential overestimation of treatment effects in previous evaluation studies of similar start-up incentives due to usually unobservable personality variables are less justified, as long as the set of observed control variables is sufficiently informative (Chapter 2). Second, the thesis expands our knowledge about the longer-term business performance and potential of subsidized businesses arising from the start-up subsidy program. In absolute terms, the analysis shows that a relatively high share of subsidized founders successfully survives in the market with their original businesses in the medium to long run. The subsidy also yields a "double dividend" to a certain extent in terms of additional job creation. Compared to "regular", i.e., non-subsidized new businesses founded by non-unemployed individuals in the same quarter, however, the economic and growth-related impulses set by participants of the subsidy program are only limited with regard to employment growth, innovation activity, or investment. Further investigations of possible reasons for these differences show that differential business growth paths of subsidized founders in the longer run seem to be mainly limited by more restricted access to capital and by unobserved factors, such as less growth-oriented business strategies and intentions, as well as lower (subjective) entrepreneurial persistence.
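A bare-bones sketch of the propensity score matching step used in such an impact evaluation (Python; simplified one-to-one nearest-neighbour matching with replacement, with generic variable names rather than the study's covariates):

    import numpy as np
    from sklearn.linear_model import LogisticRegression

    def match_treated_to_controls(X, treated):
        # propensity score: probability of treatment given observed covariates
        ps = LogisticRegression(max_iter=1000).fit(X, treated).predict_proba(X)[:, 1]
        treated_idx = np.flatnonzero(treated == 1)
        control_idx = np.flatnonzero(treated == 0)
        # match each treated unit to the control with the closest score
        pairs = [(i, control_idx[np.argmin(np.abs(ps[control_idx] - ps[i]))])
                 for i in treated_idx]
        return pairs  # outcome differences over these pairs estimate the effect

The sensitivity analysis described above then amounts to re-running such a matching with and without the personality variables included in X and comparing the resulting effect estimates.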
Taken together, the program has only limited potential as a business and entrepreneurship policy intended to induce innovation and economic growth (Chapters 3 and 4). And third, an empirical analysis at the level of German regional labor markets shows that there is high regional variation in subsidized start-up activity relative to overall new business formation. The positive correlation between regular start-up intensity and the share among all unemployed individuals who participate in the start-up subsidy program suggests that (nascent) unemployed founders also profit from the beneficial effects of regional entrepreneurship capital. Moreover, the analysis of potential deadweight and displacement effects from an aggregated regional perspective emphasizes that the start-up subsidy for unemployed individuals represents a market intervention into existing markets, which affects incumbents and potentially produces inefficiencies and market distortions. This macro perspective deserves more attention and research in the future (Chapter 5).}, language = {en} } @phdthesis{Weise2017, author = {Weise, Katja}, title = {Gez{\"a}hmte Kleider, geb{\"a}ndigte K{\"o}rper?}, doi = {10.25932/publishup-43986}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-439868}, school = {Universit{\"a}t Potsdam}, pages = {270}, year = {2017}, abstract = {Gegenstand der Dissertation ist die Pr{\"a}sentation von Kleidermode in ihr gewidmeten Sonderausstellungen, die in zunehmender Zahl seit den 1990er Jahren in musealen und museums{\"a}hnlichen Kontexten veranstaltet werden. Es geht darum, wie Modek{\"o}rper und vestiment{\"a}re Artefakte in diesen Ausstellungen gezeigt werden und welche {\"a}sthetischen Erfahrungen sich f{\"u}r die RezipientInnen aus der jeweiligen Konstellation von vestiment{\"a}rem Objekt und Inszenierungsmittel ergeben k{\"o}nnen. Das Augenmerk liegt auf der Spannung zwischen dem visuellen Imperativ musealer Zeigepraktiken und den multisensorischen Qualit{\"a}ten der Kleidermode, v. a. jener hautsinnlichen, die sich aus dem unmittelbaren Kontakt zwischen K{\"o}rper und Kleid ergeben. Die zentrale These ist, dass sich das Hautsinnliche der Kleidermode trotz des Ber{\"u}hrungsverbots in vielen Ausstellungsinszenierungen zeigen kann. D. h., dass - entgegen h{\"a}ufig wiederholter Behauptungen - ‚der K{\"o}rper', das Tragen und die Bewegung nicht per se oder komplett aus den Kleidern gewichen sind, werden diese musealisiert und ausgestellt. Es findet eine Verschiebung des K{\"o}rperlichen und Hautsinnlichen, wie das Anfassen, Tragen und Bewegen, in visuelle Darstellungsformen statt. Hautsinnliche Qualit{\"a}ten der vestiment{\"a}ren Exponate k{\"o}nnen, auch in Abh{\"a}ngigkeit von den jeweils verwendeten Pr{\"a}sentationsmitteln, von den BesucherInnen in unterschiedlichen Abstufungen sehend oder buchst{\"a}blich gesp{\"u}rt werden. An konkreten Beispielen wird zum einen das Verh{\"a}ltnis von ausgestelltem Kleid und Pr{\"a}sentationsmittel(n) in den Displays untersucht. Dabei stehen folgende Mittel im Fokus, mit deren Hilfe die vestiment{\"a}ren Exponate zur Schau gestellt werden: Vitrinen, Podeste, Ersatzk{\"o}rper wie Mannequins, optische Hilfsmittel wie Lupen, Bildmedien oder (bewegte) Installationen.
Zum anderen wird analysiert, welche Wirkungen die Arrangements jeweils erzielen oder verhindern k{\"o}nnen, und zwar in Hinblick auf m{\"o}gliche {\"a}sthetische Erfahrungen, die taktilen, haptischen und kin{\"a}sthetischen Qualit{\"a}ten der Exponate als BesucherIn sehend oder buchst{\"a}blich zu f{\"u}hlen oder zu sp{\"u}ren. Ob als Identifikation, Projektion, Haptic Vision - es handelt sich um {\"a}sthetische Erfahrungen, die sich aus den modischen Kompetenzen der BetrachterInnen speisen und bei denen sich Visuelles und Hautsinnliches oft {\"u}berlagern. In der Untersuchung wird eine vernachl{\"a}ssigte, wenn nicht gar unerw{\"u}nschte Rezeptionsweise diskutiert, die von den AkteurInnen der spezifischen Debatte bspw. als konsumptives Sehen abgewertet wird. Die von mir vorgeschlagene, st{\"a}rker differenzierende Perspektive ist zugleich eine Kritik an dem bisherigen Diskurs und seinem eng gefassten, teilweise elit{\"a}ren Verst{\"a}ndnis von Museum, Bildung und Wissen, mit dem sich AkteurInnen und Institutionen abgrenzen. Der Spezialdiskurs {\"u}ber musealisierte und exponierte Kleidermode steht zudem exemplarisch f{\"u}r die Diskussion, was das Museum, verstanden als Institution, sein kann und soll(te) und ob (und wenn ja, wie) es sich {\"u}berhaupt noch von anderen Orten und R{\"a}umen klar abgrenzen l{\"a}sst.}, language = {de} } @phdthesis{Weege2017, author = {Weege, Stefanie}, title = {Climatic drivers of retrogressive thaw slump activity and resulting sediment and carbon release to the nearshore zone of Herschel Island, Yukon Territory, Canada}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397947}, school = {Universit{\"a}t Potsdam}, pages = {163}, year = {2017}, abstract = {The Yukon Coast in Canada is an ice-rich permafrost coast and highly sensitive to changing environmental conditions. Retrogressive thaw slumps are a common thermoerosion feature along this coast, and develop through the thawing of exposed ice-rich permafrost on slopes and removal of accumulating debris. They contribute large amounts of sediment, including organic carbon and nitrogen, to the nearshore zone. The objective of this study was to 1) identify the climatic and geomorphological drivers of sediment-meltwater release, 2) quantify the amount of released meltwater, sediment, organic carbon and nitrogen, and 3) project the evolution of sediment-meltwater release of retrogressive thaw slumps in a changing future climate. The analysis is based on data collected over 18 days in July 2013 and 18 days in August 2012. A cut-throat flume was set up in the main sediment-meltwater channel of the largest retrogressive thaw slump on Herschel Island. In addition, two weather stations, one on top of the undisturbed tundra and one on the slump floor, measured incoming solar radiation, air temperature, wind speed and precipitation. The discharge volume eroding from the ice-rich permafrost and retreating snowbanks was measured and compared to the meteorological data collected in real time with a resolution of one minute. The results show that the release of sediment-meltwater from thawing of the ice-rich permafrost headwall is strongly related to snowmelt, incoming solar radiation and air temperature. Snowmelt led to seasonal differences: the contribution of snowmelt water, in addition to the meltwater from headwall ablation, led to a dilution of the sediment-meltwater composition.
Incoming solar radiation and air temperature were the main drivers of diurnal and inter-diurnal fluctuations. In July (2013), the retrogressive thaw slump released about 25 000 m³ of sediment-meltwater, containing 225 kg dissolved organic carbon and 2050 t of sediment, which in turn included 33 t organic carbon and 4 t total nitrogen. In August (2012), just 15 600 m³ of sediment-meltwater was released, since there was no additional contribution from snowmelt. However, even without the additional dilution, 281 kg dissolved organic carbon was released. The sediment concentration was twice as high as in July, with sediment contents of up to 457 g l-1; 3058 t of sediment were released, including 53 t organic carbon and 5 t nitrogen. In addition, the data from the 36 days of observations from Slump D were upscaled to cover the main summer season of 1 July to 31 August (62 days) and to include all 229 active retrogressive thaw slumps along the Yukon Coast. In total, all retrogressive thaw slumps along the Yukon Coast contribute a minimum of 1.4 million m³ of sediment-meltwater each thawing season, containing a minimum of 172 000 t sediment with 3119 t organic carbon, 327 t nitrogen and 17 t dissolved organic carbon. Therefore, in addition to the coastal erosion input to the Beaufort Sea, retrogressive thaw slumps release a further 3 \% of sediment and 8 \% of organic carbon into the ocean. Finally, the future evolution of retrogressive thaw slumps under a warming scenario with summer air temperatures increasing by 2-3 °C by 2081-2100 would lead to an increase of 109-114 \% in the release of sediment-meltwater. It can be concluded that retrogressive thaw slumps are sensitive to climatic conditions and that, under projected future Arctic warming, they will contribute larger amounts of thawed permafrost material (including organic carbon and nitrogen) into the environment.}, language = {en} } @phdthesis{Wagner2017, author = {Wagner, Mario}, title = {Industrie 4.0 und demografische Entwicklung aus strukturationstheoretischer Sicht}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-412230}, school = {Universit{\"a}t Potsdam}, pages = {235}, year = {2017}, abstract = {Das Ziel der Arbeit ist die Entwicklung eines heuristischen Bezugsrahmens zur Erkl{\"a}rung der Komplexit{\"a}t im Kontext von Industrie 4.0 und der demografischen Entwicklung aus strukturationstheoretischer Sicht. Dabei sind in Bezug auf die zuk{\"u}nftig zu erwartenden kognitiven Anforderungen an die Besch{\"a}ftigten die Fragen essentiell, vor welchen Herausforderungen Unternehmen bez{\"u}glich der Einstellung, des Verhaltens sowie des Erfahrungswissens der Besch{\"a}ftigten stehen und welche L{\"o}sungsans{\"a}tze sich im Umgang mit den Herausforderungen in der Praxis bisher als hilfreich erweisen. In Kapitel 1 erfolgt zun{\"a}chst die Beschreibung der Ausgangslage. Es werden die Begriffe Industrie 4.0 und demografische Entwicklung inhaltlich diskutiert und in einen theoretischen Zusammenhang gebracht. In Kapitel 2 erfolgt die theoretische Fundierung der Arbeit. Dabei wird eine strukturationstheoretische Sicht auf Unternehmen als soziotechnische Systeme eingenommen. Durch diese „nicht deterministische" Sichtweise wird ein prozessualer Blick auf den Wandlungsprozess in Unternehmen geschaffen, der es m{\"o}glich macht, die Besch{\"a}ftigten als aktiv handelnde Akteure im Sinne von „organisieren" zur Erkl{\"a}rung m{\"o}glicher Zusammenh{\"a}nge zwischen Industrie 4.0 und der demografischen Entwicklung mit einzubeziehen.
Der soziotechnische Systemansatz und die Strukturationstheorie bilden in diesem Sinne den „Kern" des zu entwickelnden heuristischen Bezugsrahmens. Die inhaltliche Gestaltung des theoriebasierten heuristischen Bezugsrahmens erfolgt in Kapitel 3 und Kapitel 4. Kapitel 3 beschreibt ausgew{\"a}hlte Aspekte zuk{\"u}nftiger Anforderungen an die Arbeit, die durch eine systematische Aufbereitung des derzeitigen Forschungsstandes zu Industrie 4.0 ermittelt wurden. Sie bilden die „Gestaltungsgrenzen", innerhalb derer sich je nach betrieblicher Situation unterschiedliche neue oder ge{\"a}nderte Anforderungen an die Besch{\"a}ftigten bei der Gestaltung von Industrie 4.0 ableiten lassen. In Kapitel 4 werden ausgew{\"a}hlte Aspekte menschlichen Handelns am Beispiel {\"a}lterer Besch{\"a}ftigter in Form zweier Schwerpunkte beschrieben. Der erste Schwerpunkt betrifft m{\"o}gliche Einflussfaktoren auf die Einstellung und das Verhalten {\"a}lterer Besch{\"a}ftigter im Wandlungsprozess aufgrund eines vorherrschenden Altersbildes im Unternehmen. Grundlage hierzu bildete die Stigmatisierungstheorie als interaktionistischer Ansatz der Sozialtheorie. Mit dem zweiten Schwerpunkt, den ausgew{\"a}hlten handlungstheoretischen Aspekten der Alternsforschung aus der Entwicklungspsychologie, wird eine Lebensspannenperspektive eingenommen. Inhaltlich werden die komplexit{\"a}tsinduzierten Faktoren, die sich aus handlungstheoretischer Perspektive mit der Adaptation von {\"a}lteren Besch{\"a}ftigten an ver{\"a}nderte {\"a}ußere und pers{\"o}nliche Lebensbedingungen besch{\"a}ftigen, systematisiert. Anschließend wird auf Grundlage der bisherigen theoretischen Vor{\"u}berlegungen ein erster theoriebasierter Bezugsrahmen abgeleitet. Kapitel 5 und Kapitel 6 beschreiben den empirischen Teil, die Durchf{\"u}hrung teilstrukturierter Interviews, der Arbeit. Ziel der empirischen Untersuchung war es, neben der theoretischen Fundierung den theoriebasierten heuristischen Bezugsrahmen um Praxiserfahrungen zu konkretisieren und gegebenenfalls zu erg{\"a}nzen. Hierzu wurde auf Grundlage des theoriebasierten heuristischen Bezugsrahmens mittels teilstrukturierter Interviews das Erfahrungswissen von 23 Experten in pers{\"o}nlichen Gespr{\"a}chen abgefragt. Nachdem in Kapitel 5 die Vorgehensweise der empirischen Untersuchung beschrieben wird, erfolgt in Kapitel 6 die Beschreibung der Ergebnisse aus der qualitativen Befragung. Hierzu werden aus den pers{\"o}nlichen Gespr{\"a}chen zentrale Einflussfaktoren bei der Gestaltung und Umsetzung von Industrie 4.0 im Kontext mit der demografischen Entwicklung analysiert und in die {\"u}bergeordneten Kategorien Handlungskompetenzen, Einstellung/ Verhalten sowie Erfahrungswissen geclustert. Anschließend wird der theoriebasierte heuristische Bezugsrahmen durch die {\"u}bergeordneten Kategorien und Faktoren aus den Expertengespr{\"a}chen konkretisiert und erg{\"a}nzt. In Kapitel 7 werden auf Grundlage des heuristischen Bezugsrahmens sowie der Empfehlungen aus den Experteninterviews beispielhaft Implikationen f{\"u}r die Praxis abgeleitet. Es werden Interventionsm{\"o}glichkeiten zur Unterst{\"u}tzung einer positiven Ver{\"a}nderungsbereitschaft und einem positiven Ver{\"a}nderungsverhalten f{\"u}r den Strukturwandel aufgezeigt. 
Hierzu geh{\"o}ren die Anpassung des F{\"u}hrungsverhaltens im Wandlungsprozess, der Umgang mit der Paradoxie von Stabilit{\"a}t und Flexibilit{\"a}t, der Umgang mit Altersstereotypen in Unternehmen, die Unterst{\"u}tzung von Strategien zu Selektion, Optimierung und Kompensation sowie Maßnahmen zur Ausrichtung von Aktivit{\"a}ten an die Potenzialrisiken der Besch{\"a}ftigten. Eine Zusammenfassung, ein Res{\"u}mee und ein Ausblick erfolgen abschließend in Kapitel 8.}, language = {de} } @phdthesis{Tyree2017, author = {Tyree, Susan}, title = {Arc expression in the parabrachial nucleus following taste stimulation}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396600}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 109}, year = {2017}, abstract = {Researchers have made many approaches to study the complexities of the mammalian taste system; however molecular mechanisms of taste processing in the early structures of the central taste pathway remain unclear. More recently the Arc catFISH (cellular compartment analysis of temporal activity by fluorescent in situ hybridisation) method has been used in our lab to study neural activation following taste stimulation in the first central structure in the taste pathway, the nucleus of the solitary tract. This method uses the immediate early gene Arc as a neural activity marker to identify taste-responsive neurons. Arc plays a critical role in memory formation and is necessary for conditioned taste aversion memory formation. In the nucleus of the solitary tract only bitter taste stimulation resulted in increased Arc expression, however this did not occur following stimulation with tastants of any other taste quality. The primary target for gustatory NTS neurons is the parabrachial nucleus (PbN) and, like Arc, the PbN plays an important role in conditioned taste aversion learning. The aim of this thesis is to investigate Arc expression in the PbN following taste stimulation to elucidate the molecular identity and function of Arc expressing, taste- responsive neurons. Naïve and taste-conditioned mice were stimulated with tastants from each of the five basic taste qualities (sweet, salty, sour, umami, and bitter), with additional bitter compounds included for comparison. The expression patterns of Arc and marker genes were analysed using in situ hybridisation (ISH). The Arc catFISH method was used to observe taste-responsive neurons following each taste stimulation. A double fluorescent in situ hybridisation protocol was then established to investigate possible neuropeptide genes involved in neural responses to taste stimulation. The results showed that bitter taste stimulation induces increased Arc expression in the PbN in naïve mice. This was not true for other taste qualities. In mice conditioned to find an umami tastant aversive, subsequent umami taste stimulation resulted in an increase in Arc expression similar to that seen in bitter-stimulated mice. Taste-responsive Arc expression was denser in the lateral PbN than the medial PbN. In mice that received two temporally separated taste stimulations, each stimulation time-point showed a distinct population of Arc-expressing neurons, with only a small population (10 - 18 \%) of neurons responding to both stimulations. This suggests that either each stimulation event activates a different population of neurons, or that Arc is marking something other than simple cellular activation, such as long-term cellular changes that do not occur twice within a 25 minute time frame. 
Investigation using the newly established double-FISH protocol revealed that, of the bitter-responsive Arc-expressing neuron population, 16 \% co-expressed calcitonin RNA; 17 \% co-expressed glucagon-like peptide 1 receptor RNA; 17 \% co-expressed hypocretin receptor 1 RNA; 9 \% co-expressed gastrin-releasing peptide RNA; and 20 \% co-expressed neurotensin RNA. This co-expression with multiple different neuropeptides suggests that bitter-activated Arc expression mediates multiple neural responses to the taste event, such as taste aversion learning, suppression of food intake, and increased heart rate, and that it involves multiple brain structures such as the lateral hypothalamus, amygdala, bed nucleus of the stria terminalis, and the thalamus. The increase in Arc expression suggests that bitter taste stimulation, and umami taste stimulation in umami-averse animals, may result in an enhanced state of Arc-dependent synaptic plasticity in the PbN, allowing animals to form taste-relevant memories to these aversive compounds more readily. The results investigating neuropeptide RNA co-expression suggest the amygdala, bed nucleus of the stria terminalis, and thalamus as possible targets for bitter-responsive Arc-expressing PbN neurons.}, language = {en} } @phdthesis{Trendelenburg2017, author = {Trendelenburg, Val{\´e}rie}, title = {Therapie der Erdnussallergie durch orale Immuntherapie}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-403557}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 198}, year = {2017}, abstract = {Einleitung: Die Erdnussallergie z{\"a}hlt zu den h{\"a}ufigsten Nahrungsmittelallergien im Kindesalter. Bereits kleine Mengen Erdnuss (EN) k{\"o}nnen zu schweren allergischen Reaktionen f{\"u}hren. EN ist der h{\"a}ufigste Ausl{\"o}ser einer lebensbedrohlichen Anaphylaxie bei Kindern und Jugendlichen. Im Gegensatz zu anderen fr{\"u}hkindlichen Nahrungsmittelallergien entwickeln Patienten mit einer EN-Allergie nur selten eine nat{\"u}rliche Toleranz. Seit mehreren Jahren wird daher an kausalen Therapiem{\"o}glichkeiten f{\"u}r EN-Allergiker, insbesondere an der oralen Immuntherapie (OIT), geforscht. Erste kleinere Studien zur OIT bei EN-Allergie zeigten erfolgversprechende Ergebnisse. Im Rahmen einer randomisierten, doppelblinden, Placebo-kontrollierten Studie mit gr{\"o}ßerer Fallzahl werden in der vorliegenden Arbeit die klinische Wirksamkeit und Sicherheit dieser Therapieoption bei Kindern mit EN-Allergie genauer evaluiert. Des Weiteren werden immunologische Ver{\"a}nderungen sowie die Lebensqualit{\"a}t und Therapiebelastung unter OIT untersucht. Methoden: Kinder zwischen 3 und 18 Jahren mit einer IgE-vermittelten EN-Allergie wurden in die Studie eingeschlossen. Vor Beginn der OIT wurde eine orale Provokation mit EN durchgef{\"u}hrt. Die Patienten wurden 1:1 randomisiert und entsprechend der Verum- oder Placebogruppe zugeordnet. Begonnen wurde mit 2-120 mg EN bzw. Placebo pro Tag, abh{\"a}ngig von der Reaktionsdosis bei der oralen Provokation. Zun{\"a}chst wurde die t{\"a}gliche OIT-Dosis alle zwei Wochen {\"u}ber etwa 14 Monate langsam bis zu einer Erhaltungsdosis von mindestens 500 mg EN (= 125 mg EN-Protein, ~ 1 kleine EN) bzw. Placebo gesteigert. Die maximal erreichte Dosis wurde dann {\"u}ber zwei Monate t{\"a}glich zu Hause verabreicht. Im Anschluss erfolgte erneut eine orale Provokation mit EN.
Der prim{\"a}re Endpunkt der Studie war die Anzahl an Patienten der Verum- und Placebogruppe, die unter oraler Provokation nach OIT ≥1200 mg EN vertrugen (=„partielle Desensibilisierung"). Sowohl vor als auch nach OIT wurde ein Hautpricktest mit EN durchgef{\"u}hrt und EN-spezifisches IgE und IgG4 im Serum bestimmt. Außerdem wurden die Basophilenaktivierung sowie die Aussch{\"u}ttung von T-Zell-spezifischen Zytokinen nach Stimulation mit EN in vitro gemessen. Anhand von Frageb{\"o}gen wurde die Lebensqualit{\"a}t vor und nach OIT sowie die Therapiebelastung w{\"a}hrend OIT erfasst. Ergebnisse: 62 Patienten wurden in die Studie eingeschlossen und randomisiert. Nach etwa 16 Monaten unter OIT zeigten 74,2\% (23/31) der Patienten der Verumgruppe und nur 16,1\% (5/31) der Placebogruppe eine „partielle Desensibilisierung" gegen{\"u}ber EN (p<0,001). Im Median vertrugen Patienten der Verumgruppe 4000 mg EN (~8 kleine EN) unter der Provokation nach OIT wohingegen Patienten der Placebogruppe nur 80 mg EN (~1/6 kleine EN) vertrugen (p<0,001). Fast die H{\"a}lfte der Patienten der Verumgruppe (41,9\%) tolerierten die H{\"o}chstdosis von 18 g EN unter Provokation („komplette Desensibilisierung"). Es zeigte sich ein vergleichbares Sicherheitsprofil unter Verum- und Placebo-OIT in Bezug auf objektive Nebenwirkungen. Unter Verum-OIT kam es jedoch signifikant h{\"a}ufiger zu subjektiven Nebenwirkungen wie oralem Juckreiz oder Bauchschmerzen im Vergleich zu Placebo (3,7\% der Verum-OIT-Gaben vs. 0,5\% der Placebo-OIT-Gaben, p<0,001). Drei Kinder der Verumgruppe (9,7\%) und sieben Kinder der Placebogruppe (22,6\%) beendeten die Studie vorzeitig, je zwei Patienten beider Gruppen aufgrund von Nebenwirkungen. Im Gegensatz zu Placebo, zeigten sich unter Verum-OIT signifikante immunologische Ver{\"a}nderungen. So kam es zu einer Abnahme des EN-spezifischen Quaddeldurchmessers im Hautpricktest, einem Anstieg der EN-spezifischen IgG4-Werte im Serum sowie zu einer verminderten EN-spezifischen Zytokinsekretion, insbesondere der Th2-spezifischen Zytokine IL-4 und IL-5. Hinsichtlich der EN-spezifischen IgE-Werte sowie der EN-spezifischen Basophilenaktivierung zeigten sich hingegen keine Ver{\"a}nderungen unter OIT. Die Lebensqualit{\"a}t von Kindern der Verumgruppe war nach OIT signifikant verbessert, jedoch nicht bei Kindern der Placebogruppe. W{\"a}hrend der OIT wurde die Therapie von fast allen Kindern (82\%) und M{\"u}ttern (82\%) als positiv bewertet (= niedrige Therapiebelastung). Diskussion: Die EN-OIT f{\"u}hrte bei einem Großteil der EN-allergischen Kinder zu einer Desensibilisierung und einer deutlich erh{\"o}hten Reaktionsschwelle auf EN. Somit sind die Kinder im Alltag vor akzidentellen Reaktionen auf EN gesch{\"u}tzt, was die Lebensqualit{\"a}t der Kinder deutlich verbessert. Unter den kontrollierten Studienbedingungen zeigte sich ein akzeptables Sicherheitsprofil, mit vorrangig milder Symptomatik. Die klinische Desensibilisierung ging mit Ver{\"a}nderungen auf immunologischer Ebene einher. 
However, long-term studies on PN OIT must be awaited in order to examine the clinical and immunological efficacy with regard to a possible long-term induction of oral tolerance, as well as the safety of long-term OIT, before this treatment concept can be transferred into practice.}, language = {de} } @phdthesis{Titov2017, author = {Titov, Evgenii}, title = {Quantum chemistry and surface hopping dynamics of azobenzenes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-394610}, school = {Universit{\"a}t Potsdam}, pages = {205}, year = {2017}, abstract = {This cumulative doctoral dissertation, based on three publications, is devoted to the investigation of several aspects of azobenzene molecular switches with the aid of computational chemistry. In the first paper, the rates of the thermal cis → trans isomerization of azobenzenes for species formed upon integer electron transfer, i.e., with an added or removed electron, are calculated from Eyring's transition state theory, using activation energy barriers computed by means of density functional theory. The obtained results are discussed in connection with an experimental study of the thermal cis → trans isomerization of azobenzene derivatives in the presence of gold nanoparticles, which is demonstrated to be greatly accelerated in comparison to the same isomerization reaction in the absence of nanoparticles. The second paper is concerned with electronically excited states of (i) dimers, composed of two photoswitchable units placed closely side by side, as well as (ii) monomers and dimers adsorbed on a silicon cluster. A variety of quantum chemistry methods capable of calculating molecular electronic absorption spectra, based on density functional and wave function theories, is employed to quantify changes in optical absorption upon dimerization and covalent grafting to a surface. Specifically, the exciton (Davydov) splitting between states of interest is determined from first-principles calculations with the help of natural transition orbital analysis, allowing for insight into the nature of the excited states. In the third paper, nonadiabatic molecular dynamics with trajectory surface hopping is applied to model the photoisomerization of azobenzene dimers, (i) for the isolated case (exhibiting the exciton coupling between the two molecules) as well as (ii) for the constrained case (providing the van der Waals interaction with the environment in addition to the exciton coupling between the two monomers). For the latter, additional azobenzene molecules surrounding the dimer are introduced, mimicking a densely packed self-assembled monolayer. From the obtained results it is concluded that the isolated dimer is capable of isomerization just like the monomer, whereas the steric hindrance considerably suppresses the trans → cis photoisomerization. Furthermore, the present dissertation comprises a general introduction describing the main features of the azobenzene photoswitch and the objectives of this work, the theoretical basis of the employed methods, and a discussion of the findings in the light of the existing literature.
Also, additional results are presented on (i) activation parameters of the thermal cis → trans isomerization of azobenzenes, (ii) an approximate scheme to account for the anharmonicity of molecular vibrations in the calculation of the activation entropy, as well as (iii) absorption spectra of photoswitch-silicon composites obtained from time-demanding wave-function-based methods.}, language = {en} } @phdthesis{Thim2017, author = {Thim, Christof}, title = {Technologieakzeptanz in Organisationen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-401070}, school = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {Technological change influences how organisations operate. It is used as a means to enhance productivity or to gain momentum on the market. The success of introducing new technologies into an organisation relies heavily on user acceptance. Existing explanations such as the Diffusion of Innovations Theory (Rogers, 2003) and the Technology Acceptance Model and its extensions (Davis, 1989; Venkatesh and Davis, 1996; Venkatesh and Davis, 2000; Venkatesh, Morris, et al., 2003) do not address the organisational context sufficiently: these models concentrate on technology adoption in a non-mandatory environment, and they do not encompass resistance to a new technology. Hence, they cannot be used to analyse the acceptance and usage decision process within organisations. This thesis therefore investigates the organisational dynamics evoked by the introduction of new technologies with regard to acceptance and usage. More precisely, it answers the question of whether different organisation types exert varying influences on their members and produce different patterns of acceptance and usage. The groundwork for this insight is the synthesis and extension of different models of technology acceptance and organisational governance. The resulting model describes the development dynamics within an organisation and combines two perspectives. On the one hand, the individual level encompasses socio-psychological aspects and individual decision-making processes. This perspective is based on the aforementioned theories of individual acceptance, which are extended with different fit theories (Goodhue and Thompson, 1995; Floyd, 1986; Liu, Lee, and Chen, 2011; Parkes, 2013). Furthermore, resistance to the new technology is introduced into the analysis as another possible course of action (Patsiotis, Hughes, and Webber, 2012). On the other hand, the organisational perspective embeds the individual acceptance and usage decision into a social context. The interaction between organisation members, based on the observation of others and the internalisation of social pressure, is introduced as a determinant of acceptance and usage. Furthermore, organisational governance structures moderate this social influence and specify its impact. The relationship between governance and social influence is elaborated by applying system theory to the organisational context: actors such as change agents or management use governance media (Luhmann, 1997; Fischer, 2009) to intervene in individual decisions. The effect of these governance media varies with certain attributes of the organisation. The different coordination mechanisms of organisational configurations (Mintzberg, 1979) provide a link between governance media and their connectivity to individual decision processes. To demonstrate the feasibility of the model, a simulation experiment is conducted in AnyLogic.
The validity of the model was tested in a sensitivity analysis. The results from the experiment show a specific acceptance and usage pattern: acceptance drops at first due to the initial frustration, then recovers and grows in a bounded manner. Since usage is mandatory in an organisation, it is enforced by the management; this leads to a rapid increase in usage at first, which then stabilises at different levels over the course of the simulation. It was also found that different organisation configurations produce varying outcomes. The bureaucratic organisation enforces usage better than any other configuration, leading to a higher usage level; however, it fails to produce acceptance. The adhocracy, on the other hand, reaches a higher acceptance level through mutual adjustment, but its downside is a lack of usage. Furthermore, its behaviour is not predictable, which can lead either to mostly positive outcomes or to a complete breakdown of the diffusion process. The simulation shows that, when introducing a new technology, organisations have to decide whether they want high usage rates quickly, at the risk of failing in the long term, or a self-reinforcing and sustainable diffusion process, which requires more time to take effect.}, language = {de} } @phdthesis{ThielemannKuehn2017, author = {Thielemann-K{\"u}hn, Nele}, title = {Optically induced ferro- and antiferromagnetic dynamics in the rare-earth metal dysprosium}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-402994}, school = {Universit{\"a}t Potsdam}, pages = {iv, 121}, year = {2017}, abstract = {Approaching the physical limits in speed and size of today's magnetic storage and processing technologies demands new concepts for controlling magnetization and drives research on optically induced magnetic dynamics. Studies on photoinduced magnetization dynamics and their underlying mechanisms have primarily been performed on ferromagnetic metals. Ferromagnetic dynamics is based on the transfer of the conserved angular momentum associated with the atomic magnetic moments out of the parallel-aligned magnetic system into other degrees of freedom. In this thesis, the so far rarely studied response of antiferromagnetic order in a metal to ultra-short optical laser pulses is investigated. The experiments were performed at the FemtoSpex slicing facility at the storage ring BESSY II, a unique source of ultra-short elliptically polarized x-ray pulses. Laser-induced changes of the 4f magnetic order parameter in ferro- and antiferromagnetic dysprosium (Dy) were studied by x-ray methods, which yield directly comparable quantities. The discovered fundamental differences in the temporal and spatial behavior of ferro- and antiferromagnetic dynamics are assigned to an additional channel for angular momentum transfer, which reduces the antiferromagnetic order by redistributing angular momentum within the non-parallel-aligned magnetic system and hence conserves the zero net magnetization. It is shown that antiferromagnetic dynamics proceeds considerably faster and more energy-efficiently than demagnetization in ferromagnets. By probing antiferromagnetic order in time and space, it is found to be affected along the whole depth of an in situ grown 73 nm thick Dy film. Interatomic transfer of angular momentum via fast diffusion of laser-excited 5d electrons is held responsible for this remarkably long-ranged effect.
Ultrafast ferromagnetic dynamics can be expected to have the same origin, which, however, leads to demagnetization only in regions close to interfaces, caused by superdiffusive spin transport. Dynamics due to local scattering processes of excited but less mobile electrons occur in both types of magnetic alignment only in directly excited regions of the sample and on slower picosecond timescales. The thesis provides fundamental insights into photoinduced magnetic dynamics by directly comparing ferro- and antiferromagnetic dynamics in the same material and by considering the laser-induced magnetic depth profile.}, language = {en} } @phdthesis{Theuring2017, author = {Theuring, Philipp Christian}, title = {Suspended sediments in the Kharaa River, sources and impacts}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-410550}, school = {Universit{\"a}t Potsdam}, pages = {135}, year = {2017}, abstract = {Anthropogenically amplified erosion leads to increased fine-grained sediment input into the fluvial system of the 15,000 km² Kharaa River catchment in northern Mongolia and constitutes a major stress factor for the aquatic ecosystem. This study uniquely combines intensive monitoring, source fingerprinting, and catchment modelling techniques, allowing the credibility and accuracy of each individual method to be compared. High-resolution discharge data were used in combination with daily suspended-solids measurements to calculate the suspended sediment budget and to compare it with estimates from the sediment budget model SedNet. The comparison of both techniques showed that the development of an overall sediment budget with SedNet was possible, yielding results of the same order of magnitude (20.3 kt a^-1 and 16.2 kt a^-1). Radionuclide sediment tracing with Be-7, Cs-137, and Pb-210 was applied to differentiate sediment sources for particles < 10 μm from hillslope and riverbank erosion, and showed that riverbank erosion generates 74.5\% of the suspended sediment load, whereas surface erosion contributes 21.7\% and gully erosion only 3.8\%. The contribution of the individual sub-catchments of the Kharaa to the suspended sediment load was assessed based on their variation in geochemical composition (e.g. in Ti, Sn, Mo, Mn, As, Sr, B, U, Ca, and Sb). These variations were used for sediment source discrimination with geochemical composite fingerprints based on genetic-algorithm-driven Discriminant Function Analysis, the Kruskal-Wallis H-test, and Principal Component Analysis. The contributions of the individual sub-catchments varied from 6.4\% to 36.2\%, generally showing higher contributions from the sub-catchments in the middle rather than the upstream portions of the study area. The results indicate that riverbank erosion caused by existing livestock grazing practices is the main source of elevated fine-sediment input. Actions towards the protection of the headwaters and the stabilization of the riverbanks within the middle reaches were identified as the highest priority. Deforestation by logging and forest fires should be prevented to avoid increased hillslope erosion in the mountainous areas.
Mining activities are of minor importance for the overall catchment sediment load but can constitute locally important point sources of particular heavy metals in the fluvial system.}, language = {en} } @phdthesis{Solms2017, author = {Solms, Alexander Maximilian}, title = {Integrating nonlinear mixed effects and physiologically-based modeling approaches for the analysis of repeated measurement studies}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-397070}, school = {Universit{\"a}t Potsdam}, pages = {x, 141}, year = {2017}, abstract = {During the drug discovery \& development process, several phases encompassing a number of preclinical and clinical studies have to be successfully passed to demonstrate the safety and efficacy of a new drug candidate. As part of these studies, the characterization of the drug's pharmacokinetics (PK) is an important aspect, since the PK is assumed to strongly impact safety and efficacy. To this end, drug concentrations are measured repeatedly over time in a study population. The objectives of such studies are to describe the typical PK time course and the associated variability between subjects, and to identify the underlying sources that contribute significantly to this variability, e.g. the use of comedication. The most commonly used statistical framework for analysing repeated measurement data is the nonlinear mixed effects (NLME) approach. At the same time, ample knowledge about the drug's properties already exists and accumulates during the discovery \& development process: before any drug is tested in humans, detailed knowledge about its PK in different animal species has to be collected. This drug-specific knowledge, together with general knowledge about the species' physiology, is exploited in mechanistic, physiologically based PK (PBPK) modeling approaches; it is, however, ignored in the classical NLME modeling approach. Mechanistic, physiologically based models aim to incorporate the relevant known physiological processes that contribute to the overlying process of interest. In comparison to data-driven models, they are usually more complex from a mathematical perspective. For example, in many situations the number of model parameters exceeds the number of measurements, so that reliable parameter estimation becomes more difficult or even impossible. As a consequence, the integration of powerful statistical estimation approaches such as NLME modeling, which is widely used in data-driven modeling, with the mechanistic modeling approach is not well established; the observed data are rather used to confirm the model instead of informing and driving model building. A further obstacle to an integrated approach is that the details of the NLME methodology are not readily accessible in a form that would allow them to be adapted to the specifics and needs of mechanistic modeling. Although the NLME modeling approach has existed for several decades, the details of its mathematical methodology are scattered across a wide range of literature, and a comprehensive, rigorous derivation is lacking. The available literature usually covers only selected parts of the mathematical methodology; sometimes important steps, e.g. the iterative algorithm that finally determines the parameter estimates, are not described or are only heuristically motivated.
Thus, in the present thesis the mathematical methodology of NLME modeling is described systematically and complemented into a comprehensive account, covering the common thread from the underlying ideas and motivation to the final parameter estimation. Therein, new insights into the interpretation of the different approximation methods used in the context of the NLME modeling approach are given and illustrated; furthermore, similarities and differences between them are outlined. Based on these findings, an expectation-maximization (EM) algorithm to determine the estimates of an NLME model is described. Using the EM algorithm and the lumping methodology of Pilari (2010), a new approach to combining PBPK and NLME modeling is presented and exemplified for the antibiotic levofloxacin. Therein, the lumping identifies which processes are informed by the available data, and the respective model reduction improves the robustness of parameter estimation. Furthermore, it is shown how a priori known factors influencing the variability and a priori known unexplained variability are incorporated to further drive the model development mechanistically. In conclusion, correlations between parameters and between covariates are automatically accounted for owing to the mechanistic derivation of the lumping and of the covariate relationships. A useful feature of PBPK models compared to classical data-driven PK models is the possibility of predicting drug concentrations within all organs and tissues of the body. Thus, the resulting PBPK model for levofloxacin is used to predict drug concentrations and their variability within soft tissues, the site of action of levofloxacin. These predictions are compared with muscle and adipose tissue data obtained by microdialysis, an invasive technique that measures a proportion of the drug in the tissue and allows the concentrations in the interstitial fluid of tissues to be approximated. Because comparing human in vivo tissue PK with PBPK predictions is not yet an established procedure, a new conceptual framework for such comparisons is derived. The comparison of PBPK model predictions and microdialysis measurements shows adequate agreement and reveals further strengths of the presented approach. We demonstrated how mechanistic PBPK models, which are usually developed in the early stages of drug development, can be used as a basis for model building in the analysis of later stages, i.e. of clinical studies. As a consequence, the extensively collected and accumulated knowledge about species and drug is utilized and updated with specific volunteer or patient data. The NLME approach combined with mechanistic modeling reveals new insights into the mechanistic model, for example the identification and quantification of variability in mechanistic processes. This represents a further contribution to the learn \& confirm paradigm across the different stages of drug development. Finally, the applicability of mechanism-driven model development is demonstrated on an example from the field of quantitative psycholinguistics, analysing repeated eye-movement data. Our approach gives new insight into the interpretation of these experiments and into the underlying processes.}, language = {en} }
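Editorial note on the NLME structure referred to in the Solms abstract above: a minimal sketch in standard textbook notation (our own illustration of the general framework, not notation taken from the thesis itself) is

\[ y_{ij} = f(\phi_i, t_{ij}) + \varepsilon_{ij}, \qquad \varepsilon_{ij} \sim \mathcal{N}(0, \sigma^2), \]
\[ \phi_i = h(\theta, \eta_i), \qquad \eta_i \sim \mathcal{N}(0, \Omega), \]

where \(y_{ij}\) is the \(j\)-th concentration measurement of subject \(i\) at time \(t_{ij}\), \(f\) is the structural (e.g. PBPK) model, \(\theta\) are the fixed effects describing the typical PK time course, and the random effects \(\eta_i\) capture the between-subject variability. In an EM-type estimation scheme, the unobserved \(\eta_i\) are treated as missing data: the E-step evaluates (or approximates) their conditional distribution given the observations, and the M-step updates \((\theta, \Omega, \sigma^2)\) accordingly.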