@misc{OPUS4-64958, title = {Erkl{\"a}rung Potsdamer B{\"u}rgerinnen und B{\"u}rger zur Robotik in der Altenpflege}, editor = {Bubeck, Marc and Haltaufderheide, Joschka and Sakowsky, Ruben Andreas and Ranisch, Robert}, doi = {10.25932/publishup-64958}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-649589}, pages = {53}, year = {2024}, abstract = {Die bedarfsgerechte Versorgung im Alter zuk{\"u}nftig sicherzustellen, geh{\"o}rt zu den entscheidenden Aufgaben unserer Zeit. Der in Deutschland bestehende Fachkr{\"a}ftemangel sowie der demografische Wandel belasten das Pflegesystem in mehrfacher Hinsicht: In einer alternden Gesellschaft sind immer mehr Menschen auf eine anhaltende Unterst{\"u}tzung angewiesen. Niedrige Geburtenraten und damit verbunden ein sinkender Bev{\"o}lkerungsanteil von Menschen im erwerbsf{\"a}higen Alter bringen einen bereits heute sp{\"u}rbaren Mangel an beruflich Pflegenden mit sich. Um eine menschenw{\"u}rdige Pflege anhaltend zu gew{\"a}hrleisten, m{\"u}ssen vorhandene Ressourcen gezielter eingesetzt und zus{\"a}tzliche Reserven freigelegt werden. Viele Hoffnungen liegen hier auf technologischen Innovationen. Die Digitalisierung soll das Gesundheitswesen effizienter gestalten und beispielsweise durch K{\"u}nstliche Intelligenz zeitraubende Prozesse vereinfachen oder sogar automatisieren. Im Kontext der Pflege wird der Einsatz von robotischen Assistenzsystemen diskutiert. Aus diesem Grund wurde die Potsdamer B{\"u}rger:innenkonferenz „Robotik in der Altenpflege?" initiiert. Um die Zukunft der Pflege gemeinsam zu gestalten, wurden 3.500 Potsdamer B{\"u}rgerinnen und B{\"u}rger kontaktiert und schließlich f{\"u}nfundzwanzig Teilnehmende ausgew{\"a}hlt. Im Fr{\"u}hjahr 2024 kamen sie zusammen, um den verantwortlichen Einsatz von Robotik in der Pflege zu diskutieren. Die hier vorliegende Erkl{\"a}rung ist das Ergebnis der B{\"u}rger:innenkonferenz. Sie enth{\"a}lt die zentralen Positionen der Teilnehmenden. Die B{\"u}rger:innenkonferenz ist Teil des Projekts E-cARE („Ethics Guidelines for Socially Assistive Robots in Elderly Care: An Empirical-Participatory Approach"), welches die Juniorprofessur f{\"u}r Medizinische Ethik mit Schwerpunkt auf Digitalisierung der Fakult{\"a}t f{\"u}r Gesundheitswissenschaften Brandenburg, Universit{\"a}t Potsdam, durchgef{\"u}hrt hat.}, language = {de} } @phdthesis{Arend2024, author = {Arend, Marius}, title = {Comparing genome-scale models of protein-constrained metabolism in heterotrophic and photosynthetic microorganisms}, doi = {10.25932/publishup-65147}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-651470}, school = {Universit{\"a}t Potsdam}, pages = {150}, year = {2024}, abstract = {Genome-scale metabolic models are mathematical representations of all known reactions occurring in a cell. Combined with constraints based on physiological measurements, these models have been used to accurately predict metabolic fluxes and effects of perturbations (e.g. knock-outs) and to inform metabolic engineering strategies. Recently, protein-constrained models have been shown to increase predictive potential (especially in overflow metabolism), while alleviating the need for measurement of nutrient uptake rates. The resulting modelling frameworks quantify the upkeep cost of a certain metabolic flux as the minimum amount of enzyme required for catalysis. These improvements are based on the use of in vitro turnover numbers or in vivo apparent catalytic rates of enzymes for model parameterization.
In this thesis, several tools for the estimation and refinement of these parameters based on in vivo proteomics data of Escherichia coli, Saccharomyces cerevisiae, and Chlamydomonas reinhardtii have been developed and applied. The difference between in vitro and in vivo catalytic rate measures for the three microorganisms was systematically analyzed. The results for the facultatively heterotrophic microalga C. reinhardtii considerably expanded the apparent catalytic rate estimates for photosynthetic organisms. Our general finding pointed to a global reduction of enzyme efficiency in heterotrophy compared to other growth scenarios. Independent of the modelled organism, in vivo estimates were shown to improve the accuracy of protein abundance predictions compared to in vitro turnover numbers. To further improve the protein abundance predictions, machine learning models were trained that integrate features derived from protein-constrained modelling and codon usage. Combining the two types of features outperformed single-feature models and yielded good prediction results without relying on experimental transcriptomic data. The presented work reports valuable advances in the prediction of enzyme allocation in unseen scenarios using protein-constrained metabolic models. It marks the first successful application of this modelling framework in the biotechnologically important taxon of green microalgae, substantially increasing our knowledge of the enzyme catalytic landscape of phototrophic microorganisms.}, language = {en} } @phdthesis{Bryant2024, author = {Bryant, Seth}, title = {Aggregation and disaggregation in flood risk models}, doi = {10.25932/publishup-65095}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-650952}, school = {Universit{\"a}t Potsdam}, pages = {ix, 116}, year = {2024}, abstract = {Floods continue to be the leading cause of economic damages and fatalities among natural disasters worldwide. As future climate and exposure changes are projected to intensify these damages, the need for more accurate and scalable flood risk models is rising. Over the past decade, macro-scale flood risk models have evolved from initial proofs of concept to indispensable tools for decision-making at the global, national and, increasingly, the local level. This progress has been propelled by the advent of high-performance computing and the availability of global, space-based datasets. However, despite such advancements, these models are rarely validated and consistently fall short of the accuracy achieved by high-resolution local models. While capabilities have improved, significant gaps persist in understanding the behaviours of such macro-scale models, particularly their tendency to overestimate risk. This dissertation aims to address such gaps by examining the scale transfers inherent in the construction and application of coarse macro-scale models. To achieve this, four studies are presented that, collectively, address exposure, hazard, and vulnerability components of risk affected by upscaling or downscaling. The first study focuses on a type of downscaling where coarse flood hazard inundation grids are enhanced to a finer resolution. While such inundation downscaling has been employed in numerous global model chains, ours is the first study to focus specifically on this component, providing an evaluation of the state of the art and a novel algorithm.
Findings demonstrate that our novel algorithm is eight times faster than existing methods, offers a slight improvement in accuracy, and generates more physically coherent flood maps in hydraulically challenging regions. When applied to a case study, the algorithm generated a 4 m resolution inundation map from 30 m hydrodynamic model outputs in 33 s, a 60-fold improvement in runtime with a 25\% increase in RMSE compared with direct hydrodynamic modelling. All evaluated downscaling algorithms yielded better accuracy than the coarse hydrodynamic model when compared to observations, demonstrating limits of coarse hydrodynamic models similar to those reported by others. The substitution of downscaling into flood risk model chains, in place of high-resolution modelling, can drastically improve the lead time of impact-based forecasts and the efficiency of hazard map production. With downscaling, local regions could obtain high-resolution local inundation maps by post-processing a global model without the need for expensive modelling or expertise. The second study focuses on hazard aggregation and its implications for exposure, investigating implicit aggregations commonly used to intersect hazard grids with coarse exposure models. This research introduces a novel spatial classification framework to understand the effects of rescaling flood hazard grids to a coarser resolution. The study derives closed-form analytical solutions for the location and direction of bias from flood grid aggregation, showing that bias will always be present in regions near the edge of inundation. For example, inundation area will be positively biased when water depth grids are aggregated, while volume will be negatively biased when water elevation grids are aggregated. Extending the analysis to effects of hazard aggregation on building exposure, this study shows that exposure in regions at the edge of inundation is an order of magnitude more sensitive to aggregation errors than hazard alone. Of the two aggregation routines considered, averaging water surface elevation grids better preserved flood depths at buildings than averaging water depth grids. The study provides the first mathematical proof and generalizable treatment of flood hazard grid aggregation, demonstrating important mechanisms to help flood risk modellers understand and control model behaviour. The final two studies focus on the aggregation of vulnerability models or flood damage functions, investigating the practice of applying per-asset functions to aggregate exposure models. Both studies extend Jensen's inequality, a well-known mathematical result from 1906, to demonstrate how the aggregation of flood damage functions leads to bias. Applying Jensen's result in this new context shows that typically concave flood damage functions will introduce a positive bias (overestimation) when aggregated. This behaviour was further investigated with a simulation experiment including 2 million buildings in Germany, four global flood hazard simulations and three aggregation scenarios. The results show that positive aggregation bias is not distributed evenly in space, meaning some regions identified as "hot spots of risk" in assessments may in fact just be hot spots of aggregation bias. This study provides the first application of Jensen's inequality to explain the overestimates reported elsewhere and offers advice for modellers to minimize such artifacts.
In total, this dissertation investigates the complex ways aggregation and disaggregation influence the behaviour of risk models, focusing on the scale transfers underpinning macro-scale flood risk assessments. Extending a key finding of the flood hazard literature to the broader context of flood risk, this dissertation concludes that, all else equal, coarse models overestimate risk. This dissertation goes beyond previous studies by providing mathematical proofs for how and where such bias emerges in aggregation routines, offering a mechanistic explanation for coarse model overestimates. It shows that this bias is spatially heterogeneous, necessitating a deep understanding of how rescaling may bias models to effectively reduce or communicate uncertainties. Further, the dissertation offers specific recommendations to help modellers minimize scale transfers in problematic regions. In conclusion, I argue that such aggregation errors are epistemic, stemming from choices in model structure, and therefore hold greater potential and impetus for study and mitigation. This deeper understanding of uncertainties is essential for improving macro-scale flood risk models and their effectiveness in equitable, holistic, and sustainable flood management.}, language = {en} } @phdthesis{Martin2024, author = {Martin, Johannes}, title = {Synthesis of protein-polymer conjugates and block copolymers via sortase-mediated ligation}, doi = {10.25932/publishup-64566}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-645669}, school = {Universit{\"a}t Potsdam}, pages = {XVII, 150}, year = {2024}, abstract = {In den vergangenen Jahrzehnten haben therapeutische Proteine in der pharmazeutischen Industrie mehr und mehr an Bedeutung gewonnen. Werden Proteine nichtmenschlichen Ursprungs verwendet, kann es jedoch zu einer Immunreaktion kommen, sodass das Protein sehr schnell aus dem K{\"o}rper ausgeschieden oder abgebaut wird. Um die Zirkulationszeit im Blut signifikant zu verl{\"a}ngern, werden die Proteine mit synthetischen Polymeren modifiziert (Protein-Polymer-Konjugate). Die Proteine aller heute auf dem Markt erh{\"a}ltlichen Medikamente dieser Art tragen eine oder mehrere Polymerketten aus Poly(ethylenglycol) (PEG). Ein Nachteil der PEGylierung ist, dass viele Patienten bei regelm{\"a}ßiger Einnahme dieser Medikamente Antik{\"o}rper gegen PEG entwickeln, die den effizienzsteigernden Effekt der PEGylierung wieder aufheben. Ein weiterer Nachteil der PEGylierung ist die oftmals deutlich verringerte Aktivit{\"a}t der Konjugate im Vergleich zum nativen Protein. Der Grund daf{\"u}r ist die Herstellungsmethode der Konjugate, bei der meist die prim{\"a}ren Amine der Lysin-Seitenketten und der N-Terminus des Proteins genutzt werden. Da die meisten Proteine mehrere gut zug{\"a}ngliche Lysine aufweisen, werden oft unterschiedliche und teilweise mehrere Lysine mit PEG funktionalisiert, was zu einer Mischung an Regioisomeren f{\"u}hrt. Je nach Position der PEG-Kette kann das aktive Zentrum abgeschirmt oder die 3D-Struktur des Proteins ver{\"a}ndert werden, was zu einem teilweise drastischen Aktivit{\"a}tsabfall f{\"u}hrt. In dieser Arbeit wurde eine neuartige Methode zur Ligation von Makromolek{\"u}len untersucht. Die Verwendung eines Enzyms als Katalysator zur Verbindung zweier Makromolek{\"u}le ist bisher wenig untersucht und ineffizient. Als Enzym wurde Sortase A ausgew{\"a}hlt, eine gut untersuchte Ligase aus der Familie der Transpeptidasen, welche die Ligation zweier Peptide katalysieren kann.
Ein Nachteil dieser Sortase-vermittelten Ligation ist, dass es sich um eine Gleichgewichtsreaktion handelt, wodurch hohe Ausbeuten schwierig zu erreichen sind. Im Rahmen dieser Dissertation wurden zwei zuvor entwickelte Methoden zur Verschiebung des Gleichgewichts ohne Einsatz eines großen {\"U}berschusses eines Edukts f{\"u}r Makromolek{\"u}le {\"u}berpr{\"u}ft. Zur Durchf{\"u}hrung der Sortase-vermittelten Ligation werden zwei komplement{\"a}re Peptidsequenzen verwendet, die Erkennungssequenz und das Nukleophil. Um eine systematische Untersuchung durchf{\"u}hren zu k{\"o}nnen, wurden alle n{\"o}tigen Bausteine (Protein-Erkennungssequenz zur Reaktion mit Nukleophil-Polymer und Polymer-Erkennungssequenz mit Nukleophil-Protein) hergestellt. Als Polymerisationstechnik wurde die radikalische Polymerisation mit reversibler Deaktivierung (im Detail, Atom Transfer Radical Polymerization, ATRP und Reversible Addition-Fragmentation Chain Transfer, RAFT polymerization) gew{\"a}hlt, um eine enge Molmassenverteilung zu erreichen. Die Herstellung der Bausteine begann mit der Synthese der Peptide via automatisierter Festphasen-Peptidsynthese, um eine einfache {\"A}nderung der Peptidsequenz zu gew{\"a}hrleisten und um eine Modifizierung der Polymerkette nach der Polymerisation zu umgehen. Um die ben{\"o}tigte unterschiedliche Funktionalit{\"a}t der zwei Peptidsequenzen (freier C-Terminus bei der Erkennungssequenz bzw. freier N-Terminus bei dem Nukleophil) zu erreichen, wurden verschiedene Linker zwischen Harz und Peptid verwendet. Danach wurde der Ketten{\"u}bertr{\"a}ger (chain transfer agent, CTA) zur Kontrolle der Polymerisation mit dem auf dem Harz befindlichen Peptid gekoppelt. Die f{\"u}r die anschließende Polymerisation verwendeten Monomere basierten auf Acrylamiden und Acrylaten und wurden anhand ihrer Eignung als Alternativen zu PEG ausgew{\"a}hlt. Es wurde eine k{\"u}rzlich entwickelte Technik basierend auf der RAFT-Polymerisation (xanthate-supported photo-iniferter RAFT, XPI-RAFT) verwendet, um eine Reihe an Peptid-Polymeren mit unterschiedlichen Molekulargewichten und engen Molekulargewichtsverteilungen herzustellen. Nach Entfernung der Schutzgruppen der Peptid-Seitenketten wurden die Peptid-Polymere zun{\"a}chst genutzt, um mittels Sortase-vermittelter Ligation zwei Polymerketten zu einem Blockcopolymer zu verbinden. Unter Verwendung von Ni2+-Ionen in Kombination mit einer Verl{\"a}ngerung der Erkennungssequenz um ein Histidin zur Unterdr{\"u}ckung der R{\"u}ckreaktion konnte ein maximaler Umsatz von 70 \% erreicht werden. Dabei zeigte sich ein oberes Limit von durchschnittlich 100 Wiederholungseinheiten; die Ligation von l{\"a}ngeren Polymeren war nicht erfolgreich. Danach wurden ein Modellprotein und ein Nanobody mit vielversprechenden medizinischen Eigenschaften mit den f{\"u}r die enzymkatalysierte Ligation ben{\"o}tigten Peptidsequenzen versehen, um sie mit den zuvor hergestellten Peptid-Polymeren zu koppeln. Dabei konnte bei Verwendung des Modellproteins keine Bildung von Protein-Polymer-Konjugaten beobachtet werden. Der Nanobody konnte dagegen C-terminal mit einem Polymer funktionalisiert werden. Dabei wurde eine {\"a}hnliche Limitierung in der Polymer-Kettenl{\"a}nge beobachtet wie zuvor. Die auf Ni-Ionen basierte Strategie zur Gleichgewichtsverschiebung hatte hier keinen ausschlaggebenden Effekt, w{\"a}hrend die Verwendung eines {\"U}berschusses an Polymer zur vollst{\"a}ndigen Umsetzung des Edukt-Nanobody f{\"u}hrte.
Die erhaltenen Daten aus diesem Projekt bilden eine gute Basis f{\"u}r weitere Forschung in dem vielversprechenden Feld der enzymkatalysierten Herstellung von Protein-Polymer-Konjugaten und Blockcopolymeren. Langfristig k{\"o}nnte diese Herangehensweise eine vielseitig einsetzbare Herstellungsmethode von ortsspezifischen therapeutischen Protein-Polymer-Konjugaten darstellen, welche sowohl eine hohe Aktivit{\"a}t als auch eine lange Zirkulationszeit im Blut aufweisen.}, language = {en} } @article{KernKochskaemper2024, author = {Kern, Kristine and Kochsk{\"a}mper, Elisa}, title = {Wege zur urbanen Transformation}, series = {Kommunalwissenschaften an der Universit{\"a}t Potsdam (KWI-Schriften ; 15)}, journal = {Kommunalwissenschaften an der Universit{\"a}t Potsdam (KWI-Schriften ; 15)}, number = {15}, editor = {Schmidt, Thorsten Ingo and Bickenbach, Christian and Gronewold, Ulfert and Kuhlmann, Sabine and Ulrich, Peter}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-581-1}, issn = {1867-951X}, doi = {10.25932/publishup-64788}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-647882}, pages = {101--109}, year = {2024}, language = {de} } @book{SchmidtUlrichBuechneretal.2024, author = {Schmidt, Thorsten Ingo and Ulrich, Peter and B{\"u}chner, Christiane and Franzke, Jochen and Jann, Werner and Bauer, Hartmut and Wagner, Dieter and Br{\"u}ning, Christoph and Bickenbach, Christian and Kuhlmann, Sabine and Peters, Niklas and Reichard, Christoph and Tessmann, Jens and Maaß, Christian and Kern, Kristine and Kochsk{\"a}mper, Elisa and Gailing, Ludger and Krzymuski, Marcin}, title = {Kommunalwissenschaften an der Universit{\"a}t Potsdam}, series = {KWI-Schriften}, journal = {KWI-Schriften}, number = {15}, editor = {Schmidt, Thorsten Ingo and Bickenbach, Christian and Gronewold, Ulfert and Kuhlmann, Sabine and Ulrich, Peter}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-581-1}, issn = {1867-951X}, doi = {10.25932/publishup-63618}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-636180}, pages = {124}, year = {2024}, abstract = {Zum dreißigj{\"a}hrigen Bestehen des Kommunalwissenschaftlichen Instituts an der Universit{\"a}t Potsdam vereint dieser Jubil{\"a}umsband kurze Aufs{\"a}tze von ehemaligen und aktuellen Vorstandsmitgliedern, von Ehrenmitgliedern des Vorstands, langj{\"a}hrigen wissenschaftlichen Mitarbeitern des Instituts und aktuellen wissenschaftlichen Kooperationspartnern. Die insgesamt zw{\"o}lf Beitr{\"a}ge befassen sich mit den Kommunalwissenschaften und der Geschichte des Kommunalwissenschaftlichen Instituts, mit aktuellen kommunalwissenschaftlichen Fragestellungen und wissenschaftlichen Kooperationen des KWI.
Der vom KWI-Vorstand herausgegebene Band soll einen breiten Blick auf 30 Jahre Kommunalwissenschaften in Brandenburg und an der Universit{\"a}t Potsdam werfen und einen Ausblick auf zuk{\"u}nftige kommunalwissenschaftliche Forschung geben.}, language = {de} } @phdthesis{Khosravi2023, author = {Khosravi, Sara}, title = {The effect of new turbulence parameterizations for the stable surface layer on simulations of the Arctic climate}, doi = {10.25932/publishup-64352}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-643520}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 119}, year = {2023}, abstract = {Arctic climate change is marked by intensified warming compared to global trends and a significant reduction in Arctic sea ice, which can intricately influence mid-latitude atmospheric circulation through tropo- and stratospheric pathways. Achieving accurate simulations of current and future climate demands a realistic representation of Arctic climate processes in numerical climate models, which remains challenging. Model deficiencies in replicating observed Arctic climate processes often arise due to inadequacies in representing the turbulent boundary layer processes that determine the interactions between the atmosphere, sea ice, and ocean. Many current climate models rely on parameterizations developed for mid-latitude conditions to handle Arctic turbulent boundary layer processes. This thesis focuses on the modified representation of Arctic atmospheric processes and on understanding their resulting impact on large-scale mid-latitude atmospheric circulation within climate models. The improved turbulence parameterizations, recently developed based on Arctic measurements, were implemented in the global atmospheric circulation model ECHAM6. This involved modifying the stability functions over sea ice and ocean for stable stratification and changing the roughness length over sea ice for all stratification conditions. Comprehensive analyses are conducted to assess the impacts of these modifications on ECHAM6's simulations of the Arctic boundary layer, overall atmospheric circulation, and the dynamical pathways between the Arctic and mid-latitudes. Through a step-wise implementation of the mentioned parameterizations into ECHAM6, a series of sensitivity experiments revealed that the combined impacts of the reduced roughness length and the modified stability functions are non-linear. Nevertheless, it is evident that both modifications consistently lead to a general decrease in the heat transfer coefficient, in close agreement with the observations. Additionally, compared to the reference observations, the ECHAM6 model falls short in accurately representing unstable and strongly stable conditions. The less frequent occurrence of strong stability restricts the influence of the modified stability functions by reducing the affected sample size. However, when focusing solely on the specific instances of a strongly stable atmosphere, the sensible heat flux approaches near-zero values, which is in line with the observations. Models employing commonly used surface turbulence parameterizations were shown to have difficulties replicating the near-zero sensible heat flux in strongly stable stratification. I also found that these limited changes in surface layer turbulence parameterizations have a statistically significant impact on the temperature and wind patterns across multiple pressure levels, including the stratosphere, in both the Arctic and mid-latitudes.
These significant signals vary in strength, extent, and direction depending on the specific month or year, indicating a strong dependence on the background state. Furthermore, this research investigates how the modified surface turbulence parameterizations may influence the response of both stratospheric and tropospheric circulation to Arctic sea ice loss. The most suitable parameterizations for accurately representing Arctic boundary layer turbulence were identified from the sensitivity experiments. Subsequently, the model's response to sea ice loss is evaluated through extended ECHAM6 simulations with different prescribed sea ice conditions. The simulation with adjusted surface turbulence parameterizations better reproduced the observed Arctic tropospheric warming in vertical extent, demonstrating improved alignment with the reanalysis data. Additionally, unlike the control experiments, this simulation successfully reproduced specific circulation patterns linked to the stratospheric pathway for Arctic-mid-latitude linkages. Specifically, an increased occurrence of the Scandinavian-Ural blocking regime (negative phase of the North Atlantic Oscillation) in early (late) winter is observed. Overall, it can be inferred that refining surface-layer turbulence parameterizations can improve ECHAM6's response to sea ice loss.}, language = {en} } @techreport{LessmannGrunerKalkuhletal.2024, type = {Working Paper}, author = {Lessmann, Kai and Gruner, Friedemann and Kalkuhl, Matthias and Edenhofer, Ottmar}, title = {Emissions Trading with Clean-up Certificates}, series = {CEPA Discussion Papers}, journal = {CEPA Discussion Papers}, number = {79}, issn = {2628-653X}, doi = {10.25932/publishup-64136}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-641368}, pages = {35}, year = {2024}, abstract = {We analyze how conventional emissions trading schemes (ETS) can be modified by introducing "clean-up certificates" to allow for a phase of net-negative emissions. Clean-up certificates bundle the permission to emit CO2 with the obligation for its removal. We show that demand for such certificates is determined by cost-saving technological progress, the discount rate and the length of the compliance period. Introducing extra clean-up certificates into an existing ETS reduces near-term carbon prices and mitigation efforts. In contrast, substituting ETS allowances with clean-up certificates reduces cumulative emissions without depressing carbon prices or mitigation in the near term. We calibrate our model to the EU ETS and identify reforms where simultaneously (i) ambition levels rise, (ii) climate damages fall, (iii) revenues from carbon prices rise and (iv) carbon prices and aggregate mitigation cost fall. For reducing climate damages, roughly half of the issued clean-up certificates should replace conventional ETS allowances.
In the context of the EU ETS, a European Carbon Central Bank could manage the implementation of clean-up certificates and could serve as an enforcement mechanism.}, language = {en} } @phdthesis{Damseaux2024, author = {Damseaux, Adrien}, title = {Improving permafrost dynamics in land surface models: insights from dual sensitivity experiments}, doi = {10.25932/publishup-63945}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-639450}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 143}, year = {2024}, abstract = {The thawing of permafrost and the subsequent release of greenhouse gases constitute one of the most significant and uncertain positive feedback loops in the context of climate change, making predictions regarding changes in permafrost coverage of paramount importance. To address these critical questions, climate scientists have developed Land Surface Models (LSMs) that encompass a multitude of physical soil processes. This thesis is committed to advancing our understanding of permafrost dynamics and refining its representation within LSMs, with a specific focus on the accurate modeling of heat fluxes, an essential component for simulating permafrost physics. The first research question provides an overview of fundamental model prerequisites for the representation of permafrost soils within land surface modeling. It includes a first-of-its-kind comparison between LSMs in CMIP6 to reveal their differences and shortcomings in key permafrost physics parameters. Overall, each of these LSMs represents a unique approach to simulating soil processes and their interactions with the climate system. Choosing the most appropriate model for a particular application depends on factors such as the spatial and temporal scale of the simulation, the specific research question, and available computational resources. The second research question evaluates the performance of the state-of-the-art Community Land Model (CLM5) in simulating Arctic permafrost regions. Our approach overcomes traditional evaluation limitations by individually addressing depth, seasonality, and regional variations, providing a comprehensive assessment of permafrost and soil temperature dynamics. I compare CLM5's results with three extensive datasets: (1) soil temperatures from 295 borehole stations, (2) active layer thickness (ALT) data from the Circumpolar Active Layer Monitoring Network (CALM), and (3) soil temperatures, ALT, and permafrost extent from the ESA Climate Change Initiative (ESA-CCI). The results show that CLM5 aligns well with ESA-CCI and CALM for permafrost extent and ALT but reveals a significant global cold temperature bias, notably over Siberia. These results echo a persistent challenge identified in numerous studies: the existence of a systematic 'cold bias' in soil temperature over permafrost regions. To address this challenge, the following research questions propose dual sensitivity experiments. The third research question represents the first study to apply a Plant Functional Type (PFT)-based approach to derive soil texture and soil organic matter (SOM), departing from the conventional use of coarse-resolution global data in LSMs. This novel method results in a more uniform distribution of soil organic matter density (OMD) across the domain, characterized by reduced OMD values in most regions. However, changes in soil texture exhibit a more intricate spatial pattern. Comparing the results to observations reveals a significant reduction in the cold bias observed in the control run.
This method shows noticeable improvements in permafrost extent, but at the cost of an overestimation in ALT. These findings emphasize the model's high sensitivity to variations in soil texture and SOM content, highlighting the crucial role of soil composition in governing heat transfer processes and shaping the seasonal variation of soil temperatures in permafrost regions. Expanding upon a site experiment conducted in Trail Valley Creek by \citet{dutch_impact_2022}, the fourth research question extends the application of the snow scheme proposed by \citet{sturm_thermal_1997} to cover the entire Arctic domain. By employing a snow scheme better suited to the snow density profile observed over permafrost regions, this thesis seeks to assess its influence on simulated soil temperatures. Comparing this method to observational datasets reveals a substantial reduction, in most regions, of the cold bias present in the control run. However, a distinct overshoot toward a warm bias is observed in mountainous areas. The Sturm experiment effectively addressed the overestimation of permafrost extent in the control run, albeit resulting in a substantial reduction in permafrost extent over mountainous areas. ALT results remain relatively consistent compared to the control run. These outcomes align with our initial hypothesis, which anticipated that the reduced snow insulation in the Sturm run would lead to higher winter soil temperatures and a more accurate representation of permafrost physics. In summary, this thesis demonstrates significant advancements in understanding permafrost dynamics and its integration into LSMs. It disentangles the intricate interplay between heat transfer, soil properties, and snow dynamics in permafrost regions. These insights offer novel perspectives on model representation and performance.}, language = {en} } @techreport{HertelKenneckeReicheetal.2024, author = {Hertel, Kyra and Kennecke, Andreas and Reiche, Michael and Schmidt-Kopplin, Diana and Tillmann, Diana and Winkler, Marco}, title = {Bericht zum Auftaktworkshop des Forschungsprojektes "Workflow-Management-Systeme f{\"u}r Open-Access-Hochschulverlage (OA-WFMS)"}, doi = {10.25932/publishup-63805}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-638057}, year = {2024}, abstract = {Das Forschungsprojekt „Workflow-Management-Systeme f{\"u}r Open-Access-Hochschulverlage (OA-WFMS)" ist eine Kooperation zwischen der HTWK Leipzig und der Universit{\"a}t Potsdam. Ziel ist es, die Bedarfe von Universit{\"a}ts- und Hochschulverlagen und die Anforderungen an ein Workflow-Management-System (WFMS) zu analysieren, um daraus ein generisches Lastenheft zu erstellen. Das WFMS soll den Publikationsprozess in OA-Verlagen erleichtern, beschleunigen sowie die Verbreitung von Open Access und das nachhaltige, digitale wissenschaftliche Publizieren f{\"o}rdern. Das Projekt baut auf den Ergebnissen der Projekte „Open-Access-Hochschulverlag (OA-HVerlag)" und „Open-Access-Strukturierte-Kommunikation (OA-STRUKTKOMM)" auf. Der diesem Bericht zugrunde liegende Auftaktworkshop fand 2024 in Leipzig mit Vertreter:innen von zehn Institutionen statt. Der Workshop diente dazu, Herausforderungen und Anforderungen an ein WFMS zu ermitteln sowie bestehende L{\"o}sungsans{\"a}tze und Tools zu diskutieren. Im Workshop wurden folgende Fragen behandelt: a.
Wie kann die Organisation und {\"U}berwachung von Publikationsprozessen in wissenschaftlichen Verlagen durch ein WFMS effizient gestaltet werden? b. Welche Anforderungen muss ein WFMS erf{\"u}llen, um Publikationsprozesse optimal zu unterst{\"u}tzen? c. Welche Schnittstellen m{\"u}ssen ber{\"u}cksichtigt werden, um die Interoperabilit{\"a}t der Systeme zu garantieren? d. Welche bestehenden L{\"o}sungsans{\"a}tze und Tools sind bereits im Einsatz und welche Vor- und Nachteile haben diese? Der Workshop gliederte sich in zwei Teile: Teil 1 behandelte Herausforderungen und Anforderungen (Fragen a. bis c.), Teil 2 bestehende L{\"o}sungen und Tools (Frage d.). Die Ergebnisse des Workshops fließen in die Bedarfsanalyse des Forschungsprojekts ein. Die im Bericht dokumentierten Ergebnisse zeigen die Vielzahl der Herausforderungen der bestehenden Ans{\"a}tze bez{\"u}glich des OA-Publikationsmanagements. Die Herausforderungen zeigen sich insbesondere bei der Systemheterogenit{\"a}t, den individuellen Anpassungsbedarfen und der Notwendigkeit der systematischen Dokumentation. Die eingesetzten Unterst{\"u}tzungssysteme und Tools wie Dateiablagen, Projektmanagement- und Kommunikationstools k{\"o}nnen insgesamt den Anforderungen nicht gen{\"u}gen, f{\"u}r Teill{\"o}sungen sind sie jedoch nutzbar. Deshalb muss die Integration bestehender Systeme in ein zu entwickelndes OA-WFMS in Betracht gezogen und die Interoperabilit{\"a}t der miteinander interagierenden Systeme gew{\"a}hrleistet werden. Die Beteiligten des Workshops waren sich einig, dass das OA-WFMS flexibel und modular aufgebaut werden soll. Einer konsortialen Softwareentwicklung und einem gemeinsamen Betrieb im Verbund wurde der Vorrang gegeben. Der Workshop lieferte wertvolle Einblicke in die Arbeit der Hochschulverlage und bildet somit eine solide Grundlage f{\"u}r die in Folge zu erarbeitende weitere Bedarfsanalyse und die Erstellung des generischen Lastenheftes.}, language = {de} }