@phdthesis{Pauli2024, author = {Pauli, Daniel}, title = {Unraveling massive star and binary physics in the nearby low-metallicity galaxy, the Small Magellanic Cloud, as a proxy for high-redshift galaxies}, doi = {10.25932/publishup-65318}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-653184}, school = {Universit{\"a}t Potsdam}, pages = {169}, year = {2024}, abstract = {Massive stars (Mini > 8 Msol) are the key feedback agents within galaxies, as they shape their surroundings via their powerful winds, ionizing radiation, and explosive supernovae. Most massive stars are born in binary systems, where interactions with their companions significantly alter their evolution and the feedback they deposit in their host galaxy. Understanding binary evolution, particularly in the low-metallicity environments as proxies for the Early Universe, is crucial for interpreting the rest-frame ultraviolet spectra observed in high-redshift galaxies by telescopes like Hubble and James Webb. This thesis aims to tackle this challenge by investigating in detail massive binaries within the low-metallicity environment of the Small Magellanic Cloud galaxy. From ultraviolet and multi-epoch optical spectroscopic data, we uncovered post-interaction binaries. To comprehensively characterize these binary systems, their stellar winds, and orbital parameters, we use a multifaceted approach. The Potsdam Wolf-Rayet stellar atmosphere code is employed to obtain the stellar and wind parameters of the stars. Additionally, we perform consistent light and radial velocity fitting with the Physics of Eclipsing Binaries software, allowing for the independent determination of orbital parameters and component masses. Finally, we utilize these results to challenge the standard picture of stellar evolution and improve our understanding of low-metallicity stellar populations by calculating our binary evolution models with the Modules for Experiments in Stellar Astrophysics code. 
We discovered the first four O-type post-interaction binaries in the SMC (Chapters 2, 5, and 6). Their primary stars have temperatures similar to other OB stars and reside far from the helium zero-age main sequence, challenging the traditional view of binary evolution. Our stellar evolution models suggest this may be due to enhanced mixing after core-hydrogen burning. Furthermore, we discovered the so-far most massive binary system undergoing mass transfer (Chapter 3), offering a unique opportunity to test mass-transfer efficiency in extreme conditions. Our binary evolution calculations revealed unexpected evolutionary pathways for accreting stars in binaries, potentially providing the missing link to understanding the observed Wolf-Rayet population within the SMC (Chapter 4). The results presented in this thesis unveiled the properties of massive binaries at low-metallicity which challenge the way the spectra of high-redshift galaxies are currently being analyzed as well as our understanding of massive-star feedback within galaxies.}, language = {en} } @phdthesis{Fulat2024, author = {Fulat, Karol}, title = {Electron acceleration at quasi-perpendicular shocks in supernova remnants}, doi = {10.25932/publishup-65136}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-651365}, school = {Universit{\"a}t Potsdam}, pages = {vi, 94}, year = {2024}, abstract = {Astrophysical shocks, driven by explosive events such as supernovae, efficiently accelerate charged particles to relativistic energies. The majority of these shocks occur in collisionless plasmas where the energy transfer is dominated by particle-wave interactions. Strong nonrelativistic shocks found in supernova remnants are plausible sites of galactic cosmic ray production, and the observed emission indicates the presence of nonthermal electrons. 
To participate in the primary mechanism of energy gain - Diffusive Shock Acceleration - electrons must have a highly suprathermal energy, implying a need for very efficient pre-acceleration. This poorly understood aspect of the shock acceleration theory is known as the electron injection problem. Studying electron-scale phenomena requires the use of fully kinetic particle-in-cell (PIC) simulations, which describe collisionless plasma from first principles. Most published studies consider a homogeneous upstream medium, but turbulence is ubiquitous in astrophysical environments and is typically driven at magnetohydrodynamic scales, cascading down to kinetic scales. For the first time, I investigate how preexisting turbulence affects electron acceleration at nonrelativistic shocks using the fully kinetic approach. To accomplish this, I developed a novel simulation framework that allows the study of shocks propagating in turbulent media. It involves simulating slabs of turbulent plasma separately, which are further continuously inserted into a shock simulation. This demands matching of the plasma slabs at the interface. A new procedure of matching electromagnetic fields and currents prevents numerical transients, and the plasma evolves self-consistently. The versatility of this framework has the potential to render simulations more consistent with turbulent systems in various astrophysical environments. In this Thesis, I present the results of 2D3V PIC simulations of high-Mach-number nonrelativistic shocks with preexisting compressive turbulence in an electron-ion plasma. The chosen amplitudes of the density fluctuations ($\lesssim 15\%$) concord with \textit{in situ} measurements in the heliosphere and the local interstellar medium. I explored how these fluctuations impact the dynamics of upstream electrons, the driving of the plasma instabilities, electron heating and acceleration. 
My results indicate that while the presence of the turbulence enhances variations in the upstream magnetic field, their levels remain too low to influence the behavior of electrons at perpendicular shocks significantly. However, the situation is different at oblique shocks. The external magnetic field inclined at an angle between $50^\circ \lesssim \theta_\text{Bn} \lesssim 75^\circ$ relative to the shock normal allows the escape of fast electrons toward the upstream region. An extended electron foreshock region is formed, where these particles drive various instabilities. Results of an oblique shock with $\theta_\text{Bn}=60^\circ$ propagating in preexisting compressive turbulence show that the foreshock becomes significantly shorter, and the shock-reflected electrons have higher temperatures. Furthermore, the energy spectrum of downstream electrons shows a well-pronounced nonthermal tail that follows a power law with an index up to -2.3. The methods and results presented in this Thesis could serve as a starting point for more realistic modeling of interactions between shocks and turbulence in plasmas from first principles.}, language = {en} } @phdthesis{ValenciaSanmiguel2003, author = {Valencia Sanmiguel, Antonio}, title = {Condensation and crystallization on patterned surfaces}, doi = {10.25932/publishup-65195}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-651950}, school = {Universit{\"a}t Potsdam}, pages = {102, XXII}, year = {2003}, abstract = {Condensation and crystallization are omnipresent phenomena in nature. The formation of droplets or crystals on a solid surface are familiar processes which, beyond their scientific interest, are required in many technological applications. 
In recent years, experimental techniques have been developed which allow patterning a substrate with surface domains of molecular thickness, surface area in the mesoscopic scale, and different wettabilities (i.e., different degrees of preference for a substance that is in contact with the substrate). The existence of new patterned surfaces has led to increased theoretical efforts to understand wetting phenomena in such systems. In this thesis, we deal with some problems related to the equilibrium of phases (e.g., liquid-vapor coexistence) and the kinetics of phase separation in the presence of chemically patterned surfaces. Two different cases are considered: (i) patterned surfaces in contact with liquid and vapor, and (ii) patterned surfaces in contact with a crystalline phase. One of the problems that we have studied is the following: It is widely believed that if air containing water vapor is cooled to its dew point, droplets of water are immediately formed. Although common experience seems to support this view, it is not correct. It is only when air is cooled well below its dew point that the phase transition occurs immediately. A vapor cooled slightly below its dew point is in a metastable state, meaning that the liquid phase is more stable than the vapor, but the formation of droplets requires some time to occur, which can be very long. It was first pointed out by J. W. Gibbs that the metastability of a vapor depends on the energy necessary to form a nucleus (a droplet of a critical size). Droplets smaller than the critical size will tend to disappear, while droplets larger than the critical size will tend to grow. This is consistent with an energy barrier that has its maximum at the critical size, as is the case for droplets formed directly in the vapor or in contact with a chemically uniform planar wall. Classical nucleation theory describes the time evolution of the condensation in terms of the random process of droplet growth through this energy barrier. 
This process is activated by thermal fluctuations, which eventually will form a droplet of the critical size. We consider nucleation of droplets from a vapor on a substrate patterned with easily wettable (lyophilic) circular domains. Under certain conditions of pressure and temperature, the condensation of a droplet on a lyophilic circular domain proceeds through a barrier with two maxima (a double barrier). We have extended classical nucleation theory to account for the kinetics of nucleation through a double barrier, and applied this extension to nucleation on lyophilic circular domains.}, language = {en} } @phdthesis{Arend2024, author = {Arend, Marius}, title = {Comparing genome-scale models of protein-constrained metabolism in heterotrophic and photosynthetic microorganisms}, doi = {10.25932/publishup-65147}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-651470}, school = {Universit{\"a}t Potsdam}, pages = {150}, year = {2024}, abstract = {Genome-scale metabolic models are mathematical representations of all known reactions occurring in a cell. Combined with constraints based on physiological measurements, these models have been used to accurately predict metabolic fluxes and effects of perturbations (e.g. knock-outs) and to inform metabolic engineering strategies. Recently, protein-constrained models have been shown to increase predictive potential (especially in overflow metabolism), while alleviating the need for measurement of nutrient uptake rates. The resulting modelling frameworks quantify the upkeep cost of a certain metabolic flux as the minimum amount of enzyme required for catalysis. These improvements are based on the use of in vitro turnover numbers or in vivo apparent catalytic rates of enzymes for model parameterization. 
In this thesis several tools for the estimation and refinement of these parameters based on in vivo proteomics data of Escherichia coli, Saccharomyces cerevisiae, and Chlamydomonas reinhardtii have been developed and applied. The difference between in vitro and in vivo catalytic rate measures for the three microorganisms was systematically analyzed. The results for the facultatively heterotrophic microalga C. reinhardtii considerably expanded the apparent catalytic rate estimates for photosynthetic organisms. Our general finding pointed at a global reduction of enzyme efficiency in heterotrophy compared to other growth scenarios. Independent of the modelled organism, in vivo estimates were shown to improve accuracy of predictions of protein abundances compared to in vitro values for turnover numbers. To further improve the protein abundance predictions, machine learning models were trained that integrate features derived from protein-constrained modelling and codon usage. Combining the two types of features outperformed single feature models and yielded good prediction results without relying on experimental transcriptomic data. The presented work reports valuable advances in the prediction of enzyme allocation in unseen scenarios using protein constrained metabolic models. 
It marks the first successful application of this modelling framework in the biotechnologically important taxon of green microalgae, substantially increasing our knowledge of the enzyme catalytic landscape of phototrophic microorganisms.}, language = {en} } @phdthesis{Mayer2024, author = {Mayer, Selina}, title = {Understanding the impact of design thinking on organizations and individuals}, doi = {10.25932/publishup-65154}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-651541}, school = {Universit{\"a}t Potsdam}, pages = {xi, 125}, year = {2024}, abstract = {Organizations are investing billions on innovation and agility initiatives to stay competitive in their increasingly uncertain business environments. Design Thinking, an innovation approach based on human-centered exploration, ideation and experimentation, has gained increasing popularity. The market for Design Thinking, including software products and general services, is projected to reach 2,500 million \$ (US-Dollar) by 2028. A dispersed set of positive outcomes have been attributed to Design Thinking. However, there is no clear understanding of what exactly comprises the impact of Design Thinking and how it is created. To support a billion-dollar market, it is essential to understand the value Design Thinking is bringing to organizations not only to justify large investments, but to continuously improve the approach and its application. Following a qualitative research approach combined with results from a systematic literature review, the results presented in this dissertation offer a structured understanding of Design Thinking impact. The results are structured along two main perspectives of impact: the individual and the organizational perspective. First, insights from qualitative data analysis demonstrate that measuring and assessing the impact of Design Thinking is currently one central challenge for Design Thinking practitioners in organizations. 
Second, the interview data revealed several effects Design Thinking has on individuals, demonstrating how Design Thinking can impact boundary management behaviors and enable employees to craft their jobs more actively. Contributing to innovation management research, the work presented in this dissertation systematically explains the Design Thinking impact, allowing other researchers to both locate and integrate their work better. The results of this research advance the theoretical rigor of Design Thinking impact research, offering multiple theoretical underpinnings explaining the variety of Design Thinking impact. Furthermore, this dissertation contains three specific propositions on how Design Thinking creates an impact: Design Thinking creates an impact through integration, enablement, and engagement. Integration refers to how Design Thinking enables organizations through effectively combining things, such as for example fostering balance between exploitation and exploration activities. Through Engagement, Design Thinking impacts organizations involving users and other relevant stakeholders in their work. Moreover, Design Thinking creates impact through Enablement, making it possible for individuals to enact a specific behavior or experience certain states. By synthesizing multiple theoretical streams into these three overarching themes, the results of this research can help bridge disciplinary boundaries, for example between business, psychology and design, and enhance future collaborative research. Practitioners benefit from the results as multiple desirable outcomes are detailed in this thesis, such as successful individual job crafting behaviors, which can be expected from practicing Design Thinking. This allows practitioners to enact more evidence-based decision-making concerning Design Thinking implementation. 
Overall, considering multiple levels of impact as well as a broad range of theoretical underpinnings are paramount to understanding and fostering Design Thinking impact.}, language = {en} } @phdthesis{Heckenbach2024, author = {Heckenbach, Esther Lina}, title = {Geodynamic modeling of process interactions at continental plate boundaries}, doi = {10.25932/publishup-64750}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-647500}, school = {Universit{\"a}t Potsdam}, pages = {127}, year = {2024}, abstract = {Plate tectonic boundaries constitute the suture zones between tectonic plates. They are shaped by a variety of distinct and interrelated processes and play a key role in geohazards and georesource formation. Many of these processes have been previously studied, while many others remain unaddressed or undiscovered. In this work, the geodynamic numerical modeling software ASPECT is applied to shed light on further process interactions at continental plate boundaries. In contrast to natural data, geodynamic modeling has the advantage that processes can be directly quantified and that all parameters can be analyzed over the entire evolution of a structure. Furthermore, processes and interactions can be singled out from complex settings because the modeler has full control over all of the parameters involved. To account for the simplifying character of models in general, I have chosen to study generic geological settings with a focus on the processes and interactions rather than precisely reconstructing a specific region of the Earth. In Chapter 2, 2D models of continental rifts with different crustal thicknesses between 20 and 50 km and extension velocities in the range of 0.5-10 mm/yr are used to obtain a speed limit for the thermal steady-state assumption, commonly employed to address the temperature fields of continental rifts worldwide. 
Because the tectonic deformation from ongoing rifting outpaces heat conduction, the temperature field is not in equilibrium, but is characterized by a transient, tectonically-induced heat flow signal. As a result, I find that isotherm depths of the geodynamic evolution models are shallower than a temperature distribution in equilibrium would suggest. This is particularly important for deep isotherms and narrow rifts. In narrow rifts, the magnitude of the transient temperature signal limits a well-founded applicability of the thermal steady-state assumption to extension velocities of 0.5-2 mm/yr. Estimation of the crustal temperature field affects conclusions on all temperature-dependent processes ranging from mineral assemblages to the feasible exploitation of a geothermal reservoir. In Chapter 3, I model the interactions of different rheologies with the kinematics of folding and faulting using the example of fault-propagation folds in the Andean fold-and-thrust belt. The evolution of the velocity fields from geodynamic models are compared with those from trishear models of the same structure. While the latter use only geometric and kinematic constraints of the main fault, the geodynamic models capture viscous, plastic, and elastic deformation in the entire model domain. I find that both models work equally well for early, and thus relatively simple stages of folding and faulting, while results differ for more complex situations where off-fault deformation and secondary faulting are present. As fault-propagation folds can play an important role in the formation of reservoirs, knowledge of fluid pathways, for example via fractures and faults, is crucial for their characterization. Chapter 4 deals with a bending transform fault and the interconnections between tectonics and surface processes. 
In particular, the tectonic evolution of the Dead Sea Fault is addressed where a releasing bend forms the Dead Sea pull-apart basin, while a restraining bend further to the North resulted in the formation of the Lebanese mountains. I ran 3D coupled geodynamic and surface evolution models that included both types of bends in a single setup. I tested various randomized initial strain distributions, showing that basin asymmetry is a consequence of strain localization. Furthermore, by varying the surface process efficiency, I find that the deposition of sediment in the pull-apart basin not only controls basin depth, but also results in a crustal flow component that increases uplift at the restraining bend. Finally, in Chapter 5, I present the computational basis for adding further complexity to plate boundary models in ASPECT with the implementation of earthquake-like behavior using the rate-and-state friction framework. Despite earthquakes happening on a relatively small time scale, there are many interactions between the seismic cycle and the long time spans of other geodynamic processes. Amongst others, the crustal state of stress as well as the presence of fluids or changes in temperature may alter the frictional behavior of a fault segment. My work provides the basis for a realistic setup of involved structures and processes, which is therefore important to obtain a meaningful estimate for earthquake hazards. 
While these findings improve our understanding of continental plate boundaries, further development of geodynamic software may help to reveal even more processes and interactions in the future.}, language = {en} } @phdthesis{Mirzaee2024, author = {Mirzaee, Zohreh}, title = {Ecology and phylogeny of Mantodea of Iran and adjacent areas}, doi = {10.25932/publishup-65273}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-652739}, school = {Universit{\"a}t Potsdam}, pages = {187}, year = {2024}, abstract = {Mantodea, commonly known as mantids, have captivated researchers owing to their enigmatic behavior and ecological significance. This order comprises a diverse array of predatory insects, boasting over 2,400 species globally and inhabiting a wide spectrum of ecosystems. In Iran, the mantid fauna displays remarkable diversity, yet numerous facets of this fauna remain poorly understood, with a significant dearth of systematic and ecological research. This substantial knowledge gap underscores the pressing need for a comprehensive study to advance our understanding of Mantodea in Iran and its neighboring regions. The principal objective of this investigation was to delve into the ecology and phylogeny of Mantodea within these areas. To accomplish this, our research efforts concentrated on three distinct genera within Iranian Mantodea. These genera were selected due to their limited existing knowledge base and feasibility for in-depth study. Our comprehensive methodology encompassed a multifaceted approach, integrating morphological analysis, molecular techniques, and ecological observations. Our research encompassed a comprehensive revision of the genus Holaptilon, resulting in the description of four previously unknown species. This extensive effort substantially advanced our understanding of the ecological roles played by Holaptilon and refined its systematic classification. 
Furthermore, our investigation into Nilomantis floweri expanded its known distribution range to include Iran. By conducting thorough biological assessments, genetic analyses, and ecological niche modeling, we obtained invaluable insights into distribution patterns and genetic diversity within this species. Additionally, our research provided a thorough comprehension of the life cycle, behaviors, and ecological niche modeling of Blepharopsis mendica, shedding new light on the distinctive characteristics of this mantid species. Moreover, we contributed essential knowledge about parasitoids that infect mantid ootheca, laying the foundation for future studies aimed at uncovering the intricate mechanisms governing ecological and evolutionary interactions between parasitoids and Mantodea.}, language = {en} } @phdthesis{Shigeyama2024, author = {Shigeyama, Jotaro}, title = {Virtual reality at 1:1 scale in small physical spaces}, doi = {10.25932/publishup-64900}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-649000}, school = {Universit{\"a}t Potsdam}, pages = {115}, year = {2024}, abstract = {Virtual Reality (VR) leads to the highest level of immersion if presented using a 1:1 mapping of virtual space to physical space---also known as real walking. The advent of inexpensive consumer virtual reality (VR) headsets, all capable of running inside-out position tracking, has brought VR to the home. However, many VR applications do not feature full real walking, but instead, feature a less immersive space-saving technique known as instant teleportation. Given that only 0.3\% of home users run their VR experiences in spaces more than 4 m$^2$, the most likely explanation is the lack of the physical space required for meaningful use of real walking. In this thesis, we investigate how to overcome this hurdle. We demonstrate how to run 1:1-mapped VR experiences in small physical spaces and we explore the trade-off between space and immersion. 
(1) We start with a space limit of 15cm. We present DualPanto, a device that allows (blind) VR users to experience the virtual world from a 1:1 mapped bird's eye perspective---by leveraging haptics. (2) We then relax our space constraints to 50cm, which is what seated users (e.g., on an airplane or train ride) have at their disposal. We leverage the space to represent a standing user in 1:1 mapping, while only compressing the user's arm movement. We demonstrate our 4 prototype VirtualArms at the example of VR experiences limited to arm movement, such as boxing. (3) Finally, we relax our space constraints further to 3 m$^2$ of walkable space, which is what 75\% of home users have access to. As well-established in the literature, we implement real walking with the help of portals, also known as ``impossible spaces''. While impossible spaces on such dramatic space constraints tend to degenerate into incomprehensible mazes (as demonstrated, for example, by ``TraVRsal''), we propose plausibleSpaces: presenting meaningful virtual worlds by adapting various visual elements to impossible spaces. Our techniques push the boundary of spatially meaningful VR interaction in various small spaces. 
We see further future challenges for new design approaches to immersive VR experiences for the smallest physical spaces in our daily life.}, language = {en} } @book{JuizBermejoCalleetal.2024, author = {Juiz, Carlos and Bermejo, Belen and Calle, Alejandro and Sidorova, Julia and Lundberg, Lars and Weidmann, Vera and Lowitzki, Leon and Mirtschin, Marvin and van Hoorn, Andr{\'e} and Frank, Markus and Schulz, Henning and Stojanovic, Dragan and Stojanovic, Natalija and Stojnev Ilic, Aleksandra and Friedrich, Tobias and Lenzner, Pascal and Weyand, Christopher and Wagner, Markus and Plauth, Max and Polze, Andreas and Nowicki, Marek and Seth, Sugandh and Kaur Chahal, Kuljit and Singh, Gurwinder and Speth, Sandro and Janes, Andrea and Camilli, Matteo and Ziegler, Erik and Schmidberger, Marcel and P{\"o}rschke, Mats and Bartz, Christian and Lorenz, Martin and Meinel, Christoph and Beilich, Robert and Bertazioli, Dario and Carlomagno, Cristiano and Bedoni, Marzia and Messina, Vincenzina}, title = {HPI Future SOC Lab}, series = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, journal = {Technische Berichte des Hasso-Plattner-Instituts f{\"u}r Digital Engineering an der Universit{\"a}t Potsdam}, number = {159}, editor = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen and Sommer, J{\"u}rgen}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-565-1}, issn = {1613-5652}, doi = {10.25932/publishup-59801}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-598014}, pages = {ix, 142}, year = {2024}, abstract = {The ``HPI Future SOC Lab'' is a cooperation of the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. 
The HPI Future SOC Lab provides researchers with free-of-charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components, which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB main memory. The offerings address researchers particularly from but not limited to the areas of computer science and business information systems. Main areas of research include cloud computing, parallelization, and In-Memory technologies. This technical report presents results of research projects executed in 2020. Selected projects have presented their results on April 21st and November 10th 2020 at the Future SOC Lab Day events.}, language = {en} } @phdthesis{Bryant2024, author = {Bryant, Seth}, title = {Aggregation and disaggregation in flood risk models}, doi = {10.25932/publishup-65095}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-650952}, school = {Universit{\"a}t Potsdam}, pages = {ix, 116}, year = {2024}, abstract = {Floods continue to be the leading cause of economic damages and fatalities among natural disasters worldwide. As future climate and exposure changes are projected to intensify these damages, the need for more accurate and scalable flood risk models is rising. Over the past decade, macro-scale flood risk models have evolved from initial proof-of-concepts to indispensable tools for decision-making at global-, national- and, increasingly, the local level. This progress has been propelled by the advent of high-performance computing and the availability of global, space-based datasets. However, despite such advancements, these models are rarely validated and consistently fall short of the accuracy achieved by high-resolution local models. While capabilities have improved, significant gaps persist in understanding the behaviours of such macro-scale models, particularly their tendency to overestimate risk. 
This dissertation aims to address such gaps by examining the scale transfers inherent in the construction and application of coarse macroscale models. To achieve this, four studies are presented that, collectively, address exposure, hazard, and vulnerability components of risk affected by upscaling or downscaling. The first study focuses on a type of downscaling where coarse flood hazard inundation grids are enhanced to a finer resolution. While such inundation downscaling has been employed in numerous global model chains, ours is the first study to focus specifically on this component, providing an evaluation of the state of the art and a novel algorithm. Findings demonstrate that our novel algorithm is eight times faster than existing methods, offers a slight improvement in accuracy, and generates more physically coherent flood maps in hydraulically challenging regions. When applied to a case study, the algorithm generated a 4m resolution inundation map from 30m hydrodynamic model outputs in 33 s, a 60-fold improvement in runtime with a 25\% increase in RMSE compared with direct hydrodynamic modelling. All evaluated downscaling algorithms yielded better accuracy than the coarse hydrodynamic model when compared to observations, demonstrating similar limits of coarse hydrodynamic models reported by others. The substitution of downscaling into flood risk model chains, in place of high-resolution modelling, can drastically improve the lead time of impact-based forecasts and the efficiency of hazard map production. With downscaling, local regions could obtain high resolution local inundation maps by post-processing a global model without the need for expensive modelling or expertise. The second study focuses on hazard aggregation and its implications for exposure, investigating implicit aggregations commonly used to intersect hazard grids with coarse exposure models. 
This research introduces a novel spatial classification framework to understand the effects of rescaling flood hazard grids to a coarser resolution. The study derives closed-form analytical solutions for the location and direction of bias from flood grid aggregation, showing that bias will always be present in regions near the edge of inundation. For example, inundation area will be positively biased when water depth grids are aggregated, while volume will be negatively biased when water elevation grids are aggregated. Extending the analysis to effects of hazard aggregation on building exposure, this study shows that exposure in regions at the edge of inundation are an order of magnitude more sensitive to aggregation errors than hazard alone. Among the two aggregation routines considered, averaging water surface elevation grids better preserved flood depths at buildings than averaging of water depth grids. The study provides the first mathematical proof and generalizable treatment of flood hazard grid aggregation, demonstrating important mechanisms to help flood risk modellers understand and control model behaviour. The final two studies focus on the aggregation of vulnerability models or flood damage functions, investigating the practice of applying per-asset functions to aggregate exposure models. Both studies extend Jensen's inequality, a well-known 1906 mathematical proof, to demonstrate how the aggregation of flood damage functions leads to bias. Applying Jensen's proof in this new context, results show that typically concave flood damage functions will introduce a positive bias (overestimation) when aggregated. This behaviour was further investigated with a simulation experiment including 2 million buildings in Germany, four global flood hazard simulations and three aggregation scenarios. 
The results show that positive aggregation bias is not distributed evenly in space, meaning some regions identified as "hot spots of risk" in assessments may in fact just be hot spots of aggregation bias. This study provides the first application of Jensen's inequality to explain the overestimates reported elsewhere and advice for modellers to minimize such artifacts. In total, this dissertation investigates the complex ways aggregation and disaggregation influence the behaviour of risk models, focusing on the scale-transfers underpinning macro-scale flood risk assessments. Extending a key finding of the flood hazard literature to the broader context of flood risk, this dissertation concludes that all else equal, coarse models overestimate risk. This dissertation goes beyond previous studies by providing mathematical proofs for how and where such bias emerges in aggregation routines, offering a mechanistic explanation for coarse model overestimates. It shows that this bias is spatially heterogeneous, necessitating a deep understanding of how rescaling may bias models to effectively reduce or communicate uncertainties. Further, the dissertation offers specific recommendations to help modellers minimize scale transfers in problematic regions. In conclusion, I argue that such aggregation errors are epistemic, stemming from choices in model structure, and therefore hold greater potential and impetus for study and mitigation. This deeper understanding of uncertainties is essential for improving macro-scale flood risk models and their effectiveness in equitable, holistic, and sustainable flood management.}, language = {en} }