@phdthesis{Pilz2020, author = {Pilz, Tobias}, title = {Pursuing the understanding of uncertainties in hydrological modelling}, doi = {10.25932/publishup-47664}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476643}, school = {Universit{\"a}t Potsdam}, pages = {136}, year = {2020}, abstract = {Hydrological models are important tools for the simulation and quantification of the water cycle. They aid in understanding hydrological processes, predicting river discharge, assessing the impacts of land use and climate change, and managing water resources. However, the uncertainties associated with hydrological modelling are still large. While significant research has been done on the quantification and reduction of uncertainties, some fields have so far received little attention, such as model structural uncertainties related to the process implementations within the models. This holds especially true for complex process-based models, in contrast to simpler conceptual models. Consequently, the aim of this thesis is to improve the understanding of structural uncertainties, with a focus on process-based hydrological modelling and including methods for their quantification. To identify common deficits of frequently used hydrological models and to develop strategies for reducing them, a survey among modellers was conducted. It was found that there is a certain degree of subjectivity in the perception of modellers, for instance with respect to the classification of hydrological models into conceptual groups. It was further found that there are ambiguities about how a given hydrological model should be applied, for instance how many parameters should be calibrated, together with a large diversity of opinion regarding the deficits of models. Nevertheless, evapotranspiration processes are often represented in a more physically based manner, while processes of groundwater and soil water movement are often simplified, which many survey participants saw as a drawback. Large flexibility, for instance with respect to alternative process implementations or a small number of parameters that need to be calibrated, was generally seen as a strength of a model. Flexible and efficient software that is straightforward to apply has been increasingly acknowledged by the hydrological community. This work elaborated on this topic in two ways. First, a software package for semi-automated landscape discretisation was developed, which serves as a tool for model initialisation. This was complemented by a sensitivity analysis of important and commonly used discretisation parameters, of which the size of hydrological sub-catchments as well as the size and number of hydrologically uniform computational units appeared to be more influential than the information considered for characterising hillslope profiles. Second, a process-based hydrological model was implemented in a flexible simulation environment with several alternative process representations and a number of numerical solvers. It turned out that, even though computation times remained long, today's enhanced computational capabilities, in combination with innovative methods for statistical analysis, allow the structural uncertainties of even complex process-based models to be explored, which until now has often been neglected by the modelling community. 
A further study showed that process-based models can even be employed as tools for seasonal operational forecasting. In contrast to statistical models, which are faster to initialise and to apply, process-based models produce more information in addition to the target variable, even at finer spatial and temporal scales, and provide more insight into process behaviour and catchment functioning. However, the process-based model was much more dependent on reliable rainfall forecasts. It seems unlikely that a single best formulation for hydrological processes exists, even for a specific catchment. This supports the use of flexible model environments with alternative process representations rather than a single model structure. However, correlation and compensation effects between process formulations, their parametrisation, and other aspects such as the numerical solver and model resolution may lead to surprising results and potentially misleading conclusions. In future studies, such effects should be addressed and quantified more explicitly. Moreover, model functioning appeared to be highly dependent on the meteorological conditions, and rainfall input was generally the most important source of uncertainty. It is still unclear how this could be addressed, especially in the light of the aforementioned correlations. The use of innovative data products, e.g.\ remote sensing data in combination with station measurements, and of efficient processing methods to improve the rainfall input and explicitly account for its uncertainties is advisable to gain further insight and make hydrological simulations and predictions more reliable.}, language = {en} } @phdthesis{Guentner2002, author = {G{\"u}ntner, Andreas}, title = {Large-scale hydrological modelling in the semi-arid north-east of Brazil}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000511}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {Semi-arid areas are, due to their climatic setting, characterized by limited water resources. Increasing water demand as a consequence of population growth and economic development, as well as decreasing water availability in the course of possible climate change, may aggravate water scarcity in the future, which in these areas often already exists under present-day conditions. Understanding the mechanisms and feedbacks of complex natural and human systems, together with the quantitative assessment of future changes in the volume, timing and quality of water resources, is a prerequisite for developing sustainable water management measures that enhance the adaptive capacity of these regions. For this task, dynamic integrated models, containing a hydrological model as one component, are indispensable tools. The main objective of this study is to develop a hydrological model for the quantification of water availability in view of environmental change over a large geographic domain of semi-arid environments. The study area is the Federal State of Cear{\´a} (150 000 km2) in the semi-arid north-east of Brazil. Mean annual precipitation in this area is 850 mm, falling in a rainy season of about five months. As the area is mainly characterized by crystalline bedrock and shallow soils, surface water provides the largest part of the water supply. The area has recurrently been affected by droughts, which have caused serious economic losses and social impacts such as migration from rural regions. 
The hydrological model Wasa (Model of Water Availability in Semi-Arid Environments) developed in this study is a deterministic, spatially distributed model composed of conceptual, process-based approaches. Water availability (river discharge, storage volumes in reservoirs, soil moisture) is determined at daily resolution. Sub-basins, grid cells or administrative units (municipalities) can be chosen as spatial target units. The administrative units enable Wasa to be coupled within the framework of an integrated model containing modules that do not operate on natural spatial units. The target units mentioned above are disaggregated in Wasa into smaller modelling units within a new multi-scale, hierarchical approach. The landscape units defined in this scheme capture in particular the effect of the structured variability of terrain, soil and vegetation characteristics along toposequences on soil moisture and runoff generation. Lateral hydrological processes at the hillslope scale, such as the reinfiltration of surface runoff, which are of particular importance in semi-arid environments, can thus also be represented in simplified form within the large-scale model. Depending on the resolution of the available data, small-scale variability is not represented explicitly with geographic reference in Wasa, but through the distribution of sub-scale units and statistical transition frequencies for lateral fluxes between these units. Further model components of Wasa that account for specific features of semi-arid hydrology are: (1) A two-layer evapotranspiration model comprises energy transfer at the soil surface (including soil evaporation), which is important in view of the mainly sparse vegetation cover. Additionally, vegetation parameters are differentiated in space and time depending on the occurrence of the rainy season. (2) The infiltration module represents in particular infiltration-excess surface runoff as the dominant runoff component. (3) For the aggregate description of the water balance of reservoirs that cannot be represented explicitly in the model, a storage approach accounting for different reservoir size classes and their interaction via the river network is applied. (4) A model for the quantification of water withdrawal by water use in different sectors is coupled to Wasa. (5) A cascade model for the temporal disaggregation of precipitation time series, adapted to the specific characteristics of tropical convective rainfall, is applied to generate rainfall time series of higher temporal resolution. All model parameters of Wasa can be derived from physiographic information on the study area; model calibration is therefore, in principle, not required. Applications of Wasa to historical time series generally result in good model performance when the simulated river discharge and reservoir storage volumes are compared with observed data for river basins of various sizes. The mean water balance as well as the high interannual and intra-annual variability is reasonably represented by the model. Limitations of the modelling concept are most marked for sub-basins with a runoff component from deep groundwater bodies, whose dynamics cannot be satisfactorily represented without calibration. 
Further results of the model applications are: (1) Lateral processes of runoff and soil moisture redistribution at the hillslope scale, in particular the reinfiltration of surface runoff, lead to markedly smaller discharge volumes at the basin scale than the simple sum of the runoff of the individual sub-areas. These processes therefore also need to be captured in large-scale models. Their different relevance under different conditions is demonstrated by a larger percentage decrease of discharge volumes in dry years than in wet years. (2) Precipitation characteristics have a major impact on the hydrological response of semi-arid environments. In particular, rainfall intensities in the rainfall input are underestimated owing to the coarse temporal resolution of the model and to interpolation effects, and the consequently underestimated runoff volumes have to be compensated for in the model. A scaling factor in the infiltration module or the use of disaggregated hourly rainfall data gives good results in this respect. The simulation results of Wasa are characterized by large uncertainties. These are due, on the one hand, to uncertainties in the ability of the model structure to adequately represent the relevant hydrological processes and, on the other hand, to uncertainties in input data and parameters, particularly in view of the low data availability. Of major importance are: (1) The uncertainty of the rainfall data with regard to their spatial and temporal pattern has, owing to the strongly non-linear hydrological response, a large impact on the simulation results. (2) The uncertainty of soil parameters generally contributes more to model uncertainty than the uncertainty of vegetation or topographic parameters. (3) The effect of the uncertainty of individual model components or parameters usually differs between years with above-average and below-average rainfall, because individual hydrological processes differ in relevance in the two cases. Thus, the uncertainty of individual model components or parameters is of different importance for the uncertainty of scenario simulations with increasing or decreasing precipitation trends. (4) The most important factor of uncertainty for scenarios of water availability in the study area is the uncertainty in the results of the global climate models on which the regional climate scenarios are based. Given the available data, both a marked increase and a decrease in precipitation are conceivable. Model simulations for climate scenarios up to the year 2050 show that a possible future change in precipitation volumes causes a percentage change in runoff volumes that is larger by a factor of two to three. In the case of a decreasing precipitation trend, the efficiency of new reservoirs for securing water availability in the study area tends to decrease, because the large number of reservoirs interact in retaining the overall decreasing runoff volumes.}, subject = {Cear{\´a} / Semiarides Gebiet / Wasserreserve / Hydrologie / Mathematisches Modell}, language = {en} } @phdthesis{Stauch2006, author = {Stauch, Vanessa Juliane}, title = {Data-led methods for the analysis and interpretation of eddy covariance observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12389}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The terrestrial biosphere has a considerable impact on the global carbon cycle. 
In particular, ecosystems help to offset anthropogenically induced fossil fuel emissions and hence decelerate the rise of the atmospheric CO₂ concentration. However, the future net sink strength of an ecosystem will heavily depend on the response of the individual processes to a changing climate. Understanding the makeup of these processes and their interaction with the environment is therefore of major importance for developing long-term climate mitigation strategies. Mathematical models are used to predict the fate of carbon in the soil-plant-atmosphere system under changing environmental conditions. However, the underlying processes giving rise to the net carbon balance of an ecosystem are complex and not entirely understood at the canopy level. Carbon exchange models are therefore characterised by considerable uncertainty, rendering model-based predictions of the future prone to error. Observations of the carbon exchange at the canopy scale can help in learning about the dominant processes and hence contribute to reducing the uncertainty associated with model-based predictions. For this reason, a global network of measurement sites has been established that provides long-term observations of the CO₂ exchange between a canopy and the atmosphere along with the micrometeorological conditions. These time series, however, suffer from observation uncertainty that, if not characterised, limits their use in ecosystem studies. The general objective of this work is to develop a modelling methodology that synthesises physical process understanding with the information content of canopy-scale data, in an attempt to overcome the limitations of both carbon exchange models and observations. Similar hybrid modelling approaches have been successfully applied to signal extraction from noisy time series in environmental engineering. Here, simple process descriptions are used to identify relationships between the carbon exchange and environmental drivers from noisy data. The functional form of these relationships is not prescribed a priori but determined directly from the data, ensuring that the model complexity is commensurate with the observations. This data-led analysis therefore results in the identification of the processes dominating carbon exchange at the ecosystem scale, as reflected in the data. The description of these processes may then lead to robust carbon exchange models that contribute to a faithful prediction of the ecosystem carbon balance. This work presents a number of studies that make use of the developed data-led modelling approach for the analysis and interpretation of net canopy CO₂ flux observations. Given the limited knowledge about the underlying real system, the evaluation of the derived models with synthetic canopy exchange data is introduced as a standard procedure before any real data are employed. The derived data-led models prove successful in several different applications. First, the data-based nature of the presented methods makes them particularly useful for replacing missing data in the observed time series. The resulting interpolated CO₂ flux series can then be analysed with dynamic modelling techniques, or aggregated to coarser temporal resolution for further use, e.g., in model evaluation exercises. However, the noise component in these observations interferes with deterministic flux integration, in particular when long time periods are considered. 
Therefore, a second study introduces a method, based on a semi-parametric stochastic model, to characterise the uncertainties in the flux observations. As a result, an (uncertain) estimate of the annual net carbon exchange of the observed ecosystem can be inferred directly from a statistically consistent integration of the noisy data. For the forest measurement sites analysed, the relative uncertainty of the annual sum did not exceed 11 percent, highlighting the value of the data. Based on the same models, a third study presents a disaggregation of the net CO₂ flux into carbon assimilation and respiration, which allows the annual ecosystem carbon uptake and release to be estimated. These two components can then be further analysed for their separate responses to environmental conditions. Finally, a fourth study demonstrates how the results of the data-led analyses can be turned into a simple parametric model that is able to predict the carbon exchange of forest ecosystems. Given the global network of measurements available, the derived model can now be tested for generality and transferability to other biomes. In summary, this work highlights in particular the potential of the presented data-led methodologies to identify and describe the dominant carbon exchange processes at the canopy level, contributing to a better understanding of ecosystem functioning.}, language = {en} } @misc{ShanGuanHubaceketal.2018, author = {Shan, Yuli and Guan, Dabo and Hubacek, Klaus and Zheng, Bo and Davis, Steven J. and Jia, Lichao and Liu, Jianghua and Liu, Zhu and Fromer, Neil and Mi, Zhifu and Meng, Jing and Deng, Xiangzheng and Li, Yuan and Lin, Jintai and Schroeder, Heike and Weisz, Helga and Schellnhuber, Hans Joachim}, title = {City-level climate change mitigation in China}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1096}, issn = {1866-8372}, doi = {10.25932/publishup-47154}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-471541}, pages = {18}, year = {2018}, abstract = {As national efforts to reduce CO2 emissions intensify, policy-makers need increasingly specific, subnational information about the sources of CO2 and the potential reductions and economic implications of different possible policies. This is particularly true in China, a large and economically diverse country that has rapidly industrialized and urbanized and that has pledged under the Paris Agreement that its emissions will peak by 2030. We present new, city-level estimates of CO2 emissions for 182 Chinese cities, decomposed into 17 different fossil fuels, 46 socioeconomic sectors, and 7 industrial processes. We find that more affluent cities have systematically lower emissions per unit of gross domestic product (GDP), supported by imports from less affluent, industrial cities located nearby. In turn, clusters of industrial cities are supported by nearby centers of coal or oil extraction. Whereas policies directly targeting manufacturing and electric power infrastructure would drastically undermine the GDP of industrial cities, consumption-based policies might allow emission reductions to be subsidized by those with greater ability to pay. In particular, sector-based analysis of each city suggests that technological improvements could be a practical and effective means of reducing emissions while maintaining growth and the current economic structure and energy system. 
We explore city-level emission reductions under three scenarios of technological progress to show that substantial reductions (up to 31\%) are possible by updating a disproportionately small fraction of existing infrastructure.}, language = {en} } @phdthesis{Vogel2013, author = {Vogel, Kristin}, title = {Applications of Bayesian networks in natural hazard assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69777}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Even though quite different in occurrence and consequences, from a modeling perspective many natural hazards share similar properties and challenges. Their complex nature and the lack of knowledge about their driving forces and potential effects make their analysis demanding: uncertainty about the modeling framework, inaccurate or incomplete event observations and the intrinsic randomness of the natural phenomenon add up to different interacting layers of uncertainty, which require careful handling. Nevertheless, deterministic approaches are still widely used in natural hazard assessments, carrying the risk of underestimating the hazard, with potentially disastrous effects. The all-round probabilistic framework of Bayesian networks constitutes an attractive alternative. In contrast to deterministic approaches, it treats response variables as well as explanatory variables as random variables, making no distinction between input and output variables. Using a graphical representation, Bayesian networks encode the dependency relations between the variables in a directed acyclic graph: variables are represented as nodes and (in-)dependencies between variables as (missing) edges between the nodes. The joint distribution of all variables can thus be described by decomposing it, according to the depicted independences, into a product of local conditional probability distributions, which are defined by the parameters of the Bayesian network. In the framework of this thesis the Bayesian network approach is applied to different natural hazard domains (i.e. seismic hazard, flood damage and landslide assessments). By learning the network structure and parameters from data, Bayesian networks reveal relevant dependency relations between the included variables and help to gain knowledge about the underlying processes. The problem of Bayesian network learning is cast in a Bayesian framework, considering the network structure and parameters as random variables themselves and searching for the most likely combination of both, which corresponds to the maximum a posteriori (MAP score) of their joint distribution given the observed data. Although well studied in theory, learning Bayesian networks from real-world data is usually not straightforward and requires the adaptation of existing algorithms. Typical problems are the handling of continuous variables, incomplete observations and the interaction of the two. Working with continuous distributions requires assumptions about the allowed families of distributions. To "let the data speak" and avoid wrong assumptions, continuous variables are instead discretized here, thus allowing for completely data-driven and distribution-free learning. An extension of the MAP score, which treats the discretization as a random variable as well, is developed for an automatic multivariate discretization that takes interactions between the variables into account. The discretization process is nested within the network learning and requires several iterations. 
Having to face incomplete observations on top of this may pose a considerable computational burden, and iterative procedures for missing-value estimation quickly become infeasible. A more efficient, albeit approximate, method is used instead, estimating the missing values based only on the observations of variables directly interacting with the missing variable. Moreover, natural hazard assessments often have a primary interest in a certain target variable. The discretization learned for this variable does not always have the resolution required for good prediction performance. Finer resolutions for (conditional) continuous distributions are achieved with continuous approximations subsequent to the Bayesian network learning, using kernel density estimates or mixtures of truncated exponential functions. All our procedures are completely data-driven. We thus avoid assumptions that require expert knowledge and instead provide domain-independent solutions that are applicable not only to other natural hazard assessments, but also to a variety of domains struggling with uncertainties.}, language = {en} }