@phdthesis{Schroeter2020, author = {Schr{\"o}ter, Kai}, title = {Improved flood risk assessment}, doi = {10.25932/publishup-48024}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-480240}, school = {Universit{\"a}t Potsdam}, pages = {408}, year = {2020}, abstract = {Rivers have always flooded their floodplains. Over 2.5 billion people worldwide have been affected by flooding in recent decades. The economic damage is also considerable, averaging 100 billion US dollars per year. There is no doubt that damage and other negative effects of floods can be avoided. However, this comes at a price, both financially and politically. Costs and benefits can be estimated through risk assessments. Questions about the location and frequency of floods, about the objects that could be affected and about their vulnerability are of importance for flood risk managers, insurance companies and politicians. Thus, variables and factors from the fields of hydrology and socio-economics play a role, with multi-layered connections between them. One example is dikes along a river, which on the one hand contain floods but, on the other hand, by narrowing the natural floodplains, accelerate the flood discharge and increase the danger of flooding for residents downstream. Such larger connections must be included in the assessment of flood risk. However, in current procedures this is accompanied by simplifying assumptions. Risk assessments are therefore fuzzy and associated with uncertainties. This thesis investigates the benefits and possibilities of new data sources for improving flood risk assessment. New methods and models are developed which take the mentioned interrelations better into account, quantify the existing uncertainties of the model results, and thus enable statements about the reliability of risk estimates. For this purpose, data on flood events from various sources are collected and evaluated. This includes precipitation and flow records at measuring stations as well as, for instance, images from social media, which can help to delineate the flooded areas and, with location information, estimate flood damage. Machine learning methods have been successfully used to recognize and understand correlations between floods and impacts from a wide range of data and to develop improved models. Risk models help to develop and evaluate strategies to reduce flood risk. These tools also provide advanced insights into the interplay of various factors and into the expected consequences of flooding. This work shows progress towards an improved assessment of flood risks by using diverse data from different sources with innovative methods as well as by the further development of models. Flood risk is variable due to economic and climatic changes and other drivers of risk. In order to keep the knowledge about flood risks up to date, robust, efficient and adaptable methods such as those proposed in this thesis are of increasing importance.}, language = {en} } @phdthesis{Pilz2020, author = {Pilz, Tobias}, title = {Pursuing the understanding of uncertainties in hydrological modelling}, doi = {10.25932/publishup-47664}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-476643}, school = {Universit{\"a}t Potsdam}, pages = {136}, year = {2020}, abstract = {Hydrological models are important tools for the simulation and quantification of the water cycle.
They therefore aid in the understanding of hydrological processes, the prediction of river discharge, the assessment of the impacts of land use and climate changes, and the management of water resources. However, uncertainties associated with hydrological modelling are still large. While significant research has been done on the quantification and reduction of uncertainties, there are still fields which have gained little attention so far, such as model structural uncertainties that are related to the process implementations in the models. This holds especially true for complex process-based models in contrast to simpler conceptual models. Consequently, the aim of this thesis is to improve the understanding of structural uncertainties, with a focus on process-based hydrological modelling, including methods for their quantification. To identify common deficits of frequently used hydrological models and develop further strategies on how to reduce them, a survey among modellers was conducted. It was found that there is a certain degree of subjectivity in the perception of modellers, for instance with respect to the distinction of hydrological models into conceptual groups. It was further found that there are ambiguities about how to apply a certain hydrological model, for instance how many parameters should be calibrated, together with a large diversity of opinion regarding the deficits of models. Nevertheless, evapotranspiration processes are often represented in a more physically based manner, while processes of groundwater and soil water movement are often simplified, which many survey participants saw as a drawback. A large flexibility, for instance with respect to different alternative process implementations or a small number of parameters that need to be calibrated, was generally seen as a strength of a model. Flexible and efficient software, which is straightforward to apply, has been increasingly acknowledged by the hydrological community. This work further elaborated on this topic in a twofold way. First, a software package for semi-automated landscape discretisation has been developed, which serves as a tool for model initialisation. This was complemented by a sensitivity analysis of important and commonly used discretisation parameters, of which the size of hydrological sub-catchments as well as the size and number of hydrologically uniform computational units appeared to be more influential than information considered for the characterisation of hillslope profiles. Second, a process-based hydrological model has been implemented into a flexible simulation environment with several alternative process representations and a number of numerical solvers. It turned out that, even though computation times were still long, enhanced computational capabilities nowadays, in combination with innovative methods for statistical analysis, allow for the exploration of structural uncertainties of even complex process-based models, which up to now has often been neglected by the modelling community. In a further study it could be shown that process-based models may even be employed as tools for seasonal operational forecasting. In contrast to statistical models, which are faster to initialise and to apply, process-based models produce more information in addition to the target variable, even at finer spatial and temporal scales, and provide more insights into process behaviour and catchment functioning. However, the process-based model was much more dependent on reliable rainfall forecasts.
It seems unlikely that there exists a single best formulation for hydrological processes, even for a specific catchment. This supports the use of flexible model environments with alternative process representations instead of a single model structure. However, correlation and compensation effects between process formulations, their parametrisation, and other aspects such as the numerical solver and model resolution may lead to surprising results and potentially misleading conclusions. In future studies, such effects should be more explicitly addressed and quantified. Moreover, model functioning appeared to be highly dependent on the meteorological conditions, and rainfall input was generally the most important source of uncertainty. It is still unclear how this could be addressed, especially in light of the aforementioned correlations. The use of innovative data products, e.g.\ remote sensing data in combination with station measurements, and of efficient processing methods for the improvement of rainfall input and the explicit consideration of associated uncertainties is advisable to gain further insights and make hydrological simulations and predictions more reliable.}, language = {en} } @phdthesis{Roezer2018, author = {R{\"o}zer, Viktor}, title = {Pluvial flood loss to private households}, doi = {10.25932/publishup-42991}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-429910}, school = {Universit{\"a}t Potsdam}, pages = {XXII, 109}, year = {2018}, abstract = {Today, more than half of the world's population lives in urban areas. With a high density of population and assets, urban areas are not only the economic, cultural and social hubs of every society, they are also highly susceptible to natural disasters. As a consequence of rising sea levels and an expected increase in extreme weather events caused by a changing climate, in combination with growing cities, flooding is an increasing threat to many urban agglomerations around the globe. To mitigate the destructive consequences of flooding, appropriate risk management and adaptation strategies are required. So far, flood risk management in urban areas has focused almost exclusively on managing river and coastal flooding. Often overlooked is the risk from small-scale rainfall-triggered flooding, where the rainfall intensity of rainstorms exceeds the capacity of urban drainage systems, leading to immediate flooding. Referred to as pluvial flooding, this flood type, exclusive to urban areas, has caused severe losses in cities around the world. Without further intervention, losses from pluvial flooding are expected to increase in many urban areas due to an increase in impervious surfaces, compounded by an aging drainage infrastructure and a projected increase in heavy precipitation events. While this requires the integration of pluvial flood risk into risk management plans, so far little is known about the adverse consequences of pluvial flooding due to a lack of both detailed data sets and studies on pluvial flood impacts. As a consequence, methods for reliably estimating pluvial flood losses, needed for pluvial flood risk assessment, are still missing. Therefore, this thesis investigates how pluvial flood losses to private households can be reliably estimated, based on an improved understanding of the drivers of pluvial flood loss. For this purpose, detailed data from pluvial flood-affected households were collected through structured telephone and web surveys following pluvial flood events in Germany and the Netherlands.
Pluvial flood losses to households are the result of complex interactions between impact characteristics, such as the water depth, and a household's resistance as determined by its risk awareness, preparedness, emergency response, building properties and other influencing factors. Both exploratory analysis and machine-learning approaches were used to analyze differences in resistance and impacts between households and their effects on the resulting losses. The comparison of case studies showed that the awareness of pluvial flooding among private households is quite low. Low awareness not only challenges the effective dissemination of early warnings, but was also found to influence the implementation of private precautionary measures. The latter were predominantly implemented by households with previous experience of pluvial flooding. Even cases where previous flood events affected a different part of the same city did not lead to an increase in the preparedness of the surveyed households, highlighting the need to account for small-scale variability in both impact and resistance parameters when assessing pluvial flood risk. While it was concluded that the combination of low awareness, ineffective early warning and the fact that only a minority of buildings were adapted to pluvial flooding impaired the coping capacities of private households, the often low water levels still enabled households to mitigate or even prevent losses through a timely and effective emergency response. These findings were confirmed by the detection of loss-influencing variables, showing that cases in which households were able to prevent any loss to the building structure are predominantly explained by resistance variables such as the household's risk awareness, while the degree of loss is mainly explained by impact variables. Based on the important loss-influencing variables detected, different flood loss models were developed. Similar to flood loss models for river floods, the empirical data from the preceding data collection were used to train flood loss models describing the relationship between impact and resistance parameters and the resulting loss to building structures. Different approaches were adapted from river flood loss models, using both models with the water depth as the only predictor for building structure loss and models incorporating additional variables from the preceding variable detection routine. The high predictive errors of all compared models showed that point predictions are not suitable for estimating losses on the building level, as they severely impair the reliability of the estimates. For that reason, a new probabilistic framework based on Bayesian inference was introduced that is able to provide predictive distributions instead of single loss estimates. These distributions not only give a range of probable losses, they also provide information on how likely a specific loss value is, representing the uncertainty in the loss estimate. Using probabilistic loss models, it was found that the certainty and reliability of a loss estimate on the building level is determined not only by the use of additional predictors, as shown in previous studies, but also by the choice of response distribution defining the shape of the predictive distribution.
Here, a mixture of a beta and a Bernoulli distribution, accounting for households that are able to prevent losses to their building's structure, was found to provide significantly more certain and reliable estimates than previous approaches using Gaussian or non-parametric response distributions. The successful model transfer and post-event application to estimate building structure loss in Houston, TX, caused by pluvial flooding during Hurricane Harvey confirmed previous findings and demonstrated the potential of the newly developed multi-variable beta model for future risk assessments. The highly detailed input data set constructed from openly available data sources, containing over 304,000 affected buildings in Harris County, further showed the potential of data-driven, building-level loss models for pluvial flood risk assessment. In conclusion, pluvial flood losses to private households are the result of complex interactions between impact and resistance variables, which should be represented in loss models. The local occurrence of pluvial floods requires loss estimates at high spatial resolutions, i.e. on the building level, where losses are variable and uncertainties are high. Therefore, probabilistic loss estimates describing the uncertainty of the estimate should be used instead of point predictions. While the performance of probabilistic models on the building level is mainly driven by the choice of response distribution, multi-variable models are recommended for two reasons: First, additional resistance variables improve the detection of cases in which households were able to prevent structural losses. Second, the added variability of additional predictors provides a better representation of the uncertainties when loss estimates from multiple buildings are aggregated. This leads to the conclusion that data-driven probabilistic loss models on the building level allow for a reliable loss estimation at an unprecedented level of detail, with a consistent quantification of uncertainties on all aggregation levels. This makes the presented approach suitable for a wide range of applications, from decision support in spatial planning to impact-based early warning systems.}, language = {en} } @phdthesis{Sieg2018, author = {Sieg, Tobias}, title = {Reliability of flood damage estimations across spatial scales}, doi = {10.25932/publishup-42616}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-426161}, school = {Universit{\"a}t Potsdam}, pages = {XIII, 115}, year = {2018}, abstract = {Extreme natural events are an integral part of nature on Earth. They only become hazards when society is exposed to them. Then, however, natural hazards can have devastating consequences for society. In particular, hydro-meteorological hazards such as river floods, heavy rainfall events, winter storms, hurricanes or tornadoes have a high damage potential and occur around the globe. With an ever warmer world, extreme weather events that can potentially trigger natural hazards are also becoming more likely. However, not only a changing environment contributes to an increasing risk from natural hazards, but also a changing society. Adequate risk management is therefore required to adapt society to these changes at every spatial level.
An essential component of this management is the estimation of the economic impacts of natural hazards. So far, however, reliable methods for estimating the impacts of hydro-meteorological hazards have been lacking. A main part of this work is therefore the development and application of a new method that improves the reliability of damage estimation. As an example, the method was successfully applied to estimate the economic impacts of a river flood, from individual companies up to the impacts on the entire economic system of Germany. Existing methods usually provide little information about the reliability of their estimates. Since this information facilitates decisions on adapting to the risk, the reliability of the damage estimates is represented with the new method. Reliability here refers not only to the damage estimate itself, but also to the assumptions made about the affected buildings. Following this principle, the reliability of assumptions about the future can also be represented, which is an essential aspect for projections. The representation of reliability and the successful application demonstrate the potential of the method for analyses of present and future hydro-meteorological hazards.}, language = {en} } @phdthesis{Vogel2013, author = {Vogel, Kristin}, title = {Applications of Bayesian networks in natural hazard assessments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69777}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Even though quite different in occurrence and consequences, from a modeling perspective many natural hazards share similar properties and challenges. Their complex nature as well as the lack of knowledge about their driving forces and potential effects make their analysis demanding: uncertainty about the modeling framework, inaccurate or incomplete event observations and the intrinsic randomness of the natural phenomenon add up to different interacting layers of uncertainty, which require careful handling. Nevertheless, deterministic approaches are still widely used in natural hazard assessments, holding the risk of underestimating the hazard, with disastrous effects. The all-round probabilistic framework of Bayesian networks constitutes an attractive alternative. In contrast to deterministic approaches, it treats response variables as well as explanatory variables as random variables, making no distinction between input and output variables. Using a graphical representation, Bayesian networks encode the dependency relations between the variables in a directed acyclic graph: variables are represented as nodes and (in-)dependencies between variables as (missing) edges between the nodes. The joint distribution of all variables can thus be described by decomposing it, according to the depicted independences, into a product of local conditional probability distributions, which are defined by the parameters of the Bayesian network. In the framework of this thesis the Bayesian network approach is applied to different natural hazard domains (i.e. seismic hazard, flood damage and landslide assessments). Learning the network structure and parameters from data, Bayesian networks reveal relevant dependency relations between the included variables and help to gain knowledge about the underlying processes.
The problem of Bayesian network learning is cast in a Bayesian framework, considering the network structure and parameters as random variables themselves and searching for the most likely combination of both, which corresponds to the maximum a posteriori (MAP) score of their joint distribution given the observed data. Although well studied in theory, the learning of Bayesian networks based on real-world data is usually not straightforward and requires an adaptation of existing algorithms. Typical problems that arise are the handling of continuous variables, incomplete observations and the interaction of both. Working with continuous distributions requires assumptions about the allowed families of distributions. To "let the data speak" and avoid wrong assumptions, continuous variables are instead discretized here, thus allowing for a completely data-driven and distribution-free learning. An extension of the MAP score, considering the discretization as a random variable as well, is developed for an automatic multivariate discretization that takes interactions between the variables into account. The discretization process is nested into the network learning and requires several iterations. When incomplete observations have to be handled on top of this, the procedure may pose a computational burden. Iterative procedures for missing value estimation quickly become infeasible. A more efficient albeit approximate method is used instead, estimating the missing values based only on the observations of variables directly interacting with the missing variable. Moreover, natural hazard assessments often have a primary interest in a certain target variable. The discretization learned for this variable does not always have the required resolution for a good prediction performance. Finer resolutions for (conditional) continuous distributions are achieved with continuous approximations subsequent to the Bayesian network learning, using kernel density estimations or mixtures of truncated exponential functions. All our procedures are completely data-driven. We thus avoid assumptions that require expert knowledge and instead provide domain-independent solutions that are applicable not only in other natural hazard assessments, but in a variety of domains struggling with uncertainties.}, language = {en} } @phdthesis{Stauch2006, author = {Stauch, Vanessa Juliane}, title = {Data-led methods for the analysis and interpretation of eddy covariance observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-12389}, school = {Universit{\"a}t Potsdam}, year = {2006}, abstract = {The terrestrial biosphere impacts considerably on the global carbon cycle. In particular, ecosystems help to offset anthropogenically induced fossil fuel emissions and hence decelerate the rise of the atmospheric CO₂ concentration. However, the future net sink strength of an ecosystem will heavily depend on the response of the individual processes to a changing climate. Understanding the makeup of these processes and their interaction with the environment is, therefore, of major importance for developing long-term climate mitigation strategies. Mathematical models are used to predict the fate of carbon in the soil-plant-atmosphere system under changing environmental conditions. However, the underlying processes giving rise to the net carbon balance of an ecosystem are complex and not entirely understood at the canopy level.
Therefore, carbon exchange models are characterised by considerable uncertainty, rendering model-based predictions of the future prone to error. Observations of the carbon exchange at the canopy scale can help in learning about the dominant processes and hence contribute to reducing the uncertainty associated with model-based predictions. For this reason, a global network of measurement sites has been established that provides long-term observations of the CO₂ exchange between a canopy and the atmosphere along with micrometeorological conditions. These time series, however, suffer from observation uncertainty that, if not characterised, limits their use in ecosystem studies. The general objective of this work is to develop a modelling methodology that synthesises physical process understanding with the information content in canopy-scale data as an attempt to overcome the limitations in both carbon exchange models and observations. Similar hybrid modelling approaches have been successfully applied for signal extraction from noisy time series in environmental engineering. Here, simple process descriptions are used to identify relationships between the carbon exchange and environmental drivers from noisy data. The functional form of these relationships is not prescribed a priori but rather determined directly from the data, ensuring that the model complexity is commensurate with the observations. Therefore, this data-led analysis results in the identification of the processes dominating carbon exchange at the ecosystem scale as reflected in the data. The description of these processes may then lead to robust carbon exchange models that contribute to a faithful prediction of the ecosystem carbon balance. This work presents a number of studies that make use of the developed data-led modelling approach for the analysis and interpretation of net canopy CO₂ flux observations. Given the limited knowledge about the underlying real system, the evaluation of the derived models with synthetic canopy exchange data is introduced as a standard procedure prior to any application to real data. The derived data-led models prove successful in several different applications. First, the data-based nature of the presented methods makes them particularly useful for replacing missing data in the observed time series. The resulting interpolated CO₂ flux observation series can then be analysed with dynamic modelling techniques, or integrated to series of coarser temporal resolution for further use, e.g., in model evaluation exercises. However, the noise component in these observations interferes with deterministic flux integration, in particular when long time periods are considered. Therefore, a method to characterise the uncertainties in the flux observations that uses a semi-parametric stochastic model is introduced in a second study. As a result, an (uncertain) estimate of the annual net carbon exchange of the observed ecosystem can be inferred directly from a statistically consistent integration of the noisy data. For the forest measurement sites analysed, the relative uncertainty for the annual sum did not exceed 11 percent, highlighting the value of the data. Based on the same models, a disaggregation of the net CO₂ flux into carbon assimilation and respiration is presented in a third study that allows for the estimation of annual ecosystem carbon uptake and release. These two components can then be further analysed for their separate responses to environmental conditions.
Finally, a fourth study demonstrates how the results from data-led analyses can be turned into a simple parametric model that is able to predict the carbon exchange of forest ecosystems. Given the global network of measurements available, the derived model can now be tested for generality and transferability to other biomes. In summary, this work particularly highlights the potential of the presented data-led methodologies to identify and describe dominant carbon exchange processes at the canopy level, contributing to a better understanding of ecosystem functioning.}, language = {en} } @phdthesis{Guentner2002, author = {G{\"u}ntner, Andreas}, title = {Large-scale hydrological modelling in the semi-arid north-east of Brazil}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000511}, school = {Universit{\"a}t Potsdam}, year = {2002}, abstract = {Semi-arid areas are, due to their climatic setting, characterized by scarce water resources. An increasing water demand as a consequence of population growth and economic development, as well as a decreasing water availability in the course of possible climate change, may further aggravate the water scarcity that often already exists under present-day conditions in these areas. Understanding the mechanisms and feedbacks of complex natural and human systems, together with the quantitative assessment of future changes in the volume, timing and quality of water resources, is a prerequisite for the development of sustainable water management measures to enhance the adaptive capacity of these regions. For this task, dynamic integrated models, containing a hydrological model as one component, are indispensable tools. The main objective of this study is to develop a hydrological model for the quantification of water availability in view of environmental change over a large geographic domain of semi-arid environments. The study area is the Federal State of Cear{\´a} (150 000 km2) in the semi-arid north-east of Brazil. Mean annual precipitation in this area is 850 mm, falling in a rainy season with a duration of about five months. As the area is mainly characterized by crystalline bedrock and shallow soils, surface water provides the largest part of the water supply. The area has recurrently been affected by droughts, which caused serious economic losses and social impacts such as migration from the rural regions. The hydrological model Wasa (Model of Water Availability in Semi-Arid Environments) developed in this study is a deterministic, spatially distributed model composed of conceptual, process-based approaches. Water availability (river discharge, storage volumes in reservoirs, soil moisture) is determined with daily resolution. Sub-basins, grid cells or administrative units (municipalities) can be chosen as spatial target units. The administrative units enable the coupling of Wasa in the framework of an integrated model which contains modules that do not work on the basis of natural spatial units. The target units mentioned above are disaggregated in Wasa into smaller modelling units within a new multi-scale, hierarchical approach. The landscape units defined in this scheme capture in particular the effect of the structured variability of terrain, soil and vegetation characteristics along toposequences on soil moisture and runoff generation. Lateral hydrological processes at the hillslope scale, such as the reinfiltration of surface runoff, which are of particular importance in semi-arid environments, can thus also be represented within the large-scale model in a simplified form.
Depending on the resolution of available data, small-scale variability is not represented explicitly with geographic reference in Wasa, but by the distribution of sub-scale units and by statistical transition frequencies for lateral fluxes between these units. Further model components of Wasa which respect specific features of semi-arid hydrology are: (1) A two-layer model for evapotranspiration comprises energy transfer at the soil surface (including soil evaporation), which is of importance in view of the mainly sparse vegetation cover. Additionally, vegetation parameters are differentiated in space and time depending on the occurrence of the rainy season. (2) The infiltration module represents in particular infiltration-excess surface runoff as the dominant runoff component. (3) For the aggregate description of the water balance of reservoirs that cannot be represented explicitly in the model, a storage approach respecting different reservoir size classes and their interaction via the river network is applied. (4) A model for the quantification of water withdrawal by water use in different sectors is coupled to Wasa. (5) A cascade model for the temporal disaggregation of precipitation time series, adapted to the specific characteristics of tropical convective rainfall, is applied for generating rainfall time series of higher temporal resolution. All model parameters of Wasa can be derived from physiographic information of the study area. Thus, model calibration is, in principle, not required. Model applications of Wasa for historical time series generally result in good model performance when comparing the simulation results of river discharge and reservoir storage volumes with observed data for river basins of various sizes. The mean water balance as well as the high interannual and intra-annual variability is reasonably represented by the model. Limitations of the modelling concept are most markedly seen for sub-basins with a runoff component from deep groundwater bodies, whose dynamics cannot be satisfactorily represented without calibration. Further results of model applications are: (1) Lateral processes of redistribution of runoff and soil moisture at the hillslope scale, in particular the reinfiltration of surface runoff, lead to markedly smaller discharge volumes at the basin scale than the simple sum of runoff of the individual sub-areas. Thus, these processes should also be captured in large-scale models. The different relevance of these processes under different conditions is demonstrated by a larger percentage decrease of discharge volumes in dry as compared to wet years. (2) Precipitation characteristics have a major impact on the hydrological response of semi-arid environments. In particular, underestimated rainfall intensities in the rainfall input, due to the coarse temporal resolution of the model and due to interpolation effects, and, consequently, underestimated runoff volumes have to be compensated for in the model. A scaling factor in the infiltration module or the use of disaggregated hourly rainfall data shows good results in this respect. The simulation results of Wasa are characterized by large uncertainties. These are, on the one hand, due to uncertainties in the ability of the model structure to adequately represent the relevant hydrological processes. On the other hand, they are due to uncertainties of input data and parameters, particularly in view of the low data availability.
Of major importance are the following: (1) The uncertainty of rainfall data with regard to their spatial and temporal pattern has, due to the strongly non-linear hydrological response, a large impact on the simulation results. (2) The uncertainty of soil parameters is in general of larger importance for model uncertainty than the uncertainty of vegetation or topographic parameters. (3) The effect of the uncertainty of individual model components or parameters is usually different for years with rainfall volumes above or below the average, because individual hydrological processes are of different relevance in the two cases. Thus, the uncertainty of individual model components or parameters is of different importance for the uncertainty of scenario simulations with increasing or decreasing precipitation trends. (4) The most important factor of uncertainty for scenarios of water availability in the study area is the uncertainty in the results of the global climate models on which the regional climate scenarios are based. Given the available data, both a marked increase and a decrease in precipitation can be assumed. Results of model simulations for climate scenarios until the year 2050 show that a possible future change in precipitation volumes causes a percentage change in runoff volumes that is larger by a factor of two to three. In the case of a decreasing precipitation trend, the efficiency of new reservoirs for securing water availability in the study area tends to decrease because of the interaction of the large number of reservoirs in retaining the overall decreasing runoff volumes.}, subject = {Cear{\´a} / Semiarides Gebiet / Wasserreserve / Hydrologie / Mathematisches Modell}, language = {en} }