@phdthesis{Begu2007, author = {Begu, Enkela}, title = {Elections in a spatial context : a case study of Albanian parliamentary elections, 1991-2005}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-15923}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {Exploring election features from a geographical perspective is the focus of this study. Its primary objective is to develop a scientific approach based on geoinformation technology (GIT) that promotes deeper understanding of how geographical settings affect the spatial and temporal variations of voting behaviour and election outcomes. For this purpose, the five parliamentary elections (1991-2005) following the political turnaround in 1990 in the South East European reform country Albania have been selected as a case study. Elections, like other social phenomena that do not develop uniformly over a territory, inherit a spatial dimension. Despite the fact that elections have been researched by various scientific disciplines ranging from political science to geography, studies that incorporate their spatial dimension are still limited in number and approaches. Consequently, the methodologies needed to generate an integrated knowledge on many facets that constitute election features are lacking. This study addresses characteristics and interactions of the essential elements involved in an election process. Thus, the baseline of the approach presented here is the exploration of relations between three entities: electorate (political and sociodemographic features), election process (electoral system and code) and place (environment where voters reside). To express this interaction the concept of electoral pattern is introduced. 
Electoral patterns are defined by the study as the final view of election results, chiefly in tabular and/or map form, generated by the complex interaction of social, economic, juridical, and spatial features of the electorate, which has occurred at a specific time and in a particular geographical location. GIT methods of geoanalysis and geovisualization are used to investigate the characteristics of electoral patterns in their spatial and temporal distribution. Aggregate-level data modelled in map form were used to analyse and visualize the spatial distribution of election patterns components and relations. The spatial dimension of the study is addressed in the following three main relations: One, the relation between place and electorate and its expression through the social, demographic and economic features of the electorate resulting in the profile of the electorate's context; second, the electorate-election interaction which forms the baseline to explore the perspective of local contextual effects in voting behaviour and election results; third, the relation between geographical location and election outcomes reflecting the implication of determining constituency boundaries on election results. To address the above relations, three types of variables: geo, independent and dependent, have been elaborated and two models have been created. The Data Model, developed in a GIS environment, facilitates structuring of election data in order to perform spatial analysis. The peculiarity of electoral patterns - a multidimensional array that contains information on three variables, stored in data layers of dissimilar spatial units of reference and scales of value measurement - prohibit spatial analysis based on the original source data. To perform a joint spatial analysis it is therefore mandatory to restructure the spatial units of reference while preserving their semantic content. 
In this operation, all relevant electoral as well as socio-demographic data referenced to different administrative spatial entities are re-referenced to uniform grid cells as virtual spatial units of reference. Depending on the scale of data acquisition and map presentation, a cell width of 0.5 km has been determined. The resulting fine grid forms the basis of subsequent data analyses and correlations. Conversion of the original vector data layers into target raster layers allows for unification of spatial units, at the same time retaining the existing level of detail of the data (variables, uniform distribution over space). This in turn facilitates the integration of the variables studied and the performance of GIS-based spatial analysis. In addition, conversion to raster format makes it possible to assign new values to the original data, which are based on a common scale eliminating existing differences in scale of measurement. Raster format operations of the type described are well-established data analysis techniques in GIT, yet they have rarely been employed to process and analyse electoral data. The Geovisualization Model, developed in a cartographic environment, complements the Data Model. As an analog graphic model it facilitates efficient communication and exploration of geographical information through cartographic visualization. Based on this model, 52 choropleth maps have been generated. They represent the outcome of the GIS-based electoral data analysis. The analog map form allows for in-depth visual analysis and interpretation of the distribution and correlation of the electoral data studied. 
For researchers, decision makers and a wider public the maps provide easy-to-access information on and promote easy-to-understand insight into the spatial dimension, regional variation and resulting structures of the electoral patterns defined.}, language = {en} } @phdthesis{Barbosa2020, author = {Barbosa, Lu{\'\i}s Romero}, title = {Groundwater recharge in tropical wet regions via GIS-based and cosmic-ray neutron sensing}, doi = {10.25932/publishup-46064}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-460641}, school = {Universit{\"a}t Potsdam}, pages = {XXVI, 175}, year = {2020}, abstract = {Studies on the unsustainable use of groundwater resources are still considered incipient since it is frequently a poorly understood and managed, devalued and inadequately protected natural resource. Groundwater Recharge (GWR) is one of the most challenging elements to estimate since it can rarely be measured directly and cannot easily be derived from existing data. To overcome these limitations, many hydro(geo)logists have combined different approaches to estimate large-scale GWR, namely: remote sensing products, such as IMERG product; Water Budget Equation, also in combination with hydrological models, and; Geographic Information System (GIS), using estimation formulas. For intermediary-scale GWR estimation, there exist: Non-invasive Cosmic-Ray Neutron Sensing (CRNS); wireless networks from local soil probes; and soil hydrological models, such as HYDRUS. Accordingly, this PhD thesis aims, on the one hand, to demonstrate a GIS-based model coupling for estimating the GWR distribution on a large scale in tropical wet basins. On the other hand, it aims to use the time series from CRNS and invasive soil moisture probes to inversely calibrate the soil hydraulic properties, and based on this, estimating the intermediary-scale GWR using a soil hydrological model. 
For such purpose, two tropical wet basins located in a complex sedimentary aquifer in the coastal Northeast region of Brazil were selected. These are the Jo{\~a}o Pessoa Case Study Area and the Guara{\'\i}ra Experimental Basin. Several satellite products in the first area were used as input to the GIS-based water budget equation model for estimating the water balance components and GWR in 2016 and 2017. In addition, the point-scale measurement and CRNS data were used in the second area to determine the soil hydraulic properties, and to estimate the GWR in the 2017-2018 and 2018-2019 hydrological years. The resulting values of GWR on large- and intermediary-scale were then compared and validated by the estimates obtained by groundwater table fluctuations. The GWR rates for IMERG- and rain-gauge-based scenarios showed similar coefficients between 68\% and 89\%, similar mean errors between 30\% and 34\%, and slightly-different bias between -13\% and 11\%. The results of GWR rates for soil probes and CRNS soil moisture scenarios ranged from -5.87 to -61.81 cm yr-1, which corresponds to 5\% and 38\% of the precipitation. The calculations of the mean GWR rates on large-scale, based on remote sensing data, and on intermediary-scale, based on CRNS data, held similar results for the Podzol soil type, namely 17.87\% and 17\% of the precipitation. It is then concluded that the proposed methodologies allowed for estimating realistically the GWR over the study areas, which can be a ground-breaking step towards improving the water management and decision-making in the Northeast of Brazil.}, language = {en} } @phdthesis{Bamberg2014, author = {Bamberg, Marlene}, title = {Planetary mapping tools applied to floor-fractured craters on Mars}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72104}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Planetary research is often user-based and requires considerable skill, time, and effort. 
Unfortunately, self-defined boundary conditions, definitions, and rules are often not documented or not easy to comprehend due to the complexity of research. This makes a comparison to other studies, or an extension of the already existing research, complicated. Comparisons are often distorted, because results rely on different, not well defined, or even unknown boundary conditions. The purpose of this research is to develop a standardized analysis method for planetary surfaces, which is adaptable to several research topics. The method provides a consistent quality of results. This also includes achieving reliable and comparable results and reducing the time and effort of conducting such studies. A standardized analysis method is provided by automated analysis tools that focus on statistical parameters. Specific key parameters and boundary conditions are defined for the tool application. The analysis relies on a database in which all key parameters are stored. These databases can be easily updated and adapted to various research questions. This increases the flexibility, reproducibility, and comparability of the research. However, the quality of the database and reliability of definitions directly influence the results. To ensure a high quality of results, the rules and definitions need to be well defined and based on previously conducted case studies. The tools then produce parameters, which are obtained by defined geostatistical techniques (measurements, calculations, classifications). The idea of an automated statistical analysis is tested to proof benefits but also potential problems of this method. In this study, I adapt automated tools for floor-fractured craters (FFCs) on Mars. These impact craters show a variety of surface features, occurring in different Martian environments, and having different fracturing origins. They provide a complex morphological and geological field of application. 
433 FFCs are classified by the analysis tools due to their fracturing process. Spatial data, environmental context, and crater interior data are analyzed to distinguish between the processes involved in floor fracturing. Related geologic processes, such as glacial and fluvial activity, are too similar to be separately classified by the automated tools. Glacial and fluvial fracturing processes are merged together for the classification. The automated tools provide probability values for each origin model. To guarantee the quality and reliability of the results, classification tools need to achieve an origin probability above 50 \%. This analysis method shows that 15 \% of the FFCs are fractured by intrusive volcanism, 20 \% by tectonic activity, and 43 \% by water \& ice related processes. In total, 75 \% of the FFCs are classified to an origin type. This can be explained by a combination of origin models, superposition or erosion of key parameters, or an unknown fracturing model. Those features have to be manually analyzed in detail. Another possibility would be the improvement of key parameters and rules for the classification. This research shows that it is possible to conduct an automated statistical analysis of morphologic and geologic features based on analysis tools. 
Analysis tools provide additional information to the user and are therefore considered assistance systems.}, language = {en} } @phdthesis{Backmann2017, author = {Backmann, Pia}, title = {Individual- and trait-based modelling of plant communities and their herbivores}, school = {Universit{\"a}t Potsdam}, pages = {223}, year = {2017}, language = {en} } @phdthesis{Ayzel2021, author = {Ayzel, Georgy}, title = {Advancing radar-based precipitation nowcasting}, doi = {10.25932/publishup-50426}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-504267}, school = {Universit{\"a}t Potsdam}, pages = {xx, 68}, year = {2021}, abstract = {Precipitation forecasting has an important place in everyday life - during the day we may have tens of small talks discussing the likelihood that it will rain this evening or weekend. Should you take an umbrella for a walk? Or should you invite your friends for a barbecue? It will certainly depend on what your weather application shows. While for years people were guided by the precipitation forecasts issued for a particular region or city several times a day, the widespread availability of weather radars allowed us to obtain forecasts at much higher spatiotemporal resolution of minutes in time and hundreds of meters in space. Hence, radar-based precipitation nowcasting, that is, very-short-range forecasting (typically up to 1-3 h), has become an essential technique, also in various professional application contexts, e.g., early warning, sewage control, or agriculture. There are two major components comprising a system for precipitation nowcasting: radar-based precipitation estimates, and models to extrapolate that precipitation to the imminent future. 
While acknowledging the fundamental importance of radar-based precipitation retrieval for precipitation nowcasts, this thesis focuses only on the model development: the establishment of open and competitive benchmark models, the investigation of the potential of deep learning, and the development of procedures for nowcast errors diagnosis and isolation that can guide model development. The present landscape of computational models for precipitation nowcasting still struggles with the availability of open software implementations that could serve as benchmarks for measuring progress. Focusing on this gap, we have developed and extensively benchmarked a stack of models based on different optical flow algorithms for the tracking step and a set of parsimonious extrapolation procedures based on image warping and advection. We demonstrate that these models provide skillful predictions comparable with or even superior to state-of-the-art operational software. We distribute the corresponding set of models as a software library, rainymotion, which is written in the Python programming language and openly available at GitHub (https://github.com/hydrogo/rainymotion). That way, the library acts as a tool for providing fast, open, and transparent solutions that could serve as a benchmark for further model development and hypothesis testing. One of the promising directions for model development is to challenge the potential of deep learning - a subfield of machine learning that refers to artificial neural networks with deep architectures, which may consist of many computational layers. Deep learning showed promising results in many fields of computer science, such as image and speech recognition, or natural language processing, where it started to dramatically outperform reference methods. The high benefit of using "big data" for training is among the main reasons for that. 
Hence, the emerging interest in deep learning in atmospheric sciences is also caused and concerted with the increasing availability of data - both observational and model-based. The large archives of weather radar data provide a solid basis for investigation of deep learning potential in precipitation nowcasting: one year of national 5-min composites for Germany comprises around 85 billion data points. To this aim, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. RainNet was trained to predict continuous precipitation intensities at a lead time of 5 min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900 km x 900 km and has a resolution of 1 km in space and 5 min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In these experiments, RainNet was applied recursively in order to achieve lead times of up to 1 h. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the previously developed rainymotion library. RainNet significantly outperformed the benchmark models at all lead times up to 60 min for the routine verification metrics mean absolute error (MAE) and critical success index (CSI) at intensity thresholds of 0.125, 1, and 5 mm/h. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15 mm/h). The limited ability of RainNet to predict high rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5 min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16 km and below. 
Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5 min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5 min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times. Hence, we propose several options to address this issue in prospective research on model development for precipitation nowcasting, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance. The model development together with the verification experiments for both conventional and deep learning model predictions also revealed the need to better understand the source of forecast errors. Understanding the dominant sources of error in specific situations should help in guiding further model improvement. The total error of a precipitation nowcast consists of an error in the predicted location of a precipitation feature and an error in the change of precipitation intensity over lead time. So far, verification measures did not allow to isolate the location error, making it difficult to specifically improve nowcast models with regard to location prediction. To fill this gap, we introduced a framework to directly quantify the location error. To that end, we detect and track scale-invariant precipitation features (corners) in radar images. We then consider these observed tracks as the true reference in order to evaluate the performance (or, inversely, the error) of any model that aims to predict the future location of a precipitation feature. 
Hence, the location error of a forecast at any lead time ahead of the forecast time corresponds to the Euclidean distance between the observed and the predicted feature location at the corresponding lead time. Based on this framework, we carried out a benchmarking case study using one year worth of weather radar composites of the DWD. We evaluated the performance of four extrapolation models, two of which are based on the linear extrapolation of corner motion; and the remaining two are based on the Dense Inverse Search (DIS) method: motion vectors obtained from DIS are used to predict feature locations by linear and Semi-Lagrangian extrapolation. For all competing models, the mean location error exceeds a distance of 5 km after 60 min, and 10 km after 110 min. At least 25\% of all forecasts exceed an error of 5 km after 50 min, and of 10 km after 90 min. Even for the best models in our experiment, at least 5 percent of the forecasts will have a location error of more than 10 km after 45 min. When we relate such errors to application scenarios that are typically suggested for precipitation nowcasting, e.g., early warning, it becomes obvious that location errors matter: the order of magnitude of these errors is about the same as the typical extent of a convective cell. Hence, the uncertainty of precipitation nowcasts at such length scales - just as a result of locational errors - can be substantial already at lead times of less than 1 h. Being able to quantify the location error should hence guide any model development that is targeted towards its minimization. To that aim, we also consider the high potential of using deep learning architectures specific to the assimilation of sequential (track) data. Last but not least, the thesis demonstrates the benefits of a general movement towards open science for model development in the field of precipitation nowcasting. 
All the presented models and frameworks are distributed as open repositories, thus enhancing transparency and reproducibility of the methodological approach. Furthermore, they are readily available to be used for further research studies, as well as for practical applications.}, language = {en} } @phdthesis{Arodudu2017, author = {Arodudu, Oludunsin Tunrayo}, title = {Sustainability assessment of agro-bioenergy systems using energy efficiency indicators}, school = {Universit{\"a}t Potsdam}, year = {2017}, abstract = {The sustainability of agro-bioenergy systems is dependent on many factors, some local or regional in implementation, some others global in nature. This study assessed the effects of often ignored local and regional factors (e.g. alternative agronomic factor options, alternative agricultural production systems, alternative biomass flows, alternative conversion technologies etc.). The results from this study suggest that the key to enhancing the energy efficiency (and by extension the sustainability) of agro-bioenergy systems is paying attention to local and regional factors such as biomass conversion technology, alternative agronomic factor options, alternative agricultural production systems and available biomass flows.}, language = {en} }