@article{BoettcherThurnerHaefneretal.2023, author = {B{\"o}ttcher, Axel and Thurner, Veronika and H{\"a}fner, Tanja and Ottinger, Sarah}, title = {Erkenntnisse aus der Analyse von Studienverlaufsdaten als Grundlage f{\"u}r die Gestaltung von Beratungsangeboten}, series = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, journal = {Hochschuldidaktik Informatik HDI 2021 (Commentarii informaticae didacticae)}, number = {13}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, isbn = {978-3-86956-548-4}, issn = {1868-0844}, doi = {10.25932/publishup-61569}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-615693}, pages = {137 -- 156}, year = {2023}, abstract = {Viele Studierende stoßen im Rahmen ihres Informatikstudiums auf Probleme und ben{\"o}tigen individuell bedarfsgerechte Unterst{\"u}tzung, um beispielsweise trotz gewisser Startschwierigkeiten ihr Studium erfolgreich zu Ende zu f{\"u}hren. In die damit verbundene Lern- bzw. Studienberatung fließen Empfehlungen zur weiteren Studienverlaufsplanung ein. Anhand einer Datenanalyse {\"u}ber den Pr{\"u}fungsleistungsdaten der Studierenden {\"u}berpr{\"u}fen wir die hinter diesen Empfehlungen liegenden Hypothesen und leiten aus den dabei gewonnenen Erkenntnissen Konsequenzen f{\"u}r die Beratung ab. Insgesamt zeigt sich, dass sich nach den ersten Semestern ein mittlerer Bereich von Studierenden identifizieren l{\"a}sst, bei denen Studienabbruch und Studienerfolg etwa gleich wahrscheinlich sind. F{\"u}r diese Personengruppe ist Beratungsbedarf dringend gegeben. Gleichzeitig st{\"o}ßt die Datenanalyse auch an gewisse Grenzen, denn es zeigen sich insgesamt keine echt trennscharfen Muster, die fr{\"u}hzeitig im Studium eindeutig Erfolg oder Misserfolg prognostizieren. Dieses Ergebnis ist jedoch insofern erfreulich, als es bedeutet, dass jede:r Studierende:r auch nach einem suboptimalen Start ins Studium noch eine Chance auf einen Abschluss hat.}, language = {de} } @phdthesis{Duy2023, author = {Duy, Nguyen Le}, title = {Hydrological processes in the Vietnamese Mekong Delta}, doi = {10.25932/publishup-60260}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-602607}, school = {Universit{\"a}t Potsdam}, pages = {xxi, 153}, year = {2023}, abstract = {Understanding hydrological processes is of fundamental importance for the Vietnamese national food security and the livelihood of the population in the Vietnamese Mekong Delta (VMD). As a consequence of sparse data in this region, however, hydrologic processes, such as the controlling processes of precipitation, the interaction between surface and groundwater, and groundwater dynamics, have not been thoroughly studied. The lack of this knowledge may negatively impact the long-term strategic planning for sustainable groundwater resources management and may result in insufficient groundwater recharge and freshwater scarcity. It is essential to develop useful methods for a better understanding of hydrological processes in such data-sparse regions. The goal of this dissertation is to advance methodologies that can improve the understanding of fundamental hydrological processes in the VMD, based on the analyses of stable water isotopes and monitoring data. The thesis mainly focuses on the controlling processes of precipitation, the mechanism of surface-groundwater interaction, and the groundwater dynamics. These processes have not been fully addressed in the VMD so far. 
The thesis is based on statistical analyses of the isotopic data of the Global Network of Isotopes in Precipitation (GNIP), of meteorological and hydrological data from Vietnamese agencies, and of the stable water isotopes and monitoring data collected as part of this work. First, the controlling processes of precipitation were quantified by the combination of trajectory analysis, multi-factor linear regression, and relative importance analysis (hereafter, a model-based statistical approach). The validity of this approach is confirmed by similar, but mainly qualitative, results obtained in other studies. The total variation in precipitation isotopes (δ18O and δ2H) can be better explained by multiple linear regression (up to 80\%) than by single-factor linear regression (30\%). The relative importance analysis indicates that atmospheric moisture regimes, rather than local climatic conditions, control precipitation isotopes. The most crucial factor is the upstream rainfall along the trajectories of air mass movement. However, the influences of regional and local climatic factors vary in importance over the seasons. The developed model-based statistical approach is a robust tool for the interpretation of precipitation isotopes and could also be applied to understand the controlling processes of precipitation in other regions. Second, the concept of the two-component lumped-parameter model (LPM) in conjunction with stable water isotopes was applied to examine the surface-groundwater interaction in the VMD. A calibration framework was also set up to evaluate the behaviour, parameter identifiability, and uncertainties of two-component LPMs. The modelling results provided insights into the subsurface flow conditions, the recharge contributions, and the spatial variation of groundwater transit time. The subsurface flow conditions at the study site are best represented by the linear-piston flow distribution. The contributions of the recharge sources change with distance to the river. The mean transit time (mTT) of riverbank infiltration increases with the length of the horizontal flow path and the decreasing gradient between river and groundwater. River water infiltrates horizontally mainly via the highly permeable aquifer, resulting in short mTTs (<40 weeks) for locations close to the river (<200 m). The vertical infiltration from precipitation takes place primarily via a low-permeability overlying aquitard, resulting in considerably longer mTTs (>80 weeks). Notably, the transit time of precipitation infiltration is independent of the distance to the river. All these results are hydrologically plausible and could be quantified by the presented method for the first time. This study indicates that the highly complex mechanism of surface-groundwater interaction at riverbank infiltration systems can be conceptualized by exploiting two-component LPMs. It is illustrated that the model concept can be used as a tool to investigate the hydrological functioning of mixing processes and the flow paths of multiple water components in riverbank infiltration systems. Lastly, a suite of time series analysis approaches was applied to examine the groundwater dynamics in the VMD. The assessment focused on the time-variant trends of groundwater levels (GWLs), the groundwater memory effect (representing the time that an aquifer holds water), and the hydraulic response between surface water and multi-layer alluvial aquifers.
The analysis indicates that the aquifers act as low-pass filters that reduce the high-frequency signals in the GWL variations and limit the recharge to the deep groundwater. Groundwater abstraction exceeded groundwater recharge between 1997 and 2017, leading to a decline of groundwater levels (0.01-0.55 m/year) in all considered aquifers in the VMD. The memory effect varies according to the geographical location, being shorter in shallow aquifers and flood-prone areas and longer in deep aquifers and coastal regions. Groundwater depth, season, and location primarily control the variation of the response time between the river and the alluvial aquifers. These findings are important contributions to the hydrogeological literature on a little-known groundwater system in an alluvial setting. It is suggested that time series analysis can be used as an efficient tool to understand groundwater systems where resources are insufficient to develop a physically based groundwater model. This doctoral thesis demonstrates that important aspects of hydrological processes can be understood by statistical analysis of stable water isotope and monitoring data. The approaches developed in this thesis can be easily transferred to regions in similar tropical environments, particularly those in alluvial settings. The results of the thesis can be used as a baseline for future isotope-based studies and contribute to the hydrogeological literature of little-known groundwater systems in the VMD.}, language = {en} } @phdthesis{Antonelli2021, author = {Antonelli, Andrea}, title = {Accurate waveform models for gravitational-wave astrophysics: synergetic approaches from analytical relativity}, doi = {10.25932/publishup-57667}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-576671}, school = {Universit{\"a}t Potsdam}, pages = {XII, 259, LXXV}, year = {2021}, abstract = {Gravitational-wave (GW) astrophysics is a field in full blossom. Since the landmark detection of GWs from a binary black hole on September 14th 2015, fifty-two compact-object binaries have been reported by the LIGO-Virgo collaboration. Such events carry astrophysical and cosmological information, ranging from how black holes and neutron stars are formed and what neutron stars are composed of to how the Universe expands, and they allow testing general relativity in the highly dynamical strong-field regime. It is the goal of GW astrophysics to extract such information as accurately as possible. Yet, this is only possible if the tools and technology used to detect and analyze GWs are advanced enough. Waveform models are a key aspect of GW searches: they encapsulate our best predictions for the gravitational radiation under a certain set of parameters and need to be cross-correlated with data to extract GW signals. Waveforms must be very accurate to avoid missing important physics in the data, which might be the key to answering the fundamental questions of GW astrophysics. The continuous improvements of the current LIGO-Virgo detectors, the development of next-generation ground-based detectors such as the Einstein Telescope or the Cosmic Explorer, as well as the development of the Laser Interferometer Space Antenna (LISA), demand accurate waveform models.
While available models are sufficient to capture the low-spin, comparable-mass binaries routinely detected in LIGO-Virgo searches, those for sources from both current and next-generation ground-based and spaceborne detectors must be accurate enough to detect binaries with large spins and asymmetry in the masses. Moreover, the thousands of sources that we expect to detect with future detectors demand accurate waveforms to mitigate biases in the estimation of signals' parameters due to the presence of a foreground of many sources that overlap in the frequency band. This is recognized as one of the biggest challenges for the analysis of future detectors' data, since such biases might hinder the extraction of important astrophysical and cosmological information. In the first part of this thesis, we discuss how to improve waveform models for binaries with high spins and asymmetry in the masses. In the second, we present the first generic metrics that have been proposed to predict biases in the presence of a foreground of many overlapping signals in GW data. For the first task, we focus on several classes of analytical techniques. Current models for LIGO and Virgo studies are based on the post-Newtonian (PN; weak-field, small velocities) approximation, which is most natural for the bound orbits that are routinely detected in GW searches. However, two other approximations have risen to prominence: the post-Minkowskian (PM; weak-field only) approximation, natural for unbound (scattering) orbits, and the small-mass-ratio (SMR) approximation, typical of binaries in which the mass of one body is much bigger than that of the other. These are most appropriate for binaries with high asymmetry in the masses that challenge current waveform models. Moreover, they allow one to "cover" regions of the parameter space of coalescing binaries, thereby improving the interpolation (and faithfulness) of waveform models. The analytical approximations to the relativistic two-body problem can be synergistically included within the effective-one-body (EOB) formalism, in which the two-body information from each approximation can be recast into an effective problem of a mass orbiting a deformed Schwarzschild (or Kerr) black hole. The hope is that the resultant models can cover both the low-spin, comparable-mass binaries that are routinely detected and the ones that challenge current models. The first part of this thesis is dedicated to a study of how best to incorporate information from the PN, PM, SMR and EOB approaches in a synergistic way. We also discuss how accurate the resulting waveforms are when compared against numerical-relativity (NR) simulations. We begin by comparing PM models, whether alone or recast in the EOB framework, against PN models and NR simulations. We show that PM information has the potential to improve currently employed models for LIGO and Virgo, especially if recast within the EOB formalism. This is very important, as the PM approximation comes with a host of new computational techniques from particle physics to exploit. Then, we show how a combination of PM and SMR approximations can be employed to access previously unknown PN orders, deriving the third subleading PN dynamics for spin-orbit and (aligned) spin1-spin2 couplings. Such new results can then be included in the EOB models currently used in GW searches and parameter estimation studies, thereby improving them when the binaries have high spins.
Finally, we build an EOB model for quasi-circular nonspinning binaries based on the SMR approximation (rather than the PN one, as usually done). We show in detail how this is done without incurring the divergences that had affected previous attempts, and we compare the resultant model against NR simulations. We find that the SMR approximation is excellent for all (quasi-circular nonspinning) binaries, including both the equal-mass binaries that are routinely detected in GW searches and the ones with highly asymmetric masses. In particular, the SMR-based models compare much better against NR than the PN-based ones, suggesting that SMR-informed EOB models might be the key to modelling binaries in the future. In the second task of this thesis, we work within the linear-signal approximation and describe generic metrics to predict inference biases on the parameters of a GW source of interest in the presence of confusion noise from unfitted foregrounds and from residuals of other signals that have been incorrectly fitted out. We illustrate the formalism with simple (yet realistic) LISA sources, and demonstrate its validity against Monte Carlo simulations. The metrics we describe pave the way for more realistic studies to quantify the biases with future ground-based and spaceborne detectors.}, language = {en} } @phdthesis{Giuri2023, author = {Giuri, Chiara}, title = {VERITAS Dark Matter search in dwarf spheroidal galaxies: an extended analysis}, doi = {10.25932/publishup-57586}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-575869}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 227}, year = {2023}, abstract = {In the last century, several astronomical measurements have indicated that a significant percentage (about 22\%) of the total mass of the Universe, on galactic and extragalactic scales, is composed of a mysterious "dark" matter (DM). DM does not interact with the electromagnetic force; in other words, it does not reflect, absorb or emit light. It is possible that DM particles are weakly interacting massive particles (WIMPs) that can annihilate (or decay) into Standard Model (SM) particles, and modern very-high-energy (VHE; > 100 GeV) instruments such as imaging atmospheric Cherenkov telescopes (IACTs) can play an important role in constraining the main properties of such DM particles by detecting these products. Among the most favourable targets in which to look for a DM signal are dwarf spheroidal galaxies (dSphs), as they are expected to be highly DM-dominated objects with a clean, gas-free environment. Given the angular resolution of IACTs, some dSphs can be considered as extended sources, since this resolution is adequate to detect extended emission from dSphs. For this reason, we performed an extended-source analysis that takes into account both the energy and the angular extension of the observed events in the unbinned maximum likelihood estimation. The goal was to set more constraining upper limits on the velocity-averaged annihilation cross-section of WIMPs with VERITAS data. VERITAS is an array of four IACTs, able to detect γ-ray photons with energies between 100 GeV and 30 TeV. The results of this extended analysis were compared against the traditional spectral analysis. We found that a 2D analysis may lead to more constraining results, depending on the DM mass, channel, and source. Moreover, this thesis also presents the results of a multi-instrument project.
Its goal was to combine already published data on 20 dSphs from five different experiments, namely Fermi-LAT, MAGIC, H.E.S.S., VERITAS and HAWC, in order to set upper limits on the WIMP annihilation cross-section in the widest mass range ever reported.}, language = {en} } @article{VogelsangBorowskiBuschhueteretal.2019, author = {Vogelsang, Christoph and Borowski, Andreas and Buschh{\"u}ter, David and Enkrott, Patrick and Kempin, Maren and Kulgemeyer, Christoph and Reinhold, Peter and Riese, Josef and Schecker, Horst and Schr{\"o}der, Jan}, title = {Entwicklung von Professionswissen und Unterrichtsperformanz im Lehramtsstudium Physik}, series = {Zeitschrift f{\"u}r P{\"a}dagogik}, volume = {65}, journal = {Zeitschrift f{\"u}r P{\"a}dagogik}, number = {4}, publisher = {Beltz}, address = {Weinheim}, issn = {0044-3247}, pages = {473 -- 491}, year = {2019}, abstract = {Angehende Physiklehrkr{\"a}fte sollen im Rahmen ihres Studiums fachliches und fachdidaktisches Wissen erwerben, welches die Gestaltung lernf{\"o}rderlichen Unterrichts erm{\"o}glicht. Es ist allerdings empirisch nur wenig gekl{\"a}rt, wie sich dieses Wissen im Laufe des Studiums entwickelt und ob es zur Ausbildung von Handlungsf{\"a}higkeiten beitr{\"a}gt. Um derartige Wirkungsaussagen treffen zu k{\"o}nnen, m{\"u}ssen Instrumente entwickelt werden, die eine valide Testwertinterpretation zulassen. In diesem Beitrag werden auf Basis von im Projekt Profile-P+ entwickelten Instrumenten Validit{\"a}tsanalysen zur l{\"a}ngsschnittlichen Entwicklung des Professionswissens von Physiklehramtsstudierenden im Verlauf des Bachelorstudiums und ihrer F{\"a}higkeiten zur Planung und Reflexion von Physikunterricht sowie zum Erkl{\"a}ren von physikalischen Sachverhalten vor und nach dem Praxissemester dargestellt. Neben Wissenstests kamen standardisierte Performanztests zum Einsatz. Die vorliegenden Ergebnisse sprechen daf{\"u}r, dass die erhobenen Messwerte im Sinne von Wirkungsaussagen interpretiert werden k{\"o}nnen.}, language = {de} } @phdthesis{Amirkhanyan2019, author = {Amirkhanyan, Aragats}, title = {Methods and frameworks for GeoSpatioTemporal data analytics}, doi = {10.25932/publishup-44168}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441685}, school = {Universit{\"a}t Potsdam}, pages = {xxiv, 133}, year = {2019}, abstract = {In the era of social networks, the Internet of Things and location-based services, many online services produce a huge amount of data that carry valuable objective information, such as geographic coordinates and date and time. These characteristics (parameters), in combination with a textual parameter, pose the challenge of discovering geospatiotemporal knowledge. This challenge requires efficient methods for clustering and pattern mining in spatial, temporal and textual spaces. In this thesis, we address the challenge of providing methods and frameworks for geospatiotemporal data analytics. As an initial step, we address the challenges of geospatial data processing: data gathering, normalization, geolocation, and storage. That initial step is the basis for tackling the next challenge -- geospatial clustering. The first step of this challenge is to design a method for online clustering of georeferenced data. This algorithm can be used as a server-side clustering algorithm for online maps that visualize massive georeferenced data. As the second step, we develop an extension of this method that additionally considers the temporal aspect of the data.
To that end, we propose a density- and intensity-based geospatiotemporal clustering algorithm with a fixed distance and time radius. Each version of the clustering algorithm has its own use case, which we show in the thesis. In the next chapter of the thesis, we look at spatiotemporal analytics from the perspective of the sequential rule mining challenge. We design and implement a framework that transforms data into textual geospatiotemporal data - data that contain geographic coordinates, time and textual parameters. In this way, we address the challenge of applying pattern/rule mining algorithms in geospatiotemporal space. As an applicable use case study, we propose spatiotemporal crime analytics -- the discovery of spatiotemporal patterns of crimes in publicly available crime data. The second part of the thesis is dedicated to applications and use case studies. We design and implement an application that uses the proposed clustering algorithms to discover knowledge in data. Jointly with the application, we propose use case studies for the analysis of georeferenced data in terms of situational and public safety awareness.}, language = {en} } @phdthesis{Goswami2014, author = {Goswami, Bedartha}, title = {Uncertainties in climate data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-78312}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Scientific inquiry requires that we formulate not only what we know, but also what we do not know and by how much. In climate data analysis, this involves an accurate specification of measured quantities and a consequent analysis that consciously propagates the measurement errors at each step. The dissertation presents a thorough analytical method to quantify errors of measurement inherent in paleoclimate data. An additional focus lies on the uncertainties in assessing the coupling between different factors that influence the global mean temperature (GMT). Paleoclimate studies critically rely on `proxy variables' that record climatic signals in natural archives. However, such proxy records inherently involve uncertainties in determining the age of the signal. We present a generic Bayesian approach to analytically determine the proxy record along with its associated uncertainty, resulting in a time-ordered sequence of correlated probability distributions rather than a precise time series. We further develop a recurrence-based method to detect dynamical events from the proxy probability distributions. The methods are validated with synthetic examples and demonstrated with real-world proxy records. The proxy estimation step reveals the interrelations between proxy variability and uncertainty. The recurrence analysis of the East Asian Summer Monsoon during the last 9000 years confirms the well-known `dry' events at 8200 and 4400 BP, plus an additional significantly dry event at 6900 BP. We also analyze the network of dependencies surrounding GMT. We find an intricate, directed network with multiple links between the different factors at multiple time delays. We further uncover a significant feedback from the GMT to the El Ni{\~n}o Southern Oscillation at quasi-biennial timescales. The analysis highlights the need for a more nuanced formulation of influences between different climatic factors, as well as the limitations in trying to estimate such dependencies.}, language = {en} } @phdthesis{Balzer2014, author = {Balzer, Arnim}, title = {Crab flare observations with H.E.S.S.
phase II}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72545}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {The H.E.S.S. array is a third-generation Imaging Atmospheric Cherenkov Telescope (IACT) array. It is located in the Khomas Highland in Namibia and measures very high energy (VHE) gamma-rays. In Phase I, the array started data taking in 2004 with its four identical 13 m telescopes. Since then, H.E.S.S. has emerged as the most successful IACT experiment to date. Among the almost 150 sources of VHE gamma-ray radiation found so far, even the oldest detection, the Crab Nebula, keeps surprising the scientific community with unexplained phenomena such as the recently discovered very energetic flares of high-energy gamma-ray radiation. During its most recent flare, which was detected by the Fermi satellite in March 2013, the Crab Nebula was simultaneously observed with the H.E.S.S. array for six nights. The results of these observations will be discussed in detail in the course of this work. During the nights of the flare, the new 24 m × 32 m H.E.S.S. II telescope was still being commissioned, but participated in the data taking for one night. To be able to reconstruct and analyze the data of the H.E.S.S. Phase II array, the algorithms and software used by the H.E.S.S. Phase I array had to be adapted. The most prominent advanced shower reconstruction technique, the template-based model analysis developed by de Naurois and Rolland, compares real shower images taken by the Cherenkov telescope cameras with shower templates obtained using a semi-analytical model. To find the best-fitting image, and therefore the relevant parameters that best describe the air shower, a pixel-wise log-likelihood fit is performed. The adaptation of this advanced shower reconstruction technique to the heterogeneous H.E.S.S. Phase II array for stereo events (i.e. air showers seen by at least two telescopes of any kind), its performance using Monte Carlo simulations, as well as its application to real data will be described.}, language = {en} } @phdthesis{Holler2014, author = {Holler, Markus}, title = {Photon reconstruction for the H.E.S.S. 28 m telescope and analysis of Crab Nebula and galactic centre observations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-72099}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {In the presented thesis, the most advanced photon reconstruction technique of ground-based γ-ray astronomy is adapted to the H.E.S.S. 28 m telescope. The method is based on a semi-analytical model of electromagnetic particle showers in the atmosphere. The properties of cosmic γ-rays are reconstructed by comparing the camera image of the telescope with the Cherenkov emission that is expected from the shower model. To suppress the dominant background from charged cosmic rays, events are selected based on several criteria. The performance of the analysis is evaluated with simulated events. The method is then applied to two sources that are known to emit γ-rays. The first of these is the Crab Nebula, the standard candle of ground-based γ-ray astronomy. The results for this source confirm the expected performance of the reconstruction method, where the much lower energy threshold compared to H.E.S.S. I is of particular importance. A second analysis is performed on the region around the Galactic Centre.
The analysis results emphasise the capabilities of the new telescope to measure γ-rays in an energy range that is interesting for both theoretical and experimental astrophysics. The presented analysis features the lowest energy threshold that has ever been reached in ground-based γ-ray astronomy, opening a new window to the precise measurement of the physical properties of time-variable sources at energies of several tens of GeV.}, language = {en} } @phdthesis{Thomas2013, author = {Thomas, Bj{\"o}rn Daniel}, title = {Analysis and management of low flows in small catchments of Brandenburg, Germany}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-69247}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {Water management and environmental protection are vulnerable to extreme low flows during streamflow droughts. During the last decades, summer runoff and low flows have decreased in most rivers of Central Europe. Discharge projections agree that a future decrease in runoff is likely for catchments in Brandenburg, Germany. Depending on the first-order controls on low flows, different adaptation measures are expected to be appropriate. Small catchments were analyzed because they are expected to be more vulnerable to a changing climate than larger rivers. They are mainly headwater catchments with smaller groundwater storage. Local characteristics are more important at this scale and can increase vulnerability. This thesis jointly evaluates potential adaptation measures to sustain minimum runoff in small catchments of Brandenburg, Germany, and the similarities of these catchments regarding low flows. The following guiding questions are addressed: (i) Which first-order controls on low flows and related time scales exist? (ii) What are the differences between small catchments regarding low flow vulnerability? (iii) Which adaptation measures to sustain minimum runoff in small catchments of Brandenburg are appropriate considering regional low flow patterns? Potential adaptation measures to sustain minimum runoff during periods of low flows can be classified into three categories: (i) increasing groundwater recharge and subsequent baseflow by land use change, land management and artificial groundwater recharge, (ii) increasing water storage with regulated outflow by reservoirs, lakes and wetland water management, and (iii) considering regional low flow patterns during the planning of measures with multiple purposes (urban water management, waste water recycling and inter-basin water transfer). The question remained whether water management of areas with shallow groundwater tables can efficiently sustain minimum runoff. As an example, water management scenarios of a ditch-irrigated area were evaluated using the model Hydrus-2D. Increasing antecedent water levels and stopping ditch irrigation during periods of low flows increased fluxes from the pasture to the stream, but storage was depleted faster during the summer months due to higher evapotranspiration. Fluxes from this approx. 1 km long pasture with an area of approx. 13 ha ranged from 0.3 to 0.7 l/s depending on the scenario. This demonstrates that numerous such small decentralized measures are necessary to sustain minimum runoff in meso-scale catchments. Differences in the low flow risk of catchments and meteorological low flow predictors were analyzed. A principal component analysis was applied to the daily discharge of 37 catchments between 1991 and 2006. Flows decreased more in southeast Brandenburg, in line with the meteorological forcing.
Low flow risk was highest in a region east of Berlin because of the intersection of a more continental climate and the specific geohydrology. In these catchments, flows decreased faster during summer and the low flow period was prolonged. A non-linear support vector machine regression was applied to iteratively select meteorological predictors for the annual 30-day minimum runoff in 16 catchments between 1965 and 2006. The potential evapotranspiration sum of the previous 48 months was the most important predictor (r²=0.28). The potential evapotranspiration of the previous 3 months and the precipitation of the previous 3 months and of the last year increased model performance (r²=0.49, including all four predictors). Model performance was higher for catchments with low yield and more damped runoff. In catchments with high low flow risk, the explanatory power of long-term potential evapotranspiration was high. Catchments with a high low flow risk as well as catchments with a considerable decrease in flows in southeast Brandenburg have the highest demand for adaptation. Measures increasing groundwater recharge are to be preferred. Catchments with high low flow risk showed relatively deep and decreasing groundwater heads, allowing increased groundwater recharge in recharge areas at higher altitudes away from the streams. Low flows are expected to stay low or decrease even further, because long-term potential evapotranspiration was the most important low flow predictor and is projected to increase during climate change. Differences in low flow risk and runoff dynamics between catchments have to be considered in the management and planning of measures that do not solely serve to sustain minimum runoff.}, language = {en} }