@article{WessigGerngrossPapeetal.2014, author = {Wessig, Pablo and Gerngroß, Maik and Pape, Simon and Bruhns, Philipp and Weber, Jens}, title = {Novel porous materials based on oligospiroketals (OSK)}, series = {RSC Advances : an international journal to further the chemical sciences}, volume = {2014}, journal = {RSC Advances : an international journal to further the chemical sciences}, number = {4}, issn = {2046-2069}, doi = {10.1039/c4ra04437a}, pages = {31123 -- 31129}, year = {2014}, abstract = {New porous materials based on covalently connected monomers are presented. The key step of the synthesis is an acetalisation reaction. In previous years we used acetalisation reactions extensively to build up various molecular rods. Based on this approach, investigations towards porous polymeric materials were conducted by us. Here we wish to present the results of these studies in the synthesis of 1D polyacetals and porous 3D polyacetals. By scrambling experiments with 1D acetals we could prove that exchange reactions occur between different building blocks (evidenced by MALDI-TOF mass spectrometry). Based on these results we synthesized porous 3D polyacetals under the same mild conditions.}, language = {en} } @misc{WessigGerngrossPapeetal.2014, author = {Wessig, Pablo and Gerngroß, Maik and Pape, Simon and Bruhns, Philipp and Weber, Jens}, title = {Novel porous materials based on oligospiroketals (OSK)}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-74466}, pages = {31123 -- 31129}, year = {2014}, abstract = {New porous materials based on covalently connected monomers are presented. The key step of the synthesis is an acetalisation reaction. In previous years we used acetalisation reactions extensively to build up various molecular rods. Based on this approach, investigations towards porous polymeric materials were conducted by us. Here we wish to present the results of these studies in the synthesis of 1D polyacetals and porous 3D polyacetals. By scrambling experiments with 1D acetals we could prove that exchange reactions occur between different building blocks (evidenced by MALDI-TOF mass spectrometry). Based on these results we synthesized porous 3D polyacetals under the same mild conditions.}, language = {en} } @misc{TrietDungMerzetal.2018, author = {Triet, Nguyen Van Khanh and Dung, Nguyen Viet and Merz, Bruno and Apel, Heiko}, title = {Towards risk-based flood management in highly productive paddy rice cultivation}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {931}, issn = {1866-8372}, doi = {10.25932/publishup-44603}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-446032}, pages = {2859 -- 2876}, year = {2018}, abstract = {Flooding is an imminent natural hazard threatening most river deltas, e.g. the Mekong Delta. An appropriate flood management is thus required for a sustainable development of the often densely populated regions. Recently, the traditional event-based hazard control shifted towards a risk management approach in many regions, driven by intensive research leading to new legal regulation on flood management. However, a large-scale flood risk assessment does not exist for the Mekong Delta. Particularly, flood risk to paddy rice cultivation, the most important economic activity in the delta, has not been performed yet. 
Therefore, the present study was developed to provide the very first insight into delta-scale flood damages and risks to rice cultivation. The flood hazard was quantified by probabilistic flood hazard maps of the whole delta using bivariate extreme value statistics, synthetic flood hydrographs, and a large-scale hydraulic model. The flood risk to paddy rice was then quantified considering cropping calendars, rice phenology, and harvest times based on a time series of enhanced vegetation index (EVI) derived from MODIS satellite data, and a published rice flood damage function. The proposed concept provided flood risk maps to paddy rice for the Mekong Delta in terms of expected annual damage. The presented concept can be used as a blueprint for regions facing similar problems due to its generic approach. Furthermore, the changes in flood risk to paddy rice caused by changes in land use currently under discussion in the Mekong Delta were estimated. Two land-use scenarios either intensifying or reducing rice cropping were considered, and the changes in risk were presented in spatially explicit flood risk maps. The basic risk maps could serve as guidance for the authorities to develop spatially explicit flood management and mitigation plans for the delta. The land-use change risk maps could further be used for adaptive risk management plans and as a basis for a cost-benefit analysis of the discussed land-use change scenarios. Additionally, the damage and risk maps may support the recently initiated agricultural insurance programme in Vietnam.}, language = {en} } @article{SecklerMetzler2022, author = {Seckler, Henrik and Metzler, Ralf}, title = {Bayesian deep learning for error estimation in the analysis of anomalous diffusion}, series = {Nature Communications}, volume = {13}, journal = {Nature Communications}, publisher = {Nature Publishing Group UK}, address = {London}, issn = {2041-1723}, doi = {10.1038/s41467-022-34305-6}, pages = {13}, year = {2022}, abstract = {Modern single-particle tracking techniques produce extensive time series of diffusive motion in a wide variety of systems, from single-molecule motion in living cells to movement ecology. The quest is to decipher the physical mechanisms encoded in the data and thus to better understand the probed systems. We here augment recently proposed machine-learning techniques for decoding anomalous-diffusion data to include an uncertainty estimate in addition to the predicted output. To avoid the Black Box Problem, a Bayesian Deep Learning technique named Stochastic Weight Averaging Gaussian is used to train models for both the classification of the diffusion model and the regression of the anomalous diffusion exponent of single-particle trajectories. Evaluating their performance, we find that these models can achieve a well-calibrated error estimate while maintaining high prediction accuracies. 
In the analysis of the output uncertainty predictions we relate these to properties of the underlying diffusion models, thus providing insights into the learning process of the machine and the relevance of the output.}, language = {en} } @misc{SecklerMetzler2022, author = {Seckler, Henrik and Metzler, Ralf}, title = {Bayesian deep learning for error estimation in the analysis of anomalous diffusion}, series = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Zweitver{\"o}ffentlichungen der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {1314}, issn = {1866-8372}, doi = {10.25932/publishup-58602}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-586025}, pages = {13}, year = {2022}, abstract = {Modern single-particle tracking techniques produce extensive time series of diffusive motion in a wide variety of systems, from single-molecule motion in living cells to movement ecology. The quest is to decipher the physical mechanisms encoded in the data and thus to better understand the probed systems. We here augment recently proposed machine-learning techniques for decoding anomalous-diffusion data to include an uncertainty estimate in addition to the predicted output. To avoid the Black Box Problem, a Bayesian Deep Learning technique named Stochastic Weight Averaging Gaussian is used to train models for both the classification of the diffusion model and the regression of the anomalous diffusion exponent of single-particle trajectories. Evaluating their performance, we find that these models can achieve a well-calibrated error estimate while maintaining high prediction accuracies. In the analysis of the output uncertainty predictions we relate these to properties of the underlying diffusion models, thus providing insights into the learning process of the machine and the relevance of the output.}, language = {en} } @article{SchoppaSiegVogeletal.2020, author = {Schoppa, Lukas and Sieg, Tobias and Vogel, Kristin and Z{\"o}ller, Gert and Kreibich, Heidi}, title = {Probabilistic flood loss models for companies}, series = {Water resources research}, volume = {56}, journal = {Water resources research}, number = {9}, publisher = {American Geophysical Union}, address = {Washington}, issn = {0043-1397}, doi = {10.1029/2020WR027649}, pages = {19}, year = {2020}, abstract = {Flood loss modeling is a central component of flood risk analysis. Conventionally, this involves univariable and deterministic stage-damage functions. Recent advancements in the field promote the use of multivariable and probabilistic loss models, which consider variables beyond inundation depth and account for prediction uncertainty. Although companies contribute significantly to total loss figures, novel modeling approaches for companies are lacking. Scarce data and the heterogeneity among companies impede the development of company flood loss models. We present three multivariable flood loss models for companies from the manufacturing, commercial, financial, and service sector that intrinsically quantify prediction uncertainty. Based on object-level loss data (n = 1,306), we comparatively evaluate the predictive capacity of Bayesian networks, Bayesian regression, and random forest in relation to deterministic and probabilistic stage-damage functions, serving as benchmarks. 
The company loss data stem from four postevent surveys in Germany between 2002 and 2013 and include information on flood intensity, company characteristics, emergency response, private precaution, and resulting loss to building, equipment, and goods and stock. We find that the multivariable probabilistic models successfully identify and reproduce essential relationships of flood damage processes in the data. The assessment of model skill focuses on the precision of the probabilistic predictions and reveals that the candidate models outperform the stage-damage functions, while differences among the proposed models are negligible. Although the combination of multivariable and probabilistic loss estimation improves predictive accuracy over the entire data set, wide predictive distributions stress the necessity for the quantification of uncertainty.}, language = {en} } @misc{RubeyBruneHeineetal.2017, author = {Rubey, Michael and Brune, Sascha and Heine, Christian and Davies, D. Rhodri and Williams, Simon E. and M{\"u}ller, R. Dietmar}, title = {Global patterns in Earth's dynamic topography since the Jurassic}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {623}, issn = {1866-8372}, doi = {10.25932/publishup-41824}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418241}, pages = {899 -- 919}, year = {2017}, abstract = {We evaluate the spatial and temporal evolution of Earth's long-wavelength surface dynamic topography since the Jurassic using a series of high-resolution global mantle convection models. These models are Earth-like in terms of convective vigour, thermal structure, surface heat-flux and the geographic distribution of heterogeneity. The models generate a degree-2-dominated spectrum of dynamic topography with negative amplitudes above subducted slabs (i.e. circum-Pacific regions and southern Eurasia) and positive amplitudes elsewhere (i.e. Africa, north-western Eurasia and the central Pacific). Model predictions are compared with published observations and subsidence patterns from well data, both globally and for the Australian and southern African regions. We find that our models reproduce the long-wavelength component of these observations, although observed smaller-scale variations are not reproduced. We subsequently define "geodynamic rules" for how different surface tectonic settings are affected by mantle processes: (i) locations in the vicinity of a subduction zone show large negative dynamic topography amplitudes; (ii) regions far away from convergent margins feature long-term positive dynamic topography; and (iii) rapid variations in dynamic support occur along the margins of overriding plates (e.g. the western US) and at points located on a plate that rapidly approaches a subduction zone (e.g. India and the Arabian Peninsula). 
Our models provide a predictive quantitative framework linking mantle convection with plate tectonics and sedimentary basin evolution, thus improving our understanding of how subduction and mantle convection affect the spatio-temporal evolution of basin architecture.}, language = {en} } @article{ReineckeTrautmannWageneretal.2022, author = {Reinecke, Robert and Trautmann, Tim and Wagener, Thorsten and Sch{\"u}ler, Katja}, title = {The critical need to foster computational reproducibility}, series = {Environmental research letters}, volume = {17}, journal = {Environmental research letters}, number = {4}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {1748-9326}, doi = {10.1088/1748-9326/ac5cf8}, pages = {5}, year = {2022}, language = {en} } @article{NevillNegraMyersetal.2020, author = {Nevill, Alan M. and Negra, Yassine and Myers, Tony D. and Sammoud, Senda and Chaabene, Helmi}, title = {Key somatic variables associated with, and differences between the 4 swimming strokes}, series = {Journal of sports sciences}, volume = {38}, journal = {Journal of sports sciences}, number = {7}, publisher = {Routledge, Taylor \& Francis Group}, address = {London}, issn = {0264-0414}, doi = {10.1080/02640414.2020.1734311}, pages = {787 -- 794}, year = {2020}, abstract = {This study identified key somatic and demographic characteristics that benefit all swimmers and, at the same time, identified further characteristics that benefit only specific swimming strokes. Three hundred sixty-three competitive-level swimmers (male [n = 202]; female [n = 161]) participated in the study. We adopted a multiplicative, allometric regression model to identify the key characteristics associated with 100 m swimming speeds (controlling for age). The model was refined using backward elimination. Characteristics that benefited some but not all strokes were identified by introducing stroke-by-predictor variable interactions. The regression analysis revealed 7 "common" characteristics that benefited all swimmers, suggesting that all swimmers benefit from having less body fat, broad shoulders and hips, a greater arm span (but shorter lower arms) and greater forearm girths with smaller relaxed arm girths. The 4 stroke-specific characteristics reveal that backstroke swimmers benefit from longer backs, a finding that can be likened to boats with longer hulls, which also travel faster through the water. Other stroke-by-predictor variable interactions (taken together) identified that butterfly swimmers are characterized by greater muscularity in the lower legs. These results highlight the importance of considering somatic and demographic characteristics of young swimmers for talent identification purposes (i.e., to ensure that swimmers realize their most appropriate strokes).}, language = {en} } @article{NaliboffGlerumBruneetal.2020, author = {Naliboff, John B. and Glerum, Anne and Brune, Sascha and P{\´e}ron-Pinvidic, G. and Wrona, Thilo}, title = {Development of 3-D rift heterogeneity through fault network evolution}, series = {Geophysical Research Letters}, volume = {47}, journal = {Geophysical Research Letters}, number = {13}, publisher = {John Wiley \& Sons, Inc.}, address = {New Jersey}, pages = {11}, year = {2020}, abstract = {Observations of rift and rifted margin architecture suggest that significant spatial and temporal structural heterogeneity develops during the multiphase evolution of continental rifting. 
Inheritance is often invoked to explain this heterogeneity, such as preexisting anisotropies in rock composition, rheology, and deformation. Here, we use high-resolution 3-D thermal-mechanical numerical models of continental extension to demonstrate that rift-parallel heterogeneity may develop solely through fault network evolution during the transition from distributed to localized deformation. In our models, the initial phase of distributed normal faulting is seeded through randomized initial strength perturbations in an otherwise laterally homogeneous lithosphere extending at a constant rate. Continued extension localizes deformation onto lithosphere-scale faults, which are laterally offset by tens of km and discontinuous along-strike. These results demonstrate that rift- and margin-parallel heterogeneity of large-scale fault patterns may in part be a natural byproduct of fault network coalescence.}, language = {en} } @misc{NaliboffGlerumBruneetal.2020, author = {Naliboff, John B. and Glerum, Anne and Brune, Sascha and P{\´e}ron-Pinvidic, G. and Wrona, Thilo}, title = {Development of 3-D rift heterogeneity through fault network evolution}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {13}, issn = {1866-8372}, doi = {10.25932/publishup-52466}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-524661}, pages = {13}, year = {2020}, abstract = {Observations of rift and rifted margin architecture suggest that significant spatial and temporal structural heterogeneity develops during the multiphase evolution of continental rifting. Inheritance is often invoked to explain this heterogeneity, such as preexisting anisotropies in rock composition, rheology, and deformation. Here, we use high-resolution 3-D thermal-mechanical numerical models of continental extension to demonstrate that rift-parallel heterogeneity may develop solely through fault network evolution during the transition from distributed to localized deformation. In our models, the initial phase of distributed normal faulting is seeded through randomized initial strength perturbations in an otherwise laterally homogeneous lithosphere extending at a constant rate. Continued extension localizes deformation onto lithosphere-scale faults, which are laterally offset by tens of km and discontinuous along-strike. These results demonstrate that rift- and margin-parallel heterogeneity of large-scale fault patterns may in part be a natural byproduct of fault network coalescence.}, language = {en} } @misc{Metzler2020, author = {Metzler, Ralf}, title = {Superstatistics and non-Gaussian diffusion}, series = {The European physical journal special topics}, volume = {229}, journal = {The European physical journal special topics}, number = {5}, publisher = {Springer}, address = {Heidelberg}, issn = {1951-6355}, doi = {10.1140/epjst/e2020-900210-x}, pages = {711 -- 728}, year = {2020}, abstract = {Brownian motion and viscoelastic anomalous diffusion in homogeneous environments are intrinsically Gaussian processes. In a growing number of systems, however, non-Gaussian displacement distributions of these processes are being reported. The physical cause of the non-Gaussianity is typically seen in different forms of disorder. 
These include, for instance, imperfect "ensembles" of tracer particles, the presence of local variations of the tracer mobility in heterogeneous environments, or cases in which the speed or persistence of moving nematodes or cells are distributed. From a theoretical point of view, stochastic descriptions based on distributed ("superstatistical") transport coefficients as well as time-dependent generalisations based on stochastic transport parameters with built-in finite correlation time are invoked. After a brief review of the history of Brownian motion and the famed Gaussian displacement distribution, we here provide a brief introduction to the phenomenon of non-Gaussianity and the stochastic modelling in terms of superstatistical and diffusing-diffusivity approaches.}, language = {en} } @misc{MardoukhiJeonMetzler2015, author = {Mardoukhi, Yousof and Jeon, Jae-Hyung and Metzler, Ralf}, title = {Geometry controlled anomalous diffusion in random fractal geometries}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {980}, issn = {1866-8372}, doi = {10.25932/publishup-47486}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-474864}, pages = {30134 -- 30147}, year = {2015}, abstract = {We investigate the ergodic properties of a random walker performing (anomalous) diffusion on a random fractal geometry. Extensive Monte Carlo simulations of the motion of tracer particles on an ensemble of realisations of percolation clusters are performed for a wide range of percolation densities. Single trajectories of the tracer motion are analysed to quantify the time averaged mean squared displacement (MSD) and to compare this with the ensemble averaged MSD of the particle motion. Other complementary physical observables associated with ergodicity are studied, as well. It turns out that the time averaged MSD of individual realisations exhibits non-vanishing fluctuations even in the limit of very long observation times as the percolation density approaches the critical value. This apparent non-ergodic behaviour concurs with the ergodic behaviour on the ensemble averaged level. We demonstrate how the non-vanishing fluctuations in single particle trajectories are analytically expressed in terms of the fractal dimension and the cluster size distribution of the random geometry, thus being of purely geometrical origin. Moreover, we reveal that the convergence scaling law to ergodicity, which is known to be inversely proportional to the observation time T for ergodic diffusion processes, follows a power-law ∼T^{-h} with h < 1 due to the fractal structure of the accessible space. These results provide useful measures for differentiating the subdiffusion on random fractals from an otherwise closely related process, namely, fractional Brownian motion. 
Implications of our results on the analysis of single particle tracking experiments are provided.}, language = {en} } @article{LeungLeutbecherReichetal.2019, author = {Leung, Tsz Yan and Leutbecher, Martin and Reich, Sebastian and Shepherd, Theodore G.}, title = {Atmospheric Predictability: Revisiting the Inherent Finite-Time Barrier}, series = {Journal of the atmospheric sciences}, volume = {76}, journal = {Journal of the atmospheric sciences}, number = {12}, publisher = {American Meteorological Soc.}, address = {Boston}, issn = {0022-4928}, doi = {10.1175/JAS-D-19-0057.1}, pages = {3883 -- 3892}, year = {2019}, abstract = {The accepted idea that there exists an inherent finite-time barrier in deterministically predicting atmospheric flows originates from Edward N. Lorenz's 1969 work based on two-dimensional (2D) turbulence. Yet, known analytic results on the 2D Navier-Stokes (N-S) equations suggest that one can skillfully predict the 2D N-S system indefinitely far ahead should the initial-condition error become sufficiently small, thereby presenting a potential conflict with Lorenz's theory. Aided by numerical simulations, the present work reexamines Lorenz's model and reviews both sides of the argument, paying particular attention to the roles played by the slope of the kinetic energy spectrum. It is found that when this slope is shallower than -3, the Lipschitz continuity of analytic solutions (with respect to initial conditions) breaks down as the model resolution increases, unless the viscous range of the real system is resolved—which remains practically impossible. This breakdown leads to the inherent finite-time limit. If, on the other hand, the spectral slope is steeper than -3, then the breakdown does not occur. In this way, the apparent contradiction between the analytic results and Lorenz's theory is reconciled.}, language = {en} } @misc{LarhlimiDavidSelbigetal.2012, author = {Larhlimi, Abdelhalim and David, Laszlo and Selbig, Joachim and Bockmayr, Alexander}, title = {F2C2}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {921}, issn = {1866-8372}, doi = {10.25932/publishup-43243}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-432431}, pages = {11}, year = {2012}, abstract = {Background: Flux coupling analysis (FCA) has become a useful tool in the constraint-based analysis of genome-scale metabolic networks. FCA allows detecting dependencies between reaction fluxes of metabolic networks at steady-state. On the one hand, this can help in the curation of reconstructed metabolic networks by verifying whether the coupling between reactions is in agreement with the experimental findings. On the other hand, FCA can aid in defining intervention strategies to knock out target reactions. Results: We present a new method F2C2 for FCA, which is orders of magnitude faster than previous approaches. As a consequence, FCA of genome-scale metabolic networks can now be performed in a routine manner. Conclusions: We propose F2C2 as a fast tool for the computation of flux coupling in genome-scale metabolic networks. F2C2 is freely available for non-commercial use at https://sourceforge.net/projects/f2c2/files/.}, language = {en} } @misc{KaminskiSchaubSiegeletal.2013, author = {Kaminski, Roland and Schaub, Torsten H. 
and Siegel, Anne and Videla, Santiago}, title = {Minimal intervention strategies in logical signaling networks with ASP}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch Naturwissenschaftliche Reihe}, number = {4-5}, issn = {1866-8372}, doi = {10.25932/publishup-41570}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-415704}, pages = {675 -- 690}, year = {2013}, abstract = {Proposing relevant perturbations to biological signaling networks is central to many problems in biology and medicine because it allows for enabling or disabling certain biological outcomes. In contrast to quantitative methods that permit fine-grained (kinetic) analysis, qualitative approaches allow for addressing large-scale networks. This is accomplished by more abstract representations such as logical networks. We elaborate upon such a qualitative approach aiming at the computation of minimal interventions in logical signaling networks relying on Kleene's three-valued logic and fixpoint semantics. We address this problem within answer set programming and show that it greatly outperforms previous work using dedicated algorithms.}, language = {en} } @misc{HempelKoseskaNikoloskietal.2017, author = {Hempel, Sabrina and Koseska, Aneta and Nikoloski, Zoran and Kurths, J{\"u}rgen}, title = {Unraveling gene regulatory networks from time-resolved gene expression data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-400924}, pages = {26}, year = {2017}, abstract = {Background: Inferring regulatory interactions between genes from transcriptomics time-resolved data, yielding reverse engineered gene regulatory networks, is of paramount importance to systems biology and bioinformatics studies. Accurate methods to address this problem can ultimately provide a deeper insight into the complexity, behavior, and functions of the underlying biological systems. However, the large number of interacting genes coupled with short and often noisy time-resolved read-outs of the system renders the reverse engineering a challenging task. Therefore, the development and assessment of methods which are computationally efficient, robust against noise, applicable to short time series data, and preferably capable of reconstructing the directionality of the regulatory interactions remains a pressing research problem with valuable applications. Results: Here we perform the largest systematic analysis of a set of similarity measures and scoring schemes within the scope of the relevance network approach which are commonly used for gene regulatory network reconstruction from time series data. In addition, we define and analyze several novel measures and schemes which are particularly suitable for short transcriptomics time series. We also compare the considered 21 measures and 6 scoring schemes according to their ability to correctly reconstruct such networks from short time series data by calculating summary statistics based on the corresponding specificity and sensitivity. Our results demonstrate that rank and symbol based measures have the highest performance in inferring regulatory interactions. In addition, the proposed scoring scheme by asymmetric weighting has shown to be valuable in reducing the number of false positive interactions. 
On the other hand, Granger causality as well as information-theoretic measures, frequently used in inference of regulatory networks, show low performance on the short time series analyzed in this study. Conclusions: Our study is intended to serve as a guide for choosing a particular combination of similarity measures and scoring schemes suitable for reconstruction of gene regulatory networks from short time series data. We show that further improvement of algorithms for reverse engineering can be obtained if one considers measures that are rooted in the study of symbolic dynamics or ranks, in contrast to the application of common similarity measures which do not consider the temporal character of the employed data. Moreover, we establish that the asymmetric weighting scoring scheme together with symbol based measures (for low noise level) and rank based measures (for high noise level) are the most suitable choices.}, language = {en} } @misc{Frauenstein2017, type = {Master Thesis}, author = {Frauenstein, Andrea}, title = {Erarbeitung einer P-Bilanz im Rahmen eines Konzeptes zur Sanierung und Restaurierung des Rangsdorfer Sees}, doi = {10.25932/publishup-42876}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-428760}, school = {Universit{\"a}t Potsdam}, pages = {XIV, 152}, year = {2017}, abstract = {The Rangsdorfer See (A = 2.44 km², z(max) = 6 m, z(mean) = 1.93 m) in the district of Teltow-Fl{\"a}ming is one of many water bodies in Brandenburg that currently do not reach the good status required by the EU Water Framework Directive. Phosphorus is known to be the most important production-limiting nutrient in many water bodies and is thus the most promising control variable for successful lake therapy. The aim of this thesis was to assess the water quality of the Rangsdorfer See with respect to its trophic state, to identify the phosphorus input pathways that cause the highest loads, and to find therapy measures that enable a long-term improvement of the lake's condition. In a scenario analysis, the modified one-box model was applied to estimate the effectiveness of external and internal therapy measures. The following conclusions can be drawn from this work: owing to its morphometry, the Rangsdorfer See is a naturally nutrient-rich water body and was so even before anthropogenic influences acted upon it. However, long-standing nutrient inputs from various sources (wastewater discharges, intensive fish farming, sewage farms) led to excessive productivity. Many pollution sources have been eliminated, yet a relevant nutrient export from the catchment still takes place. Using phosphorus balance models and lake-type-specific critical in-lake phosphorus concentrations, it becomes apparent that the current external phosphorus load exceeds the critical phosphorus input associated with presumably reaching good ecological status. The largest share of the load is transported into the Rangsdorfer See via the natural main tributary. Remediation measures in its catchment therefore represent an effective means of reducing this load. A technical solution for nutrient reduction in the tributary (an elimination plant) can be used in support, but would then have to be operated permanently as long as the high phosphorus concentration in the tributary remains unchanged. 
The one-box model proved to be a helpful instrument for preselecting suitable therapy measures.}, language = {de} } @article{CherstvyChechkinMetzler2014, author = {Cherstvy, Andrey G. and Chechkin, Aleksei V. and Metzler, Ralf}, title = {Particle invasion, survival, and non-ergodicity in 2D diffusion processes with space-dependent diffusivity}, series = {Soft matter}, volume = {2014}, journal = {Soft matter}, number = {10}, publisher = {Royal Society of Chemistry}, issn = {2046-2069}, doi = {10.1039/c3sm52846d}, pages = {1591 -- 1601}, year = {2014}, abstract = {We study the thermal Markovian diffusion of tracer particles in a 2D medium with spatially varying diffusivity D(r), mimicking recently measured, heterogeneous maps of the apparent diffusion coefficient in biological cells. For this heterogeneous diffusion process (HDP) we analyse the mean squared displacement (MSD) of the tracer particles, the time averaged MSD, the spatial probability density function, and the first passage time dynamics from the cell boundary to the nucleus. Moreover we examine the non-ergodic properties of this process which are important for the correct physical interpretation of time averages of observables obtained from single particle tracking experiments. From extensive computer simulations of the 2D stochastic Langevin equation we present an in-depth study of this HDP. In particular, we find that the MSDs along the radial and azimuthal directions in a circular domain obey anomalous and Brownian scaling, respectively. We demonstrate that the time averaged MSD stays linear as a function of the lag time and the system thus reveals a weak ergodicity breaking. Our results will enable one to rationalise the diffusive motion of larger tracer particles such as viruses or submicron beads in biological cells.}, language = {en} } @misc{CherstvyChechkinMetzler2014, author = {Cherstvy, Andrey G. and Chechkin, Aleksei V. and Metzler, Ralf}, title = {Particle invasion, survival, and non-ergodicity in 2D diffusion processes with space-dependent diffusivity}, number = {168}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-74021}, pages = {1591 -- 1601}, year = {2014}, abstract = {We study the thermal Markovian diffusion of tracer particles in a 2D medium with spatially varying diffusivity D(r), mimicking recently measured, heterogeneous maps of the apparent diffusion coefficient in biological cells. For this heterogeneous diffusion process (HDP) we analyse the mean squared displacement (MSD) of the tracer particles, the time averaged MSD, the spatial probability density function, and the first passage time dynamics from the cell boundary to the nucleus. Moreover we examine the non-ergodic properties of this process which are important for the correct physical interpretation of time averages of observables obtained from single particle tracking experiments. From extensive computer simulations of the 2D stochastic Langevin equation we present an in-depth study of this HDP. In particular, we find that the MSDs along the radial and azimuthal directions in a circular domain obey anomalous and Brownian scaling, respectively. We demonstrate that the time averaged MSD stays linear as a function of the lag time and the system thus reveals a weak ergodicity breaking. 
Our results will enable one to rationalise the diffusive motion of larger tracer particles such as viruses or submicron beads in biological cells.}, language = {en} } @misc{BhataraLaukkaBollAvetisyanetal.2016, author = {Bhatara, Anjali and Laukka, Petri and Boll-Avetisyan, Natalie and Granjon, Lionel and Elfenbein, Hillary Anger and B{\"a}nziger, Tanja}, title = {Second language ability and emotional prosody perception}, series = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Humanwissenschaftliche Reihe}, number = {503}, issn = {1866-8364}, doi = {10.25932/publishup-41186}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-411860}, pages = {13}, year = {2016}, abstract = {The present study examines the effect of language experience on vocal emotion perception in a second language. Native speakers of French with varying levels of self-reported English ability were asked to identify emotions from vocal expressions produced by American actors in a forced-choice task, and to rate their pleasantness, power, alertness and intensity on continuous scales. Stimuli included emotionally expressive English speech (emotional prosody) and non-linguistic vocalizations (affect bursts), and a baseline condition with Swiss-French pseudo-speech. Results revealed effects of English ability on the recognition of emotions in English speech but not in non-linguistic vocalizations. Specifically, higher English ability was associated with less accurate identification of positive emotions, but not with the interpretation of negative emotions. Moreover, higher English ability was associated with lower ratings of pleasantness and power, again only for emotional prosody. This suggests that second language skills may sometimes interfere with emotion recognition from speech prosody, particularly for positive emotions.}, language = {en} } @misc{BanerjeeSaalfrank2013, author = {Banerjee, Shiladitya and Saalfrank, Peter}, title = {Vibrationally resolved absorption, emission and resonance Raman spectra of diamondoids}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94542}, pages = {144 -- 158}, year = {2013}, abstract = {The time-dependent approach to electronic spectroscopy, as popularized by Heller and coworkers in the 1980s, is applied here in conjunction with linear-response, time-dependent density functional theory to study vibronic absorption, emission and resonance Raman spectra of several diamondoids. Two-state models, the harmonic and the Condon approximations, are used for the calculations, making them easily applicable to larger molecules. The method is applied to nine pristine lower and higher diamondoids: adamantane, diamantane, triamantane, and three isomers each of tetramantane and pentamantane. We also consider a hybrid species "Dia = Dia" - a shorthand notation for a recently synthesized molecule comprising two diamantane units connected by a C=C double bond. We resolve and interpret trends in optical and vibrational properties of these molecules as a function of their size, shape, and symmetry, as well as effects of "blending" with sp²-hybridized C-atoms. 
Time-dependent correlation functions facilitate the computations and shed light on the vibrational dynamics following electronic transitions.}, language = {en} } @misc{AyzelSchefferHeistermann2020, author = {Ayzel, Georgy and Scheffer, Tobias and Heistermann, Maik}, title = {RainNet v1.0}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {964}, issn = {1866-8372}, doi = {10.25932/publishup-47294}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-472942}, pages = {16}, year = {2020}, abstract = {In this study, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. Its design was inspired by the U-Net and SegNet families of deep learning models, which were originally designed for binary segmentation tasks. RainNet was trained to predict continuous precipitation intensities at a lead time of 5min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900km × 900km and has a resolution of 1km in space and 5min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In order to achieve a lead time of 1h, a recursive approach was implemented by using RainNet predictions at 5min lead times as model inputs for longer lead times. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the rainymotion library and had previously been shown to outperform DWD's operational nowcasting model for the same set of verification events. RainNet significantly outperforms the benchmark models at all lead times up to 60min for the routine verification metrics mean absolute error (MAE) and the critical success index (CSI) at intensity thresholds of 0.125, 1, and 5mm h⁻¹. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15mm h⁻¹). The limited ability of RainNet to predict heavy rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16km and below. Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times. Hence, we propose several options to address this issue in prospective research, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance in terms of a binary segmentation task. Furthermore, we suggest additional input data that could help to better identify situations with imminent precipitation dynamics. 
The model code, pretrained weights, and training data are provided in open repositories as an input for such future studies.}, language = {en} } @article{AyzelSchefferHeistermann2020, author = {Ayzel, Georgy and Scheffer, Tobias and Heistermann, Maik}, title = {RainNet v1.0}, series = {Geoscientific Model Development}, volume = {13}, journal = {Geoscientific Model Development}, number = {6}, publisher = {Copernicus Publ.}, address = {G{\"o}ttingen}, issn = {1991-959X}, doi = {10.5194/gmd-13-2631-2020}, pages = {2631 -- 2644}, year = {2020}, abstract = {In this study, we present RainNet, a deep convolutional neural network for radar-based precipitation nowcasting. Its design was inspired by the U-Net and SegNet families of deep learning models, which were originally designed for binary segmentation tasks. RainNet was trained to predict continuous precipitation intensities at a lead time of 5min, using several years of quality-controlled weather radar composites provided by the German Weather Service (DWD). That data set covers Germany with a spatial domain of 900km × 900km and has a resolution of 1km in space and 5min in time. Independent verification experiments were carried out on 11 summer precipitation events from 2016 to 2017. In order to achieve a lead time of 1h, a recursive approach was implemented by using RainNet predictions at 5min lead times as model inputs for longer lead times. In the verification experiments, trivial Eulerian persistence and a conventional model based on optical flow served as benchmarks. The latter is available in the rainymotion library and had previously been shown to outperform DWD's operational nowcasting model for the same set of verification events. RainNet significantly outperforms the benchmark models at all lead times up to 60min for the routine verification metrics mean absolute error (MAE) and the critical success index (CSI) at intensity thresholds of 0.125, 1, and 5mm h⁻¹. However, rainymotion turned out to be superior in predicting the exceedance of higher intensity thresholds (here 10 and 15mm h⁻¹). The limited ability of RainNet to predict heavy rainfall intensities is an undesirable property which we attribute to a high level of spatial smoothing introduced by the model. At a lead time of 5min, an analysis of power spectral density confirmed a significant loss of spectral power at length scales of 16km and below. Obviously, RainNet had learned an optimal level of smoothing to produce a nowcast at 5min lead time. In that sense, the loss of spectral power at small scales is informative, too, as it reflects the limits of predictability as a function of spatial scale. Beyond the lead time of 5min, however, the increasing level of smoothing is a mere artifact - an analogue to numerical diffusion - that is not a property of RainNet itself but of its recursive application. In the context of early warning, the smoothing is particularly unfavorable since pronounced features of intense precipitation tend to get lost over longer lead times. Hence, we propose several options to address this issue in prospective research, including an adjustment of the loss function for model training, model training for longer lead times, and the prediction of threshold exceedance in terms of a binary segmentation task. Furthermore, we suggest additional input data that could help to better identify situations with imminent precipitation dynamics. 
The model code, pretrained weights, and training data are provided in open repositories as an input for such future studies.}, language = {en} } @misc{AgarwalMarwanMaheswaranetal.2017, author = {Agarwal, Ankit and Marwan, Norbert and Maheswaran, Rathinasamy and Merz, Bruno and Kurths, J{\"u}rgen}, title = {Multi-scale event synchronization analysis for unravelling climate processes}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {661}, issn = {1866-8372}, doi = {10.25932/publishup-41827}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-418274}, pages = {13}, year = {2017}, abstract = {The temporal dynamics of climate processes are spread across different timescales and, as such, the study of these processes at only one selected timescale might not reveal the complete mechanisms and interactions within and between the (sub-) processes. To capture the non-linear interactions between climatic events, the method of event synchronization has found increasing attention recently. The main drawback with the present estimation of event synchronization is its restriction to analysing the time series at one reference timescale only. The study of event synchronization at multiple scales would be of great interest to comprehend the dynamics of the investigated climate processes. In this paper, the wavelet-based multi-scale event synchronization (MSES) method is proposed by combining the wavelet transform and event synchronization. Wavelets are used extensively to comprehend multi-scale processes and the dynamics of processes across various timescales. The proposed method allows the study of spatio-temporal patterns across different timescales. The method is tested on synthetic and real-world time series in order to check its replicability and applicability. The results indicate that MSES is able to capture relationships that exist between processes at different timescales.}, language = {en} }