@article{ZoellerHolschneider2016, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Maximum Possible and the Maximum Expected Earthquake Magnitude for Production-Induced Earthquakes at the Gas Field in Groningen, The Netherlands}, series = {Bulletin of the Seismological Society of America}, volume = {106}, journal = {Bulletin of the Seismological Society of America}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0037-1106}, doi = {10.1785/0120160220}, pages = {2917 -- 2921}, year = {2016}, abstract = {The Groningen gas field serves as a natural laboratory for production-induced earthquakes, because no earthquakes were observed before the beginning of gas production. Increasing gas production rates resulted in growing earthquake activity and eventually in the occurrence of the 2012M(w) 3.6 Huizinge earthquake. At least since this event, a detailed seismic hazard and risk assessment including estimation of the maximum earthquake magnitude is considered to be necessary to decide on the future gas production. In this short note, we first apply state-of-the-art methods of mathematical statistics to derive confidence intervals for the maximum possible earthquake magnitude m(max). Second, we calculate the maximum expected magnitude M-T in the time between 2016 and 2024 for three assumed gas-production scenarios. 
Using broadly accepted physical assumptions and 90\% confidence level, we suggest a value of m(max) 4.4, whereas M-T varies between 3.9 and 4.3, depending on the production scenario.}, language = {en} } @article{ZoellerHolschneider2016a, author = {Z{\"o}ller, Gert and Holschneider, Matthias}, title = {The Earthquake History in a Fault Zone Tells Us Almost Nothing about m(max)}, series = {Seismological research letters}, volume = {87}, journal = {Seismological research letters}, publisher = {Seismological Society of America}, address = {Albany}, issn = {0895-0695}, doi = {10.1785/0220150176}, pages = {132--137}, year = {2016}, abstract = {In the present study, we summarize and evaluate the endeavors from recent years to estimate the maximum possible earthquake magnitude m(max) from observed data. In particular, we use basic and physically motivated assumptions to identify best cases and worst cases in terms of lowest and highest degree of uncertainty of m(max). In a general framework, we demonstrate that earthquake data and earthquake proxy data recorded in a fault zone provide almost no information about m(max) unless reliable and homogeneous data of a long time interval, including several earthquakes with magnitude close to m(max), are available. Even if detailed earthquake information from some centuries including historic and paleoearthquakes are given, only very few, namely the largest events, will contribute at all to the estimation of m(max), and this results in unacceptably high uncertainties. 
As a consequence, estimators of m(max) in a fault zone, which are based solely on earthquake-related information from this region, have to be dismissed.}, language = {en} } @article{WichitsaNguanLaeuterLiero2016, author = {Wichitsa-Nguan, Korakot and L{\"a}uter, Henning and Liero, Hannelore}, title = {Estimability in Cox models}, series = {Statistical Papers}, volume = {57}, journal = {Statistical Papers}, publisher = {Springer}, address = {New York}, issn = {0932-5026}, doi = {10.1007/s00362-016-0755-x}, pages = {1121 -- 1140}, year = {2016}, abstract = {Our procedure of estimating is the maximum partial likelihood estimate (MPLE) which is the appropriate estimate in the Cox model with a general censoring distribution, covariates and an unknown baseline hazard rate . We find conditions for estimability and asymptotic estimability. The asymptotic variance matrix of the MPLE is represented and properties are discussed.}, language = {en} } @phdthesis{Wichitsanguan2016, author = {Wichitsa-nguan, Korakot}, title = {Modifications and extensions of the logistic regression and Cox model}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90033}, school = {Universit{\"a}t Potsdam}, pages = {x, 131}, year = {2016}, abstract = {In many statistical applications, the aim is to model the relationship between covariates and some outcomes. A choice of the appropriate model depends on the outcome and the research objectives, such as linear models for continuous outcomes, logistic models for binary outcomes and the Cox model for time-to-event data. In epidemiological, medical, biological, societal and economic studies, the logistic regression is widely used to describe the relationship between a response variable as binary outcome and explanatory variables as a set of covariates. However, epidemiologic cohort studies are quite expensive regarding data management since following up a large number of individuals takes long time. 
Therefore, the case-cohort design is applied to reduce cost and time for data collection. The case-cohort sampling collects a small random sample from the entire cohort, which is called subcohort. The advantage of this design is that the covariate and follow-up data are recorded only on the subcohort and all cases (all members of the cohort who develop the event of interest during the follow-up process). In this thesis, we investigate the estimation in the logistic model for case-cohort design. First, a model with a binary response and a binary covariate is considered. The maximum likelihood estimator (MLE) is described and its asymptotic properties are established. An estimator for the asymptotic variance of the estimator based on the maximum likelihood approach is proposed; this estimator differs slightly from the estimator introduced by Prentice (1986). Simulation results for several proportions of the subcohort show that the proposed estimator gives lower empirical bias and empirical variance than Prentice's estimator. Then the MLE in the logistic regression with discrete covariate under case-cohort design is studied. Here the approach of the binary covariate model is extended. Proving asymptotic normality of estimators, standard errors for the estimators can be derived. The simulation study demonstrates the estimation procedure of the logistic regression model with a one-dimensional discrete covariate. Simulation results for several proportions of the subcohort and different choices of the underlying parameters indicate that the estimator developed here performs reasonably well. Moreover, the comparison between theoretical values and simulation results of the asymptotic variance of estimator is presented. Clearly, the logistic regression is sufficient for the binary outcome refers to be available for all subjects and for a fixed time interval. 
Nevertheless, in practice, the observations in clinical trials are frequently collected for different time periods and subjects may drop out or relapse from other causes during follow-up. Hence, the logistic regression is not appropriate for incomplete follow-up data; for example, an individual drops out of the study before the end of data collection or an individual has not occurred the event of interest for the duration of the study. These observations are called censored observations. The survival analysis is necessary to solve these problems. Moreover, the time to the occurence of the event of interest is taken into account. The Cox model has been widely used in survival analysis, which can effectively handle the censored data. Cox (1972) proposed the model which is focused on the hazard function. The Cox model is assumed to be λ(t|x) = λ0(t) exp(β^Tx) where λ0(t) is an unspecified baseline hazard at time t and X is the vector of covariates, β is a p-dimensional vector of coefficient. In this thesis, the Cox model is considered under the view point of experimental design. The estimability of the parameter β0 in the Cox model, where β0 denotes the true value of β, and the choice of optimal covariates are investigated. We give new representations of the observed information matrix In(β) and extend results for the Cox model of Andersen and Gill (1982). In this way conditions for the estimability of β0 are formulated. Under some regularity conditions, ∑ is the inverse of the asymptotic variance matrix of the MPLE of β0 in the Cox model and then some properties of the asymptotic variance matrix of the MPLE are highlighted. Based on the results of asymptotic estimability, the calculation of local optimal covariates is considered and shown in examples. In a sensitivity analysis, the efficiency of given covariates is calculated. For neighborhoods of the exponential models, the efficiencies have then been found. 
It is appeared that for fixed parameters β0, the efficiencies do not change very much for different baseline hazard functions. Some proposals for applicable optimal covariates and a calculation procedure for finding optimal covariates are discussed. Furthermore, the extension of the Cox model where time-dependent coefficient are allowed, is investigated. In this situation, the maximum local partial likelihood estimator for estimating the coefficient function β(·) is described. Based on this estimator, we formulate a new test procedure for testing, whether a one-dimensional coefficient function β(·) has a prespecified parametric form, say β(·; ϑ). The score function derived from the local constant partial likelihood function at d distinct grid points is considered. It is shown that the distribution of the properly standardized quadratic form of this d-dimensional vector under the null hypothesis tends to a Chi-squared distribution. Moreover, the limit statement remains true when replacing the unknown ϑ0 by the MPLE in the hypothetical model and an asymptotic α-test is given by the quantiles or p-values of the limiting Chi-squared distribution. Finally, we propose a bootstrap version of this test. The bootstrap test is only defined for the special case of testing whether the coefficient function is constant. A simulation study illustrates the behavior of the bootstrap test under the null hypothesis and a special alternative. It gives quite good results for the chosen underlying model. References P. K. Andersen and R. D. Gill. Cox's regression model for counting processes: a large samplestudy. Ann. Statist., 10(4):1100{1120, 1982. D. R. Cox. Regression models and life-tables. J. Roy. Statist. Soc. Ser. B, 34:187{220, 1972. R. L. Prentice. A case-cohort design for epidemiologic cohort studies and disease prevention trials. 
Biometrika, 73(1):1{11, 1986.}, language = {en} } @unpublished{VasilievTarkhanov2016, author = {Vasiliev, Serguei and Tarkhanov, Nikolai Nikolaevich}, title = {Construction of series of perfect lattices by layer superposition}, volume = {5}, number = {11}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100591}, pages = {11}, year = {2016}, abstract = {We construct a new series of perfect lattices in n dimensions by the layer superposition method of Delaunay-Barnes.}, language = {en} } @article{TinpunKoppitz2016, author = {Tinpun, Kittisak and Koppitz, J{\"o}rg}, title = {Generating sets of infinite full transformation semigroups with restricted range}, series = {Acta scientiarum mathematicarum}, volume = {82}, journal = {Acta scientiarum mathematicarum}, publisher = {Institutum Bolyaianum Universitatis Szegediensis}, address = {Szeged}, issn = {0001-6969}, doi = {10.14232/actasm-015-502-4}, pages = {55 -- 63}, year = {2016}, abstract = {In the present paper, we consider minimal generating sets of infinite full transformation semigroups with restricted range modulo specific subsets. In particular, we determine relative ranks.}, language = {en} } @article{Tarkhanov2016, author = {Tarkhanov, Nikolai Nikolaevich}, title = {Deformation quantization and boundary value problems}, series = {International journal of geometric methods in modern physics : differential geometery, algebraic geometery, global analysis \& topology}, volume = {13}, journal = {International journal of geometric methods in modern physics : differential geometery, algebraic geometery, global analysis \& topology}, publisher = {World Scientific}, address = {Singapore}, issn = {0219-8878}, doi = {10.1142/S0219887816500079}, pages = {176 -- 195}, year = {2016}, abstract = {We describe a natural construction of deformation quantization on a compact symplectic manifold with boundary. 
On the algebra of quantum observables a trace functional is defined which as usual annihilates the commutators. This gives rise to an index as the trace of the unity element. We formulate the index theorem as a conjecture and examine it by the classical harmonic oscillator.}, language = {en} } @article{StolleMichaelisRauberg2016, author = {Stolle, Claudia and Michaelis, Ingo and Rauberg, Jan}, title = {The role of high-resolution geomagnetic field models for investigating ionospheric currents at low Earth orbit satellites}, series = {Earth, planets and space}, volume = {68}, journal = {Earth, planets and space}, publisher = {Springer}, address = {Heidelberg}, issn = {1880-5981}, doi = {10.1186/s40623-016-0494-1}, pages = {10}, year = {2016}, abstract = {Low Earth orbiting geomagnetic satellite missions, such as the Swarm satellite mission, are the only means to monitor and investigate ionospheric currents on a global scale and to make in situ measurements of F region currents. High-precision geomagnetic satellite missions are also able to detect ionospheric currents during quiet-time geomagnetic conditions that only have few nanotesla amplitudes in the magnetic field. An efficient method to isolate the ionospheric signals from satellite magnetic field measurements has been the use of residuals between the observations and predictions from empirical geomagnetic models for other geomagnetic sources, such as the core and lithospheric field or signals from the quiet-time magnetospheric currents. This study aims at highlighting the importance of high-resolution magnetic field models that are able to predict the lithospheric field and that consider the quiet-time magnetosphere for reliably isolating signatures from ionospheric currents during geomagnetically quiet times. The effects on the detection of ionospheric currents arising from neglecting the lithospheric and magnetospheric sources are discussed on the example of four Swarm orbits during very quiet times. 
The respective orbits show a broad range of typical scenarios, such as strong and weak ionospheric signal (during day- and nighttime, respectively) superimposed over strong and weak lithospheric signals. If predictions from the lithosphere or magnetosphere are not properly considered, the amplitude of the ionospheric currents, such as the midlatitude Sq currents or the equatorial electrojet (EEJ), is modulated by 10-15 \% in the examples shown. An analysis from several orbits above the African sector, where the lithospheric field is significant, showed that the peak value of the signatures of the EEJ is in error by 5 \% in average when lithospheric contributions are not considered, which is in the range of uncertainties of present empirical models of the EEJ.}, language = {en} } @article{SinclairBussideVilliersetal.2016, author = {Sinclair, Nathalie and Bussi, Maria G. Bartolini and de Villiers, Michael and Jones, Keith and Kortenkamp, Ulrich and Leung, Allen and Owens, Kay}, title = {Recent research on geometry education: an ICME-13 survey team report}, series = {ZDM : The International Journal on Mathematics Education}, volume = {48}, journal = {ZDM : The International Journal on Mathematics Education}, publisher = {Springer}, address = {Heidelberg}, issn = {1863-9690}, doi = {10.1007/s11858-016-0796-6}, pages = {691 -- 719}, year = {2016}, abstract = {This survey on the theme of Geometry Education (including new technologies) focuses chiefly on the time span since 2008. Based on our review of the research literature published during this time span (in refereed journal articles, conference proceedings and edited books), we have jointly identified seven major threads of contributions that span from the early years of learning (pre-school and primary school) through to post-compulsory education and to the issue of mathematics teacher education for geometry. 
These threads are as follows: developments and trends in the use of theories; advances in the understanding of visuo spatial reasoning; the use and role of diagrams and gestures; advances in the understanding of the role of digital technologies; advances in the understanding of the teaching and learning of definitions; advances in the understanding of the teaching and learning of the proving process; and, moving beyond traditional Euclidean approaches. Within each theme, we identify relevant research and also offer commentary on future directions.}, language = {en} } @article{ShtrakovKoppitz2016, author = {Shtrakov, Slavcho and Koppitz, J{\"o}rg}, title = {Stable varieties of semigroups and groupoids}, series = {Algebra universalis}, volume = {75}, journal = {Algebra universalis}, publisher = {Springer}, address = {Basel}, issn = {0002-5240}, doi = {10.1007/s00012-015-0359-7}, pages = {85 -- 106}, year = {2016}, abstract = {The paper deals with Sigma-composition and Sigma-essential composition of terms which lead to stable and s-stable varieties of algebras. A full description of all stable varieties of semigroups, commutative and idempotent groupoids is obtained. We use an abstract reduction system which simplifies the presentations of terms of type tau - (2) to study the variety of idempotent groupoids and s-stable varieties of groupoids. 
S-stable varieties are a variation of stable varieties, used to highlight replacement of subterms of a term in a deductive system instead of the usual replacement of variables by terms.}, language = {en} } @unpublished{ShlapunovTarkhanov2016, author = {Shlapunov, Alexander and Tarkhanov, Nikolai Nikolaevich}, title = {An open mapping theorem for the Navier-Stokes equations}, volume = {5}, number = {10}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-98687}, pages = {80}, year = {2016}, abstract = {We consider the Navier-Stokes equations in the layer R^n x [0,T] over R^n with finite T > 0. Using the standard fundamental solutions of the Laplace operator and the heat operator, we reduce the Navier-Stokes equations to a nonlinear Fredholm equation of the form (I+K) u = f, where K is a compact continuous operator in anisotropic normed H{\"o}lder spaces weighted at the point at infinity with respect to the space variables. Actually, the weight function is included to provide a finite energy estimate for solutions to the Navier-Stokes equations for all t in [0,T]. On using the particular properties of the de Rham complex we conclude that the Fr{\´e}chet derivative (I+K)' is continuously invertible at each point of the Banach space under consideration and the map I+K is open and injective in the space. In this way the Navier-Stokes equations prove to induce an open one-to-one mapping in the scale of H{\"o}lder spaces.}, language = {en} } @article{SchroeterRitterHolschneideretal.2016, author = {Schroeter, M-A and Ritter, M. and Holschneider, Matthias and Sturm, H.}, title = {Enhanced DySEM imaging of cantilever motion using artificial structures patterned by focused ion beam techniques}, series = {Journal of micromechanics and microengineering}, volume = {26}, journal = {Journal of micromechanics and microengineering}, publisher = {IOP Publ. 
Ltd.}, address = {Bristol}, issn = {0960-1317}, doi = {10.1088/0960-1317/26/3/035010}, pages = {7}, year = {2016}, abstract = {We use a dynamic scanning electron microscope (DySEM) to map the spatial distribution of the vibration of a cantilever beam. The DySEM measurements are based on variations of the local secondary electron signal within the imaging electron beam diameter during an oscillation period of the cantilever. For this reason, the surface of a cantilever without topography or material variation does not allow any conclusions about the spatial distribution of vibration due to a lack of dynamic contrast. In order to overcome this limitation, artificial structures were added at defined positions on the cantilever surface using focused ion beam lithography patterning. The DySEM signal of such high-contrast structures is strongly improved, hence information about the surface vibration becomes accessible. Simulations of images of the vibrating cantilever have also been performed. The results of the simulation are in good agreement with the experimental images.}, language = {en} } @misc{Scharrer2016, type = {Master Thesis}, author = {Scharrer, Christian}, title = {Relating diameter and mean curvature for varifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-97013}, school = {Universit{\"a}t Potsdam}, pages = {42}, year = {2016}, abstract = {The main results of this thesis are formulated in a class of surfaces (varifolds) generalizing closed and connected smooth submanifolds of Euclidean space which allows singularities. 
Given an indecomposable varifold with dimension at least two in some Euclidean space such that the first variation is locally bounded, the total variation is absolutely continuous with respect to the weight measure, the density of the weight measure is at least one outside a set of weight measure zero and the generalized mean curvature is locally summable to a natural power (dimension of the varifold minus one) with respect to the weight measure. The thesis presents an improved estimate of the set where the lower density is small in terms of the one dimensional Hausdorff measure. Moreover, if the support of the weight measure is compact, then the intrinsic diameter with respect to the support of the weight measure is estimated in terms of the generalized mean curvature. This estimate is in analogy to the diameter control for closed connected manifolds smoothly immersed in some Euclidean space of Peter Topping. Previously, it was not known whether the hypothesis in this thesis implies that two points in the support of the weight measure have finite geodesic distance.}, language = {en} } @phdthesis{Samaras2016, author = {Samaras, Stefanos}, title = {Microphysical retrieval of non-spherical aerosol particles using regularized inversion of multi-wavelength lidar data}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-396528}, school = {Universit{\"a}t Potsdam}, pages = {xiv, 190}, year = {2016}, abstract = {Numerous reports of relatively rapid climate changes over the past century make a clear case of the impact of aerosols and clouds, identified as sources of largest uncertainty in climate projections. Earth's radiation balance is altered by aerosols depending on their size, morphology and chemical composition. Competing effects in the atmosphere can be further studied by investigating the evolution of aerosol microphysical properties, which are the focus of the present work. 
The aerosol size distribution, the refractive index, and the single scattering albedo are commonly used such properties linked to aerosol type, and radiative forcing. Highly advanced lidars (light detection and ranging) have reduced aerosol monitoring and optical profiling into a routine process. Lidar data have been widely used to retrieve the size distribution through the inversion of the so-called Lorenz-Mie model (LMM). This model offers a reasonable treatment for spherically approximated particles, it no longer provides, though, a viable description for other naturally occurring arbitrarily shaped particles, such as dust particles. On the other hand, non-spherical geometries as simple as spheroids reproduce certain optical properties with enhanced accuracy. Motivated by this, we adapt the LMM to accommodate the spheroid-particle approximation introducing the notion of a two-dimensional (2D) shape-size distribution. Inverting only a few optical data points to retrieve the shape-size distribution is classified as a non-linear ill-posed problem. A brief mathematical analysis is presented which reveals the inherent tendency towards highly oscillatory solutions, explores the available options for a generalized solution through regularization methods and quantifies the ill-posedness. The latter will improve our understanding on the main cause fomenting instability in the produced solution spaces. The new approach facilitates the exploitation of additional lidar data points from depolarization measurements, associated with particle non-sphericity. However, the generalization of LMM vastly increases the complexity of the problem. The underlying theory for the calculation of the involved optical cross sections (T-matrix theory) is computationally so costly, that would limit a retrieval analysis to an unpractical point. 
Moreover the discretization of the model equation by a 2D collocation method, proposed in this work, involves double integrations which are further time consuming. We overcome these difficulties by using precalculated databases and a sophisticated retrieval software (SphInX: Spheroidal Inversion eXperiments) especially developed for our purposes, capable of performing multiple-dataset inversions and producing a wide range of microphysical retrieval outputs. Hybrid regularization in conjunction with minimization processes is used as a basis for our algorithms. Synthetic data retrievals are performed simulating various atmospheric scenarios in order to test the efficiency of different regularization methods. The gap in contemporary literature in providing full sets of uncertainties in a wide variety of numerical instances is of major concern here. For this, the most appropriate methods are identified through a thorough analysis on an overall-behavior basis regarding accuracy and stability. The general trend of the initial size distributions is captured in our numerical experiments and the reconstruction quality depends on data error level. Moreover, the need for more or less depolarization points is explored for the first time from the point of view of the microphysical retrieval. Finally, our approach is tested in various measurement cases giving further insight for future algorithm improvements.}, language = {en} } @unpublished{RoellyVallois2016, author = {Roelly, Sylvie and Vallois, Pierre}, title = {Convoluted Brownian motion}, volume = {5}, number = {9}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-96339}, pages = {37}, year = {2016}, abstract = {In this paper we analyse semimartingale properties of a class of Gaussian periodic processes, called convoluted Brownian motions, obtained by convolution between a deterministic function and a Brownian motion. 
A classical example in this class is the periodic Ornstein-Uhlenbeck process. We compute their characteristics and show that in general, they are neither Markovian nor satisfy a time-Markov field property. Nevertheless, by enlargement of filtration and/or addition of a one-dimensional component, one can in some case recover the Markovianity. We treat exhaustively the case of the bidimensional trigonometric convoluted Brownian motion and the higher-dimensional monomial convoluted Brownian motion.}, language = {en} } @article{PornsawadBoeckmann2016, author = {Pornsawad, Pornsarp and B{\"o}ckmann, Christine}, title = {Modified Iterative Runge-Kutta-Type Methods for Nonlinear Ill-Posed Problems}, series = {Numerical functional analysis and optimization : an international journal of rapid publication}, volume = {37}, journal = {Numerical functional analysis and optimization : an international journal of rapid publication}, publisher = {Wiley-VCH}, address = {Philadelphia}, issn = {0163-0563}, doi = {10.1080/01630563.2016.1219744}, pages = {1562 -- 1589}, year = {2016}, abstract = {This work is devoted to the convergence analysis of a modified Runge-Kutta-type iterative regularization method for solving nonlinear ill-posed problems under a priori and a posteriori stopping rules. The convergence rate results of the proposed method can be obtained under a Holder-type sourcewise condition if the Frechet derivative is properly scaled and locally Lipschitz continuous. 
Numerical results are achieved by using the Levenberg-Marquardt, Lobatto, and Radau methods.}, language = {en} } @phdthesis{Pirhayati2016, author = {Pirhayati, Mohammad}, title = {Edge operators and boundary value problems}, school = {Universit{\"a}t Potsdam}, pages = {VI, 119}, year = {2016}, language = {en} } @book{PikovskijPoliti2016, author = {Pikovskij, Arkadij and Politi, Antonio}, title = {Lyapunov Exponents}, publisher = {Cambridge University Press}, address = {Cambridge}, isbn = {978-1-107-03042-8}, pages = {XII, 285}, year = {2016}, abstract = {Lyapunov exponents lie at the heart of chaos theory, and are widely used in studies of complex dynamics. Utilising a pragmatic, physical approach, this self-contained book provides a comprehensive description of the concept. Beginning with the basic properties and numerical methods, it then guides readers through to the most recent advances in applications to complex systems. Practical algorithms are thoroughly reviewed and their performance is discussed, while a broad set of examples illustrate the wide range of potential applications. The description of various numerical and analytical techniques for the computation of Lyapunov exponents offers an extensive array of tools for the characterization of phenomena such as synchronization, weak and global chaos in low and high-dimensional set-ups, and localization. 
This text equips readers with all the investigative expertise needed to fully explore the dynamical properties of complex systems, making it ideal for both graduate students and experienced researchers.}, language = {en} } @article{NehringRaflerZessin2016, author = {Nehring, Benjamin and Rafler, Mathias and Zessin, Hans}, title = {Splitting-characterizations of the Papangelou process}, series = {Mathematische Nachrichten}, volume = {289}, journal = {Mathematische Nachrichten}, publisher = {Wiley-VCH}, address = {Weinheim}, issn = {0025-584X}, doi = {10.1002/mana.201400384}, pages = {85 -- 96}, year = {2016}, abstract = {For point processes we establish a link between integration-by-parts-and splitting-formulas which can also be considered as integration-by-parts-formulas of a new type. First we characterize finite Papangelou processes in terms of their splitting kernels. The main part then consists in extending these results to the case of infinitely extended Papangelou and, in particular, Polya and Gibbs processes. (C) 2015 WILEY-VCH Verlag GmbH \& Co. 
KGaA, Weinheim}, language = {en} } @article{MuellerBoeckmannKolgotinetal.2016, author = {M{\"u}ller, Detlef and B{\"o}ckmann, Christine and Kolgotin, Alexei and Schneidenbach, Lars and Chemyakin, Eduard and Rosemann, Julia and Znak, Pavel and Romanov, Anton}, title = {Microphysical particle properties derived from inversion algorithms developed in the framework of EARLINET}, series = {Atmospheric measurement techniques : an interactive open access journal of the European Geosciences Union}, volume = {9}, journal = {Atmospheric measurement techniques : an interactive open access journal of the European Geosciences Union}, publisher = {Copernicus}, address = {G{\"o}ttingen}, issn = {1867-1381}, doi = {10.5194/amt-9-5007-2016}, pages = {5007 -- 5035}, year = {2016}, abstract = {We present a summary on the current status of two inversion algorithms that are used in EARLINET (European Aerosol Research Lidar Network) for the inversion of data collected with EARLINET multiwavelength Raman lidars. These instruments measure backscatter coefficients at 355, 532, and 1064 nm, and extinction coefficients at 355 and 532 nm. Development of these two algorithms started in 2000 when EARLINET was founded. The algorithms are based on a manually controlled inversion of optical data which allows for detailed sensitivity studies. The algorithms allow us to derive particle effective radius as well as volume and surface area concentration with comparably high confidence. The retrieval of the real and imaginary parts of the complex refractive index still is a challenge in view of the accuracy required for these parameters in climate change studies in which light absorption needs to be known with high accuracy. It is an extreme challenge to retrieve the real part with an accuracy better than 0.05 and the imaginary part with accuracy better than 0.005-0.1 or +/- 50 \%. 
Single-scattering albedo can be computed from the retrieved microphysical parameters and allows us to categorize aerosols into high-and low-absorbing aerosols. On the basis of a few exemplary simulations with synthetic optical data we discuss the current status of these manually operated algorithms, the potentially achievable accuracy of data products, and the goals for future work. One algorithm was used with the purpose of testing how well microphysical parameters can be derived if the real part of the complex refractive index is known to at least 0.05 or 0.1. The other algorithm was used to find out how well microphysical parameters can be derived if this constraint for the real part is not applied. The optical data used in our study cover a range of Angstrom exponents and extinction-to-backscatter (lidar) ratios that are found from lidar measurements of various aerosol types. We also tested aerosol scenarios that are considered highly unlikely, e.g. the lidar ratios fall outside the commonly accepted range of values measured with Raman lidar, even though the underlying microphysical particle properties are not uncommon. The goal of this part of the study is to test the robustness of the algorithms towards their ability to identify aerosol types that have not been measured so far, but cannot be ruled out based on our current knowledge of aerosol physics. We computed the optical data from monomodal logarithmic particle size distributions, i.e. we explicitly excluded the more complicated case of bimodal particle size distributions which is a topic of ongoing research work. Another constraint is that we only considered particles of spherical shape in our simulations. We considered particle radii as large as 7-10 mu m in our simulations where the Potsdam algorithm is limited to the lower value. We considered optical-data errors of 15\% in the simulation studies. 
We target 50\% uncertainty as a reasonable threshold for our data products, though we attempt to obtain data products with less uncertainty in future work.}, language = {en} } @article{MiethKloftRodriguezetal.2016, author = {Mieth, Bettina and Kloft, Marius and Rodriguez, Juan Antonio and Sonnenburg, Soren and Vobruba, Robin and Morcillo-Suarez, Carlos and Farre, Xavier and Marigorta, Urko M. and Fehr, Ernst and Dickhaus, Thorsten and Blanchard, Gilles and Schunk, Daniel and Navarro, Arcadi and M{\"u}ller, Klaus-Robert}, title = {Combining Multiple Hypothesis Testing with Machine Learning Increases the Statistical Power of Genome-wide Association Studies}, series = {Scientific reports}, volume = {6}, journal = {Scientific reports}, publisher = {Nature Publ. Group}, address = {London}, issn = {2045-2322}, doi = {10.1038/srep36671}, pages = {14}, year = {2016}, abstract = {The standard approach to the analysis of genome-wide association studies (GWAS) is based on testing each position in the genome individually for statistical significance of its association with the phenotype under investigation. To improve the analysis of GWAS, we propose a combination of machine learning and statistical testing that takes correlation structures within the set of SNPs under investigation in a mathematically well-controlled manner into account. The novel two-step algorithm, COMBI, first trains a support vector machine to determine a subset of candidate SNPs and then performs hypothesis tests for these SNPs together with an adequate threshold correction. Applying COMBI to data from a WTCCC study (2007) and measuring performance as replication by independent GWAS published within the 2008-2015 period, we show that our method outperforms ordinary raw p-value thresholding as well as other state-of-the-art methods. COMBI presents higher power and precision than the examined alternatives while yielding fewer false (i.e. non-replicated) and more true (i.e. 
replicated) discoveries when its results are validated on later GWAS studies. More than 80\% of the discoveries made by COMBI upon WTCCC data have been validated by independent studies. Implementations of the COMBI method are available as a part of the GWASpi toolbox 2.0.}, language = {en} } @unpublished{MeraTarkhanov2016, author = {Mera, Azal and Tarkhanov, Nikolai Nikolaevich}, title = {The Neumann problem after Spencer}, volume = {5}, number = {6}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-90631}, pages = {21}, year = {2016}, abstract = {When trying to extend the Hodge theory for elliptic complexes on compact closed manifolds to the case of compact manifolds with boundary one is led to a boundary value problem for the Laplacian of the complex which is usually referred to as Neumann problem. We study the Neumann problem for a larger class of sequences of differential operators on a compact manifold with boundary. These are sequences of small curvature, i.e., bearing the property that the composition of any two neighbouring operators has order less than two.}, language = {en} } @article{Menne2016, author = {Menne, Ulrich}, title = {Weakly Differentiable Functions on Varifolds}, series = {Indiana University mathematics journal}, volume = {65}, journal = {Indiana University mathematics journal}, publisher = {Indiana University, Department of Mathematics}, address = {Bloomington}, issn = {0022-2518}, doi = {10.1512/iumj.2016.65.5829}, pages = {977 -- 1088}, year = {2016}, abstract = {The present paper is intended to provide the basis for the study of weakly differentiable functions on rectifiable varifolds with locally bounded first variation. The concept proposed here is defined by means of integration-by-parts identities for certain compositions with smooth functions. 
In this class, the idea of zero boundary values is realised using the relative perimeter of superlevel sets. Results include a variety of Sobolev Poincar{\'e}-type embeddings, embeddings into spaces of continuous and sometimes H{\"o}lder-continuous functions, and point wise differentiability results both of approximate and integral type as well as coarea formulae. As a prerequisite for this study, decomposition properties of such varifolds and a relative isoperimetric inequality are established. Both involve a concept of distributional boundary of a set introduced for this purpose. As applications, the finiteness of the geodesic distance associated with varifolds with suitable summability of the mean curvature and a characterisation of curvature varifolds are obtained.}, language = {en} } @article{Menne2016a, author = {Menne, Ulrich}, title = {Sobolev functions on varifolds}, series = {Proceedings of the London Mathematical Society}, volume = {113}, journal = {Proceedings of the London Mathematical Society}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0024-6115}, doi = {10.1112/plms/pdw023}, pages = {725 -- 774}, year = {2016}, abstract = {This paper introduces first-order Sobolev spaces on certain rectifiable varifolds. These complete locally convex spaces are contained in the generally non-linear class of generalised weakly differentiable functions and share key functional analytic properties with their Euclidean counterparts. Assuming the varifold to satisfy a uniform lower density bound and a dimensionally critical summability condition on its mean curvature, the following statements hold. Firstly, continuous and compact embeddings of Sobolev spaces into Lebesgue spaces and spaces of continuous functions are available. Secondly, the geodesic distance associated to the varifold is a continuous, not necessarily H{\"o}lder continuous Sobolev function with bounded derivative. 
Thirdly, if the varifold additionally has bounded mean curvature and finite measure, then the present Sobolev spaces are isomorphic to those previously available for finite Radon measures yielding many new results for those classes as well. Suitable versions of the embedding results obtained for Sobolev functions hold in the larger class of generalised weakly differentiable functions.}, language = {en} } @phdthesis{Mazzonetto2016, author = {Mazzonetto, Sara}, title = {On the exact simulation of (skew) Brownian diffusions with discontinuous drift}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-102399}, school = {Universit{\"a}t Potsdam}, pages = {ii, 100}, year = {2016}, abstract = {This thesis is focused on the study and the exact simulation of two classes of real-valued Brownian diffusions: multi-skew Brownian motions with constant drift and Brownian diffusions whose drift admits a finite number of jumps. The skew Brownian motion was introduced in the sixties by It{\^o} and McKean, who constructed it from the reflected Brownian motion, flipping its excursions from the origin with a given probability. Such a process behaves as the original one except at the point 0, which plays the role of a semipermeable barrier. More generally, a skew diffusion with several semipermeable barriers, called multi-skew diffusion, is a diffusion everywhere except when it reaches one of the barriers, where it is partially reflected with a probability depending on that particular barrier. Clearly, a multi-skew diffusion can be characterized either as solution of a stochastic differential equation involving weighted local times (these terms providing the semi-permeability) or by its infinitesimal generator as Markov process. In this thesis we first obtain a contour integral representation for the transition semigroup of the multiskew Brownian motion with constant drift, based on a fine analysis of its complex properties. 
Thanks to this representation we write explicitly the transition densities of the two-skew Brownian motion with constant drift as an infinite series involving, in particular, Gaussian functions and their tails. Then we propose a new useful application of a generalization of the known rejection sampling method. Recall that this basic algorithm allows to sample from a density as soon as one finds an - easy to sample - instrumental density verifying that the ratio between the goal and the instrumental densities is a bounded function. The generalized rejection sampling method allows to sample exactly from densities for which indeed only an approximation is known. The originality of the algorithm lies in the fact that one finally samples directly from the law without any approximation, except the machine's. As an application, we sample from the transition density of the two-skew Brownian motion with or without constant drift. The instrumental density is the transition density of the Brownian motion with constant drift, and we provide an useful uniform bound for the ratio of the densities. We also present numerical simulations to study the efficiency of the algorithm. The second aim of this thesis is to develop an exact simulation algorithm for a Brownian diffusion whose drift admits several jumps. In the literature, so far only the case of a continuous drift (resp. of a drift with one finite jump) was treated. The theoretical method we give allows to deal with any finite number of discontinuities. Then we focus on the case of two jumps, using the transition densities of the two-skew Brownian motion obtained before. 
Various examples are presented and the efficiency of our approach is discussed.}, language = {en} } @article{LyuSchulze2016, author = {Lyu, Xiaojing and Schulze, Bert-Wolfgang}, title = {Mellin Operators in the Edge Calculus}, series = {Complex analysis and operator theory}, volume = {10}, journal = {Complex analysis and operator theory}, publisher = {Springer}, address = {Basel}, issn = {1661-8254}, doi = {10.1007/s11785-015-0511-6}, pages = {965 -- 1000}, year = {2016}, abstract = {A manifold M with smooth edge Y is locally near Y modelled on X-Delta x Omega for a cone X-Delta := ( (R) over bar (+) x X)/({0} x X) where Xis a smooth manifold and Omega subset of R-q an open set corresponding to a chart on Y. Compared with pseudo-differential algebras, based on other quantizations of edge-degenerate symbols, we extend the approach with Mellin representations on the r half-axis up to r = infinity, the conical exit of X-boolean AND = R+ x X (sic) (r, x) at infinity. The alternative description of the edge calculus is useful for pseudo-differential structures on manifolds with higher singularities.}, language = {en} } @phdthesis{Lyu2016, author = {Lyu, Xiaojing}, title = {Operators on singular manifolds}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-103643}, school = {Universit{\"a}t Potsdam}, pages = {117}, year = {2016}, abstract = {We study the interplay between analysis on manifolds with singularities and complex analysis and develop new structures of operators based on the Mellin transform and tools for iterating the calculus for higher singularities. We refer to the idea of interpreting boundary value problems (BVPs) in terms of pseudo-differential operators with a principal symbolic hierarchy, taking into account that BVPs are a source of cone and edge operator algebras. The respective cone and edge pseudo-differential algebras in turn are the starting point of higher corner theories. 
In addition there are deep relationships between corner operators and complex analysis. This will be illustrated by the Mellin symbolic calculus.}, language = {en} } @article{LyTarkhanov2016, author = {Ly, Ibrahim and Tarkhanov, Nikolai Nikolaevich}, title = {A {Rad{\'o}} theorem for p-harmonic functions}, series = {Bolet{\'i}n de la Sociedad Matem{\'a}tica Mexicana}, volume = {22}, journal = {Bolet{\'i}n de la Sociedad Matem{\'a}tica Mexicana}, publisher = {Springer}, address = {Basel}, issn = {1405-213X}, doi = {10.1007/s40590-016-0109-7}, pages = {461 -- 472}, year = {2016}, abstract = {Let A be a nonlinear differential operator on an open set X subset of R-n and S a closed subset of X. Given a class F of functions in X, the set S is said to be removable for F relative to A if any weak solution of A(u) = 0 in XS of class F satisfies this equation weakly in all of X. For the most extensively studied classes F, we show conditions on S which guarantee that S is removable for F relative to A.}, language = {en} } @phdthesis{Ludewig2016, author = {Ludewig, Matthias}, title = {Path integrals on manifolds with boundary and their asymptotic expansions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94387}, school = {Universit{\"a}t Potsdam}, pages = {146}, year = {2016}, abstract = {It is "scientific folklore" coming from physical heuristics that solutions to the heat equation on a Riemannian manifold can be represented by a path integral. However, the problem with such path integrals is that they are notoriously ill-defined. One way to make them rigorous (which is often applied in physics) is finite-dimensional approximation, or time-slicing approximation: Given a fine partition of the time interval into small subintervals, one restricts the integration domain to paths that are geodesic on each subinterval of the partition. 
These finite-dimensional integrals are well-defined, and the (infinite-dimensional) path integral then is defined as the limit of these (suitably normalized) integrals, as the mesh of the partition tends to zero. In this thesis, we show that indeed, solutions to the heat equation on a general compact Riemannian manifold with boundary are given by such time-slicing path integrals. Here we consider the heat equation for general Laplace type operators, acting on sections of a vector bundle. We also obtain similar results for the heat kernel, although in this case, one has to restrict to metrics satisfying a certain smoothness condition at the boundary. One of the most important manipulations one would like to do with path integrals is taking their asymptotic expansions; in the case of the heat kernel, this is the short time asymptotic expansion. In order to use time-slicing approximation here, one needs the approximation to be uniform in the time parameter. We show that this is possible by giving strong error estimates. Finally, we apply these results to obtain short time asymptotic expansions of the heat kernel also in degenerate cases (i.e. at the cut locus). Furthermore, our results allow to relate the asymptotic expansion of the heat kernel to a formal asymptotic expansion of the infinite-dimensional path integral, which gives relations between geometric quantities on the manifold and on the loop space. In particular, we show that the lowest order term in the asymptotic expansion of the heat kernel is essentially given by the Fredholm determinant of the Hessian of the energy functional. 
We also investigate how this relates to the zeta-regularized determinant of the Jacobi operator along minimizing geodesics.}, language = {en} } @article{LevyJimenezPaycha2016, author = {Levy, Cyril and Jimenez, Carolina Neira and Paycha, Sylvie}, title = {THE CANONICAL TRACE AND THE NONCOMMUTATIVE RESIDUE ON THE NONCOMMUTATIVE TORUS}, series = {Transactions of the American Mathematical Society}, volume = {368}, journal = {Transactions of the American Mathematical Society}, publisher = {American Mathematical Soc.}, address = {Providence}, issn = {0002-9947}, doi = {10.1090/tran/6369}, pages = {1051 -- 1095}, year = {2016}, abstract = {Using a global symbol calculus for pseudodifferential operators on tori, we build a canonical trace on classical pseudodifferential operators on noncommutative tori in terms of a canonical discrete sum on the underlying toroidal symbols. We characterise the canonical trace on operators on the noncommutative torus as well as its underlying canonical discrete sum on symbols of fixed (resp. any) noninteger order. On the grounds of this uniqueness result, we prove that in the commutative setup, this canonical trace on the noncommutative torus reduces to Kontsevich and Vishik's canonical trace which is thereby identified with a discrete sum. A similar characterisation for the noncommutative residue on noncommutative tori as the unique trace which vanishes on trace-class operators generalises Fathizadeh and Wong's characterisation in so far as it includes the case of operators of fixed integer order. By means of the canonical trace, we derive defect formulae for regularized traces. 
The conformal invariance of the \$ \zeta \$-function at zero of the Laplacian on the noncommutative torus is then a straightforward consequence.}, language = {en} } @article{Kroencke2016, author = {Kr{\"o}ncke, Klaus}, title = {Rigidity and Infinitesimal Deformability of Ricci Solitons}, series = {The journal of geometric analysis}, volume = {26}, journal = {The journal of geometric analysis}, publisher = {Springer}, address = {New York}, issn = {1050-6926}, doi = {10.1007/s12220-015-9608-4}, pages = {1795 -- 1807}, year = {2016}, abstract = {In this paper, an obstruction against the integrability of certain infinitesimal solitonic deformations is given. Using this obstruction, we show that the complex projective spaces of even complex dimension are rigid as Ricci solitons although they have infinitesimal solitonic deformations.}, language = {en} } @article{KretschmerCoumouDongesetal.2016, author = {Kretschmer, Marlene and Coumou, Dim and Donges, Jonathan Friedemann and Runge, Jakob}, title = {Using Causal Effect Networks to Analyze Different Arctic Drivers of Midlatitude Winter Circulation}, series = {Journal of climate}, volume = {29}, journal = {Journal of climate}, publisher = {American Meteorological Soc.}, address = {Boston}, issn = {0894-8755}, doi = {10.1175/JCLI-D-15-0654.1}, pages = {4069 -- 4081}, year = {2016}, abstract = {In recent years, the Northern Hemisphere midlatitudes have suffered from severe winters like the extreme 2012/13 winter in the eastern United States. These cold spells were linked to a meandering upper-tropospheric jet stream pattern and a negative Arctic Oscillation index (AO). However, the nature of the drivers behind these circulation patterns remains controversial. Various studies have proposed different mechanisms related to changes in the Arctic, most of them related to a reduction in sea ice concentrations or increasing Eurasian snow cover. 
Here, a novel type of time series analysis, called causal effect networks (CEN), based on graphical models is introduced to assess causal relationships and their time delays between different processes. The effect of different Arctic actors on winter circulation on weekly to monthly time scales is studied, and robust network patterns are found. Barents and Kara sea ice concentrations are detected to be important external drivers of the midlatitude circulation, influencing winter AO via tropospheric mechanisms and through processes involving the stratosphere. Eurasia snow cover is also detected to have a causal effect on sea level pressure in Asia, but its exact role on AO remains unclear. The CEN approach presented in this study overcomes some difficulties in interpreting correlation analyses, complements model experiments for testing hypotheses involving teleconnections, and can be used to assess their validity. The findings confirm that sea ice concentrations in autumn in the Barents and Kara Seas are an important driver of winter circulation in the midlatitudes.}, language = {en} } @article{KortenkampMonaghanTrouche2016, author = {Kortenkamp, Ulrich and Monaghan, John and Trouche, Luc}, title = {Jonathan M Borwein (1951-2016): exploring, experiencing and experimenting in mathematics - an inspiring journey in mathematics}, series = {Educational studies in mathematics : an international journal}, volume = {93}, journal = {Educational studies in mathematics : an international journal}, publisher = {Springer}, address = {Dordrecht}, issn = {0013-1954}, doi = {10.1007/s10649-016-9729-0}, pages = {131 -- 136}, year = {2016}, language = {en} } @article{Kollosche2016, author = {Kollosche, David}, title = {Criticising with Foucault: towards a guiding framework for socio-political studies in mathematics education}, series = {Educational studies in mathematics : an international journal}, volume = {91}, journal = {Educational studies in mathematics : an international 
journal}, publisher = {Springer}, address = {Dordrecht}, issn = {0013-1954}, doi = {10.1007/s10649-015-9648-5}, pages = {73 -- 86}, year = {2016}, abstract = {Socio-political studies in mathematics education often touch complex fields of interaction between education, mathematics and the political. In this paper I present a Foucault-based framework for socio-political studies in mathematics education which may guide research in that area. In order to show the potential of such a framework, I discuss the potential and limits of Marxian ideology critique, present existing Foucault-based research on socio-political aspects of mathematics education, develop my framework and show its use in an outline of a study on socio-political aspects of calculation in the mathematics classroom.}, language = {en} } @article{KleinRosenberger2016, author = {Klein, Markus and Rosenberger, Elke}, title = {Agmon estimates for the difference of exact and approximate Dirichlet eigenfunctions for difference operators}, series = {Asymptotic analysis}, volume = {97}, journal = {Asymptotic analysis}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0921-7134}, doi = {10.3233/ASY-151343}, pages = {61 -- 89}, year = {2016}, abstract = {We analyze a general class of difference operators H-epsilon = T-epsilon + V-epsilon on l(2)(((epsilon)Z)(d)), where V-epsilon is a multi-well potential and epsilon is a small parameter. We construct approximate eigenfunctions in neighbourhoods of the different wells and give weighted l(2)-estimates for the difference of these and the exact eigenfunctions of the associated Dirichlet-operators.}, language = {en} } @article{KistnerVollmeyerBurnsetal.2016, author = {Kistner, Saskia and Vollmeyer, Regina and Burns, Bruce D. 
and Kortenkamp, Ulrich}, title = {Model development in scientific discovery learning with a computer-based physics task}, series = {Computers in human behavior}, volume = {59}, journal = {Computers in human behavior}, publisher = {Elsevier}, address = {Oxford}, issn = {0747-5632}, doi = {10.1016/j.chb.2016.02.041}, pages = {446 -- 455}, year = {2016}, abstract = {Based on theories of scientific discovery learning (SDL) and conceptual change, this study explores students' preconceptions in the domain of torques in physics and the development of these conceptions while learning with a computer-based SDL task. As a framework we used a three-space theory of SDL and focused on model space, which is supposed to contain the current conceptualization/model of the learning domain, and on its change through hypothesis testing and experimenting. Three questions were addressed: (1) What are students' preconceptions of torques before learning about this domain? To do this a multiple-choice test for assessing students' models of torques was developed and given to secondary school students (N = 47) who learned about torques using computer simulations. (2) How do students' models of torques develop during SDL? Working with simulations led to replacement of some misconceptions with physically correct conceptions. (3) Are there differential patterns of model development and if so, how do they relate to students' use of the simulations? By analyzing individual differences in model development, we found that an intensive use of the simulations was associated with the acquisition of correct conceptions. Thus, the three-space theory provided a useful framework for understanding conceptual change in SDL.}, language = {en} } @article{KistnerBurnsVollmeyeretal.2016, author = {Kistner, Saskia and Burns, Bruce D. 
and Vollmeyer, Regina and Kortenkamp, Ulrich}, title = {The importance of understanding: Model space moderates goal specificity effects}, series = {The quarterly journal of experimental psychology}, volume = {69}, journal = {The quarterly journal of experimental psychology}, publisher = {Taylor \& Francis}, address = {Abingdon}, issn = {1747-0218}, doi = {10.1080/17470218.2015.1076865}, pages = {1179 -- 1196}, year = {2016}, abstract = {The three-space theory of problem solving predicts that the quality of a learner's model and the goal specificity of a task interact on knowledge acquisition. In Experiment 1 participants used a computer simulation of a lever system to learn about torques. They either had to test hypotheses (nonspecific goal), or to produce given values for variables (specific goal). In the good- but not in the poor-model condition they saw torque depicted as an area. Results revealed the predicted interaction. A nonspecific goal only resulted in better learning when a good model of torques was provided. In Experiment 2 participants learned to manipulate the inputs of a system to control its outputs. A nonspecific goal to explore the system helped performance when compared to a specific goal to reach certain values when participants were given a good model, but not when given a poor model that suggested the wrong hypothesis space. Our findings support the three-space theory. 
They emphasize the importance of understanding for problem solving and stress the need to study underlying processes.}, language = {en} } @article{KellerMuenchPogorzelski2016, author = {Keller, Matthias and M{\"u}nch, Florentin and Pogorzelski, Felix}, title = {Geometry and spectrum of rapidly branching graphs}, series = {Mathematische Nachrichten}, volume = {289}, journal = {Mathematische Nachrichten}, publisher = {Wiley-VCH}, address = {Weinheim}, issn = {0025-584X}, doi = {10.1002/mana.201400349}, pages = {1636 -- 1647}, year = {2016}, abstract = {We study graphs whose vertex degree tends to infinity and which are, therefore, called rapidly branching. We prove spectral estimates, discreteness of spectrum, first order eigenvalue and Weyl asymptotics solely in terms of the vertex degree growth. The underlying techniques are estimates on the isoperimetric constant. Furthermore, we give lower volume growth bounds and we provide a new criterion for stochastic incompleteness. (C) 2016 WILEY-VCH Verlag GmbH \& Co. KGaA, Weinheim}, language = {en} } @article{KellerMugnolo2016, author = {Keller, Matthias and Mugnolo, Delio}, title = {General Cheeger inequalities for p-Laplacians on graphs}, series = {Nonlinear Analysis : theory, methods \& applications}, volume = {147}, journal = {Nonlinear Analysis : theory, methods \& applications}, publisher = {Elsevier}, address = {Oxford}, issn = {0362-546X}, doi = {10.1016/j.na.2016.07.011}, pages = {80 -- 95}, year = {2016}, abstract = {We prove Cheeger inequalities for p-Laplacians on finite and infinite weighted graphs. Unlike in previous works, we do not impose boundedness of the vertex degree, nor do we restrict ourselves to the normalized Laplacian and, more generally, we do not impose any boundedness assumption on the geometry. This is achieved by a novel definition of the measure of the boundary which uses the idea of intrinsic metrics. 
For the non-normalized case, our bounds on the spectral gap of p-Laplacians are already significantly better for finite graphs and for infinite graphs they yield non-trivial bounds even in the case of unbounded vertex degree. We, furthermore, give upper bounds by the Cheeger constant and by the exponential volume growth of distance balls. (C) 2016 Elsevier Ltd. All rights reserved.}, language = {en} } @article{KellerLenzMuenchetal.2016, author = {Keller, Matthias and Lenz, Daniel and M{\"u}nch, Florentin and Schmidt, Marcel and Telcs, Andras}, title = {Note on short-time behavior of semigroups associated to self-adjoint operators}, series = {Bulletin of the London Mathematical Society}, volume = {48}, journal = {Bulletin of the London Mathematical Society}, publisher = {Oxford Univ. Press}, address = {Oxford}, issn = {0024-6093}, doi = {10.1112/blms/bdw054}, pages = {935 -- 944}, year = {2016}, abstract = {We present a simple observation showing that the heat kernel on a locally finite graph behaves for short times t roughly like t(d), where d is the combinatorial distance. This is very different from the classical Varadhan-type behavior on manifolds. 
Moreover, this also gives that short-time behavior and global behavior of the heat kernel are governed by two different metrics whenever the degree of the graph is not uniformly bounded.}, language = {en} } @article{HolschneiderLesurMauerbergeretal.2016, author = {Holschneider, Matthias and Lesur, Vincent and Mauerberger, Stefan and Baerenzung, Julien}, title = {Correlation-based modeling and separation of geomagnetic field components}, series = {Journal of geophysical research : Solid earth}, volume = {121}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2015JB012629}, pages = {3142 -- 3160}, year = {2016}, abstract = {We introduce a technique for the modeling and separation of geomagnetic field components that is based on an analysis of their correlation structures alone. The inversion is based on a Bayesian formulation, which allows the computation of uncertainties. The technique allows the incorporation of complex measurement geometries like observatory data in a simple way. We show how our technique is linked to other well-known inversion techniques. A case study based on observational data is given.}, language = {en} } @article{HermannHumbert2016, author = {Hermann, Andreas and Humbert, Emmanuel}, title = {About the mass of certain second order elliptic operators}, series = {Advances in mathematics}, volume = {294}, journal = {Advances in mathematics}, publisher = {Elsevier}, address = {San Diego}, issn = {0001-8708}, doi = {10.1016/j.aim.2016.03.008}, pages = {596 -- 633}, year = {2016}, abstract = {Let (M, g) be a closed Riemannian manifold of dimension n >= 3 and let f is an element of C-infinity (M), such that the operator P-f := Delta g + f is positive. If g is flat near some point p and f vanishes around p, we can define the mass of P1 as the constant term in the expansion of the Green function of P-f at p. 
In this paper, we establish many results on the mass of such operators. In particular, if f := n-2/n(n-1)s(g), i.e. if P-f is the Yamabe operator, we show the following result: assume that there exists a closed simply connected non-spin manifold M such that the mass is non-negative for every metric g as above on M, then the mass is non-negative for every such metric on every closed manifold of the same dimension as M. (C) 2016 Elsevier Inc. All rights reserved.}, language = {en} } @article{HedayatMahmoudiSchulze2016, author = {Hedayat Mahmoudi, Mahdi and Schulze, Bert-Wolfgang}, title = {Corner boundary value problems}, series = {Asian-European journal of mathematics}, volume = {10}, journal = {Asian-European journal of mathematics}, number = {1}, publisher = {World Scientific}, address = {Singapore}, issn = {1793-5571}, doi = {10.1142/S1793557117500541}, pages = {45}, year = {2016}, abstract = {The paper develops some crucial steps in extending the first-order cone or edge calculus to higher singularity orders. We focus here on order 2, but the ideas are motivated by an iterative approach for higher singularities.}, language = {en} } @article{HackHanischSchenkel2016, author = {Hack, Thomas-Paul and Hanisch, Florian and Schenkel, Alexander}, title = {Supergeometry in Locally Covariant Quantum Field Theory}, series = {Communications in mathematical physics}, volume = {342}, journal = {Communications in mathematical physics}, publisher = {Springer}, address = {New York}, issn = {0010-3616}, doi = {10.1007/s00220-015-2516-4}, pages = {615 -- 673}, year = {2016}, abstract = {In this paper we analyze supergeometric locally covariant quantum field theories. 
We develop suitable categories SLoc of super-Cartan supermanifolds, which generalize Lorentz manifolds in ordinary quantum field theory, and show that, starting from a few representation theoretic and geometric data, one can construct a functor U : SLoc -> S*Alg to the category of super-*-algebras, which can be interpreted as a non-interacting super-quantum field theory. This construction turns out to disregard supersymmetry transformations as the morphism sets in the above categories are too small. We then solve this problem by using techniques from enriched category theory, which allows us to replace the morphism sets by suitable morphism supersets that contain supersymmetry transformations as their higher superpoints. We construct superquantum field theories in terms of enriched functors eU : eSLoc -> eS*Alg between the enriched categories and show that supersymmetry transformations are appropriately described within the enriched framework. As examples we analyze the superparticle in 1|1-dimensions and the free Wess-Zumino model in 3|2-dimensions.}, language = {en} } @article{GregoryCotterReich2016, author = {Gregory, A. and Cotter, C. J. and Reich, Sebastian}, title = {MULTILEVEL ENSEMBLE TRANSFORM PARTICLE FILTERING}, series = {SIAM journal on scientific computing}, volume = {38}, journal = {SIAM journal on scientific computing}, publisher = {Society for Industrial and Applied Mathematics}, address = {Philadelphia}, issn = {1064-8275}, doi = {10.1137/15M1038232}, pages = {A1317 -- A1338}, year = {2016}, abstract = {This paper extends the multilevel Monte Carlo variance reduction technique to nonlinear filtering. In particular, multilevel Monte Carlo is applied to a certain variant of the particle filter, the ensemble transform particle filter (ETPF). A key aspect is the use of optimal transport methods to re-establish correlation between coarse and fine ensembles after resampling; this controls the variance of the estimator. 
Numerical examples present a proof of concept of the effectiveness of the proposed method, demonstrating significant computational cost reductions (relative to the single-level ETPF counterpart) in the propagation of ensembles.}, language = {en} } @phdthesis{Gopalakrishnan2016, author = {Gopalakrishnan, Sathej}, title = {Mathematical modelling of host-disease-drug interactions in HIV disease}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100100}, school = {Universit{\"a}t Potsdam}, pages = {121}, year = {2016}, abstract = {The human immunodeficiency virus (HIV) has resisted nearly three decades of efforts targeting a cure. Sustained suppression of the virus has remained a challenge, mainly due to the remarkable evolutionary adaptation that the virus exhibits by the accumulation of drug-resistant mutations in its genome. Current therapeutic strategies aim at achieving and maintaining a low viral burden and typically involve multiple drugs. The choice of optimal combinations of these drugs is crucial, particularly in the background of treatment failure having occurred previously with certain other drugs. An understanding of the dynamics of viral mutant genotypes aids in the assessment of treatment failure with a certain drug combination, and exploring potential salvage treatment regimens. Mathematical models of viral dynamics have proved invaluable in understanding the viral life cycle and the impact of antiretroviral drugs. However, such models typically use simplified and coarse-grained mutation schemes, that curbs the extent of their application to drug-specific clinical mutation data, in order to assess potential next-line therapies. Statistical models of mutation accumulation have served well in dissecting mechanisms of resistance evolution by reconstructing mutation pathways under different drug-environments. While these models perform well in predicting treatment outcomes by statistical learning, they do not incorporate drug effect mechanistically. 
Additionally, due to an inherent lack of temporal features in such models, they are less informative on aspects such as predicting mutational abundance at treatment failure. This limits their application in analyzing the pharmacology of antiretroviral drugs, in particular, time-dependent characteristics of HIV therapy such as pharmacokinetics and pharmacodynamics, and also in understanding the impact of drug efficacy on mutation dynamics. In this thesis, we develop an integrated model of in vivo viral dynamics incorporating drug-specific mutation schemes learned from clinical data. Our combined modelling approach enables us to study the dynamics of different mutant genotypes and assess mutational abundance at virological failure. As an application of our model, we estimate in vivo fitness characteristics of viral mutants under different drug environments. Our approach also extends naturally to multiple-drug therapies. Further, we demonstrate the versatility of our model by showing how it can be modified to incorporate recently elucidated mechanisms of drug action including molecules that target host factors. Additionally, we address another important aspect in the clinical management of HIV disease, namely drug pharmacokinetics. It is clear that time-dependent changes in in vivo drug concentration could have an impact on the antiviral effect, and also influence decisions on dosing intervals. We present a framework that provides an integrated understanding of key characteristics of multiple-dosing regimens including drug accumulation ratios and half-lives, and then explore the impact of drug pharmacokinetics on viral suppression. 
Finally, parameter identifiability in such nonlinear models of viral dynamics is always a concern, and we investigate techniques that alleviate this issue in our setting.}, language = {en} } @unpublished{GairingHoegeleKosenkova2016, author = {Gairing, Jan and H{\"o}gele, Michael and Kosenkova, Tetiana}, title = {Transportation distances and noise sensitivity of multiplicative L{\'e}vy SDE with applications}, volume = {5}, number = {2}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-86693}, pages = {24}, year = {2016}, abstract = {This article assesses the distance between the laws of stochastic differential equations with multiplicative L{\'e}vy noise on path space in terms of their characteristics. The notion of transportation distance on the set of L{\'e}vy kernels introduced by Kosenkova and Kulik yields a natural and statistically tractable upper bound on the noise sensitivity. This extends recent results for the additive case in terms of coupling distances to the multiplicative case. The strength of this notion is shown in a statistical implementation for simulations and the example of a benchmark time series in paleoclimate.}, language = {en} } @article{FladHarutyunyanSchulze2016, author = {Flad, H. -J. and Harutyunyan, Gohar and Schulze, Bert-Wolfgang}, title = {Asymptotic parametrices of elliptic edge operators}, series = {Journal of pseudo-differential operators and applications}, volume = {7}, journal = {Journal of pseudo-differential operators and applications}, publisher = {Springer}, address = {Basel}, issn = {1662-9981}, doi = {10.1007/s11868-016-0159-7}, pages = {321 -- 363}, year = {2016}, abstract = {We study operators on singular manifolds, here of conical or edge type, and develop a new general approach of representing asymptotics of solutions to elliptic equations close to the singularities. 
We introduce asymptotic parametrices, using tools from cone and edge pseudo-differential algebras. Our structures are motivated by models of many-particle physics with singular Coulomb potentials that contribute higher order singularities in Euclidean space, determined by the number of particles.}, language = {en} } @unpublished{FedchenkoTarkhanov2016, author = {Fedchenko, Dmitry and Tarkhanov, Nikolai Nikolaevich}, title = {Boundary value problems for elliptic complexes}, volume = {5}, number = {3}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-86705}, pages = {12}, year = {2016}, abstract = {The aim of this paper is to bring together two areas which are of great importance for the study of overdetermined boundary value problems. The first area is homological algebra which is the main tool in constructing the formal theory of overdetermined problems. And the second area is the global calculus of pseudodifferential operators which allows one to develop explicit analysis.}, language = {en} } @article{EichmairMetzger2016, author = {Eichmair, Michael and Metzger, Jan}, title = {JENKINS-SERRIN-TYPE RESULTS FOR THE JANG EQUATION}, series = {Journal of differential geometry}, volume = {102}, journal = {Journal of differential geometry}, publisher = {International Press of Boston}, address = {Somerville}, issn = {0022-040X}, doi = {10.4310/jdg/1453910454}, pages = {207 -- 242}, year = {2016}, abstract = {Let (M, g, k) be an initial data set for the Einstein equations of general relativity. We show that a canonical solution of the Jang equation exists in the complement of the union of all weakly future outer trapped regions in the initial data set with respect to a given end, provided that this complement contains no weakly past outer trapped regions. The graph of this solution relates the area of the horizon to the global geometry of the initial data set in a non-trivial way. 
We prove the existence of a Scherk-type solution of the Jang equation outside the union of all weakly future or past outer trapped regions in the initial data set. This result is a natural exterior analogue for the Jang equation of the classical Jenkins Serrin theory. We extend and complement existence theorems [19, 20, 40, 29, 18, 31, 11] for Scherk-type constant mean curvature graphs over polygonal domains in (M, g), where (M, g) is a complete Riemannian surface. We can dispense with the a priori assumptions that a sub solution exists and that (M, g) has particular symmetries. Also, our method generalizes to higher dimensions.}, language = {en} } @unpublished{DereudreMazzonettoRoelly2016, author = {Dereudre, David and Mazzonetto, Sara and Roelly, Sylvie}, title = {Exact simulation of Brownian diffusions with drift admitting jumps}, volume = {5}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-91049}, pages = {25}, year = {2016}, abstract = {Using an algorithm based on a retrospective rejection sampling scheme, we propose an exact simulation of a Brownian diffusion whose drift admits several jumps. We treat explicitly and extensively the case of two jumps, providing numerical simulations. 
Our main contribution is to manage the technical difficulty due to the presence of two jumps thanks to a new explicit expression of the transition density of the skew Brownian motion with two semipermeable barriers and a constant drift.}, language = {en} } @article{Denecke2016, author = {Denecke, Klaus-Dieter}, title = {The partial clone of linear terms}, series = {Siberian Mathematical Journal}, volume = {57}, journal = {Siberian Mathematical Journal}, publisher = {Pleiades Publ.}, address = {New York}, issn = {0037-4466}, doi = {10.1134/S0037446616040030}, pages = {589 -- 598}, year = {2016}, abstract = {Generalizing a linear expression over a vector space, we call a term of an arbitrary type tau linear if its every variable occurs only once. Instead of the usual superposition of terms and of the total many-sorted clone of all terms in the case of linear terms, we define the partial many-sorted superposition operation and the partial many-sorted clone that satisfies the superassociative law as weak identity. The extensions of linear hypersubstitutions are weak endomorphisms of this partial clone. For a variety V of one-sorted total algebras of type tau, we define the partial many-sorted linear clone of V as the partial quotient algebra of the partial many-sorted clone of all linear terms by the set of all linear identities of V. We prove then that weak identities of this clone correspond to linear hyperidentities of V.}, language = {en} } @phdthesis{Chutsagulprom2016, author = {Chutsagulprom, Nawinda}, title = {Ensemble-based filters dealing with non-Gaussianity and nonlinearity}, school = {Universit{\"a}t Potsdam}, pages = {95}, year = {2016}, language = {en} } @phdthesis{Cheng2016, author = {Cheng, Yuan}, title = {Recursive state estimation in dynamical systems}, school = {Universit{\"a}t Potsdam}, pages = {84}, year = {2016}, language = {en} } @article{ChangViahmoudiSchulze2016, author = {Chang, D. -C. and Viahmoudi, M. 
Hedayat and Schulze, Bert-Wolfgang}, title = {PSEUDO-DIFFERENTIAL ANALYSIS WITH TWISTED SYMBOLIC STRUCTURE}, series = {Journal of nonlinear and convex analysis : an international journal}, volume = {17}, journal = {Journal of nonlinear and convex analysis : an international journal}, publisher = {Yokohama Publishers}, address = {Yokohama}, issn = {1345-4773}, pages = {1889 -- 1937}, year = {2016}, abstract = {This paper is devoted to pseudo-differential operators and new applications. We establish necessary extensions of the standard calculus to specific classes of operator-valued symbols occurring in principal symbolic hierarchies of operators on manifolds with singularities or stratified spaces.}, language = {en} } @article{CattiauxFradonKuliketal.2016, author = {Cattiaux, Patrick and Fradon, Myriam and Kulik, Alexei M. and Roelly, Sylvie}, title = {Long time behavior of stochastic hard ball systems}, series = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability}, volume = {22}, journal = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability}, publisher = {International Statistical Institute}, address = {Voorburg}, issn = {1350-7265}, doi = {10.3150/14-BEJ672}, pages = {681 -- 710}, year = {2016}, abstract = {We study the long time behavior of a system of n = 2, 3 Brownian hard balls, living in R-d for d >= 2, submitted to a mutual attraction and to elastic collisions.}, language = {en} } @article{BaerenzungHolschneiderLesur2016, author = {B{\"a}renzung, Julien and Holschneider, Matthias and Lesur, Vincent}, title = {The flow at the core-mantle boundary under weak prior constraints}, series = {Journal of geophysical research : Solid earth}, volume = {121}, journal = {Journal of geophysical research : Solid earth}, publisher = {American Geophysical Union}, address = {Washington}, issn = {2169-9313}, doi = {10.1002/2015JB012464}, pages = {1343 -- 1364}, year = {2016}, abstract = {Prior information in ill-posed inverse problem is 
of critical importance because it is conditioning the posterior solution and its associated variability. The problem of determining the flow evolving at the Earth's core-mantle boundary through magnetic field models derived from satellite or observatory data is no exception to the rule. This study aims to estimate what information can be extracted on the velocity field at the core-mantle boundary, when the frozen flux equation is inverted under very weakly informative, but realistic, prior constraints. Instead of imposing a converging spectrum to the flow, we simply assume that its poloidal and toroidal energy spectra are characterized by power laws. The parameters of the spectra, namely, their magnitudes, and slopes are unknown. The connection between the velocity field, its spectra parameters, and the magnetic field model is established through the Bayesian formulation of the problem. Working in two steps, we determined the time-averaged spectra of the flow within the 2001-2009.5 period, as well as the flow itself and its associated uncertainties in 2005.0. According to the spectra we obtained, we can conclude that the large-scale approximation of the velocity field is not an appropriate assumption within the time window we considered. For the flow itself, we show that although it is dominated by its equatorial symmetric component, it is very unlikely to be perfectly symmetric. 
We also demonstrate that its geostrophic state is questioned in different locations of the outer core.}, language = {en} } @article{BaerStrohmaier2016, author = {B{\"a}r, Christian and Strohmaier, Alexander}, title = {A Rigorous Geometric Derivation of the Chiral Anomaly in Curved Backgrounds}, series = {Communications in mathematical physics}, volume = {347}, journal = {Communications in mathematical physics}, publisher = {Springer}, address = {New York}, issn = {0010-3616}, doi = {10.1007/s00220-016-2664-1}, pages = {703 -- 721}, year = {2016}, abstract = {We discuss the chiral anomaly for a Weyl field in a curved background and show that a novel index theorem for the Lorentzian Dirac operator can be applied to describe the gravitational chiral anomaly. A formula for the total charge generated by the gravitational and gauge field background is derived directly in Lorentzian signature and in a mathematically rigorous manner. It contains a term identical to the integrand in the Atiyah-Singer index theorem and another term involving the eta-invariant of the Cauchy hypersurfaces.}, language = {en} } @article{BomansonJanhunenSchaubetal.2016, author = {Bomanson, Jori and Janhunen, Tomi and Schaub, Torsten H. and Gebser, Martin and Kaufmann, Benjamin}, title = {Answer Set Programming Modulo Acyclicity}, series = {Fundamenta informaticae}, volume = {147}, journal = {Fundamenta informaticae}, publisher = {IOS Press}, address = {Amsterdam}, issn = {0169-2968}, doi = {10.3233/FI-2016-1398}, pages = {63 -- 91}, year = {2016}, abstract = {Acyclicity constraints are prevalent in knowledge representation and applications where acyclic data structures such as DAGs and trees play a role. Recently, such constraints have been considered in the satisfiability modulo theories (SMT) framework, and in this paper we carry out an analogous extension to the answer set programming (ASP) paradigm. 
The resulting formalism, ASP modulo acyclicity, offers a rich set of primitives to express constraints related to recursive structures. In the technical results of the paper, we relate the new generalization with standard ASP by showing (i) how acyclicity extensions translate into normal rules, (ii) how weight constraint programs can be instrumented by acyclicity extensions to capture stability in analogy to unfounded set checking, and (iii) how the gap between supported and stable models is effectively closed in the presence of such an extension. Moreover, we present an efficient implementation of acyclicity constraints by incorporating a respective propagator into the state-of-the-art ASP solver CLASP. The implementation provides a unique combination of traditional unfounded set checking with acyclicity propagation. In the experimental part, we evaluate the interplay of these orthogonal checks by equipping logic programs with supplementary acyclicity constraints. The performance results show that native support for acyclicity constraints is a worthwhile addition, furnishing a complementary modeling construct in ASP itself as well as effective means for translation-based ASP solving.}, language = {en} } @unpublished{BlanchardMuecke2016, author = {Blanchard, Gilles and M{\"u}cke, Nicole}, title = {Optimal rates for regularization of statistical inverse learning problems}, volume = {5}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89782}, pages = {36}, year = {2016}, abstract = {We consider a statistical inverse learning problem, where we observe the image of a function f through a linear operator A at i.i.d. random design points X_i, superposed with an additional noise. The distribution of the design points is unknown and can be very general. We analyze simultaneously the direct (estimation of Af) and the inverse (estimation of f) learning problems. 
In this general framework, we obtain strong and weak minimax optimal rates of convergence (as the number of observations n grows large) for a large class of spectral regularization methods over regularity classes defined through appropriate source conditions. This improves on or completes previous results obtained in related settings. The optimality of the obtained rates is shown not only in the exponent in n but also in the explicit dependence of the constant factor in the variance of the noise and the radius of the source condition set.}, language = {en} } @unpublished{BlanchardKraemer2016, author = {Blanchard, Gilles and Kr{\"a}mer, Nicole}, title = {Convergence rates of kernel conjugate gradient for random design regression}, volume = {5}, number = {8}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94195}, pages = {31}, year = {2016}, abstract = {We prove statistical rates of convergence for kernel-based least squares regression from i.i.d. data using a conjugate gradient algorithm, where regularization against overfitting is obtained by early stopping. This method is related to Kernel Partial Least Squares, a regression method that combines supervised dimensionality reduction with least squares projection. Following the setting introduced in earlier related literature, we study so-called "fast convergence rates" depending on the regularity of the target regression function (measured by a source condition in terms of the kernel integral operator) and on the effective dimensionality of the data mapped into the kernel space. We obtain upper bounds, essentially matching known minimax lower bounds, for the L^2 (prediction) norm as well as for the stronger Hilbert norm, if the true regression function belongs to the reproducing kernel Hilbert space. 
If the latter assumption is not fulfilled, we obtain similar convergence rates for appropriate norms, provided additional unlabeled data are available.}, language = {en} } @article{BlanchardKraemer2016, author = {Blanchard, Gilles and Kraemer, Nicole}, title = {Convergence rates of Kernel Conjugate Gradient for random design regression}, series = {Analysis and applications}, volume = {14}, journal = {Analysis and applications}, publisher = {World Scientific}, address = {Singapore}, issn = {0219-5305}, doi = {10.1142/S0219530516400017}, pages = {763 -- 794}, year = {2016}, abstract = {We prove statistical rates of convergence for kernel-based least squares regression from i.i.d. data using a conjugate gradient (CG) algorithm, where regularization against over-fitting is obtained by early stopping. This method is related to Kernel Partial Least Squares, a regression method that combines supervised dimensionality reduction with least squares projection. Following the setting introduced in earlier related literature, we study so-called "fast convergence rates" depending on the regularity of the target regression function (measured by a source condition in terms of the kernel integral operator) and on the effective dimensionality of the data mapped into the kernel space. We obtain upper bounds, essentially matching known minimax lower bounds, for the L-2 (prediction) norm as well as for the stronger Hilbert norm, if the true regression function belongs to the reproducing kernel Hilbert space. 
If the latter assumption is not fulfilled, we obtain similar convergence rates for appropriate norms, provided additional unlabeled data are available.}, language = {en} } @article{BlanchardFlaskaHandyetal.2016, author = {Blanchard, Gilles and Flaska, Marek and Handy, Gregory and Pozzi, Sara and Scott, Clayton}, title = {Classification with asymmetric label noise: Consistency and maximal denoising}, series = {Electronic journal of statistics}, volume = {10}, journal = {Electronic journal of statistics}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/16-EJS1193}, pages = {2780 -- 2824}, year = {2016}, abstract = {In many real-world classification problems, the labels of training examples are randomly corrupted. Most previous theoretical work on classification with label noise assumes that the two classes are separable, that the label noise is independent of the true class label, or that the noise proportions for each class are known. In this work, we give conditions that are necessary and sufficient for the true class-conditional distributions to be identifiable. These conditions are weaker than those analyzed previously, and allow for the classes to be nonseparable and the noise levels to be asymmetric and unknown. The conditions essentially state that a majority of the observed labels are correct and that the true class-conditional distributions are "mutually irreducible," a concept we introduce that limits the similarity of the two distributions. For any label noise problem, there is a unique pair of true class-conditional distributions satisfying the proposed conditions, and we argue that this pair corresponds in a certain sense to maximal denoising of the observed distributions. Our results are facilitated by a connection to "mixture proportion estimation," which is the problem of estimating the maximal proportion of one distribution that is present in another. 
We establish a novel rate of convergence result for mixture proportion estimation, and apply this to obtain consistency of a discrimination rule based on surrogate loss minimization. Experimental results on benchmark data and a nuclear particle classification problem demonstrate the efficacy of our approach.}, language = {en} } @phdthesis{Berner2016, author = {Berner, Nadine}, title = {Deciphering multiple changes in complex climate time series using Bayesian inference}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-100065}, school = {Universit{\"a}t Potsdam}, pages = {xvi, 135}, year = {2016}, abstract = {Change points in time series are perceived as heterogeneities in the statistical or dynamical characteristics of the observations. Unraveling such transitions yields essential information for the understanding of the observed system's intrinsic evolution and potential external influences. A precise detection of multiple changes is therefore of great importance for various research disciplines, such as environmental sciences, bioinformatics and economics. The primary purpose of the detection approach introduced in this thesis is the investigation of transitions underlying direct or indirect climate observations. In order to develop a diagnostic approach capable to capture such a variety of natural processes, the generic statistical features in terms of central tendency and dispersion are employed in the light of Bayesian inversion. In contrast to established Bayesian approaches to multiple changes, the generic approach proposed in this thesis is not formulated in the framework of specialized partition models of high dimensionality requiring prior specification, but as a robust kernel-based approach of low dimensionality employing least informative prior distributions. First of all, a local Bayesian inversion approach is developed to robustly infer on the location and the generic patterns of a single transition. 
The analysis of synthetic time series comprising changes of different observational evidence, data loss and outliers validates the performance, consistency and sensitivity of the inference algorithm. To systematically investigate time series for multiple changes, the Bayesian inversion is extended to a kernel-based inference approach. By introducing basic kernel measures, the weighted kernel inference results are composed into a proxy probability to a posterior distribution of multiple transitions. The detection approach is applied to environmental time series from the Nile river in Aswan and the weather station Tuscaloosa, Alabama comprising documented changes. The method's performance confirms the approach as a powerful diagnostic tool to decipher multiple changes underlying direct climate observations. Finally, the kernel-based Bayesian inference approach is used to investigate a set of complex terrigenous dust records interpreted as climate indicators of the African region of the Plio-Pleistocene period. A detailed inference unravels multiple transitions underlying the indirect climate observations, that are interpreted as conjoint changes. The identified conjoint changes coincide with established global climate events. 
In particular, the two-step transition associated to the establishment of the modern Walker-Circulation contributes to the current discussion about the influence of paleoclimate changes on the environmental conditions in tropical and subtropical Africa at around two million years ago.}, language = {en} } @article{Benini2016, author = {Benini, Marco}, title = {Optimal space of linear classical observables for Maxwell k-forms via spacelike and timelike compact de Rham cohomologies}, series = {Journal of mathematical physics}, volume = {57}, journal = {Journal of mathematical physics}, publisher = {American Institute of Physics}, address = {Melville}, issn = {0022-2488}, doi = {10.1063/1.4947563}, pages = {1249 -- 1279}, year = {2016}, abstract = {Being motivated by open questions in gauge field theories, we consider non-standard de Rham cohomology groups for timelike compact and spacelike compact support systems. These cohomology groups are shown to be isomorphic respectively to the usual de Rham cohomology of a spacelike Cauchy surface and its counterpart with compact support. Furthermore, an analog of the usual Poincare duality for de Rham cohomology is shown to hold for the case with non-standard supports as well. We apply these results to find optimal spaces of linear observables for analogs of arbitrary degree k of both the vector potential and the Faraday tensor. The term optimal has to be intended in the following sense: The spaces of linear observables we consider distinguish between different configurations; in addition to that, there are no redundant observables. This last point in particular heavily relies on the analog of Poincare duality for the new cohomology groups. 
Published by AIP Publishing.}, language = {en} } @article{BeinruckerDoganBlanchard2016, author = {Beinrucker, Andre and Dogan, Urun and Blanchard, Gilles}, title = {Extensions of stability selection using subsamples of observations and covariates}, series = {Statistics and Computing}, volume = {26}, journal = {Statistics and Computing}, publisher = {Springer}, address = {Dordrecht}, issn = {0960-3174}, doi = {10.1007/s11222-015-9589-y}, pages = {1059 -- 1077}, year = {2016}, abstract = {We introduce extensions of stability selection, a method to stabilise variable selection methods introduced by Meinshausen and Buhlmann (J R Stat Soc 72:417-473, 2010). We propose to apply a base selection method repeatedly to random subsamples of observations and subsets of covariates under scrutiny, and to select covariates based on their selection frequency. We analyse the effects and benefits of these extensions. Our analysis generalizes the theoretical results of Meinshausen and Buhlmann (J R Stat Soc 72:417-473, 2010) from the case of half-samples to subsamples of arbitrary size. We study, in a theoretical manner, the effect of taking random covariate subsets using a simplified score model. Finally we validate these extensions on numerical experiments on both synthetic and real datasets, and compare the obtained results in detail to the original stability selection method.}, language = {en} } @article{Becker2016, author = {Becker, Christian}, title = {Cheeger-Chern-Simons Theory and Differential String Classes}, series = {Annales de l'Institut Henri Poincar{\'e}}, volume = {17}, journal = {Annales de l'Institut Henri Poincar{\'e}}, publisher = {Springer}, address = {Basel}, issn = {1424-0637}, doi = {10.1007/s00023-016-0485-6}, pages = {1529 -- 1594}, year = {2016}, abstract = {We construct new concrete examples of relative differential characters, which we call Cheeger-Chern-Simons characters. They combine the well-known Cheeger-Simons characters with Chern-Simons forms. 
In the same way as Cheeger-Simons characters generalize Chern-Simons invariants of oriented closed manifolds, Cheeger-Chern-Simons characters generalize Chern-Simons invariants of oriented manifolds with boundary. We study the differential cohomology of compact Lie groups G and their classifying spaces BG. We show that the even degree differential cohomology of BG canonically splits into Cheeger-Simons characters and topologically trivial characters. We discuss the transgression in principal G-bundles and in the universal bundle. We introduce two methods to lift the universal transgression to a differential cohomology valued map. They generalize the Dijkgraaf-Witten correspondence between 3-dimensional Chern-Simons theories and Wess-Zumino-Witten terms to fully extended higher-order Chern-Simons theories. Using these lifts, we also prove two versions of a differential Hopf theorem. Using Cheeger-Chern-Simons characters and transgression, we introduce the notion of differential trivializations of universal characteristic classes. It generalizes well-established notions of differential String classes to arbitrary degree. Specializing to the class {$\frac{1}{2}p_1$}, we recover isomorphism classes of geometric string structures on Spin (n) -bundles with connection and the corresponding spin structures on the free loop space. The Cheeger-Chern-Simons character associated with the class {$\frac{1}{2}p_1$} together with its transgressions to loop space and higher mapping spaces defines a Chern-Simons theory, extended down to points. Differential String classes provide trivializations of this extended Chern-Simons theory. This setting immediately generalizes to arbitrary degree: for any universal characteristic class of principal G-bundles, we have an associated Cheeger-Chern-Simons character and extended Chern-Simons theory. 
Differential trivialization classes yield trivializations of this extended Chern-Simons theory.}, language = {en} } @article{AntoniniAzzaliSkandalis2016, author = {Antonini, Paolo and Azzali, Sara and Skandalis, Georges}, title = {Bivariant K-theory with R/Z-coefficients and rho classes of unitary representations}, series = {Journal of functional analysis}, volume = {270}, journal = {Journal of functional analysis}, publisher = {Elsevier}, address = {San Diego}, issn = {0022-1236}, doi = {10.1016/j.jfa.2015.06.017}, pages = {447 -- 481}, year = {2016}, abstract = {We construct equivariant KK-theory with coefficients in R and R/Z as suitable inductive limits over II1-factors. We show that the Kasparov product, together with its usual functorial properties, extends to KK-theory with real coefficients. Let Gamma be a group. We define a Gamma-algebra A to be K-theoretically free and proper (KFP) if the group trace tr of Gamma acts as the unit element in KKR Gamma (A, A). We show that free and proper Gamma-algebras (in the sense of Kasparov) have the (KFP) property. Moreover, if Gamma is torsion free and satisfies the KK Gamma-form of the Baum-Connes conjecture, then every Gamma-algebra satisfies (KFP). If alpha : Gamma -> U-n is a unitary representation and A satisfies property (KFP), we construct in a canonical way a rho class rho(A)(alpha) is an element of KKR/Z1,Gamma (A, A). This construction generalizes the Atiyah-Patodi-Singer K-theory class with R/Z-coefficients associated to alpha. (C) 2015 Elsevier Inc. 
All rights reserved.}, language = {en} } @unpublished{AlsaedyTarkhanov2016, author = {Alsaedy, Ammar and Tarkhanov, Nikolai Nikolaevich}, title = {A Hilbert boundary value problem for generalised Cauchy-Riemann equations}, volume = {5}, number = {1}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-86109}, pages = {21}, year = {2016}, abstract = {We elaborate a boundary Fourier method for studying an analogue of the Hilbert problem for analytic functions within the framework of generalised Cauchy-Riemann equations. The boundary value problem need not satisfy the Shapiro-Lopatinskij condition and so it fails to be Fredholm in Sobolev spaces. We show a solvability condition of the Hilbert problem, which looks like those for ill-posed problems, and construct an explicit formula for approximate solutions.}, language = {en} } @unpublished{Alsaedy2016, author = {Alsaedy, Ammar}, title = {Variational primitive of a differential form}, volume = {5}, number = {4}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89223}, pages = {8}, year = {2016}, abstract = {In this paper we specify the Dirichlet to Neumann operator related to the Cauchy problem for the gradient operator with data on a part of the boundary. 
To this end, we consider a nonlinear relaxation of this problem which is a mixed boundary problem of Zaremba type for the p-Laplace equation.}, language = {en} } @article{AcevedoReichCubasch2016, author = {Acevedo, Walter and Reich, Sebastian and Cubasch, Ulrich}, title = {Towards the assimilation of tree-ring-width records using ensemble Kalman filtering techniques}, series = {Climate dynamics : observational, theoretical and computational research on the climate system}, volume = {46}, journal = {Climate dynamics : observational, theoretical and computational research on the climate system}, publisher = {Springer}, address = {New York}, issn = {0930-7575}, doi = {10.1007/s00382-015-2683-1}, pages = {1909 -- 1920}, year = {2016}, abstract = {This paper investigates the applicability of the Vaganov-Shashkin-Lite (VSL) forward model for tree-ring-width chronologies as observation operator within a proxy data assimilation (DA) setting. Based on the principle of limiting factors, VSL combines temperature and moisture time series in a nonlinear fashion to obtain simulated TRW chronologies. When used as observation operator, this modelling approach implies three compounding, challenging features: (1) time averaging, (2) "switching recording" of 2 variables and (3) bounded response windows leading to "thresholded response". We generate pseudo-TRW observations from a chaotic 2-scale dynamical system, used as a cartoon of the atmosphere-land system, and attempt to assimilate them via ensemble Kalman filtering techniques. Results within our simplified setting reveal that VSL's nonlinearities may lead to considerable loss of assimilation skill, as compared to the utilization of a time-averaged (TA) linear observation operator. In order to understand this undesired effect, we embed VSL's formulation into the framework of fuzzy logic (FL) theory, which thereby exposes multiple representations of the principle of limiting factors. 
DA experiments employing three alternative growth rate functions disclose a strong link between the lack of smoothness of the growth rate function and the loss of optimality in the estimate of the TA state. Accordingly, VSL's performance as observation operator can be enhanced by resorting to smoother FL representations of the principle of limiting factors. This finding fosters new interpretations of tree-ring-growth limitation processes.}, language = {en} }