@article{BachocBlanchardNeuvial2018, author = {Bachoc, Fran{\c c}ois and Blanchard, Gilles and Neuvial, Pierre}, title = {On the post selection inference constant under restricted isometry properties}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {2}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1490}, pages = {3736 -- 3757}, year = {2018}, abstract = {Uniformly valid confidence intervals post model selection in regression can be constructed based on Post-Selection Inference (PoSI) constants. PoSI constants are minimal for orthogonal design matrices, and can be upper bounded as a function of the sparsity of the set of models under consideration, for generic design matrices. In order to improve on these generic sparse upper bounds, we consider design matrices satisfying a Restricted Isometry Property (RIP) condition. We provide a new upper bound on the PoSI constant in this setting. This upper bound is an explicit function of the RIP constant of the design matrix, thereby giving an interpolation between the orthogonal setting and the generic sparse setting. We show that this upper bound is asymptotically optimal in many settings by constructing a matching lower bound.}, language = {en} } @article{BeinruckerDoganBlanchard2016, author = {Beinrucker, Andre and Dogan, Urun and Blanchard, Gilles}, title = {Extensions of stability selection using subsamples of observations and covariates}, series = {Statistics and Computing}, volume = {26}, journal = {Statistics and Computing}, publisher = {Springer}, address = {Dordrecht}, issn = {0960-3174}, doi = {10.1007/s11222-015-9589-y}, pages = {1059 -- 1077}, year = {2016}, abstract = {We introduce extensions of stability selection, a method to stabilise variable selection methods introduced by Meinshausen and B{\"u}hlmann (J R Stat Soc 72:417-473, 2010). We propose to apply a base selection method repeatedly to random subsamples of observations and subsets of covariates under scrutiny, and to select covariates based on their selection frequency. We analyse the effects and benefits of these extensions. Our analysis generalizes the theoretical results of Meinshausen and B{\"u}hlmann (J R Stat Soc 72:417-473, 2010) from the case of half-samples to subsamples of arbitrary size. We study, in a theoretical manner, the effect of taking random covariate subsets using a simplified score model. Finally, we validate these extensions on numerical experiments on both synthetic and real datasets, and compare the obtained results in detail to the original stability selection method.}, language = {en} } @book{Blanchard2011, author = {Blanchard, Gilles}, title = {Komplexit{\"a}tsanalyse in Statistik und Lerntheorie : Antrittsvorlesung 2011-05-04}, publisher = {Univ.-Bibl.}, address = {Potsdam}, year = {2011}, abstract = {Gilles Blanchard's lecture offers insights into his work on the development and analysis of the statistical properties of learning algorithms. In many modern applications, for example handwriting recognition or spam filtering, a computer program can automatically learn from given examples to make relevant predictions for new cases. Learning theory, which is closely connected to statistics, is concerned with the mathematical analysis of the properties of such methods. Here the notion of the complexity of the learned prediction rule plays an important role.
If the rule is too simple, it will ignore important details. If it is too complex, it will learn the given examples "by heart" and have no ability to generalize. Blanchard will explain how mathematical tools help to find the right compromise between these two extremes.}, language = {de} } @article{BlanchardCarpentierGutzeit2018, author = {Blanchard, Gilles and Carpentier, Alexandra and Gutzeit, Maurilio}, title = {Minimax Euclidean separation rates for testing convex hypotheses in $\mathbb{R}^d$}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {2}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1472}, pages = {3713 -- 3735}, year = {2018}, abstract = {We consider composite-composite testing problems for the expectation in the Gaussian sequence model where the null hypothesis corresponds to a closed convex subset C of $\mathbb{R}^d$. We adopt a minimax point of view and our primary objective is to describe the smallest Euclidean distance between the null and alternative hypotheses such that there is a test with small total error probability. In particular, we focus on the dependence of this distance on the dimension d and the variance 1/n, giving rise to the minimax separation rate. In this paper we discuss lower and upper bounds on this rate for different smooth and non-smooth choices for C.}, language = {en} } @article{BlanchardDelattreRoquain2014, author = {Blanchard, Gilles and Delattre, Sylvain and Roquain, {\´E}tienne}, title = {Testing over a continuum of null hypotheses with False Discovery Rate control}, series = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability}, volume = {20}, journal = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability}, number = {1}, publisher = {International Statistical Institute}, address = {Voorburg}, issn = {1350-7265}, doi = {10.3150/12-BEJ488}, pages = {304 -- 333}, year = {2014}, abstract = {We consider statistical hypothesis testing simultaneously over a fairly general, possibly uncountably infinite, set of null hypotheses, under the assumption that a suitable single test (and corresponding p-value) is known for each individual hypothesis. We extend to this setting the notion of false discovery rate (FDR) as a measure of type I error. Our main result studies specific procedures based on the observation of the p-value process. Control of the FDR at a nominal level is ensured either under arbitrary dependence of p-values, or under the assumption that the finite dimensional distributions of the p-value process have positive correlations of a specific type (weak PRDS). Both cases generalize existing results established in the finite setting. The interest of this approach is demonstrated in several non-parametric examples: testing the mean/signal in a Gaussian white noise model, testing the intensity of a Poisson process and testing the c.d.f. of i.i.d.
random variables.}, language = {en} } @unpublished{BlanchardDelattreRoquain2012, author = {Blanchard, Gilles and Delattre, Sylvain and Roquain, {\´E}tienne}, title = {Testing over a continuum of null hypotheses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56877}, year = {2012}, abstract = {We introduce a theoretical framework for performing statistical hypothesis testing simultaneously over a fairly general, possibly uncountably infinite, set of null hypotheses. This extends the standard statistical setting for multiple hypotheses testing, which is restricted to a finite set. This work is motivated by numerous modern applications where the observed signal is modeled by a stochastic process over a continuum. As a measure of type I error, we extend the concept of false discovery rate (FDR) to this setting. The FDR is defined as the average ratio of the measure of two random sets, so that its study presents some challenge and is of some intrinsic mathematical interest. Our main result shows how to use the p-value process to control the FDR at a nominal level, either under arbitrary dependence of p-values, or under the assumption that the finite dimensional distributions of the p-value process have positive correlations of a specific type (weak PRDS). Both cases generalize existing results established in the finite setting, the latter one leading to a less conservative procedure. The interest of this approach is demonstrated in several non-parametric examples: testing the mean/signal in a Gaussian white noise model, testing the intensity of a Poisson process and testing the c.d.f. of i.i.d. random variables. Conceptually, an interesting feature of the setting advocated here is that it focuses directly on the intrinsic hypothesis space associated with a testing model on a random process, without referring to an arbitrary discretization.}, language = {en} } @article{BlanchardDickhausRoquainetal.2014, author = {Blanchard, Gilles and Dickhaus, Thorsten and Roquain, Etienne and Villers, Fanny}, title = {On least favorable configurations for step-up-down tests}, series = {Statistica Sinica}, volume = {24}, journal = {Statistica Sinica}, number = {1}, publisher = {Statistica Sinica, Institute of Statistical Science, Academia Sinica}, address = {Taipei}, issn = {1017-0405}, doi = {10.5705/ss.2011.205}, pages = {1 -- U31}, year = {2014}, language = {en} } @article{BlanchardFlaskaHandyetal.2016, author = {Blanchard, Gilles and Flaska, Marek and Handy, Gregory and Pozzi, Sara and Scott, Clayton}, title = {Classification with asymmetric label noise: Consistency and maximal denoising}, series = {Electronic journal of statistics}, volume = {10}, journal = {Electronic journal of statistics}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/16-EJS1193}, pages = {2780 -- 2824}, year = {2016}, abstract = {In many real-world classification problems, the labels of training examples are randomly corrupted. Most previous theoretical work on classification with label noise assumes that the two classes are separable, that the label noise is independent of the true class label, or that the noise proportions for each class are known. In this work, we give conditions that are necessary and sufficient for the true class-conditional distributions to be identifiable. These conditions are weaker than those analyzed previously, and allow for the classes to be nonseparable and the noise levels to be asymmetric and unknown. 
The conditions essentially state that a majority of the observed labels are correct and that the true class-conditional distributions are "mutually irreducible," a concept we introduce that limits the similarity of the two distributions. For any label noise problem, there is a unique pair of true class-conditional distributions satisfying the proposed conditions, and we argue that this pair corresponds in a certain sense to maximal denoising of the observed distributions. Our results are facilitated by a connection to "mixture proportion estimation," which is the problem of estimating the maximal proportion of one distribution that is present in another. We establish a novel rate of convergence result for mixture proportion estimation, and apply this to obtain consistency of a discrimination rule based on surrogate loss minimization. Experimental results on benchmark data and a nuclear particle classification problem demonstrate the efficacy of our approach.}, language = {en} } @article{BlanchardHoffmannReiss2018, author = {Blanchard, Gilles and Hoffmann, Marc and Reiss, Markus}, title = {Optimal adaptation for early stopping in statistical inverse problems}, series = {SIAM/ASA Journal on Uncertainty Quantification}, volume = {6}, journal = {SIAM/ASA Journal on Uncertainty Quantification}, number = {3}, publisher = {Society for Industrial and Applied Mathematics}, address = {Philadelphia}, issn = {2166-2525}, doi = {10.1137/17M1154096}, pages = {1043 -- 1075}, year = {2018}, abstract = {For linear inverse problems $Y = A\mu + \zeta$, it is classical to recover the unknown signal $\mu$ by iterative regularization methods $(\widehat{\mu}^{(m)}, m = 0, 1, \ldots)$ and halt at a data-dependent iteration $\tau$ using some stopping rule, typically based on a discrepancy principle, so that the weak (or prediction) squared-error $\|A(\widehat{\mu}^{(\tau)} - \mu)\|^2$ is controlled. In the context of statistical estimation with stochastic noise $\zeta$, we study oracle adaptation (that is, compared to the best possible stopping iteration) in strong squared-error $E[\|\widehat{\mu}^{(\tau)} - \mu\|^2]$. For a residual-based stopping rule, oracle adaptation bounds are established for general spectral regularization methods. The proofs use bias and variance transfer techniques from weak prediction error to strong $L^2$-error, as well as convexity arguments and concentration bounds for the stochastic part. Adaptive early stopping for the Landweber method is studied in further detail and illustrated numerically.}, language = {en} } @article{BlanchardHoffmannReiss2018a, author = {Blanchard, Gilles and Hoffmann, Marc and Reiss, Markus}, title = {Early stopping for statistical inverse problems via truncated SVD estimation}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {2}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1482}, pages = {3204 -- 3231}, year = {2018}, abstract = {We consider truncated SVD (or spectral cut-off, projection) estimators for a prototypical statistical inverse problem in dimension D. Since calculating the singular value decomposition (SVD) only for the largest singular values is much less costly than the full SVD, our aim is to select a data-driven truncation level $\widehat{m} \in \{1, \ldots, D\}$ only based on the knowledge of the first $\widehat{m}$ singular values and vectors.
We analyse in detail whether sequential early stopping rules of this type can preserve statistical optimality. Information-constrained lower bounds and matching upper bounds for a residual-based stopping rule are provided, which give a clear picture in which situations optimal sequential adaptation is feasible. Finally, a hybrid two-step approach is proposed which allows for classical oracle inequalities while considerably reducing numerical complexity.}, language = {en} } @article{BlanchardKawanabeSugiyamaetal.2006, author = {Blanchard, Gilles and Kawanabe, Motoaki and Sugiyama, Masashi and Spokoiny, Vladimir G. and M{\"u}ller, Klaus-Robert}, title = {In search of non-Gaussian components of a high-dimensional distribution}, journal = {Journal of Machine Learning Research}, issn = {1532-4435}, year = {2006}, abstract = {Finding non-Gaussian components of high-dimensional data is an important preprocessing step for efficient information processing. This article proposes a new linear method to identify the "non-Gaussian subspace" within a very general semi-parametric framework. Our proposed method, called NGCA (non-Gaussian component analysis), is based on a linear operator which, to any arbitrary nonlinear (smooth) function, associates a vector belonging to the low dimensional non-Gaussian target subspace, up to an estimation error. By applying this operator to a family of different nonlinear functions, one obtains a family of different vectors lying in a vicinity of the target space. As a final step, the target space itself is estimated by applying PCA to this family of vectors. We show that this procedure is consistent in the sense that the estimation error tends to zero at a parametric rate, uniformly over the family. Numerical examples demonstrate the usefulness of our method.}, language = {en} } @article{BlanchardKraemer2016, author = {Blanchard, Gilles and Kr{\"a}mer, Nicole}, title = {Convergence rates of Kernel Conjugate Gradient for random design regression}, series = {Analysis and applications}, volume = {14}, journal = {Analysis and applications}, publisher = {World Scientific}, address = {Singapore}, issn = {0219-5305}, doi = {10.1142/S0219530516400017}, pages = {763 -- 794}, year = {2016}, abstract = {We prove statistical rates of convergence for kernel-based least squares regression from i.i.d. data using a conjugate gradient (CG) algorithm, where regularization against over-fitting is obtained by early stopping. This method is related to Kernel Partial Least Squares, a regression method that combines supervised dimensionality reduction with least squares projection. Following the setting introduced in earlier related literature, we study so-called "fast convergence rates" depending on the regularity of the target regression function (measured by a source condition in terms of the kernel integral operator) and on the effective dimensionality of the data mapped into the kernel space. We obtain upper bounds, essentially matching known minimax lower bounds, for the $L^2$ (prediction) norm as well as for the stronger Hilbert norm, if the true regression function belongs to the reproducing kernel Hilbert space.
If the latter assumption is not fulfilled, we obtain similar convergence rates for appropriate norms, provided additional unlabeled data are available.}, language = {en} } @unpublished{BlanchardKraemer2016a, author = {Blanchard, Gilles and Kr{\"a}mer, Nicole}, title = {Convergence rates of kernel conjugate gradient for random design regression}, volume = {5}, number = {8}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94195}, pages = {31}, year = {2016}, abstract = {We prove statistical rates of convergence for kernel-based least squares regression from i.i.d. data using a conjugate gradient algorithm, where regularization against overfitting is obtained by early stopping. This method is related to Kernel Partial Least Squares, a regression method that combines supervised dimensionality reduction with least squares projection. Following the setting introduced in earlier related literature, we study so-called "fast convergence rates" depending on the regularity of the target regression function (measured by a source condition in terms of the kernel integral operator) and on the effective dimensionality of the data mapped into the kernel space. We obtain upper bounds, essentially matching known minimax lower bounds, for the $L^2$ (prediction) norm as well as for the stronger Hilbert norm, if the true regression function belongs to the reproducing kernel Hilbert space. If the latter assumption is not fulfilled, we obtain similar convergence rates for appropriate norms, provided additional unlabeled data are available.}, language = {en} } @article{BlanchardMathe2012, author = {Blanchard, Gilles and Math{\´e}, Peter}, title = {Discrepancy principle for statistical inverse problems with application to conjugate gradient iteration}, series = {Inverse problems : an international journal of inverse problems, inverse methods and computerised inversion of data}, volume = {28}, journal = {Inverse problems : an international journal of inverse problems, inverse methods and computerised inversion of data}, number = {11}, publisher = {IOP Publ. Ltd.}, address = {Bristol}, issn = {0266-5611}, doi = {10.1088/0266-5611/28/11/115011}, pages = {23}, year = {2012}, abstract = {The authors discuss the use of the discrepancy principle for statistical inverse problems, when the underlying operator is of trace class. Under this assumption the discrepancy principle is well defined; however, a plain use of it may occasionally fail, and it will yield sub-optimal rates. Therefore, a modification of the discrepancy is introduced, which corrects both of the above deficiencies. For a variety of linear regularization schemes as well as for conjugate gradient iteration it is shown to yield order optimal a priori error bounds under general smoothness assumptions. A posteriori error control is also possible, however at a sub-optimal rate, in general. This study uses and complements previous results for bounded deterministic noise.}, language = {en} } @unpublished{BlanchardMathe2012a, author = {Blanchard, Gilles and Math{\´e}, Peter}, title = {Discrepancy principle for statistical inverse problems with application to conjugate gradient iteration}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57117}, year = {2012}, abstract = {The authors discuss the use of the discrepancy principle for statistical inverse problems, when the underlying operator is of trace class.
Under this assumption the discrepancy principle is well defined; however, a plain use of it may occasionally fail, and it will yield sub-optimal rates. Therefore, a modification of the discrepancy is introduced, which takes into account both of the above deficiencies. For a variety of linear regularization schemes as well as for conjugate gradient iteration this modification is shown to yield order optimal a priori error bounds under general smoothness assumptions. A posteriori error control is also possible, however at a sub-optimal rate, in general. This study uses and complements previous results for bounded deterministic noise.}, language = {en} } @article{BlanchardMuecke2018, author = {Blanchard, Gilles and M{\"u}cke, Nicole}, title = {Optimal rates for regularization of statistical inverse learning problems}, series = {Foundations of Computational Mathematics}, volume = {18}, journal = {Foundations of Computational Mathematics}, number = {4}, publisher = {Springer}, address = {New York}, issn = {1615-3375}, doi = {10.1007/s10208-017-9359-7}, pages = {971 -- 1013}, year = {2018}, abstract = {We consider a statistical inverse learning (also called inverse regression) problem, where we observe the image of a function f through a linear operator A at i.i.d. random design points X_i, superposed with an additive noise. The distribution of the design points is unknown and can be very general. We analyze simultaneously the direct (estimation of Af) and the inverse (estimation of f) learning problems. In this general framework, we obtain strong and weak minimax optimal rates of convergence (as the number of observations n grows large) for a large class of spectral regularization methods over regularity classes defined through appropriate source conditions. This improves on or completes previous results obtained in related settings. The optimality of the obtained rates is shown not only in the exponent in n but also in the explicit dependence of the constant factor on the variance of the noise and the radius of the source condition set.}, language = {en} } @article{BlanchardMuecke2020, author = {Blanchard, Gilles and M{\"u}cke, Nicole}, title = {Kernel regression, minimax rates and effective dimensionality}, series = {Analysis and applications}, volume = {18}, journal = {Analysis and applications}, number = {4}, publisher = {World Scientific}, address = {New Jersey}, issn = {0219-5305}, doi = {10.1142/S0219530519500258}, pages = {683 -- 696}, year = {2020}, abstract = {We investigate whether kernel regularization methods can achieve minimax convergence rates over a source condition regularity assumption for the target function. These questions have been considered in past literature, but only under specific assumptions about the decay, typically polynomial, of the spectrum of the kernel mapping covariance operator.
From the perspective of distribution-free results, we investigate this issue under a much weaker assumption on the eigenvalue decay, allowing for more complex behavior that can reflect different structure of the data at different scales.}, language = {en} } @unpublished{BlanchardMuecke2016, author = {Blanchard, Gilles and M{\"u}cke, Nicole}, title = {Optimal rates for regularization of statistical inverse learning problems}, volume = {5}, number = {5}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {2193-6943}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89782}, pages = {36}, year = {2016}, abstract = {We consider a statistical inverse learning problem, where we observe the image of a function f through a linear operator A at i.i.d. random design points X_i, superposed with an additive noise. The distribution of the design points is unknown and can be very general. We analyze simultaneously the direct (estimation of Af) and the inverse (estimation of f) learning problems. In this general framework, we obtain strong and weak minimax optimal rates of convergence (as the number of observations n grows large) for a large class of spectral regularization methods over regularity classes defined through appropriate source conditions. This improves on or completes previous results obtained in related settings. The optimality of the obtained rates is shown not only in the exponent in n but also in the explicit dependence of the constant factor on the variance of the noise and the radius of the source condition set.}, language = {en} } @misc{BlanchardScott2018, author = {Blanchard, Gilles and Scott, Clayton}, title = {Corrigendum to: Classification with asymmetric label noise}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {1}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1422}, pages = {1779 -- 1781}, year = {2018}, abstract = {We point out a flaw in Lemma 15 of [1]. We also indicate how the main results of that section are still valid using a modified argument.}, language = {en} } @article{BlanchardZadorozhnyi2019, author = {Blanchard, Gilles and Zadorozhnyi, Oleksandr}, title = {Concentration of weakly dependent Banach-valued sums and applications to statistical learning methods}, series = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability}, volume = {25}, journal = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability}, number = {4B}, publisher = {International Statistical Institute}, address = {Voorburg}, issn = {1350-7265}, doi = {10.3150/18-BEJ1095}, pages = {3421 -- 3458}, year = {2019}, abstract = {We obtain a Bernstein-type inequality for sums of Banach-valued random variables satisfying a weak dependence assumption of general type and under certain smoothness assumptions on the underlying Banach norm. We use this inequality in order to investigate, in the asymptotic regime, the error upper bounds for the broad family of spectral regularization methods for reproducing kernel decision rules, when trained on a sample coming from a $\tau$-mixing process.}, language = {en} }
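
Several entries above (in particular BlanchardHoffmannReiss2018a and BlanchardMathe2012) describe residual-based early stopping for statistical inverse problems: the truncation level is increased only until the residual drops below a discrepancy-type threshold. The following minimal Python sketch illustrates that general idea for a truncated-SVD estimator; the function name, the threshold calibration kappa = D * noise_level**2, and the non-sequential use of np.linalg.svd are assumptions made for this sketch only and are not the exact procedures analyzed in the cited papers.

import numpy as np

def truncated_svd_early_stopping(A, Y, noise_level, kappa=None):
    # Truncated-SVD estimator with a residual-based (discrepancy-type) stopping rule:
    # increase the truncation level m until ||Y - A mu_hat^(m)||^2 falls below kappa.
    # Illustrative only; a sequential implementation would compute singular triplets
    # one at a time instead of calling the full np.linalg.svd.
    D = Y.shape[0]
    if kappa is None:
        kappa = D * noise_level ** 2          # assumed calibration: expected squared noise norm
    U, s, Vt = np.linalg.svd(A)
    mu_hat = np.zeros(Vt.shape[1])
    for m in range(len(s)):
        coeff = U[:, m] @ Y / s[m]            # coefficient along the m-th singular direction
        mu_hat = mu_hat + coeff * Vt[m]
        residual_sq = np.sum((Y - A @ mu_hat) ** 2)
        if residual_sq <= kappa:              # stop as soon as the residual is small enough
            return mu_hat, m + 1
    return mu_hat, len(s)

For example, truncated_svd_early_stopping(A, Y, noise_level=0.1) returns the estimate together with the data-driven truncation level, which only depends on the singular components computed up to the stopping index.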