@article{BlanchardScott2018, author = {Blanchard, Gilles and Scott, Clayton}, title = {Corrigendum to: Classification with asymmetric label noise}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {1}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1422}, pages = {1779 -- 1781}, year = {2018}, abstract = {We point out a flaw in Lemma 15 of [1]. We also indicate how the main results of that section are still valid using a modified argument.}, language = {en} }
@article{BlanchardHoffmannReiss2018, author = {Blanchard, Gilles and Hoffmann, Marc and Reiss, Markus}, title = {Optimal adaptation for early stopping in statistical inverse problems}, series = {SIAM/ASA Journal on Uncertainty Quantification}, volume = {6}, journal = {SIAM/ASA Journal on Uncertainty Quantification}, number = {3}, publisher = {Society for Industrial and Applied Mathematics}, address = {Philadelphia}, issn = {2166-2525}, doi = {10.1137/17M1154096}, pages = {1043 -- 1075}, year = {2018}, abstract = {For linear inverse problems $Y = A\mu + \zeta$, it is classical to recover the unknown signal $\mu$ by iterative regularization methods ($\hat{\mu}^{(m)}$, $m = 0, 1, \ldots$) and halt at a data-dependent iteration $\tau$ using some stopping rule, typically based on a discrepancy principle, so that the weak (or prediction) squared error $\|A(\hat{\mu}^{(\tau)} - \mu)\|^2$ is controlled. In the context of statistical estimation with stochastic noise $\zeta$, we study oracle adaptation (that is, compared to the best possible stopping iteration) in strong squared error $E[\|\hat{\mu}^{(\tau)} - \mu\|^2]$. For a residual-based stopping rule, oracle adaptation bounds are established for general spectral regularization methods. The proofs use bias and variance transfer techniques from weak prediction error to strong $L^2$-error, as well as convexity arguments and concentration bounds for the stochastic part. Adaptive early stopping for the Landweber method is studied in further detail and illustrated numerically.}, language = {en} }
@article{MueckeBlanchard2018, author = {M{\"u}cke, Nicole and Blanchard, Gilles}, title = {Parallelizing spectrally regularized kernel algorithms}, series = {Journal of machine learning research}, volume = {19}, journal = {Journal of machine learning research}, publisher = {Microtome Publishing}, address = {Cambridge, Mass.}, issn = {1532-4435}, pages = {29}, year = {2018}, abstract = {We consider a distributed learning approach in supervised learning for a large class of spectral regularization methods in a reproducing kernel Hilbert space (RKHS) framework. The data set of size $n$ is partitioned into $m = O(n^{\alpha})$, $\alpha < 1/2$, disjoint subsamples. On each subsample, some spectral regularization method (belonging to a large class, including in particular Kernel Ridge Regression, $L^2$-boosting and spectral cut-off) is applied. The regression function $f$ is then estimated via simple averaging, leading to a substantial reduction in computation time. We show that minimax optimal rates of convergence are preserved if $m$ grows sufficiently slowly (corresponding to an upper bound for $\alpha$) as $n \to \infty$, depending on the smoothness assumptions on $f$ and the intrinsic dimensionality. In spirit, the analysis relies on a classical bias/stochastic error analysis.}, language = {en} }
@article{BachocBlanchardNeuvial2018, author = {Bachoc, Francois and Blanchard, Gilles and Neuvial, Pierre}, title = {On the post selection inference constant under restricted isometry properties}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {2}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1490}, pages = {3736 -- 3757}, year = {2018}, abstract = {Uniformly valid confidence intervals post model selection in regression can be constructed based on Post-Selection Inference (PoSI) constants. PoSI constants are minimal for orthogonal design matrices, and can be upper bounded as a function of the sparsity of the set of models under consideration, for generic design matrices. In order to improve on these generic sparse upper bounds, we consider design matrices satisfying a Restricted Isometry Property (RIP) condition. We provide a new upper bound on the PoSI constant in this setting. This upper bound is an explicit function of the RIP constant of the design matrix, thereby giving an interpolation between the orthogonal setting and the generic sparse setting. We show that this upper bound is asymptotically optimal in many settings by constructing a matching lower bound.}, language = {en} }
@article{BlanchardCarpentierGutzeit2018, author = {Blanchard, Gilles and Carpentier, Alexandra and Gutzeit, Maurilio}, title = {Minimax Euclidean separation rates for testing convex hypotheses in $\mathbb{R}^d$}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {2}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1472}, pages = {3713 -- 3735}, year = {2018}, abstract = {We consider composite-composite testing problems for the expectation in the Gaussian sequence model where the null hypothesis corresponds to a closed convex subset $C$ of $\mathbb{R}^d$. We adopt a minimax point of view and our primary objective is to describe the smallest Euclidean distance between the null and alternative hypotheses such that there is a test with small total error probability. In particular, we focus on the dependence of this distance on the dimension $d$ and the variance $1/n$, giving rise to the minimax separation rate. In this paper we discuss lower and upper bounds on this rate for different smooth and non-smooth choices for $C$.}, language = {en} }
@article{BlanchardHoffmannReiss2018a, author = {Blanchard, Gilles and Hoffmann, Marc and Reiss, Markus}, title = {Early stopping for statistical inverse problems via truncated SVD estimation}, series = {Electronic journal of statistics}, volume = {12}, journal = {Electronic journal of statistics}, number = {2}, publisher = {Institute of Mathematical Statistics}, address = {Cleveland}, issn = {1935-7524}, doi = {10.1214/18-EJS1482}, pages = {3204 -- 3231}, year = {2018}, abstract = {We consider truncated SVD (or spectral cut-off, projection) estimators for a prototypical statistical inverse problem in dimension $D$. Since calculating the singular value decomposition (SVD) only for the largest singular values is much less costly than the full SVD, our aim is to select a data-driven truncation level $\hat{m} \in \{1, \ldots, D\}$ based only on the knowledge of the first $\hat{m}$ singular values and vectors. We analyse in detail whether sequential early stopping rules of this type can preserve statistical optimality. Information-constrained lower bounds and matching upper bounds for a residual-based stopping rule are provided, which give a clear picture of the situations in which optimal sequential adaptation is feasible. Finally, a hybrid two-step approach is proposed which allows for classical oracle inequalities while considerably reducing numerical complexity.}, language = {en} }
@article{BlanchardMuecke2018, author = {Blanchard, Gilles and M{\"u}cke, Nicole}, title = {Optimal rates for regularization of statistical inverse learning problems}, series = {Foundations of Computational Mathematics}, volume = {18}, journal = {Foundations of Computational Mathematics}, number = {4}, publisher = {Springer}, address = {New York}, issn = {1615-3375}, doi = {10.1007/s10208-017-9359-7}, pages = {971 -- 1013}, year = {2018}, abstract = {We consider a statistical inverse learning (also called inverse regression) problem, where we observe the image of a function $f$ through a linear operator $A$ at i.i.d. random design points $X_i$, superposed with an additive noise. The distribution of the design points is unknown and can be very general. We analyze simultaneously the direct (estimation of $Af$) and the inverse (estimation of $f$) learning problems. In this general framework, we obtain strong and weak minimax optimal rates of convergence (as the number of observations $n$ grows large) for a large class of spectral regularization methods over regularity classes defined through appropriate source conditions. This improves on or completes previous results obtained in related settings. The optimality of the obtained rates is shown not only in the exponent in $n$ but also in the explicit dependence of the constant factor on the variance of the noise and the radius of the source condition set.}, language = {en} }