@article{KawanabeBlanchardSugiyamaetal.2006,
  author = {Kawanabe, Motoaki and Blanchard, Gilles and Sugiyama, Masashi and Spokoiny, Vladimir G. and M{\"u}ller, Klaus-Robert},
  title = {A novel dimension reduction procedure for searching non-Gaussian subspaces},
  issn = {0302-9743},
  doi = {10.1007/11679363_19},
  year = {2006},
  abstract = {In this article, we consider high-dimensional data which contains a low-dimensional non-Gaussian structure contaminated with Gaussian noise and propose a new linear method to identify the non-Gaussian subspace. Our method, NGCA (Non-Gaussian Component Analysis), is based on a very general semi-parametric framework and has a theoretical guarantee that the estimation error of finding the non-Gaussian components tends to zero at a parametric rate. NGCA can be used not only as preprocessing for ICA, but also for extracting and visualizing more general structures like clusters. A numerical study demonstrates the usefulness of our method.},
  language = {en}
}

@article{BlanchardFlaskaHandyetal.2016,
  author = {Blanchard, Gilles and Flaska, Marek and Handy, Gregory and Pozzi, Sara and Scott, Clayton},
  title = {Classification with asymmetric label noise: Consistency and maximal denoising},
  series = {Electronic journal of statistics},
  volume = {10},
  journal = {Electronic journal of statistics},
  publisher = {Institute of Mathematical Statistics},
  address = {Cleveland},
  issn = {1935-7524},
  doi = {10.1214/16-EJS1193},
  pages = {2780 -- 2824},
  year = {2016},
  abstract = {In many real-world classification problems, the labels of training examples are randomly corrupted. Most previous theoretical work on classification with label noise assumes that the two classes are separable, that the label noise is independent of the true class label, or that the noise proportions for each class are known. In this work, we give conditions that are necessary and sufficient for the true class-conditional distributions to be identifiable. These conditions are weaker than those analyzed previously, and allow for the classes to be nonseparable and the noise levels to be asymmetric and unknown. The conditions essentially state that a majority of the observed labels are correct and that the true class-conditional distributions are ``mutually irreducible,'' a concept we introduce that limits the similarity of the two distributions. For any label noise problem, there is a unique pair of true class-conditional distributions satisfying the proposed conditions, and we argue that this pair corresponds in a certain sense to maximal denoising of the observed distributions. Our results are facilitated by a connection to ``mixture proportion estimation,'' which is the problem of estimating the maximal proportion of one distribution that is present in another. We establish a novel rate of convergence result for mixture proportion estimation and apply this to obtain consistency of a discrimination rule based on surrogate loss minimization. Experimental results on benchmark data and a nuclear particle classification problem demonstrate the efficacy of our approach.},
  language = {en}
}

@article{MiethKloftRodriguezetal.2016,
  author = {Mieth, Bettina and Kloft, Marius and Rodriguez, Juan Antonio and Sonnenburg, S{\"o}ren and Vobruba, Robin and Morcillo-Suarez, Carlos and Farre, Xavier and Marigorta, Urko M.
    and Fehr, Ernst and Dickhaus, Thorsten and Blanchard, Gilles and Schunk, Daniel and Navarro, Arcadi and M{\"u}ller, Klaus-Robert},
  title = {Combining Multiple Hypothesis Testing with Machine Learning Increases the Statistical Power of Genome-wide Association Studies},
  series = {Scientific reports},
  volume = {6},
  journal = {Scientific reports},
  publisher = {Nature Publ. Group},
  address = {London},
  issn = {2045-2322},
  doi = {10.1038/srep36671},
  pages = {14},
  year = {2016},
  abstract = {The standard approach to the analysis of genome-wide association studies (GWAS) is based on testing each position in the genome individually for statistical significance of its association with the phenotype under investigation. To improve the analysis of GWAS, we propose a combination of machine learning and statistical testing that takes into account, in a mathematically well-controlled manner, correlation structures within the set of SNPs under investigation. The novel two-step algorithm, COMBI, first trains a support vector machine to determine a subset of candidate SNPs and then performs hypothesis tests for these SNPs together with an adequate threshold correction. Applying COMBI to data from a WTCCC study (2007) and measuring performance as replication by independent GWAS published within the 2008--2015 period, we show that our method outperforms ordinary raw p-value thresholding as well as other state-of-the-art methods. COMBI achieves higher power and precision than the examined alternatives while yielding fewer false (i.e. non-replicated) and more true (i.e. replicated) discoveries when its results are validated on later GWAS studies. More than 80\% of the discoveries made by COMBI on WTCCC data have been validated by independent studies. Implementations of the COMBI method are available as part of the GWASpi toolbox 2.0.},
  language = {en}
}

@article{BlanchardZadorozhnyi2019,
  author = {Blanchard, Gilles and Zadorozhnyi, Oleksandr},
  title = {Concentration of weakly dependent Banach-valued sums and applications to statistical learning methods},
  series = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability},
  volume = {25},
  journal = {Bernoulli : official journal of the Bernoulli Society for Mathematical Statistics and Probability},
  number = {4B},
  publisher = {International Statistical Institute},
  address = {Voorburg},
  issn = {1350-7265},
  doi = {10.3150/18-BEJ1095},
  pages = {3421 -- 3458},
  year = {2019},
  abstract = {We obtain a Bernstein-type inequality for sums of Banach-valued random variables satisfying a weak dependence assumption of general type, under certain smoothness assumptions on the underlying Banach norm. We use this inequality to investigate, in the asymptotic regime, error upper bounds for the broad family of spectral regularization methods for reproducing kernel decision rules when trained on a sample coming from a $\tau$-mixing process.},
  language = {en}
}

@unpublished{BlanchardKraemer2016a,
  author = {Blanchard, Gilles and Kr{\"a}mer, Nicole},
  title = {Convergence rates of kernel conjugate gradient for random design regression},
  volume = {5},
  number = {8},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address = {Potsdam},
  issn = {2193-6943},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-94195},
  pages = {31},
  year = {2016},
  abstract = {We prove statistical rates of convergence for kernel-based least squares regression from i.i.d.
    data using a conjugate gradient algorithm, where regularization against overfitting is obtained by early stopping. This method is related to Kernel Partial Least Squares, a regression method that combines supervised dimensionality reduction with least squares projection. Following the setting introduced in earlier related literature, we study so-called ``fast convergence rates'' depending on the regularity of the target regression function (measured by a source condition in terms of the kernel integral operator) and on the effective dimensionality of the data mapped into the kernel space. We obtain upper bounds, essentially matching known minimax lower bounds, for the $L^2$ (prediction) norm as well as for the stronger Hilbert norm, if the true regression function belongs to the reproducing kernel Hilbert space. If the latter assumption is not fulfilled, we obtain similar convergence rates for appropriate norms, provided additional unlabeled data are available.},
  language = {en}
}

@article{BlanchardKraemer2016,
  author = {Blanchard, Gilles and Kr{\"a}mer, Nicole},
  title = {Convergence rates of kernel conjugate gradient for random design regression},
  series = {Analysis and applications},
  volume = {14},
  journal = {Analysis and applications},
  publisher = {World Scientific},
  address = {Singapore},
  issn = {0219-5305},
  doi = {10.1142/S0219530516400017},
  pages = {763 -- 794},
  year = {2016},
  abstract = {We prove statistical rates of convergence for kernel-based least squares regression from i.i.d. data using a conjugate gradient (CG) algorithm, where regularization against overfitting is obtained by early stopping. This method is related to Kernel Partial Least Squares, a regression method that combines supervised dimensionality reduction with least squares projection. Following the setting introduced in earlier related literature, we study so-called ``fast convergence rates'' depending on the regularity of the target regression function (measured by a source condition in terms of the kernel integral operator) and on the effective dimensionality of the data mapped into the kernel space. We obtain upper bounds, essentially matching known minimax lower bounds, for the $L^2$ (prediction) norm as well as for the stronger Hilbert norm, if the true regression function belongs to the reproducing kernel Hilbert space. If the latter assumption is not fulfilled, we obtain similar convergence rates for appropriate norms, provided additional unlabeled data are available.},
  language = {en}
}

@misc{BlanchardScott2018,
  author = {Blanchard, Gilles and Scott, Clayton},
  title = {Corrigendum to: Classification with asymmetric label noise},
  series = {Electronic journal of statistics},
  volume = {12},
  journal = {Electronic journal of statistics},
  number = {1},
  publisher = {Institute of Mathematical Statistics},
  address = {Cleveland},
  issn = {1935-7524},
  doi = {10.1214/18-EJS1422},
  pages = {1779 -- 1781},
  year = {2018},
  abstract = {We point out a flaw in Lemma 15 of [1].
    We also indicate how the main results of that section remain valid using a modified argument.},
  language = {en}
}

@article{KatzSamuelsBlanchardScott2019,
  author = {Katz-Samuels, Julian and Blanchard, Gilles and Scott, Clayton},
  title = {Decontamination of Mutual Contamination Models},
  series = {Journal of machine learning research},
  volume = {20},
  journal = {Journal of machine learning research},
  publisher = {Microtome Publishing},
  address = {Cambridge, Mass.},
  issn = {1532-4435},
  pages = {57},
  year = {2019},
  abstract = {Many machine learning problems can be characterized by mutual contamination models. In these problems, one observes several random samples from different convex combinations of a set of unknown base distributions, and the goal is to infer these base distributions. This paper considers the general setting where the base distributions are defined on arbitrary probability spaces. We examine three popular machine learning problems that arise in this general setting: multiclass classification with label noise, demixing of mixed membership models, and classification with partial labels. In each case, we give sufficient conditions for identifiability and present algorithms for the infinite and finite sample settings, with associated performance guarantees.},
  language = {en}
}

@unpublished{BlanchardMathe2012a,
  author = {Blanchard, Gilles and Math{\'e}, Peter},
  title = {Discrepancy principle for statistical inverse problems with application to conjugate gradient iteration},
  url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57117},
  year = {2012},
  abstract = {The authors discuss the use of the discrepancy principle for statistical inverse problems when the underlying operator is of trace class. Under this assumption the discrepancy principle is well defined; however, a plain use of it may occasionally fail, and it will yield sub-optimal rates. Therefore, a modification of the discrepancy is introduced, which corrects both of the above deficiencies. For a variety of linear regularization schemes, as well as for conjugate gradient iteration, this modification is shown to yield order-optimal a priori error bounds under general smoothness assumptions. A posteriori error control is also possible, although in general at a sub-optimal rate. This study uses and complements previous results for bounded deterministic noise.},
  language = {en}
}

@article{BlanchardMathe2012,
  author = {Blanchard, Gilles and Math{\'e}, Peter},
  title = {Discrepancy principle for statistical inverse problems with application to conjugate gradient iteration},
  series = {Inverse problems : an international journal of inverse problems, inverse methods and computerised inversion of data},
  volume = {28},
  journal = {Inverse problems : an international journal of inverse problems, inverse methods and computerised inversion of data},
  number = {11},
  publisher = {IOP Publ. Ltd.},
  address = {Bristol},
  issn = {0266-5611},
  doi = {10.1088/0266-5611/28/11/115011},
  pages = {23},
  year = {2012},
  abstract = {The authors discuss the use of the discrepancy principle for statistical inverse problems when the underlying operator is of trace class. Under this assumption the discrepancy principle is well defined; however, a plain use of it may occasionally fail, and it will yield sub-optimal rates. Therefore, a modification of the discrepancy is introduced, which corrects both of the above deficiencies.
    For a variety of linear regularization schemes, as well as for conjugate gradient iteration, it is shown to yield order-optimal a priori error bounds under general smoothness assumptions. A posteriori error control is also possible, although in general at a sub-optimal rate. This study uses and complements previous results for bounded deterministic noise.},
  language = {en}
}