@article{KatzSamuelsBlanchardScott2019,
  author = {Katz-Samuels, Julian and Blanchard, Gilles and Scott, Clayton},
  title = {Decontamination of Mutual Contamination Models},
  series = {Journal of machine learning research},
  volume = {20},
  journal = {Journal of machine learning research},
  publisher = {Microtome Publishing},
  address = {Cambridge, Mass.},
  issn = {1532-4435},
  pages = {57},
  year = {2019},
  abstract = {Many machine learning problems can be characterized by mutual contamination models. In these problems, one observes several random samples from different convex combinations of a set of unknown base distributions, and the goal is to infer these base distributions. This paper considers the general setting where the base distributions are defined on arbitrary probability spaces. We examine three popular machine learning problems that arise in this general setting: multiclass classification with label noise, demixing of mixed membership models, and classification with partial labels. In each case, we give sufficient conditions for identifiability and present algorithms for the infinite and finite sample settings, with associated performance guarantees.},
  language = {en}
}

@article{KawanabeBlanchardSugiyamaetal.2006,
  author = {Kawanabe, Motoaki and Blanchard, Gilles and Sugiyama, Masashi and Spokoiny, Vladimir G. and M{\"u}ller, Klaus-Robert},
  title = {A novel dimension reduction procedure for searching non-Gaussian subspaces},
  issn = {0302-9743},
  doi = {10.1007/11679363_19},
  year = {2006},
  abstract = {In this article, we consider high-dimensional data that contains a low-dimensional non-Gaussian structure contaminated with Gaussian noise, and we propose a new linear method to identify the non-Gaussian subspace. Our method, NGCA (Non-Gaussian Component Analysis), is based on a very general semi-parametric framework and has a theoretical guarantee that the estimation error of finding the non-Gaussian components tends to zero at a parametric rate. NGCA can be used not only as preprocessing for ICA, but also for extracting and visualizing more general structures like clusters. A numerical study demonstrates the usefulness of our method.},
  language = {en}
}

@article{KloftBlanchard2012,
  author = {Kloft, Marius and Blanchard, Gilles},
  title = {On the Convergence Rate of $\ell_p$-Norm Multiple Kernel Learning},
  series = {Journal of machine learning research},
  volume = {13},
  journal = {Journal of machine learning research},
  publisher = {Microtome Publishing},
  address = {Brookline},
  issn = {1532-4435},
  pages = {2465--2502},
  year = {2012},
  abstract = {We derive an upper bound on the local Rademacher complexity of $\ell_p$-norm multiple kernel learning, which yields a tighter excess risk bound than global approaches. Previous local approaches analyzed only the case $p = 1$, while our analysis covers all cases $1 \leq p \leq \infty$, assuming the different feature mappings corresponding to the different kernels to be uncorrelated. We also show a lower bound demonstrating that the bound is tight, and derive consequences regarding excess loss, namely fast convergence rates of the order $O\!\big(n^{-\frac{\alpha}{1+\alpha}}\big)$, where $\alpha$ is the minimum eigenvalue decay rate of the individual kernels.},
  language = {en}
}

@article{MiethKloftRodriguezetal.2016,
  author = {Mieth, Bettina and Kloft, Marius and Rodriguez, Juan Antonio and Sonnenburg, Soren and Vobruba, Robin and Morcillo-Suarez, Carlos and Farre, Xavier and Marigorta, Urko M.
            and Fehr, Ernst and Dickhaus, Thorsten and Blanchard, Gilles and Schunk, Daniel and Navarro, Arcadi and M{\"u}ller, Klaus-Robert},
  title = {Combining Multiple Hypothesis Testing with Machine Learning Increases the Statistical Power of Genome-wide Association Studies},
  series = {Scientific reports},
  volume = {6},
  journal = {Scientific reports},
  publisher = {Nature Publ. Group},
  address = {London},
  issn = {2045-2322},
  doi = {10.1038/srep36671},
  pages = {14},
  year = {2016},
  abstract = {The standard approach to the analysis of genome-wide association studies (GWAS) is based on testing each position in the genome individually for statistical significance of its association with the phenotype under investigation. To improve the analysis of GWAS, we propose a combination of machine learning and statistical testing that takes correlation structures within the set of SNPs under investigation into account in a mathematically well-controlled manner. The novel two-step algorithm, COMBI, first trains a support vector machine to determine a subset of candidate SNPs and then performs hypothesis tests for these SNPs together with an adequate threshold correction. Applying COMBI to data from a WTCCC study (2007) and measuring performance as replication by independent GWAS published within the 2008--2015 period, we show that our method outperforms ordinary raw p-value thresholding as well as other state-of-the-art methods. COMBI achieves higher power and precision than the examined alternatives while yielding fewer false (i.e., non-replicated) and more true (i.e., replicated) discoveries when its results are validated on later GWAS studies. More than 80\% of the discoveries made by COMBI on WTCCC data have been validated by independent studies. Implementations of the COMBI method are available as part of the GWASpi toolbox 2.0.},
  language = {en}
}

@article{MueckeBlanchard2018,
  author = {M{\"u}cke, Nicole and Blanchard, Gilles},
  title = {Parallelizing spectrally regularized kernel algorithms},
  series = {Journal of machine learning research},
  volume = {19},
  journal = {Journal of machine learning research},
  publisher = {Microtome Publishing},
  address = {Cambridge, Mass.},
  issn = {1532-4435},
  pages = {29},
  year = {2018},
  abstract = {We consider a distributed learning approach in supervised learning for a large class of spectral regularization methods in a reproducing kernel Hilbert space (RKHS) framework. The data set of size $n$ is partitioned into $m = O(n^{\alpha})$, $\alpha < 1/2$, disjoint subsamples. On each subsample, some spectral regularization method (belonging to a large class, including in particular Kernel Ridge Regression, $L_2$-boosting and spectral cut-off) is applied. The regression function $f$ is then estimated via simple averaging, leading to a substantial reduction in computation time. We show that minimax-optimal rates of convergence are preserved if $m$ grows sufficiently slowly (corresponding to an upper bound for $\alpha$) as $n \to \infty$, depending on the smoothness assumptions on $f$ and the intrinsic dimensionality. In spirit, the analysis relies on a classical bias/stochastic error analysis.},
  language = {en}
}