@unpublished{BlanchardMathe2012a,
  author   = {Blanchard, Gilles and Math{\'e}, Peter},
  title    = {Discrepancy principle for statistical inverse problems with application to conjugate gradient iteration},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57117},
  year     = {2012},
  abstract = {The authors discuss the use of the discrepancy principle for statistical inverse problems when the underlying operator is of trace class. Under this assumption the discrepancy principle is well defined; however, a plain use of it may occasionally fail, and it will yield sub-optimal rates. Therefore, a modification of the discrepancy is introduced, which takes into account both of the above deficiencies. For a variety of linear regularization schemes as well as for conjugate gradient iteration, this modification is shown to yield order-optimal a priori error bounds under general smoothness assumptions. A posteriori error control is also possible, albeit at a sub-optimal rate in general. This study uses and complements previous results for bounded deterministic noise.},
  language = {en}
}

@article{BlanchardMathe2012b,
  author    = {Blanchard, Gilles and Math{\'e}, Peter},
  title     = {Discrepancy principle for statistical inverse problems with application to conjugate gradient iteration},
  series    = {Inverse problems : an international journal of inverse problems, inverse methods and computerised inversion of data},
  volume    = {28},
  journal   = {Inverse problems : an international journal of inverse problems, inverse methods and computerised inversion of data},
  number    = {11},
  publisher = {IOP Publ. Ltd.},
  address   = {Bristol},
  issn      = {0266-5611},
  doi       = {10.1088/0266-5611/28/11/115011},
  pages     = {23},
  year      = {2012},
  abstract  = {The authors discuss the use of the discrepancy principle for statistical inverse problems when the underlying operator is of trace class. Under this assumption the discrepancy principle is well defined; however, a plain use of it may occasionally fail, and it will yield sub-optimal rates. Therefore, a modification of the discrepancy is introduced, which corrects both of the above deficiencies. For a variety of linear regularization schemes as well as for conjugate gradient iteration, it is shown to yield order-optimal a priori error bounds under general smoothness assumptions. A posteriori error control is also possible, albeit at a sub-optimal rate in general. This study uses and complements previous results for bounded deterministic noise.},
  language  = {en}
}

@article{KloftBlanchard2012,
  author    = {Kloft, Marius and Blanchard, Gilles},
  title     = {On the Convergence Rate of {$\ell_p$}-Norm Multiple Kernel Learning},
  series    = {Journal of Machine Learning Research},
  volume    = {13},
  journal   = {Journal of Machine Learning Research},
  publisher = {Microtome Publ.},
  address   = {Brookline},
  issn      = {1532-4435},
  pages     = {2465--2502},
  year      = {2012},
  abstract  = {We derive an upper bound on the local Rademacher complexity of $\ell_p$-norm multiple kernel learning, which yields a tighter excess risk bound than global approaches. Previous local approaches analyzed the case $p = 1$ only, while our analysis covers all cases $1 \le p \le \infty$, assuming the different feature mappings corresponding to the different kernels to be uncorrelated.
We also prove a lower bound showing that the bound is tight, and derive consequences regarding excess loss, namely fast convergence rates of the order $O(n^{-\alpha/(1+\alpha)})$, where $\alpha$ is the minimum eigenvalue decay rate of the individual kernels.},
  language  = {en}
}

@unpublished{BlanchardDelattreRoquain2012,
  author   = {Blanchard, Gilles and Delattre, Sylvain and Roquain, {\'E}tienne},
  title    = {Testing over a continuum of null hypotheses},
  url      = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-56877},
  year     = {2012},
  abstract = {We introduce a theoretical framework for performing statistical hypothesis testing simultaneously over a fairly general, possibly uncountably infinite, set of null hypotheses. This extends the standard statistical setting for multiple hypothesis testing, which is restricted to a finite set. This work is motivated by numerous modern applications where the observed signal is modeled by a stochastic process over a continuum. As a measure of type I error, we extend the concept of false discovery rate (FDR) to this setting. The FDR is defined as the average ratio of the measure of two random sets, so that its study presents some challenge and is of some intrinsic mathematical interest. Our main result shows how to use the p-value process to control the FDR at a nominal level, either under arbitrary dependence of p-values, or under the assumption that the finite-dimensional distributions of the p-value process have positive correlations of a specific type (weak PRDS). Both cases generalize existing results established in the finite setting, the latter leading to a less conservative procedure. The interest of this approach is demonstrated in several non-parametric examples: testing the mean/signal in a Gaussian white noise model, testing the intensity of a Poisson process, and testing the c.d.f. of i.i.d. random variables. Conceptually, an interesting feature of the setting advocated here is that it focuses directly on the intrinsic hypothesis space associated with a testing model on a random process, without referring to an arbitrary discretization.},
  language = {en}
}