@unpublished{BlanchardMuecke2016,
  author    = {Blanchard, Gilles and M{\"u}cke, Nicole},
  title     = {Optimal rates for regularization of statistical inverse learning problems},
  volume    = {5},
  number    = {5},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address   = {Potsdam},
  issn      = {2193-6943},
  url       = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-89782},
  pages     = {36},
  year      = {2016},
  abstract  = {We consider a statistical inverse learning problem, where we observe the image of a function f through a linear operator A at i.i.d. random design points X_i, superposed with an additive noise. The distribution of the design points is unknown and can be very general. We analyze simultaneously the direct (estimation of Af) and the inverse (estimation of f) learning problems. In this general framework, we obtain strong and weak minimax optimal rates of convergence (as the number of observations n grows large) for a large class of spectral regularization methods over regularity classes defined through appropriate source conditions. This improves on or completes previous results obtained in related settings. The optimality of the obtained rates is shown not only in the exponent in n but also in the explicit dependence of the constant factor on the variance of the noise and the radius of the source condition set.},
  language  = {en}
}

@article{BlanchardMuecke2018,
  author    = {Blanchard, Gilles and M{\"u}cke, Nicole},
  title     = {Optimal rates for regularization of statistical inverse learning problems},
  journal   = {Foundations of Computational Mathematics},
  volume    = {18},
  number    = {4},
  publisher = {Springer},
  address   = {New York},
  issn      = {1615-3375},
  doi       = {10.1007/s10208-017-9359-7},
  pages     = {971--1013},
  year      = {2018},
  abstract  = {We consider a statistical inverse learning (also called inverse regression) problem, where we observe the image of a function f through a linear operator A at i.i.d. random design points X_i, superposed with an additive noise. The distribution of the design points is unknown and can be very general. We analyze simultaneously the direct (estimation of Af) and the inverse (estimation of f) learning problems. In this general framework, we obtain strong and weak minimax optimal rates of convergence (as the number of observations n grows large) for a large class of spectral regularization methods over regularity classes defined through appropriate source conditions. This improves on or completes previous results obtained in related settings. The optimality of the obtained rates is shown not only in the exponent in n but also in the explicit dependence of the constant factor on the variance of the noise and the radius of the source condition set.},
  language  = {en}
}

@article{MueckeBlanchard2018,
  author    = {M{\"u}cke, Nicole and Blanchard, Gilles},
  title     = {Parallelizing spectrally regularized kernel algorithms},
  journal   = {Journal of Machine Learning Research},
  volume    = {19},
  publisher = {Microtome Publishing},
  address   = {Cambridge, Mass.},
  issn      = {1532-4435},
  pages     = {29},
  year      = {2018},
  abstract  = {We consider a distributed learning approach in supervised learning for a large class of spectral regularization methods in a reproducing kernel Hilbert space (RKHS) framework. The data set of size n is partitioned into m = O(n^alpha), alpha < 1/2, disjoint subsamples. On each subsample, some spectral regularization method (belonging to a large class, including in particular Kernel Ridge Regression, L2-boosting and spectral cut-off) is applied. The regression function f is then estimated via simple averaging, leading to a substantial reduction in computation time. We show that minimax optimal rates of convergence are preserved if m grows sufficiently slowly (corresponding to an upper bound for alpha) as n -> infinity, depending on the smoothness assumptions on f and the intrinsic dimensionality. In spirit, the analysis relies on a classical bias/stochastic error analysis.},
  language  = {en}
}

@article{BlanchardMuecke2020,
  author    = {Blanchard, Gilles and M{\"u}cke, Nicole},
  title     = {Kernel regression, minimax rates and effective dimensionality},
  journal   = {Analysis and Applications},
  volume    = {18},
  number    = {4},
  publisher = {World Scientific},
  address   = {New Jersey},
  issn      = {0219-5305},
  doi       = {10.1142/S0219530519500258},
  pages     = {683--696},
  year      = {2020},
  abstract  = {We investigate whether kernel regularization methods can achieve minimax convergence rates over a source condition regularity assumption for the target function. These questions have been considered in past literature, but only under specific assumptions about the decay, typically polynomial, of the spectrum of the kernel mapping covariance operator. In the perspective of distribution-free results, we investigate this issue under a much weaker assumption on the eigenvalue decay, allowing for more complex behavior that can reflect different structure of the data at different scales.},
  language  = {en}
}