Algorithmic Stability and Generalization Performance
We present a novel way of obtaining PAC-style bounds on the generalization error of learning algorithms, explicitly using their stability properties. A "stable" learner is one for which the learned solution does not change much under small changes in the training set. The bounds we obtain do not depend on any measure of the complexity of the hypothesis space (e.g. VC dimension) but rather on how the learning algorithm searches this space, and can thus be applied even when the VC dimension is infinite. We demonstrate that regularization networks possess the required stability property and apply our method to obtain new bounds on their generalization performance.
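To make the flavor of such stability-based bounds concrete, the following is a hedged LaTeX sketch, not the paper's exact statement; the notation (training set S of m i.i.d. examples, S^{\setminus i} for S with the i-th example removed, learned hypothesis A_S, loss \ell bounded by M) and the constants follow the authors' later journal formulation in Bousquet and Elisseeff, "Stability and Generalization" (JMLR, 2002), and are given for illustration only.

% Assumed notation (for illustration): S is a training set of m i.i.d.
% examples, S^{\setminus i} is S with the i-th example removed, A_S is the
% hypothesis learned from S, and the loss \ell is bounded by M.

% An algorithm A has uniform stability \beta if, for every S, every index i,
% and every test point z,
\[
  \bigl| \ell(A_S, z) - \ell(A_{S^{\setminus i}}, z) \bigr| \;\le\; \beta .
\]

% For such an algorithm, with probability at least 1 - \delta over the draw
% of S, the true risk R and the empirical risk R_{\mathrm{emp}} satisfy
\[
  R(A_S) \;\le\; R_{\mathrm{emp}}(A_S) \;+\; 2\beta
         \;+\; \bigl(4 m \beta + M\bigr)\sqrt{\frac{\ln(1/\delta)}{2m}} .
\]

Note that the bound is non-trivial only when \beta decays faster than 1/\sqrt{m}; for regularization networks with regularization parameter \lambda, the stability constant scales as \beta = O(1/(\lambda m)), which is what makes the method applicable to them.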
@inproceedings{1437,
  title     = {Algorithmic Stability and Generalization Performance},
  booktitle = {Advances in Neural Information Processing Systems 13},
  abstract  = {We present a novel way of obtaining PAC-style bounds on the generalization error of learning algorithms, explicitly using their stability properties. A {\em stable} learner is one for which the learned solution does not change much under small changes in the training set. The bounds we obtain do not depend on any measure of the complexity of the hypothesis space (e.g. VC dimension) but rather on how the learning algorithm searches this space, and can thus be applied even when the VC dimension is infinite. We demonstrate that regularization networks possess the required stability property and apply our method to obtain new bounds on their generalization performance.},
  pages     = {196--202},
  editor    = {Leen, T. K. and Dietterich, T. G. and Tresp, V.},
  publisher = {MIT Press},
  address   = {Cambridge, MA, USA},
  month     = apr,
  year      = {2001},
  author    = {Bousquet, O. and Elisseeff, A.}
}