underfit and overfit(欠拟合和过拟合)

欠拟合和过拟合

实践中,如果测试数据集是给定的,我们通常用机器学习模型在该测试数据集上的误差来近似泛化误差。

基于上述重要结论,以下两种拟合问题值得注意:

  • 欠拟合:机器学习模型无法得到较低训练误差。
  • 过拟合:机器学习模型的训练误差远小于其在测试数据集上的误差。

我们要尽可能同时避免欠拟合和过拟合的出现。虽然有很多因素可能导致这两种拟合问题,在这里我们重点讨论两个因素:

    模型的选择和训练数据集的大小。

underfit and overfit(欠拟合和过拟合)
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon

# Synthetic dataset for a cubic polynomial regression:
#   y = 1.2*x - 3.4*x^2 + 5.6*x^3 + 5.0 + noise
num_train = 100
num_test = 100
true_w = [1.2, -3.4, 5.6]
true_b = 5.0

# Scalar inputs of shape (num_train + num_test, 1), drawn from N(0, 1).
x = nd.random.normal(shape=(num_train + num_test, 1))
# Expand each scalar into the polynomial features [x, x^2, x^3]
# (nd.concat joins along dim=1 by default, giving shape (N, 3)).
X = nd.concat(x, nd.power(x, 2), nd.power(x, 3))
# Noise-free targets from the ground-truth cubic model.
y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_w[2] * X[:, 2] + true_b
# Add small Gaussian observation noise (std 0.1).
y += .1 * nd.random.normal(shape=y.shape)

# Bare tuple: displayed as the notebook cell's output (first 5 samples).
('x:', x[:5], 'X:', X[:5], 'y:', y[:5])
D:\Users\Administrator\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

('x:', 
 [[ 1.1630785 ]
  [ 0.4838046 ]
  [ 0.29956347]
  [ 0.15302546]
  [-1.1688148 ]]
 <NDArray 5x1 @cpu(0)>, 'X:', 
 [[ 1.1630785   1.3527517   1.5733565 ]
  [ 0.4838046   0.2340669   0.11324265]
  [ 0.29956347  0.08973827  0.02688231]
  [ 0.15302546  0.02341679  0.00358337]
  [-1.1688148   1.366128   -1.5967506 ]]
 <NDArray 5x3 @cpu(0)>, 'y:', 
 [ 10.534649    5.530093    5.1570797   5.0066853 -10.068435 ]
 <NDArray 5 @cpu(0)>)

%matplotlib inline
import matplotlib as mpl
# Higher DPI so the loss curves render crisply inline in the notebook.
mpl.rcParams['figure.dpi']= 120
import matplotlib.pyplot as plt

def train(X_train, X_test, y_train, y_test):
    """Fit a linear regression net with SGD, plot the per-epoch train/test
    L2 losses, and return the learned weight and bias NDArrays."""
    # A single dense output unit: plain linear regression on the features.
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(1))
    net.initialize()
    # Fixed default hyperparameters for every experiment.
    learning_rate = 0.01
    epochs = 100
    batch_size = min(10, y_train.shape[0])
    data_iter_train = gluon.data.DataLoader(
        gluon.data.ArrayDataset(X_train, y_train), batch_size, shuffle=True)
    # Plain SGD paired with squared (L2) loss.
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': learning_rate})
    square_loss = gluon.loss.L2Loss()
    # Per-epoch loss histories for plotting.
    train_loss, test_loss = [], []
    for _ in range(epochs):
        for data, label in data_iter_train:
            # Record the forward pass so backward() can compute gradients.
            with autograd.record():
                loss = square_loss(net(data), label)
            loss.backward()
            trainer.step(batch_size)
        # After each epoch, log the mean loss over the full train/test sets.
        train_loss.append(
            square_loss(net(X_train), y_train).mean().asscalar())
        test_loss.append(
            square_loss(net(X_test), y_test).mean().asscalar())
    # Show both curves on one figure.
    plt.plot(train_loss)
    plt.plot(test_loss)
    plt.legend(['train','test'])
    plt.show()
    return ('learned weight', net[0].weight.data(),
            'learned bias', net[0].bias.data())

# Well-specified fit: cubic features, 100 training samples.
# Learned parameters should closely recover true_w and true_b.
train(X[:num_train, :], X[num_train:, :], y[:num_train], y[num_train:])
underfit and overfit(欠拟合和过拟合)
('learned weight', 
 [[ 1.1744344 -3.3910487  5.6037426]]
 <NDArray 1x3 @cpu(0)>, 'learned bias', 
 [4.9861517]
 <NDArray 1 @cpu(0)>)

# Underfitting: train on the raw scalar x only, so the linear model
# cannot capture the cubic relationship — training error stays high.
train(x[:num_train, :], x[num_train:, :], y[:num_train], y[num_train:])
underfit and overfit(欠拟合和过拟合)
('learned weight', 
 [[22.693857]]
 <NDArray 1x1 @cpu(0)>, 'learned bias', 
 [-0.65745103]
 <NDArray 1 @cpu(0)>)

# Overfitting: correct cubic features but only 2 training samples,
# so training error is tiny while test error stays large.
train(X[0:2, :], X[num_train:, :], y[0:2], y[num_train:])
underfit and overfit(欠拟合和过拟合)
('learned weight', 
 [[2.0588458 1.9273669 2.0477402]]
 <NDArray 1x3 @cpu(0)>, 'learned bias', 
 [2.482129]
 <NDArray 1 @cpu(0)>)