
[PyTorch] Regularization: L1 Norm, L2 Norm, Dropout


Regularization
When the dataset is small relative to the model's capacity, it is easy to reach very high training accuracy while the test error stays large. To address this overfitting, we apply regularization in one of the following ways:

  • Add an L1-norm penalty term to the objective function
  • Add an L2-norm penalty term to the objective function
  • Use dropout; combined with data augmentation it can give even better results

For a detailed discussion of regularization, see: https://www.cnblogs.com/pinking/p/9310728.html
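As a reminder (standard definitions, consistent with the reference above), both penalties add a term to the original objective $J(w, b)$:

$$J_{L1}(w,b) = J(w,b) + \lambda \sum_i |w_i|, \qquad J_{L2}(w,b) = J(w,b) + \frac{\lambda}{2} \sum_i w_i^2$$

where $\lambda \ge 0$ controls the penalty strength; $\lambda = 0$ recovers the unregularized objective.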
Code implementation

L2 regularization:

# High-dimensional linear regression
import torch
import torch.nn as nn
import numpy as np
import sys
import matplotlib.pyplot as plt
%matplotlib inline
n_train,n_test,num_inputs = 20,100,200
true_w,true_b = torch.ones(num_inputs,1)*0.01,0.05

features = torch.randn((n_train +n_test,num_inputs))
labels = torch.matmul(features,true_w)+true_b
labels += torch.tensor(np.random.normal(0,0.01,size = labels.size()),dtype = torch.float)
train_features ,test_features = features[:n_train,:],features[n_train:,:]
train_labels,test_labels = labels[:n_train],labels[n_train:]
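Note that true_w and true_b above encode the generating function $y = 0.05 + \sum_{i=1}^{200} 0.01\,x_i + \epsilon$ with noise $\epsilon \sim \mathcal{N}(0, 0.01^2)$. With 200 features but only 20 training examples, the model overfits easily, which is exactly the setting where weight decay helps.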

# Implement weight decay by adding an L2 penalty term to the objective function
# Initialize the parameters
def init_params():
    w = torch.randn((num_inputs,1),requires_grad = True)
    b = torch.zeros(1, requires_grad=True)
    return [w,b]

# Define the L2 penalty: (1/2) * sum_i(w_i^2)
def l2_penalty(w):
    return (w**2).sum() / 2
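The post defines l2_penalty but the training code below uses the optimizer's built-in weight_decay instead. For completeness, a minimal sketch of how the penalty would enter a hand-written loss (an assumption, not part of the original post; lambd is a hypothetical regularization strength):

# Sketch only: L2-regularized squared loss for a hand-written training loop.
# lambd is a hypothetical hyperparameter controlling the penalty strength.
def l2_regularized_loss(X, y, w, b, lambd):
    pred = torch.matmul(X, w) + b
    return ((pred - y) ** 2 / 2).mean() + lambd * l2_penalty(w)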
# Plotting helper for training/testing curves; plt.semilogy uses a log scale on the y axis
# x_vals, y_vals: x coordinates and training loss; x_label, y_label: axis labels
# x2_vals, y2_vals: optional test loss; legend: curve labels; figsize=(3.5, 2.5): figure size
def semilogy(x_vals,y_vals,x_label,y_label,x2_vals=None,y2_vals=None,
            legend=None,figsize=(3.5,2.5)):
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.semilogy(x_vals,y_vals)
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals,y2_vals,linestyle=':')
        plt.legend(legend)

# Define training and evaluation
batch_size, num_epochs, lr, loss = 1, 100, 0.003, torch.nn.MSELoss()
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)

def fit_and_plot_pytorch(wd):
    net = nn.Linear(num_inputs, 1)
    nn.init.normal_(net.weight, mean=0, std=1)
    nn.init.normal_(net.bias, mean=0, std=1)
    # Decay the weight parameters only; weight parameter names usually end in 'weight'
    optimizer_w = torch.optim.SGD(params=[net.weight], lr=lr, weight_decay=wd)
    # Do not decay the bias parameter
    optimizer_b = torch.optim.SGD(params=[net.bias], lr=lr)
    
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            # Mean squared error loss
            l = loss(net(X), y).mean()
            optimizer_w.zero_grad()
            optimizer_b.zero_grad()
            
            l.backward()
            
            # Call step() on each optimizer to update the weights and the bias separately
            optimizer_w.step()
            optimizer_b.step()
        # Record the training and test losses
        train_ls.append(loss(net(train_features), train_labels).mean().item())
        test_ls.append(loss(net(test_features), test_labels).mean().item())
    semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                 range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', net.weight.data.norm().item())
# A larger wd hyperparameter gives the penalty term more weight
fit_and_plot_pytorch(5)
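For comparison, it is worth also running the unregularized baseline; with wd=0 one would expect the training loss to drop much faster than the test loss and the learned weights to grow large, which is the overfitting the penalty is meant to suppress:

fit_and_plot_pytorch(0)  # wd=0: no weight decay, expect clear overfitting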

Dropout

# Dropout: does not change the expected value of the input (inverted dropout)
import torch
import torch.nn as nn
import numpy as np
import sys
import torchvision
import torchvision.transforms as transforms

# Load the Fashion-MNIST dataset (download=False assumes the files already exist
# under root; set download=True on first use)
mnist_train = torchvision.datasets.FashionMNIST(root='~/Desktop/OpenCV_demo/Datasets/FashionMNIST', train=True, download=False, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Desktop/OpenCV_demo/Datasets/FashionMNIST', train=False, download=False, transform=transforms.ToTensor())
# Build the training and test iterators (batch_size must be defined before the loaders)
batch_size = 256
if sys.platform.startswith('win'):
    num_workers = 0
else:
    num_workers = 4
train_iter = torch.utils.data.DataLoader(
    mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(
    mnist_test, batch_size=batch_size, shuffle=True, num_workers=num_workers)
# Define dropout (inverted dropout: scale the surviving elements by 1/keep_prob)
def dropout(X, drop_prob):
    X = X.float()
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    # keep_prob == 0 means every element is dropped: return all zeros
    if keep_prob == 0:
        return torch.zeros_like(X)
    # Random mask: each element is kept with probability keep_prob
    mask = (torch.rand(X.shape) < keep_prob).float()
    return mask * X / keep_prob
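A quick sanity check of dropout on a small tensor (hypothetical values): drop_prob=0 returns the input unchanged, drop_prob=1 zeroes everything, and intermediate values zero a random subset while scaling the survivors by 1/keep_prob so the expectation is preserved:

X = torch.arange(16).view(2, 8)
print(dropout(X, 0))    # identity: every element kept
print(dropout(X, 0.5))  # about half zeroed, survivors multiplied by 2
print(dropout(X, 1))    # all zeros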
   
# Evaluate the model's accuracy on a data iterator
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        if isinstance(net, torch.nn.Module):
            # Evaluation mode: this turns off dropout
            net.eval()
            # Count predictions whose row-wise argmax matches the label
            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            net.train()
        else:
            # Hand-written model: pass is_training=False if the function supports it
            if 'is_training' in net.__code__.co_varnames:
                acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
            else:
                acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    # Return the fraction of correct predictions
    return acc_sum / n

num_epochs, lr = 5, 100  # lr is only used by the hand-written sgd fallback; batch_size was set above
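train_softmax below falls back to a hand-written sgd helper when no optimizer is supplied, but the original post never defines it. A minimal sketch of the usual mini-batch update, following the d2l-style convention this code appears to be based on (an assumption; the final call passes an optimizer, so this branch is not exercised here):

# Assumed helper, not in the original post: plain mini-batch SGD.
# Divides the gradient by batch_size, following the d2l convention of a summed loss.
def sgd(params, lr, batch_size):
    for param in params:
        param.data -= lr * param.grad / batch_size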

def train_softmax(net,train_iter,test_iter,loss,num_epochs,batch_size,
                  params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        # Running totals: summed loss, number correct, number of samples
        train_l_sum,train_acc_sum,n = 0.0,0.0,0
        
        for X, y in train_iter:
            y_hat = net(X)
            # nn.CrossEntropyLoss averages over the batch by default
            l = loss(y_hat, y).sum()

            # Zero the gradients, via the optimizer or on the raw parameters
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()

            # l is a batch mean, so re-weight by the batch size before summing
            train_l_sum += l.item() * y.shape[0]
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
            
        test_acc = evaluate_accuracy(test_iter,net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
# Define the network model
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer,self).__init__()
    def forward(self,x):
        return x.view(x.shape[0],-1)
        
drop_prob1,drop_prob2 = 0.2,0.5
num_inputs,num_outputs,num_hiddens1,num_hiddens2 = 784,10,256,256
net = nn.Sequential(
        FlattenLayer(),
        nn.Linear(num_inputs, num_hiddens1),
        nn.ReLU(),
        nn.Dropout(drop_prob1),
        nn.Linear(num_hiddens1, num_hiddens2), 
        nn.ReLU(),
        nn.Dropout(drop_prob2),
        nn.Linear(num_hiddens2, num_outputs)
        )

for param in net.parameters():
    nn.init.normal_(param, mean=0, std=0.01)
# Train the network
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss = torch.nn.CrossEntropyLoss()
train_softmax(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
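Once training finishes, dropout must stay disabled at inference time. evaluate_accuracy already handles this via net.eval(); the same applies to any manual prediction, e.g. (a sketch using one test batch):

net.eval()                    # switch off dropout for inference
X, y = next(iter(test_iter))
preds = net(X).argmax(dim=1)  # predicted class indices
net.train()                   # restore training mode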