How to train an LSTM network with batches in PyTorch
Batched LSTM
# Import the required packages
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data

torch.manual_seed(1)

# Data preparation stage
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

with open("/home/lstm_train.txt", encoding='utf8') as f:
    train_data = []
    word = []
    label = []
    data = f.readline().strip()
    while data:
        data = data.strip()
        sp = data.split(' ')
        if len(sp) == 2:
            word.append(sp[0])
            label.append(sp[1])
        else:
            if len(word) == 100 and 'i-pro' in label:
                train_data.append((word, label))
            word = []
            label = []
        data = f.readline()

word_to_ix = {}
for sent, _ in train_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)

tag_to_ix = {"o": 0, "i-pro": 1}
for i in range(len(train_data)):
    train_data[i] = ([word_to_ix[t] for t in train_data[i][0]],
                     [tag_to_ix[t] for t in train_data[i][1]])

# Dimension of the word embeddings
embedding_dim = 128
# Number of hidden units
hidden_dim = 128
# Batch size
batch_size = 10

class LSTMTagger(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, batch_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        # The linear layer maps from hidden-state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores

    def predict(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores

loss_function = nn.NLLLoss()
model = LSTMTagger(embedding_dim, hidden_dim, len(word_to_ix), len(tag_to_ix), batch_size)
optimizer = optim.SGD(model.parameters(), lr=0.1)

data_set_word = []
data_set_label = []
for data_tuple in train_data:
    data_set_word.append(data_tuple[0])
    data_set_label.append(data_tuple[1])
torch_dataset = Data.TensorDataset(torch.tensor(data_set_word, dtype=torch.long),
                                   torch.tensor(data_set_label, dtype=torch.long))

# Wrap the dataset in a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,   # torch TensorDataset format
    batch_size=batch_size,   # mini-batch size
    shuffle=True,
    # num_workers=2,         # read data with multiple worker processes
)

# Training loop
for epoch in range(200):
    for step, (batch_x, batch_y) in enumerate(loader):
        # Clear the gradients
        model.zero_grad()
        tag_scores = model(batch_x)
        # Flatten (batch, seq_len, tagset) to (batch*seq_len, tagset) for NLLLoss
        tag_scores = tag_scores.view(-1, tag_scores.shape[2])
        batch_y = batch_y.view(batch_y.shape[0] * batch_y.shape[1])
        loss = loss_function(tag_scores, batch_y)
        print(loss)
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()

# Testing
with torch.no_grad():
    inputs = torch.tensor([data_set_word[0]], dtype=torch.long)
    print(inputs)
    tag_scores = model.predict(inputs)
    print(tag_scores.shape)
    print(torch.argmax(tag_scores, dim=2))
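Note that the batching above only works because every training sentence is filtered to exactly 100 tokens, so the samples stack into a single tensor. For variable-length sentences, a common approach is to pad each batch and pack it before the LSTM. The following is a minimal sketch, not part of the original article; the sequences and layer sizes are made up for illustration:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence

# Hypothetical index sequences of different lengths
seqs = [torch.tensor([1, 2, 3, 4]), torch.tensor([5, 6]), torch.tensor([7, 8, 9])]
lengths = torch.tensor([len(s) for s in seqs])

emb = nn.Embedding(10, 8)               # vocab_size=10, embedding_dim=8 (arbitrary)
rnn = nn.LSTM(8, 16, batch_first=True)  # hidden_dim=16 (arbitrary)

# Pad to the length of the longest sequence in the batch
padded = pad_sequence(seqs, batch_first=True, padding_value=0)   # shape (3, 4)
# Pack so the LSTM skips the padded positions
packed = pack_padded_sequence(emb(padded), lengths,
                              batch_first=True, enforce_sorted=False)
packed_out, _ = rnn(packed)
lstm_out, _ = pad_packed_sequence(packed_out, batch_first=True)  # shape (3, 4, 16)

When flattening the scores for nn.NLLLoss, the loss on padded positions can then be excluded via its ignore_index argument.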
Supplement: PyTorch basics - handwritten-digit (MNIST) recognition with an LSTM network
Here is the code:
import numpy as np
import torch
from torch import nn, optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# Training set
train_data = datasets.MNIST(root="./",                       # where to store the data
                            train=True,                      # load the training split
                            transform=transforms.ToTensor(), # convert the data to tensors
                            download=True)                   # download if missing
# Test set
test_data = datasets.MNIST(root="./",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True)
# Batch size
batch_size = 64
# Wrap the training set
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
# Wrap the test set (shuffling is not required here, but harmless)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
# Inspect the shape of one batch
for i, data in enumerate(train_loader):
    inputs, labels = data
    print(inputs.shape)
    print(labels.shape)
    break
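For MNIST this prints torch.Size([64, 1, 28, 28]) for the inputs and torch.Size([64]) for the labels; the network below drops the channel dimension and treats each 28x28 image as a sequence of 28 time steps with 28 features per step.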
# Define the network structure
class LSTM(nn.Module):
    def __init__(self):
        super(LSTM, self).__init__()
        self.lstm = torch.nn.LSTM(
            input_size=28,    # number of input features per time step
            hidden_size=64,   # number of units in the hidden state
            num_layers=1,     # number of stacked LSTM layers
            batch_first=True  # default input layout is (seq_len, batch, feature);
                              # True makes input and output (batch, seq_len, feature)
        )
        self.out = torch.nn.Linear(in_features=64, out_features=10)

    def forward(self, x):
        # Reshape each image to (batch, seq_len, feature)
        x = x.view(-1, 28, 28)
        # output: (batch, seq_len, hidden_size) - the output at every time step.
        # Even with batch_first=True, dimension 0 of h_n and c_n is still num_layers:
        # h_n: (num_layers, batch, hidden_size) - hidden state of the last time step only
        # c_n: (num_layers, batch, hidden_size) - cell state of the last time step only
        output, (h_n, c_n) = self.lstm(x)
        output_in_last_timestep = h_n[-1, :, :]
        # Return raw logits: CrossEntropyLoss applies log-softmax internally,
        # so an explicit Softmax layer here would be redundant
        x = self.out(output_in_last_timestep)
        return x
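The shape comments in forward are easy to verify with a throwaway tensor; a quick sketch, not part of the original article:

import torch

rnn = torch.nn.LSTM(input_size=28, hidden_size=64, num_layers=1, batch_first=True)
x = torch.randn(4, 28, 28)                        # (batch, seq_len, feature)
output, (h_n, c_n) = rnn(x)
print(output.shape)                               # torch.Size([4, 28, 64]) - every time step
print(h_n.shape, c_n.shape)                       # torch.Size([1, 4, 64]) each - last step only
# For a single-layer, unidirectional LSTM, h_n[-1] equals the last time step of output
print(torch.allclose(h_n[-1], output[:, -1, :]))  # True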
# Instantiate the model
model = LSTM()
# Loss function: cross-entropy (combines log-softmax and NLLLoss)
loss_fn = nn.CrossEntropyLoss()
# Optimizer: Adam
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Define the training and testing procedures
def train():
    # Put the model in training mode
    model.train()
    for i, data in enumerate(train_loader):
        # Get one batch of data and labels
        inputs, labels = data
        # Model predictions: (64, 10)
        out = model(inputs)
        # Cross-entropy loss: out is (batch, num_classes), labels is (batch)
        loss = loss_fn(out, labels)
        # Clear the gradients
        optimizer.zero_grad()
        # Compute the gradients
        loss.backward()
        # Update the weights
        optimizer.step()

def test():
    # Put the model in evaluation mode
    model.eval()
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        # Test-set accuracy
        for i, data in enumerate(test_loader):
            inputs, labels = data
            out = model(inputs)
            # Index of the maximum value = predicted class
            _, predicted = torch.max(out, 1)
            # Count correct predictions
            correct += (predicted == labels).sum()
        print("test acc:{0}".format(correct.item() / len(test_data)))
        correct = 0
        # Training-set accuracy
        for i, data in enumerate(train_loader):
            inputs, labels = data
            out = model(inputs)
            _, predicted = torch.max(out, 1)
            correct += (predicted == labels).sum()
        print("train acc:{0}".format(correct.item() / len(train_data)))
# Train
for epoch in range(10):
    print("epoch:", epoch)
    train()
    test()
The above reflects my personal experience; I hope it serves as a useful reference.