PyTorch Linear Regression
Overview
A simple linear regression example that walks through the whole workflow: defining a model, preparing data, training, prediction, and saving/loading the model.
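The training data is generated from the function y = 2x + 1, and the model learns a weight w and a bias b by minimizing the mean squared error between its predictions and the labels, MSE = (1/n) * Σ (w*x_i + b - y_i)^2. After training, w should come out close to 2 and b close to 1.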
Example
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable

class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        # Fully connected layer
        self.linear = nn.Linear(input_dim, output_dim)

    # Forward pass
    def forward(self, x):
        out = self.linear(x)
        return out
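# Note: with input_dim = output_dim = 1, nn.Linear holds a single weight w and
# a single bias b, so the model simply computes out = w * x + b.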
# Create the training data
# Generate an 11 x 1 matrix
x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
print(x_train)
x_train = x_train.reshape(-1, 1)
print(x_train)
print(x_train.shape)

y_values = [2 * i + 1 for i in x_values]
print(y_values)
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
print(y_train)
print(y_train.shape)
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

# Specify hyperparameters and the loss function
epochs = 1000
learning_rate = 0.01
# Optimizer: stochastic gradient descent
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Loss: mean squared error
criterion = nn.MSELoss()
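# Note: nn.MSELoss averages (output - label)^2 over the batch, and each
# optimizer.step() applies the SGD update p <- p - learning_rate * p.grad.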
# Train the model
for epoch in range(epochs):
    epoch += 1
    # Note: convert the numpy arrays to tensors
    inputs = Variable(torch.from_numpy(x_train), requires_grad=True)
    labels = Variable(torch.from_numpy(y_train))
    # Clear the gradients on every iteration
    optimizer.zero_grad()
    # Forward pass (calls the forward method)
    outputs = model(inputs)
    # Compute the loss between predictions and labels
    loss = criterion(outputs, labels)
    # Backward pass: compute gradients
    loss.backward()
    # Update the weight parameters
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
# Test the model
predicted = model(torch.from_numpy(x_train)).data.numpy()
print(predicted)

# Save the model
torch.save(model.state_dict(), "model.pkl")
# Load the model
model.load_state_dict(torch.load("model.pkl"))
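A minimal sketch of using the restored model to predict on a new input; the value 12.0 below is just an illustrative choice:

model.eval()
with torch.no_grad():
    new_x = torch.tensor([[12.0]])   # shape (1, 1) to match input_dim
    print(model(new_x).item())       # should be close to 2 * 12 + 1 = 25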