线性回归模型实现
程序员文章站
2022-04-09 20:24:40
...
1.包的导入
import torch
import torch.utils.data as Data
from torch import nn,optim
import numpy as np
import matplotlib.pyplot as plt
import os
from visdom import Visdom
import numpy as np
'''
nn是neural networks,定义了大量神经网络的层。核心数据结构是Module
'''
2.数据集的生成
# Linear model: y = w1*x1 + w2*x2 + b
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
plt.style.use("seaborn-whitegrid")#background style for the loss plot; NOTE(review): renamed to "seaborn-v0_8-whitegrid" in matplotlib >= 3.6 — confirm installed version
viz=Visdom()#visdom client; requires a running visdom server (network side effect)
viz.line([0.],[0.],win='train_loss', opts=dict(title='train loss'))  # initialise the loss-curve window at (0, 0)
global_step = 0  # x-axis counter for the visdom curve, incremented per batch
# Generate the synthetic dataset
num_inputs = 2 # number of input features
num_examples = 1000 # number of samples to generate
lr = 0.03  # learning rate for SGD
batch_size = 10  # mini-batch size
true_w = [0.6, -0.5] # ground-truth weights
true_b = 2.2  # ground-truth bias
features =np.random.normal(0, 1, (num_examples, num_inputs))  # shape (1000, 2), mean 0, std 1
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b  # noise-free targets, shape (1000,)
features=torch.from_numpy(features).float().to(device)
labels=torch.from_numpy(labels).float().to(device)
3.读取数据
# Read the data in mini-batches of batch_size
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True) # iterator yielding shuffled (X, y) mini-batches
4.定义模型
# Define the model (subclass nn.Module, implement __init__ and forward)
class LinearNet(nn.Module):
    """Single-layer linear regression model: y = W @ x + b."""

    def __init__(self, n_feature):
        """n_feature: number of input features (columns of x)."""
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)  # maps n_feature inputs to one output

    def forward(self, x):
        """Return predictions of shape (batch, 1) for x of shape (batch, n_feature)."""
        y = self.linear(x)
        return y

    # BUG FIX: the original method was misspelled "forword", so calling
    # net(X) would raise and nn.Module's __call__/hook machinery was
    # bypassed.  Keep the old name as an alias so existing call sites
    # (net.forword(X)) continue to work.
    forword = forward
net = LinearNet(num_inputs)  # instantiate the model with 2 input features
net.to(device)  # move parameters to the GPU when one is available
5.初始化模型参数
# Initialise the model parameters in place
nn.init.normal_(net.linear.weight, mean=0, std=0.01) # weights ~ N(0, 0.01^2)
nn.init.constant_(net.linear.bias, val=0)#bias starts at 0
6.损失函数
loss = nn.MSELoss()#mean squared error criterion
7.优化器
optimizer = optim.SGD(net.parameters(), lr=lr)  # stochastic gradient descent over all model parameters
8.训练模型
# Train the model
train_curve = list()  # per-iteration loss values, later plotted with matplotlib
num_epochs = 10  # number of passes over the dataset
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:  # X: input batch, y: targets; output: predictions
        # BUG FIX: move the batch to the target device BEFORE the forward
        # pass; the original ran the model first and transferred afterwards,
        # which is the wrong order on a CUDA machine (it only worked because
        # the whole dataset was already on the device).
        X, y = X.to(device), y.to(device)
        # forward.  NOTE(review): the model's method is misspelled "forword";
        # calling it directly also bypasses nn.Module's __call__/hook machinery.
        output = net.forword(X)
        # backward
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        l = loss(output, y.view(-1, 1))  # reshape y to (batch, 1) to match output
        l.backward()
        # update weights
        optimizer.step()
        # visualisation: append this batch's loss to the visdom curve
        global_step += 1
        viz.line([l.item()], [global_step], win='train_loss', update='append')
        train_curve.append(l.item())
    print('epoch %d, loss: %f' % (epoch, l.item()))  # loss of the last batch in the epoch
# Plot the recorded per-iteration training loss.
iterations = list(range(len(train_curve)))
plt.plot(iterations, train_curve, label='Train')
plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()
import torch
import torch.utils.data as Data
# import torch.nn as nn
# import torch.optim as optim
from torch import nn,optim
import numpy as np
import matplotlib.pyplot as plt
import os
from visdom import Visdom
import numpy as np
'''
nn是neural networks,定义了大量神经网络的层。核心数据结构是Module
'''
# Build a linear model: y = w1*x1 + w2*x2 + b
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# 1. Generate the synthetic dataset
num_inputs = 2 # number of input features
num_examples = 1000 # number of samples to generate
num_epochs = 10 # number of training epochs
lr = 0.03 # learning rate
batch_size = 10# mini-batch size: 10 samples per batch
true_w = [0.6, -0.5] # ground-truth weights
true_b = 2.2  # ground-truth bias
viz=Visdom()  # visdom client; requires a running visdom server (network side effect)
viz.line([0.],[0.],win='train_loss', opts=dict(title='train loss'))  # initialise the loss-curve window
global_step = 0  # x-axis counter for the visdom curve
# features =torch.tensor(np.random.normal(loc=0, scale=1, size=(num_examples, num_inputs)),dtype=torch.float)
# features =np.random.normal(loc=0, scale=1, size=(num_examples, num_inputs))
features =np.random.normal(0, 1, (num_examples, num_inputs))
# 1000 rows x 2 columns, mean 0, std 1
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b  # noise-free targets
features=torch.from_numpy(features).float().to(device)
labels=torch.from_numpy(labels).float().to(device)
# Linear model: y = w1*x1 + w2*x2 + b
# 2. Read the data in mini-batches
dataset = Data.TensorDataset(features, labels)
# import torch.utils.data as Data
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True) # iterator yielding shuffled (X, y) mini-batches
# 3. Define the model
class LinearNet(nn.Module):
    """Single-layer linear regression model: y = W @ x + b."""

    def __init__(self, n_feature):
        """n_feature: number of input features (columns of x)."""
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)  # maps n_feature inputs to one output

    def forward(self, x):
        """Return predictions of shape (batch, 1) for x of shape (batch, n_feature)."""
        y = self.linear(x)
        return y

    # BUG FIX: the original method was misspelled "forword", so calling
    # net(X) would raise and nn.Module's __call__/hook machinery was
    # bypassed.  Keep the old name as an alias so existing call sites
    # (net.forword(X)) continue to work.
    forword = forward
net = LinearNet(num_inputs)  # instantiate the model with 2 input features
net.to(device)  # move parameters to the GPU when one is available
# 4. Initialise the model parameters in place
nn.init.normal_(net.linear.weight, mean=0, std=0.01) # weights ~ N(0, 0.01^2)
nn.init.constant_(net.linear.bias, val=0) # bias starts at 0
# 5. Define the loss function; the nn module provides many criteria
loss = nn.MSELoss()  # mean squared error
# 6. Define the optimisation algorithm
optimizer = optim.SGD(net.parameters(), lr=lr)  # SGD over all model parameters
# print(optimizer)
# 7. Train the model
train_curve = list()  # per-iteration loss values, later plotted with matplotlib
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:  # X: input batch, y: targets; output: predictions
        # BUG FIX: move the batch to the target device BEFORE the forward
        # pass; the original ran the model first and transferred afterwards,
        # which is the wrong order on a CUDA machine (it only worked because
        # the whole dataset was already on the device).
        X, y = X.to(device), y.to(device)
        # forward.  NOTE(review): the model's method is misspelled "forword";
        # calling it directly also bypasses nn.Module's __call__/hook machinery.
        output = net.forword(X)
        # backward
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        l = loss(output, y.view(-1, 1))  # reshape y to (batch, 1) to match output
        l.backward()
        # update weights
        optimizer.step()
        # visualisation: append this batch's loss to the visdom curve
        global_step += 1
        viz.line([l.item()], [global_step], win='train_loss', update='append')
        train_curve.append(l.item())
    print('epoch %d, loss: %f' % (epoch, l.item()))  # loss of the last batch in the epoch
# Plot the recorded per-iteration training loss.
steps = list(range(len(train_curve)))
plt.plot(steps, train_curve, label='Train')
plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()
标签: 可视化 · matplotlib · visdom可视化
上一篇: 华为防火墙双机热备实验
下一篇: 该换老婆了