MXNet Deep Learning Framework - 07 - Implementing a Multilayer Perceptron (MLP) from Scratch
A multilayer perceptron (MLP) is not much different from logistic regression: the main change is that one or more hidden layers are inserted between the input layer and the output layer.
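In symbols (my notation, chosen to match the code below): with an input batch $X$, the single-hidden-layer MLP computes

$$H = \phi(XW_1 + b_1), \qquad O = HW_2 + b_2,$$

where $\phi$ is a nonlinear activation function and the output $O$ goes to softmax (inside the loss below) for classification.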
Let's implement this network below.
PS: As in the previous two chapters, we again use the Fashion-MNIST clothing dataset.
1. Getting the Dataset
mnist_train = gn.data.vision.FashionMNIST(train=True)
mnist_test = gn.data.vision.FashionMNIST(train=False)
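As a quick check (standard Gluon dataset API), each dataset element is an (image, label) pair, with images stored as 28×28×1 uint8 arrays:
print(len(mnist_train), len(mnist_test))  # 60000 10000
image, label = mnist_train[0]             # one (image, label) sample
print(image.shape, label)                 # (28, 28, 1) and a class index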
2. Reading the Data
batch_size = 100
transformer = gn.data.vision.transforms.ToTensor()  # defined but never applied below; batches are normalized manually via transform() (see the full source)
train_data = gn.data.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True)
test_data = gn.data.DataLoader(dataset=mnist_test, batch_size=batch_size, shuffle=False)
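A sketch to peek at one batch and confirm what the loader yields; because ToTensor is never applied, batches keep the raw (N, 28, 28, 1) uint8 layout and are normalized later by the transform() helper:
for data, label in train_data:
    print(data.shape, label.shape)  # (100, 28, 28, 1) (100,)
    break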
3. Initializing Model Parameters
num_input = 28 * 28 * 1
num_output = 10
num_hidden = 256  # number of hidden-layer neurons
w1 = nd.random_normal(shape=(num_input, num_hidden), scale=0.01)  # initialize with small random values (std 0.01)
b1 = nd.zeros(shape=(num_hidden))
w2 = nd.random_normal(shape=(num_hidden, num_output), scale=0.01)
b2 = nd.zeros(shape=(num_output))
params = [w1, b1, w2, b2]
for param in params:
    param.attach_grad()  # allocate space for the gradients
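A small sanity check (illustrative) that the shapes match the 784 → 256 → 10 architecture and that gradient buffers are attached:
for param in params:
    print(param.shape, param.grad.shape)  # each grad buffer matches its parameter's shape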
4. Activation Function
If we build the network out of linear operators only, the whole model is still just a linear function:

$$O = (XW_1 + b_1)W_2 + b_2 = XW_3 + b_3,$$

where $W_3 = W_1W_2$ and $b_3 = b_1W_2 + b_2$.
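A minimal numeric check of this collapse (the underscored names are mine, chosen so they don't clobber the model's w1/b1 defined above):
x = nd.random.normal(shape=(4, 5))                       # a dummy batch
w1_, b1_ = nd.random.normal(shape=(5, 3)), nd.zeros(3)
w2_, b2_ = nd.random.normal(shape=(3, 2)), nd.zeros(2)
two_layers = nd.dot(nd.dot(x, w1_) + b1_, w2_) + b2_
w3_, b3_ = nd.dot(w1_, w2_), nd.dot(b1_, w2_) + b2_
one_layer = nd.dot(x, w3_) + b3_
print(nd.abs(two_layers - one_layer).max())              # ~0, up to float rounding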
So, to let the model fit nonlinear functions, we introduce a nonlinear activation function:
def relu(x):
    return nd.maximum(0, x)
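A quick sanity check of the definition:
print(relu(nd.array([-2.0, -0.5, 0.0, 1.5])))  # [0. 0. 0. 1.5]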
5. Defining the Model
def net(x):
    x = x.reshape(-1, num_input)      # flatten each image to a 784-dim vector
    h1 = relu(nd.dot(x, w1) + b1)
    output = nd.dot(h1, w2) + b2      # the last layer is usually left without an activation
    return output
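Before training, a shape check with a dummy batch (untrained weights, so the values are meaningless; only the shapes matter):
dummy = nd.random.normal(shape=(2, 28, 28, 1))  # two fake "images"
print(net(dummy).shape)                         # (2, 10): one score per class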
6. Defining Accuracy
# define accuracy
def accuracy(output, label):
    return nd.mean(output.argmax(axis=1) == label).asscalar()
def evaluate_accuracy(data_iter, net):  # accuracy on the test set
    acc = 0
    for data, label in data_iter:
        data, label = transform(data, label)  # transform() is defined in the full source below
        output = net(data)
        acc += accuracy(output, label)
    return acc / len(data_iter)
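A toy check of accuracy(), where two of the three predicted classes match the labels:
toy_output = nd.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # argmax: 1, 0, 1
toy_label = nd.array([1, 0, 0])
print(accuracy(toy_output, toy_label))  # 2/3 ≈ 0.667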
7. Gradient Descent Optimizer
def SGD(params, lr):
    for pa in params:
        pa[:] = pa - lr * pa.grad  # step each parameter a fixed distance against its gradient
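A single-step check on a toy parameter (names are illustrative): for loss = sum(w²) the gradient is 2w, so one step with lr=0.1 scales w by 0.8:
w = nd.array([1.0, -2.0])
w.attach_grad()
with ag.record():
    toy_loss = (w * w).sum()
toy_loss.backward()  # w.grad is now 2w
SGD([w], lr=0.1)
print(w)             # [0.8 -1.6]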
8. Training
(same as in the previous two chapters)
lr = 0.5
epochs = 20
cross_loss = gn.loss.SoftmaxCrossEntropyLoss()  # softmax fused with cross-entropy (see the full source)
for epoch in range(epochs):
    train_loss = 0
    train_acc = 0
    for image, y in train_data:
        image, y = transform(image, y)  # cast types and normalize the data
        with ag.record():
            output = net(image)
            loss = cross_loss(output, y)
        loss.backward()
        # average the gradients so the learning rate is less sensitive to batch_size
        SGD(params, lr / batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, y)
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d, Loss:%f, Train acc:%f, Test acc:%f"
          % (epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))
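Why divide lr by batch_size: calling loss.backward() on the per-sample loss vector sums the gradients over the batch, so scaling the learning rate by 1/batch_size turns the update into the usual averaged mini-batch step

$$w \leftarrow w - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_w \ell_i(w),$$

which keeps a given $\eta$ reasonable across different batch sizes.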
Training results:
As you can see, after the same 20 training epochs (compare the previous two chapters), the model's accuracy is already close to 90%, so adding the hidden layer does noticeably improve classification.
9. Prediction
(same as in the previous two chapters)
# once training is done, we can run predictions on samples
image_10, label_10 = mnist_test[:10]  # take the first 10 samples
show_image(image_10)
print("True labels:", label_10)
print("Clothing names for the true labels:", get_fashion_mnist_labels(label_10))
image_10, label_10 = transform(image_10, label_10)
predict_label = net(image_10).argmax(axis=1)
print("Predicted labels:", predict_label.astype("int8"))
print("Clothing names for the predicted labels:", get_fashion_mnist_labels(predict_label.asnumpy()))
Prediction results:
Full source code:
import mxnet.autograd as ag
import mxnet.ndarray as nd
import mxnet.gluon as gn

def transform(data, label):
    return data.astype("float32") / 255, label.astype("float32")  # normalize samples to [0, 1]

mnist_train = gn.data.vision.FashionMNIST(train=True)
mnist_test = gn.data.vision.FashionMNIST(train=False)

data, label = mnist_train[0:9]
print(data.shape, label)  # inspect the data dimensions

import matplotlib.pyplot as plt

def show_image(image):  # display images
    n = image.shape[0]
    _, figs = plt.subplots(1, n, figsize=(15, 15))
    for i in range(n):
        figs[i].imshow(image[i].reshape((28, 28)).asnumpy())
    plt.show()

def get_fashion_mnist_labels(labels):  # map label indices to clothing names
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

# show_image(data)
# print(get_fashion_mnist_labels(label))

'''----Reading the data----'''
batch_size = 100
transformer = gn.data.vision.transforms.ToTensor()  # defined but never applied; normalization is done by transform()
train_data = gn.data.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True)
test_data = gn.data.DataLoader(dataset=mnist_test, batch_size=batch_size, shuffle=False)

'''----Initializing model parameters----'''
num_input = 28 * 28 * 1
num_output = 10
num_hidden = 256  # number of hidden-layer neurons
w1 = nd.random_normal(shape=(num_input, num_hidden), scale=0.01)  # initialize with small random values (std 0.01)
b1 = nd.zeros(shape=(num_hidden))
w2 = nd.random_normal(shape=(num_hidden, num_output), scale=0.01)
b2 = nd.zeros(shape=(num_output))
params = [w1, b1, w2, b2]
for param in params:
    param.attach_grad()  # allocate space for the gradients

# define the activation function: ReLU
def relu(x):
    return nd.maximum(0, x)

'''----Defining the model----'''
# the model simply chains fully connected layers together with relu
def net(x):
    x = x.reshape(-1, num_input)
    h1 = relu(nd.dot(x, w1) + b1)
    output = nd.dot(h1, w2) + b2  # the last layer is usually left without an activation
    return output

# softmax and cross-entropy loss
# computing them separately is numerically unstable (compare the results in the previous two posts),
# so we use the fused API provided by gluon directly
cross_loss = gn.loss.SoftmaxCrossEntropyLoss()

# define accuracy
def accuracy(output, label):
    return nd.mean(output.argmax(axis=1) == label).asscalar()

def evaluate_accuracy(data_iter, net):  # accuracy on the test set
    acc = 0
    for data, label in data_iter:
        data, label = transform(data, label)
        output = net(data)
        acc += accuracy(output, label)
    return acc / len(data_iter)

# gradient descent optimizer
def SGD(params, lr):
    for pa in params:
        pa[:] = pa - lr * pa.grad  # step each parameter a fixed distance against its gradient

# training
lr = 0.1
epochs = 20
for epoch in range(epochs):
    train_loss = 0
    train_acc = 0
    for image, y in train_data:
        image, y = transform(image, y)  # cast types and normalize the data
        with ag.record():
            output = net(image)
            loss = cross_loss(output, y)
        loss.backward()
        # average the gradients so the learning rate is less sensitive to batch_size
        SGD(params, lr / batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, y)
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch %d, Loss:%f, Train acc:%f, Test acc:%f"
          % (epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))

'''----Prediction----'''
# once training is done, we can run predictions on samples
image_10, label_10 = mnist_test[:10]  # take the first 10 samples
show_image(image_10)
print("True labels:", label_10)
print("Clothing names for the true labels:", get_fashion_mnist_labels(label_10))
image_10, label_10 = transform(image_10, label_10)
predict_label = net(image_10).argmax(axis=1)
print("Predicted labels:", predict_label.astype("int8"))
print("Clothing names for the predicted labels:", get_fashion_mnist_labels(predict_label.asnumpy()))