Three-Layer Neural Network
Experiment: Three-Layer Neural Network
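The experiment runs inference on the MNIST test set with a pretrained three-layer network and compares the running time of single-sample processing against batch processing. The weights are read from sample_weight.pkl; assuming the file follows the layout used by the common "Deep Learning from Scratch" sample code (a 784-50-100-10 network, which is an assumption rather than something stated in this article), the parameter shapes can be inspected with a minimal sketch like the one below.

import pickle

# Minimal sketch: print the shape of every weight/bias in sample_weight.pkl.
# The 784-50-100-10 layout mentioned above is an assumption about this file.
with open("sample_weight.pkl", 'rb') as f:
    network = pickle.load(f)
for key in ('W1', 'b1', 'W2', 'b2', 'W3', 'b3'):
    print(key, network[key].shape)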
Single-sample code:
import sys, os
sys.path.append(os.pardir)  # make the parent directory (containing dataset/) importable
import numpy as np
import time
import pickle
from dataset.mnist import load_mnist

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def softmax(a):
    c = np.max(a)
    exp_a = np.exp(a - c)  # subtract the max to guard against overflow
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    return y

def get_data():
    # Labels are returned as class indices (one_hot_label=False) so they can be
    # compared directly with the np.argmax of the network output.
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test

def init_network():
    with open("sample_weight.pkl", 'rb') as f:
        network = pickle.load(f)
    return network

def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)
    return y

# Single-sample evaluation: feed the test images one at a time
x, t = get_data()
network = init_network()
accuracy_cnt = 0
start = time.perf_counter()
for i in range(len(x)):
    y = predict(network, x[i])
    p = np.argmax(y)  # index of the most probable class
    if p == t[i]:
        accuracy_cnt += 1
end = time.perf_counter()
print('Single-sample time: ' + str(end - start) + ' s')
print('Accuracy: ' + str(float(accuracy_cnt) / len(x)))
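The batch version below feeds 100 images at a time, so each np.dot inside predict multiplies a (100, 784) matrix instead of a single 784-element vector, which NumPy handles much more efficiently. The per-image prediction is then picked row by row with np.argmax(..., axis=1); a tiny illustration with made-up values:

import numpy as np

# Toy values: two images, three classes, just to show the axis=1 behaviour.
y_batch = np.array([[0.1, 0.8, 0.1],
                    [0.3, 0.2, 0.5]])
print(np.argmax(y_batch, axis=1))  # [1 2]: one predicted class per row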
Batch-processing code:
import sys, os
sys.path.append(os.pardir)  # make the parent directory (containing dataset/) importable
import numpy as np
import time
import pickle
from dataset.mnist import load_mnist

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def softmax(a):
    # axis=-1 with keepdims=True so every row of a batch gets its own softmax
    c = np.max(a, axis=-1, keepdims=True)
    exp_a = np.exp(a - c)  # subtract the max to guard against overflow
    sum_exp_a = np.sum(exp_a, axis=-1, keepdims=True)
    y = exp_a / sum_exp_a
    return y

def get_data():
    # Labels as class indices (one_hot_label=False), for comparison with np.argmax
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test

def init_network():
    with open("sample_weight.pkl", 'rb') as f:
        network = pickle.load(f)
    return network

def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)
    return y

# Batch evaluation: feed the test images 100 at a time
x, t = get_data()
network = init_network()
batch_size = 100  # batch size
accuracy_cnt = 0
start = time.perf_counter()
for i in range(0, len(x), batch_size):
    x_batch = x[i:i+batch_size]
    y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)  # predicted class for each image in the batch
    accuracy_cnt += np.sum(p == t[i:i+batch_size])
end = time.perf_counter()
print('Batch time: ' + str(end - start) + ' s')
print('Accuracy: ' + str(float(accuracy_cnt) / len(x)))
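As a quick sanity check (a sketch, reusing the functions defined in the listing above), the batch path and the single-sample path should produce identical predictions for the same inputs:

x, t = get_data()
network = init_network()
# Compare the first five test images through both paths.
single = np.array([np.argmax(predict(network, x[i])) for i in range(5)])
batched = np.argmax(predict(network, x[:5]), axis=1)
print(np.array_equal(single, batched))  # expected: True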
Experimental results:
You are welcome to visit the author's personal tech blog: BlackvonCode (www.blackvon.top)