Neural Network Optimization Process
- Implementation of the forward-propagation and back-propagation optimization process for a single neuron; the formulas the code implements are summarized below, followed by the full listing.
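
For reference, a single sigmoid neuron computes the following forward pass, cross-entropy cost, and gradients (these are the standard logistic-regression formulas that the code below implements, trained by plain gradient descent):

$$A = \sigma(w^{T}X + b), \qquad \sigma(z) = \frac{1}{1 + e^{-z}}$$

$$J(w, b) = -\frac{1}{m}\sum_{i=1}^{m}\left[ y^{(i)}\log a^{(i)} + \left(1 - y^{(i)}\right)\log\left(1 - a^{(i)}\right) \right]$$

$$dw = \frac{1}{m}\, X\,(A - Y)^{T}, \qquad db = \frac{1}{m}\sum_{i=1}^{m}\left(a^{(i)} - y^{(i)}\right), \qquad w \leftarrow w - \alpha\, dw, \quad b \leftarrow b - \alpha\, db$$

where m is the number of training samples and α is the learning rate (learn_rate in the code).
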
import numpy as np
import h5py
from data import load_dataset
def model(train_x, train_y, test_x, test_y, n_num=2000, learn_rate=0.05):
    # Initialize the parameters
    w, b = grade_init(train_x.shape[0])
    # Train the model with gradient descent
    update_grade, costs = optima(w, b, train_x, train_y, n_num, learn_rate)
    w = update_grade["w"]
    b = update_grade["b"]
    # Predict on the test set and the training set
    y_predict = predict(w, b, test_x)
    y_predict2 = predict(w, b, train_x)
    # Accuracy = 100 - mean absolute error * 100 (labels and predictions are 0/1)
    print("Test set accuracy: {}".format(100 - np.mean(np.abs(y_predict - test_y)) * 100))
    print("Training set accuracy: {}".format(100 - np.mean(np.abs(y_predict2 - train_y)) * 100))

def optima(w, b, X, Y, n_num, learn_rate):
    costs = []
    for i in range(n_num):
        # Compute the gradients and the current cost
        grads, cost = grade(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        # Gradient descent update
        w = w - learn_rate * dw
        b = b - learn_rate * db
        # Record and report the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
            print("Cost after iteration {}: {}".format(i, cost))
    update_grade = {"w": w, "b": b}
    return update_grade, costs

def sigmoid(z):
    # Logistic activation: maps any real number into (0, 1)
    s = 1 / (1 + np.exp(-z))
    return s

def grade_init(shape):
    # Initialize the weight vector and the bias to zeros
    w = np.zeros((shape, 1))
    b = 0
    return w, b
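
# Note (added remark, not in the original article): zero initialization works for a
# single sigmoid neuron because there is no symmetry to break; every weight already
# receives a different gradient. For networks with hidden layers, weights are usually
# initialized to small random values instead, e.g.:
#     w = np.random.randn(shape, 1) * 0.01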

def grade(w, b, X, Y):
    # Forward propagation
    m = X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b)
    # Cross-entropy cost (note the leading minus sign)
    cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    # Backward propagation
    dz = A - Y
    dw = 1 / m * np.dot(X, dz.T)
    db = 1 / m * np.sum(dz)
    grads = {"dw": dw,
             "db": db}
    return grads, cost
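
# --- Optional sanity check (added sketch, not part of the original article) ---
# A minimal numerical gradient check for grade(): nudge each parameter by a small
# eps, re-evaluate the cost, and compare the finite-difference slope with the
# analytic dw/db computed above. The helper name grade_check and the eps value
# are illustrative choices, not from the original code.
def grade_check(w, b, X, Y, eps=1e-7):
    grads, _ = grade(w, b, X, Y)
    num_dw = np.zeros_like(w)
    for j in range(w.shape[0]):
        w_plus = w.copy()
        w_minus = w.copy()
        w_plus[j, 0] += eps
        w_minus[j, 0] -= eps
        _, cost_plus = grade(w_plus, b, X, Y)
        _, cost_minus = grade(w_minus, b, X, Y)
        num_dw[j, 0] = (cost_plus - cost_minus) / (2 * eps)
    _, cost_b_plus = grade(w, b + eps, X, Y)
    _, cost_b_minus = grade(w, b - eps, X, Y)
    num_db = (cost_b_plus - cost_b_minus) / (2 * eps)
    # Both differences should be tiny (roughly on the order of eps)
    print("max |dw - numerical dw|:", np.max(np.abs(grads["dw"] - num_dw)))
    print("|db - numerical db|:", abs(grads["db"] - num_db))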

def predict(w, b, X):
    y_predict = np.zeros((1, X.shape[1]))
    w = w.reshape(X.shape[0], 1)
    # Forward pass, then threshold the activations at 0.5
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        if A[0, i] <= 0.5:
            y_predict[0, i] = 0
        else:
            y_predict[0, i] = 1
    return y_predict
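
# Note (added remark, not in the original article): the thresholding loop above is
# equivalent to a single vectorized expression, which is usually preferred with NumPy:
#     y_predict = (A > 0.5).astype(float)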

def main():
    # 1. Load the sample data
    train_x, train_y, test_x, test_y, classes = load_dataset()
    # 2. Flatten each sample into a column vector, then scale pixel values to [0, 1]
    train_x = train_x.reshape(train_x.shape[0], -1).T
    test_x = test_x.reshape(test_x.shape[0], -1).T
    train_x = train_x / 255
    test_x = test_x / 255
    # 3. Train and evaluate the model
    model(train_x, train_y, test_x, test_y, n_num=2000, learn_rate=0.05)

if __name__ == '__main__':
    main()
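
The listing imports load_dataset from a separate data module that is not shown in the article (the h5py import suggests it reads an HDF5 dataset). To run the script end to end without that file, a synthetic stand-in along the lines of the sketch below could be saved as data.py; every shape, value, and class name here is an assumption made only so that main() executes, not a reconstruction of the original data loader.

import numpy as np

def load_dataset():
    # Synthetic stand-in: random 8x8 RGB "images" with random binary labels.
    # Shapes mirror what main() expects: x of shape (num_samples, h, w, 3),
    # y of shape (1, num_samples).
    rng = np.random.default_rng(0)
    train_x = rng.integers(0, 256, size=(100, 8, 8, 3)).astype(np.float64)
    train_y = rng.integers(0, 2, size=(1, 100))
    test_x = rng.integers(0, 256, size=(20, 8, 8, 3)).astype(np.float64)
    test_y = rng.integers(0, 2, size=(1, 20))
    classes = np.array([b"class0", b"class1"])  # placeholder class names
    return train_x, train_y, test_x, test_y, classes

With random labels the reported accuracy will hover around 50 percent; this stand-in only checks that the training and prediction pipeline runs.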