A Simple Neural Network Algorithm
The complete implementation: a feed-forward neural network trained with stochastic backpropagation, written with nothing but NumPy.

# -*- coding: utf-8 -*-
import numpy as np

def tanh(x):
    # hyperbolic tangent activation function
    return np.tanh(x)
def tan_deriv(y):
    # derivative of tanh expressed in terms of its output y = tanh(x):
    # tanh'(x) = 1 - tanh(x)^2 = 1 - y^2. The network below stores the
    # activated outputs, so the derivative must take the output, not x.
    return 1.0 - y * y

def logistic(x):
    # logistic (sigmoid) function: 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-x))

def logistic_derivative(y):
    # derivative of the sigmoid in terms of its output y = logistic(x):
    # sigma'(x) = sigma(x) * (1 - sigma(x)) = y * (1 - y)
    return y * (1 - y)
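# A quick finite-difference sanity check of the derivative form above
# (my addition, not in the original post):
#   y = np.tanh(0.5)
#   numeric = (np.tanh(0.5 + 1e-6) - np.tanh(0.5 - 1e-6)) / 2e-6
#   assert abs(numeric - tan_deriv(y)) < 1e-6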
class NeuralNetwork:
    def __init__(self, layers, activation='tanh'):
        # layers: number of units per layer, e.g. [2, 2, 1]
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tan_deriv
        # initialize weights uniformly in [-0.25, 0.25); the +1 terms add a bias unit
        self.weights = []
        for i in range(1, len(layers) - 1):
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)
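    # Concrete shape example (my annotation, assuming a [2, 2, 1] network):
    #   weights[0]: (3, 3) -- input + bias -> hidden + bias
    #   weights[1]: (3, 1) -- hidden + bias -> single output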
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)
        # append a bias column of ones to every input sample
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)
        for k in range(epochs):
            # stochastic training: pick one random sample per iteration
            i = np.random.randint(X.shape[0])
            a = [X[i]]
            # forward pass: store the activation of every layer
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            # backward pass: output error, then propagate deltas layer by layer
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            # gradient step on every weight matrix
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)
    def predict(self, x):
        x = np.array(x)
        # append the bias term, then run a plain forward pass
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
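A minimal usage sketch (my addition, not part of the original post): training the network above on XOR, the standard smoke test for a small multilayer perceptron. The layer sizes and hyperparameters here are illustrative choices.

if __name__ == '__main__':
    # XOR is not linearly separable, so it needs the hidden layer
    nn = NeuralNetwork([2, 2, 1], 'tanh')
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([0, 1, 1, 0])
    nn.fit(X, y, learning_rate=0.2, epochs=10000)
    for sample in [[0, 0], [0, 1], [1, 0], [1, 1]]:
        print(sample, nn.predict(sample))
    # expected output: values near 0, 1, 1, 0 respectively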