
Revisiting Deep Learning from Scratch


Contents

Preface
I. Perceptron
II. Neural Networks


Preface

Purpose: to review the contents of Deep Learning from Scratch and straighten out the line of thought, since the later chapters require this groundwork to be clear.


Note: the main content of the article follows; the examples below are for reference.

I. Perceptron

 

A perceptron computes a weighted sum of its inputs plus a bias (the negative of the threshold) and outputs 1 when that sum is greater than 0, otherwise 0:

import numpy as np

x = np.array([0, 1]) # inputs
w = np.array([0.5, 0.5]) # weights
b = -0.7 # bias (the negative of the threshold)
print(w*x)
print(np.sum(w*x) + b)

[0.  0.5]
-0.19999999999999996
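
The same decision rule can be wrapped into a small helper. A minimal sketch (the perceptron function and the test call are my own addition, not from the book):

def perceptron(x, w, b):
    # Fires (returns 1) when the weighted sum of the inputs plus the bias is positive, otherwise returns 0
    return 1 if np.sum(w * x) + b > 0 else 0

print(perceptron(np.array([0, 1]), np.array([0.5, 0.5]), -0.7)) # 0, consistent with the -0.2 computed above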

Truth tables of the basic gates: AND outputs 1 only when both inputs are 1, NAND is its negation, OR outputs 1 when at least one input is 1, and XOR outputs 1 (True) when the two inputs differ.

def AND(x1, x2): # AND gate
    '''
    w1, w2, theta = 0.5, 0.5, 0.7
    tmp = x1*w1 + x2*w2
    if tmp <= theta:
        return 0
    elif tmp > theta:
        return 1
    '''
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    b = -0.7
    tmp = np.sum(x*w) + b
    if tmp > 0:
        return 1
    else:
        return 0

    
print(AND(0, 0))
print(AND(1, 0))
print(AND(0, 1))
print(AND(1, 1))


0
0
0
1

 

def NAND(x1, x2): # NAND gate
    '''
    w1, w2, theta = -0.5, -0.5, -0.7
    tmp = x1*w1 + x2*w2
    if tmp <= theta:
        return 0
    elif tmp > theta:
        return 1
    '''
    x = np.array([x1, x2])
    w = np.array([-0.5, -0.5])
    b = 0.7
    tmp = np.sum(x*w) + b
    if tmp > 0:
        return 1
    else:
        return 0

print(NAND(0, 0))
print(NAND(1, 0))
print(NAND(0, 1))
print(NAND(1, 1))


1
1
1
0

 

def OR(x1, x2): # OR gate
    '''
    w1, w2, theta = 0.5, 0.5, 0.4
    tmp = x1*w1 + x2*w2
    if tmp <= theta:
        return 0
    elif tmp > theta:
        return 1
    '''
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    b = -0.2
    tmp = np.sum(w*x) + b
    if tmp > 0:
        return 1
    else:
        return 0
    
print(OR(0, 0))
print(OR(1, 0))
print(OR(0, 1))
print(OR(1, 1))


0
1
1
1

XOR is not linearly separable, so a single perceptron cannot represent it; it can, however, be built by combining the NAND, OR, and AND gates above. The code is as follows:

def XOR(x1, x2): # XOR gate
    s1 = NAND(x1, x2)
    s2 = OR(x1, x2)
    y = AND(s1, s2)
    return y

print(XOR(1, 1))
print(XOR(1, 0))
print(XOR(0, 1))
print(XOR(0, 0))


0
1
1
0
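
All four gates can be checked against their full truth tables in one loop (this verification loop is my own addition); the inputs are ordered (0,0), (0,1), (1,0), (1,1):

for gate in (AND, NAND, OR, XOR):
    # Evaluate each gate on every input combination
    print(gate.__name__, [gate(x1, x2) for x1 in (0, 1) for x2 in (0, 1)])


AND [0, 0, 0, 1]
NAND [1, 1, 1, 0]
OR [0, 1, 1, 1]
XOR [0, 1, 1, 0]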

 

 

II. Neural Networks

 

Activation functions convert the weighted sum of a neuron's inputs into its output. Three common choices, implemented below, are the sigmoid function, the step function, and ReLU.

 

import numpy as np
import matplotlib.pylab as plt

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Thanks to NumPy broadcasting, sigmoid also works element-wise on arrays such as np.array([-1.0, 1.0, 2.0])
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)

plt.plot(x, y)
plt.ylim(-0.1, 1.1) # set the y-axis range
plt.show()

def step_function(x):
    return np.array(x > 0, dtype=int) # np.int has been removed from NumPy; the built-in int works here

x = np.arange(-5.0, 5.0, 0.1)
y = step_function(x)

plt.plot(x, y)
plt.ylim(-0.1, 1.1) # set the y-axis range
plt.show()

def relu(x):
    return np.maximum(0, x)
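
ReLU can be plotted in the same way as sigmoid and the step function above (this plotting snippet is my own addition for comparison):

x = np.arange(-5.0, 5.0, 0.1)
y = relu(x)

plt.plot(x, y)
plt.ylim(-1.0, 5.5) # set the y-axis range
plt.show()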

 

Next, forward propagation through a 3-layer network, computed layer by layer with matrix products:

import numpy as np

# Signal propagation from the input layer to layer 1
X = np.array([1.0, 0.5])
W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]) # weights
B1 = np.array([0.1, 0.2, 0.3]) # biases

print(W1.shape)
print(X.shape)
print(B1.shape)

A1 = np.dot(X, W1) + B1
print(A1)
Z1 = sigmoid(A1)
print(Z1)


# Signal propagation from layer 1 to layer 2
W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
B2 = np.array([0.1, 0.2])

print(Z1.shape)
print(W2.shape)
print(B2.shape)

A2 = np.dot(Z1, W2) + B2
Z2 = sigmoid(A2)
print(A2)
print(Z2)


def identify_function(x): # identity function, typically used as the output activation for regression
    return x
# Layer 2 to the output layer
W3 = np.array([[0.1, 0.3], [0.2, 0.4]])
B3 = np.array([0.1, 0.2])

A3 = np.dot(Z2, W3) + B3
Y = identify_function(A3)

print(Y)

For classification, the output layer uses the softmax function. Subtracting the maximum value before exponentiating does not change the result but prevents numerical overflow:

def softmax(a): # output layer activation for classification
    c = np.max(a)
    exp_a = np.exp(a - c) # subtract the maximum to guard against overflow
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    
    return y
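
A quick sanity check that the softmax outputs are all positive and sum to 1 (the input values are my own example):

a = np.array([0.3, 2.9, 4.0])
y = softmax(a)
print(y)         # approximately [0.01821127 0.24519181 0.73659691]
print(np.sum(y)) # 1.0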

Putting the whole forward pass together:

# Consolidated version
def init_network():
    network = {}
    network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
    network['b1'] = np.array([0.1, 0.2, 0.3])
    network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
    network['b2'] = np.array([0.1, 0.2])
    network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])
    network['b3'] = np.array([0.1, 0.2])
    
    return network



def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = identify_function(a3)
    
    return y

network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print(y)

Handwritten digit recognition:


import sys, os
sys.path.append(os.pardir) # so that files in the parent directory can be imported
from sourceCode.dataset.mnist import load_mnist

# The first call takes a few minutes
# load_mnist returns the MNIST data as (training images, training labels), (test images, test labels)
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)

# Print the shape of each array
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)


(60000, 784)
(60000,)
(10000, 784)
(10000,)
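
With normalize=False the pixel values stay as raw 0-255 integers; normalize=True rescales them to floats between 0.0 and 1.0, which is what the inference code further down relies on. A minimal check (my own addition; the exact dtypes depend on the load_mnist implementation):

print(x_train.dtype, x_train.min(), x_train.max()) # e.g. uint8 0 255 with normalize=False

(x_train_n, _), _ = load_mnist(flatten=True, normalize=True)
print(x_train_n.dtype, x_train_n.min(), x_train_n.max()) # e.g. float32 0.0 1.0 with normalize=True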

 

import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image


def img_show(img):
    # Convert the image data stored as a NumPy array into a PIL image object
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show() # and display it
  

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
# x_train has shape (60000, 784), i.e. a matrix with 60000 rows and 784 columns, so x_train[0] is the 784 pixel values of the first image
img = x_train[0]
# t_train has shape (60000,), i.e. 60000 labels, so t_train[0] is the first label; here its value is 5
label = t_train[0]

print(label) # 5
print(img.shape) # (784,)
img = img.reshape(28, 28) # reshape the image back to its original 28x28 size
print(img.shape) # (28, 28)
img_show(img)

import sys, os
sys.path.append(os.pardir)  # so that files in the parent directory can be imported
import numpy as np
import pickle
from dataset.mnist import load_mnist


def get_data():
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test # return (test images, test labels)


def init_network():
    with open("sourceCode//ch03//sample_weight.pkl", 'rb') as f:
        network = pickle.load(f)
    return network


def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)

    return y


# Inference over the test set with the neural network
x, t = get_data()
network = init_network()
accuracy_cnt = 0
for i in range(len(x)):
    y = predict(network, x[i])
    p = np.argmax(y) # index of the element with the highest probability
    if p == t[i]:
        accuracy_cnt += 1

print("Accuracy:" + str(float(accuracy_cnt) / len(x)))

 

x, _ = get_data()
network = init_network()
W1, W2, W3 = network['W1'], network['W2'], network['W3']

print(x.shape)
print(x[0].shape)
print(W1.shape)
print(W2.shape)
print(W3.shape)


(10000, 784)
(784,)
(784, 50)
(50, 100)
(100, 10)
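
These shapes show how a single image flows through the network: (784,) x (784, 50) -> (50,), then (50,) x (50, 100) -> (100,), then (100,) x (100, 10) -> (10,). The same matrix products also accept a whole batch of images at once, returning one row of 10 class scores per image, which is what the batched version below exploits. A quick shape check (my own addition):

x_batch = x[0:100] # a batch of 100 images
print(x_batch.shape)                   # (100, 784)
print(predict(network, x_batch).shape) # (100, 10), one row of class scores per image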


# Batched inference
x, t = get_data()
network = init_network()

batch_size = 100 # batch size
accuracy_cnt = 0

for i in range(0, len(x), batch_size):
    x_batch = x[i : i + batch_size]
    y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis = 1) # index of the largest element along axis 1 (axis 0 runs over the images in the batch, axis 1 over the class scores)
    accuracy_cnt += np.sum(p == t[i : i + batch_size])
    
    
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))


Accuracy:0.9352
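
With axis=1, np.argmax works row by row, returning one predicted class per image in the batch, and the element-wise == comparison then counts how many predictions match the labels. A small illustration (the array values are my own):

y_demo = np.array([[0.1, 0.8, 0.1],
                   [0.3, 0.1, 0.6]])
print(np.argmax(y_demo, axis=1))                     # [1 2], the index of the largest value in each row
print(np.argmax(y_demo, axis=1) == np.array([1, 0])) # [ True False], element-wise comparison with the labels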