The Classic Convolutional Neural Network: The LeNet-5 Model
Classic convolutional neural network models include LeNet-5 (1998), AlexNet (2012), VGG (2014), GoogLeNet (2014), and ResNet (2015).
This article covers the classic LeNet-5 model, which has seven layers in total. In order, they are:
convolutional layer - pooling layer - convolutional layer - pooling layer - fully connected layer - fully connected layer - fully connected output layer
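As a quick sanity check, the comment sketch below hand-traces how the tensor shape evolves through these seven layers. It is my own illustration (not part of the original scripts), assuming 28x28x1 MNIST inputs, 'SAME' padding, and the filter sizes and depths defined in the code that follows.

# Shape trace (hand-computed sketch, not part of the original scripts):
# input        : 28 x 28 x 1    (IMAGE_SIZE x IMAGE_SIZE x NUM_CHANNELS)
# layer1-conv1 : 28 x 28 x 32   (5x5 conv, stride 1; SAME padding keeps H and W)
# layer2-pool1 : 14 x 14 x 32   (2x2 max pool, stride 2, halves H and W)
# layer3-conv2 : 14 x 14 x 64   (5x5 conv, stride 1, SAME padding)
# layer4-pool2 :  7 x  7 x 64   (2x2 max pool, stride 2)
# flatten      : 3136 nodes     (7 * 7 * 64)
# layer5-fc1   : 512 nodes      (FC_SIZE, with dropout during training)
# layer6-fc2   : 10 nodes       (NUM_LABELS, one logit per digit class)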
The LeNet-5 code below can be obtained by modifying an ordinary fully connected network implementation; the code follows.
This first part is lenet_inference.py, where the network structure is changed by adding the convolutional and pooling layers.
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 11:36:35 2017
@author: cxl
"""
import tensorflow as tf
#INPUT_NODE = 784
#OUTPUT_NODE = 10
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
CONV1_DEEP = 32
CONV1_SIZE = 5
CONV2_DEEP = 64
CONV2_SIZE = 5
FC_SIZE = 512
def inference(input_tensor, train, regularizer):
    # Layer 1: 5x5 convolution, 32 filters, stride 1, SAME padding.
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights,
                             strides=[1, 1, 1, 1], padding="SAME")
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2: 2x2 max pooling, stride 2 (halves the spatial dimensions).
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')

    # Layer 3: 5x5 convolution, 64 filters, stride 1, SAME padding.
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights,
                             strides=[1, 1, 1, 1], padding="SAME")
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: 2x2 max pooling, stride 2.
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the 7x7x64 feature map into a vector for the fully connected layers.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Layer 5: fully connected, 512 nodes, with dropout during training.
    # Only the fully connected weights are L2-regularized.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            "weights", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            "bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Layer 6: fully connected output layer, one logit per class.
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            "weights", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            "bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
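To sanity-check the forward pass, here is a minimal usage sketch (my own illustration, not part of the original post): it builds a single-image placeholder and confirms that inference() produces a (1, 10) logit tensor.

import tensorflow as tf
import lenet_inference

# Batch size 1, since inference() reshapes with a static batch dimension.
x = tf.placeholder(tf.float32, [1, lenet_inference.IMAGE_SIZE,
                                lenet_inference.IMAGE_SIZE,
                                lenet_inference.NUM_CHANNELS])
logits = lenet_inference.inference(x, False, None)
print(logits.get_shape())  # expected: (1, 10)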
This part is lenet_train.py, which only changes the format of the input data: images are fed as 4-D tensors rather than flat 784-pixel vectors.
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 15:45:22 2017
@author: cxl
"""
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import lenet_inference
import numpy as np
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY=0.99
REGULARAZTION_RATE=0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY=0.99
MODEL_SAVE_PATH = "./path/to/model/"
MODEL_NAME = "model.ckpt"
def train(mnist):
    # Inputs are 4-D tensors: [batch, height, width, channels].
    x = tf.placeholder(tf.float32, [BATCH_SIZE,
                                    lenet_inference.IMAGE_SIZE,
                                    lenet_inference.IMAGE_SIZE,
                                    lenet_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, lenet_inference.NUM_LABELS],
                        name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    y = lenet_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Maintain shadow (moving-average) copies of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 terms collected in inference().
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Reshape the flat 784-pixel vectors into 28x28x1 images.
            reshaped_xs = np.reshape(xs, (BATCH_SIZE,
                                          lenet_inference.IMAGE_SIZE,
                                          lenet_inference.IMAGE_SIZE,
                                          lenet_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: reshaped_xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training "
                      "batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
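The learning-rate schedule above uses tf.train.exponential_decay, which (with its default staircase=False) computes LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps). Below is a small illustrative recomputation of how the rate falls over training, assuming the standard MNIST split with 55,000 training examples.

# Illustrative sketch of the decayed learning rate (assumes 55,000
# training examples, i.e. mnist.train.num_examples == 55000).
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000 / 100  # mnist.train.num_examples / BATCH_SIZE

def decayed_lr(global_step):
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps)

print(decayed_lr(0))       # 0.01 at the start of training
print(decayed_lr(30000))   # roughly 0.0058 after the full 30,000 steps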
This part is lenet_eval.py, which likewise only changes the format of the input data.
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 23:29:34 2017
@author: cxl
"""
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import lenet_inference
import lenet_train
EVAL_INTERVAL_SECS = 10
def evaluate(mnist):
    # The placeholder batch size is fixed to the full validation set,
    # because inference() reshapes with a static batch dimension.
    x = tf.placeholder(tf.float32, [mnist.validation.num_examples,
                                    lenet_inference.IMAGE_SIZE,
                                    lenet_inference.IMAGE_SIZE,
                                    lenet_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, lenet_inference.NUM_LABELS],
                        name='y-input')
    xs, ys = mnist.validation.images, mnist.validation.labels
    reshaped_xs = np.reshape(xs, (mnist.validation.num_examples,
                                  lenet_inference.IMAGE_SIZE,
                                  lenet_inference.IMAGE_SIZE,
                                  lenet_inference.NUM_CHANNELS))
    validate_feed = {x: reshaped_xs, y_: ys}

    # No dropout and no regularizer at evaluation time.
    y = lenet_inference.inference(x, False, None)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Restore the moving-average (shadow) values in place of the raw weights.
    variable_averages = tf.train.ExponentialMovingAverage(
        lenet_train.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    while True:
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(lenet_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # The step count is encoded in the checkpoint file name.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s), validation "
                      "accuracy = %g" % (global_step, accuracy_score))
            else:
                print("No checkpoint file found")
                return
        time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
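The evaluation script builds its Saver from variables_to_restore() so that the checkpoint's moving-average values are loaded in place of the raw weights. A tiny sketch of what that call returns (my own illustration, not from the original post):

import tensorflow as tf

v = tf.Variable(0.0, name="v")
ema = tf.train.ExponentialMovingAverage(0.99)
restore_map = ema.variables_to_restore()
# Maps each shadow name in the checkpoint to the live variable, e.g.
# {'v/ExponentialMovingAverage': <tf.Variable 'v:0'>}
print(restore_map)
saver = tf.train.Saver(restore_map)  # restoring loads the averaged value into v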
All of the programs above have been debugged and run successfully. If you spot any mistakes, corrections are very welcome!