AlexNet: Implementing MNIST Image Classification and Prediction with TensorFlow
For a detailed introduction to the AlexNet network model, see the walkthrough of the paper ImageNet Classification with Deep Convolutional Neural Networks;
for background on the framework, see the TensorFlow introduction;
and for the dataset, see the MNIST dataset introduction.
The experiment proceeds roughly as follows:
- Load the MNIST dataset and initialize the network hyperparameters;
- Build the computation graph;
- Open a Session and run the graph to train the AlexNet model and make predictions (train the model, then evaluate it).
The implementation is as follows:
#coding=utf-8
from __future__ import print_function

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

import tensorflow as tf

# Initialize the network hyperparameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20

# Initialize the network parameters
n_input = 784    # input dimensionality (28*28 pixels)
n_classes = 10   # dimensionality of the labels, i.e. the number of outputs
dropout = 0.75   # probability of keeping a unit (this value feeds keep_prob)

# Placeholders
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)

# Convolution with ReLU activation
def conv2d(name, l_input, w, b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b), name=name)

# Max pooling
def max_pool(name, l_input, k):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)

# Local response normalization (LRN)
def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
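# Note (added for clarity): lsize=4 is the LRN depth radius; the bias, alpha
# and beta values follow the widely-used TensorFlow AlexNet example rather
# than the exact settings from the original paper.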

# Initialize the network weights and biases
weights = {
    'wc1': tf.Variable(tf.random_normal([11, 11, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([5, 5, 64, 192])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 192, 384])),
    'wc4': tf.Variable(tf.random_normal([3, 3, 384, 384])),
    'wc5': tf.Variable(tf.random_normal([3, 3, 384, 256])),
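    # The 4*4*256 below comes from three stride-2 SAME-padded poolings:
    # the 28x28 input shrinks to 14 -> 7 -> ceil(7/2) = 4 per side.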
    'wd1': tf.Variable(tf.random_normal([4*4*256, 4096])),
    'wd2': tf.Variable(tf.random_normal([4096, 4096])),
    'out': tf.Variable(tf.random_normal([4096, n_classes]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([192])),
    'bc3': tf.Variable(tf.random_normal([384])),
    'bc4': tf.Variable(tf.random_normal([384])),
    'bc5': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([4096])),
    'bd2': tf.Variable(tf.random_normal([4096])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Define the full network
def alex_net(_X, _weights, _biases, _dropout):
    # Reshape the input vector into an image tensor
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # Convolutional layer 1
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
    # Downsampling
    pool1 = max_pool('pool1', conv1, k=2)
    # Normalization
    norm1 = norm('norm1', pool1, lsize=4)

    # Convolutional layer 2
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
    # Downsampling
    pool2 = max_pool('pool2', conv2, k=2)
    # Normalization
    norm2 = norm('norm2', pool2, lsize=4)

    # Convolutional layer 3 (no pooling)
    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
    # Normalization
    norm3 = norm('norm3', conv3, lsize=4)

    # Convolutional layer 4 (no pooling)
    conv4 = conv2d('conv4', norm3, _weights['wc4'], _biases['bc4'])
    # Normalization
    norm4 = norm('norm4', conv4, lsize=4)

    # Convolutional layer 5
    conv5 = conv2d('conv5', norm4, _weights['wc5'], _biases['bc5'])
    # Downsampling
    pool5 = max_pool('pool5', conv5, k=2)
    # Normalization
    norm5 = norm('norm5', pool5, lsize=4)

    # Fully connected layer 1
    dense1 = tf.reshape(norm5, [-1, _weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')
    dense1 = tf.nn.dropout(dense1, _dropout)

    # Fully connected layer 2 (dense1 is already flat, so no further reshape is needed)
    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')
    dense2 = tf.nn.dropout(dense2, _dropout)

    # Output layer
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out

# Build the model
pred = alex_net(x, weights, biases, keep_prob)

# Define the loss function and the training step
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate the network
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize all shared variables
# (deprecated in later releases in favor of tf.global_variables_initializer)
init = tf.initialize_all_variables()

# Start a session and train
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until the maximum number of iterations is reached
    while step * batch_size < training_iters:
        # Fetch a batch of training data
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Compute the accuracy on the current batch
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Compute the loss on the current batch
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss = " + "{:.6f}".format(loss) + ", Training Accuracy = " + "{:.5f}".format(acc))
        step += 1
    # Compute the test accuracy (on the first 256 test images)
    print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))
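Before looking at the run output, it helps to verify where the 4*4*256 input size of 'wd1' comes from. The following short check (an added sketch, not part of the original script) traces the feature-map side length through the three stride-2 SAME-padded pooling layers:

import math

side = 28                       # MNIST images are 28x28
for pool in ('pool1', 'pool2', 'pool5'):
    side = math.ceil(side / 2)  # SAME-padded stride-2 pooling rounds up
    print(pool, '->', side)     # prints 14, 7, 4
print('flattened size:', side * side * 256)  # 4*4*256 = 4096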
A sample run produces the following output:
WARNING:tensorflow:From alex_mnist.py:5: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please write your own downloading logic.
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:252: _internal_retry.<locals>.wrap.<locals>.wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Please use urllib or similar directly.
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting /tmp/data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.data to implement this functionality.
Extracting /tmp/data/train-labels-idx1-ubyte.gz
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use tf.one_hot on tensors.
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.
Instructions for updating:
Please use alternatives such as official/mnist/dataset.py from tensorflow/models.
WARNING:tensorflow:From alex_mnist.py:117: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See @{tf.nn.softmax_cross_entropy_with_logits_v2}.
WARNING:tensorflow:From /home/cnu105/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/tf_should_use.py:118: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2019-06-02 11:17:06.612024: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2019-06-02 11:17:11.746821: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 0 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:04:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:11.987157: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 1 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:05:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:12.242069: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 2 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:08:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:12.494381: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 3 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:09:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:12.769954: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:897] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2019-06-02 11:17:12.771926: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 4 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:83:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:13.045728: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:897] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2019-06-02 11:17:13.047507: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 5 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:84:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:13.330365: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:897] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2019-06-02 11:17:13.332353: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 6 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:87:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:13.647518: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:897] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2019-06-02 11:17:13.649454: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1392] Found device 7 with properties:
name: GeForce GTX TITAN X major: 5 minor: 2 memoryClockRate(GHz): 1.076
pciBusID: 0000:88:00.0
totalMemory: 11.93GiB freeMemory: 11.81GiB
2019-06-02 11:17:13.656551: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1471] Adding visible gpu devices: 0, 1, 2, 3, 4, 5, 6, 7
2019-06-02 11:17:16.979112: I tensorflow/core/common_runtime/gpu/gpu_device.cc:952] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-06-02 11:17:16.979177: I tensorflow/core/common_runtime/gpu/gpu_device.cc:958] 0 1 2 3 4 5 6 7
2019-06-02 11:17:16.979185: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 0: N Y Y Y N N N N
2019-06-02 11:17:16.979191: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 1: Y N Y Y N N N N
2019-06-02 11:17:16.979195: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 2: Y Y N Y N N N N
2019-06-02 11:17:16.979200: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 3: Y Y Y N N N N N
2019-06-02 11:17:16.979204: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 4: N N N N N Y Y Y
2019-06-02 11:17:16.979208: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 5: N N N N Y N Y Y
2019-06-02 11:17:16.979216: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 6: N N N N Y Y N Y
2019-06-02 11:17:16.979222: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] 7: N N N N Y Y Y N
2019-06-02 11:17:16.981134: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 11432 MB memory) -> physical GPU (device: 0, name: GeForce GTX TITAN X, pci bus id: 0000:04:00.0, compute capability: 5.2)
2019-06-02 11:17:18.523499: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:1 with 11432 MB memory) -> physical GPU (device: 1, name: GeForce GTX TITAN X, pci bus id: 0000:05:00.0, compute capability: 5.2)
2019-06-02 11:17:19.855094: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:2 with 11432 MB memory) -> physical GPU (device: 2, name: GeForce GTX TITAN X, pci bus id: 0000:08:00.0, compute capability: 5.2)
2019-06-02 11:17:21.862306: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:3 with 11432 MB memory) -> physical GPU (device: 3, name: GeForce GTX TITAN X, pci bus id: 0000:09:00.0, compute capability: 5.2)
2019-06-02 11:17:23.769646: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:4 with 11432 MB memory) -> physical GPU (device: 4, name: GeForce GTX TITAN X, pci bus id: 0000:83:00.0, compute capability: 5.2)
2019-06-02 11:17:25.666274: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:5 with 11432 MB memory) -> physical GPU (device: 5, name: GeForce GTX TITAN X, pci bus id: 0000:84:00.0, compute capability: 5.2)
2019-06-02 11:17:27.601951: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:6 with 11432 MB memory) -> physical GPU (device: 6, name: GeForce GTX TITAN X, pci bus id: 0000:87:00.0, compute capability: 5.2)
2019-06-02 11:17:29.557999: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1084] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:7 with 11432 MB memory) -> physical GPU (device: 7, name: GeForce GTX TITAN X, pci bus id: 0000:88:00.0, compute capability: 5.2)
Iter 1280, Minibatch Loss = 334336.312500, Training Accuracy = 0.53125
Iter 2560, Minibatch Loss = 160650.203125, Training Accuracy = 0.65625
Iter 3840, Minibatch Loss = 58704.535156, Training Accuracy = 0.84375
Iter 5120, Minibatch Loss = 108148.156250, Training Accuracy = 0.81250
Iter 6400, Minibatch Loss = 28172.527344, Training Accuracy = 0.89062
Iter 7680, Minibatch Loss = 77572.265625, Training Accuracy = 0.81250
Iter 8960, Minibatch Loss = 34307.992188, Training Accuracy = 0.90625
Iter 10240, Minibatch Loss = 63918.812500, Training Accuracy = 0.84375
Iter 11520, Minibatch Loss = 42227.984375, Training Accuracy = 0.89062
Iter 12800, Minibatch Loss = 44791.882812, Training Accuracy = 0.87500
Iter 14080, Minibatch Loss = 31170.875000, Training Accuracy = 0.90625
Iter 15360, Minibatch Loss = 37148.425781, Training Accuracy = 0.87500
Iter 16640, Minibatch Loss = 24845.406250, Training Accuracy = 0.92188
Iter 17920, Minibatch Loss = 16903.435547, Training Accuracy = 0.93750
Iter 19200, Minibatch Loss = 64891.562500, Training Accuracy = 0.92188
Iter 20480, Minibatch Loss = 6705.311523, Training Accuracy = 0.96875
Iter 21760, Minibatch Loss = 13014.283203, Training Accuracy = 0.95312
Iter 23040, Minibatch Loss = 27697.880859, Training Accuracy = 0.93750
Iter 24320, Minibatch Loss = 22059.644531, Training Accuracy = 0.89062
Iter 25600, Minibatch Loss = 38557.429688, Training Accuracy = 0.92188
Iter 26880, Minibatch Loss = 27364.382812, Training Accuracy = 0.93750
Iter 28160, Minibatch Loss = 11737.021484, Training Accuracy = 0.92188
Iter 29440, Minibatch Loss = 22346.703125, Training Accuracy = 0.92188
Iter 30720, Minibatch Loss = 31779.984375, Training Accuracy = 0.87500
Iter 32000, Minibatch Loss = 2139.180664, Training Accuracy = 0.98438
Iter 33280, Minibatch Loss = 15727.681641, Training Accuracy = 0.92188
Iter 34560, Minibatch Loss = 49101.921875, Training Accuracy = 0.89062
Iter 35840, Minibatch Loss = 14567.428711, Training Accuracy = 0.92188
Iter 37120, Minibatch Loss = 56209.976562, Training Accuracy = 0.92188
Iter 38400, Minibatch Loss = 9376.914062, Training Accuracy = 0.96875
Iter 39680, Minibatch Loss = 6285.625488, Training Accuracy = 0.96875
Iter 40960, Minibatch Loss = 8804.068359, Training Accuracy = 0.93750
Iter 42240, Minibatch Loss = 20884.656250, Training Accuracy = 0.95312
Iter 43520, Minibatch Loss = 4721.957031, Training Accuracy = 0.98438
Iter 44800, Minibatch Loss = 18102.578125, Training Accuracy = 0.89062
Iter 46080, Minibatch Loss = 1833.863403, Training Accuracy = 0.98438
Iter 47360, Minibatch Loss = 1440.751953, Training Accuracy = 0.98438
Iter 48640, Minibatch Loss = 10380.244141, Training Accuracy = 0.92188
Iter 49920, Minibatch Loss = 3477.957031, Training Accuracy = 0.98438
Iter 51200, Minibatch Loss = 7075.423828, Training Accuracy = 0.98438
Iter 52480, Minibatch Loss = 16556.628906, Training Accuracy = 0.95312
Iter 53760, Minibatch Loss = 22188.033203, Training Accuracy = 0.90625
Iter 55040, Minibatch Loss = 19870.359375, Training Accuracy = 0.95312
Iter 56320, Minibatch Loss = 4704.218750, Training Accuracy = 0.98438
Iter 57600, Minibatch Loss = 17331.410156, Training Accuracy = 0.89062
Iter 58880, Minibatch Loss = 3667.264648, Training Accuracy = 0.95312
Iter 60160, Minibatch Loss = 19282.742188, Training Accuracy = 0.93750
Iter 61440, Minibatch Loss = 9652.192383, Training Accuracy = 0.93750
Iter 62720, Minibatch Loss = 9680.341797, Training Accuracy = 0.96875
Iter 64000, Minibatch Loss = 18480.486328, Training Accuracy = 0.96875
Iter 65280, Minibatch Loss = 20012.876953, Training Accuracy = 0.92188
Iter 66560, Minibatch Loss = 15355.311523, Training Accuracy = 0.93750
Iter 67840, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 69120, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 70400, Minibatch Loss = 4498.256348, Training Accuracy = 0.95312
Iter 71680, Minibatch Loss = 6279.625000, Training Accuracy = 0.95312
Iter 72960, Minibatch Loss = 12970.482422, Training Accuracy = 0.92188
Iter 74240, Minibatch Loss = 5218.539062, Training Accuracy = 0.96875
Iter 75520, Minibatch Loss = 15303.201172, Training Accuracy = 0.93750
Iter 76800, Minibatch Loss = 3257.265137, Training Accuracy = 0.95312
Iter 78080, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 79360, Minibatch Loss = 14279.500000, Training Accuracy = 0.95312
Iter 80640, Minibatch Loss = 8679.945312, Training Accuracy = 0.98438
Iter 81920, Minibatch Loss = 7986.568359, Training Accuracy = 0.96875
Iter 83200, Minibatch Loss = 3156.044434, Training Accuracy = 0.96875
Iter 84480, Minibatch Loss = 13759.476562, Training Accuracy = 0.89062
Iter 85760, Minibatch Loss = 6791.756836, Training Accuracy = 0.93750
Iter 87040, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 88320, Minibatch Loss = 14531.399414, Training Accuracy = 0.93750
Iter 89600, Minibatch Loss = 9443.058594, Training Accuracy = 0.95312
Iter 90880, Minibatch Loss = 11772.400391, Training Accuracy = 0.93750
Iter 92160, Minibatch Loss = 691.387207, Training Accuracy = 0.98438
Iter 93440, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 94720, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 96000, Minibatch Loss = 5482.347168, Training Accuracy = 0.98438
Iter 97280, Minibatch Loss = 4430.060059, Training Accuracy = 0.96875
Iter 98560, Minibatch Loss = 3621.751465, Training Accuracy = 0.96875
Iter 99840, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 101120, Minibatch Loss = 8754.013672, Training Accuracy = 0.95312
Iter 102400, Minibatch Loss = 1073.010498, Training Accuracy = 0.98438
Iter 103680, Minibatch Loss = 11556.099609, Training Accuracy = 0.93750
Iter 104960, Minibatch Loss = 3953.391357, Training Accuracy = 0.96875
Iter 106240, Minibatch Loss = 9589.357422, Training Accuracy = 0.93750
Iter 107520, Minibatch Loss = 3694.890137, Training Accuracy = 0.98438
Iter 108800, Minibatch Loss = 16642.496094, Training Accuracy = 0.95312
Iter 110080, Minibatch Loss = 3514.834961, Training Accuracy = 0.96875
Iter 111360, Minibatch Loss = 5243.551758, Training Accuracy = 0.92188
Iter 112640, Minibatch Loss = 1790.374512, Training Accuracy = 0.98438
Iter 113920, Minibatch Loss = 61.289795, Training Accuracy = 0.98438
Iter 115200, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 116480, Minibatch Loss = 6023.192383, Training Accuracy = 0.93750
Iter 117760, Minibatch Loss = 1192.194824, Training Accuracy = 0.98438
Iter 119040, Minibatch Loss = 1352.831055, Training Accuracy = 0.96875
Iter 120320, Minibatch Loss = 4922.876465, Training Accuracy = 0.92188
Iter 121600, Minibatch Loss = 677.854614, Training Accuracy = 0.98438
Iter 122880, Minibatch Loss = 6639.125977, Training Accuracy = 0.95312
Iter 124160, Minibatch Loss = 1121.854980, Training Accuracy = 0.98438
Iter 125440, Minibatch Loss = 2850.864502, Training Accuracy = 0.93750
Iter 126720, Minibatch Loss = 24656.960938, Training Accuracy = 0.92188
Iter 128000, Minibatch Loss = 7486.107910, Training Accuracy = 0.98438
Iter 129280, Minibatch Loss = 9559.851562, Training Accuracy = 0.93750
Iter 130560, Minibatch Loss = 8428.903320, Training Accuracy = 0.93750
Iter 131840, Minibatch Loss = 6142.676758, Training Accuracy = 0.93750
Iter 133120, Minibatch Loss = 8347.507812, Training Accuracy = 0.96875
Iter 134400, Minibatch Loss = 3275.400146, Training Accuracy = 0.96875
Iter 135680, Minibatch Loss = 1656.050293, Training Accuracy = 0.96875
Iter 136960, Minibatch Loss = 8057.255371, Training Accuracy = 0.95312
Iter 138240, Minibatch Loss = 4164.952637, Training Accuracy = 0.98438
Iter 139520, Minibatch Loss = 6530.175781, Training Accuracy = 0.92188
Iter 140800, Minibatch Loss = 3056.857178, Training Accuracy = 0.95312
Iter 142080, Minibatch Loss = 3378.891113, Training Accuracy = 0.98438
Iter 143360, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 144640, Minibatch Loss = 7268.273926, Training Accuracy = 0.96875
Iter 145920, Minibatch Loss = 1075.168457, Training Accuracy = 0.98438
Iter 147200, Minibatch Loss = 3567.811279, Training Accuracy = 0.96875
Iter 148480, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 149760, Minibatch Loss = 6473.182617, Training Accuracy = 0.93750
Iter 151040, Minibatch Loss = 992.911133, Training Accuracy = 0.95312
Iter 152320, Minibatch Loss = 2331.686523, Training Accuracy = 0.96875
Iter 153600, Minibatch Loss = 100.348145, Training Accuracy = 0.98438
Iter 154880, Minibatch Loss = 1558.047974, Training Accuracy = 0.95312
Iter 156160, Minibatch Loss = 2154.779053, Training Accuracy = 0.95312
Iter 157440, Minibatch Loss = 330.975830, Training Accuracy = 0.98438
Iter 158720, Minibatch Loss = 2498.369873, Training Accuracy = 0.98438
Iter 160000, Minibatch Loss = 3539.295654, Training Accuracy = 0.93750
Iter 161280, Minibatch Loss = 7858.209961, Training Accuracy = 0.96875
Iter 162560, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 163840, Minibatch Loss = 4297.341309, Training Accuracy = 0.95312
Iter 165120, Minibatch Loss = 212.943359, Training Accuracy = 0.98438
Iter 166400, Minibatch Loss = 6788.069336, Training Accuracy = 0.92188
Iter 167680, Minibatch Loss = 6468.097168, Training Accuracy = 0.96875
Iter 168960, Minibatch Loss = 5101.724609, Training Accuracy = 0.95312
Iter 170240, Minibatch Loss = 9081.995117, Training Accuracy = 0.90625
Iter 171520, Minibatch Loss = 5971.581055, Training Accuracy = 0.93750
Iter 172800, Minibatch Loss = 7561.061035, Training Accuracy = 0.92188
Iter 174080, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 175360, Minibatch Loss = 506.290039, Training Accuracy = 0.96875
Iter 176640, Minibatch Loss = 5510.062988, Training Accuracy = 0.93750
Iter 177920, Minibatch Loss = 3553.265137, Training Accuracy = 0.95312
Iter 179200, Minibatch Loss = 4029.658936, Training Accuracy = 0.92188
Iter 180480, Minibatch Loss = 8109.407227, Training Accuracy = 0.95312
Iter 181760, Minibatch Loss = 806.317383, Training Accuracy = 0.98438
Iter 183040, Minibatch Loss = 3882.623779, Training Accuracy = 0.96875
Iter 184320, Minibatch Loss = 10893.535156, Training Accuracy = 0.93750
Iter 185600, Minibatch Loss = 2430.192383, Training Accuracy = 0.96875
Iter 186880, Minibatch Loss = 2718.515869, Training Accuracy = 0.98438
Iter 188160, Minibatch Loss = 5090.834961, Training Accuracy = 0.92188
Iter 189440, Minibatch Loss = 3633.469238, Training Accuracy = 0.96875
Iter 190720, Minibatch Loss = 0.000000, Training Accuracy = 1.00000
Iter 192000, Minibatch Loss = 297.924072, Training Accuracy = 0.98438
Iter 193280, Minibatch Loss = 2371.230713, Training Accuracy = 0.95312
Iter 194560, Minibatch Loss = 3749.918213, Training Accuracy = 0.96875
Iter 195840, Minibatch Loss = 2433.742676, Training Accuracy = 0.98438
Iter 197120, Minibatch Loss = 921.659790, Training Accuracy = 0.96875
Iter 198400, Minibatch Loss = 3149.487549, Training Accuracy = 0.98438
Iter 199680, Minibatch Loss = 30.536133, Training Accuracy = 0.98438
Testing Accuracy: 0.96484375
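Note that the final figure above is measured on only the first 256 test images. As a rough sketch (reusing the script's own sess, x, y, keep_prob, accuracy and mnist names; it must run inside the same with tf.Session() block, after training), the full 10,000-image test set could be evaluated in batches like this:

# Evaluate the whole test set in batches of 256 to avoid one huge feed.
total_acc = 0.0
n_batches = mnist.test.num_examples // 256
for i in range(n_batches):
    xs = mnist.test.images[i * 256:(i + 1) * 256]
    ys = mnist.test.labels[i * 256:(i + 1) * 256]
    total_acc += sess.run(accuracy, feed_dict={x: xs, y: ys, keep_prob: 1.})
print("Full test accuracy:", total_acc / n_batches)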
Note: while the experiment is running, you can use the nvidia-smi command to check GPU utilization.
Done!