Neural Networks: Implementing the VGGNet Model (Python + TensorFlow)
The code implements the VGG-16 architecture: five convolutional blocks (13 3×3 convolutional layers in total), each followed by 2×2 max pooling, and then three fully connected layers (4096, 4096, and the number of output classes).
# -*- coding:utf-8 -*-
#
# VGG-16 Net model
import numpy as np
import tensorflow as tf
class Vgg16:
    def __init__(self, images, keep_prob, name):
        self.name = name
        self.input = images
        # Build the network; keep_prob controls dropout in the fully connected layers.
        self.output = self.vgg16(self.input, keep_prob)
        # Collect all trainable variables of the model.
        list_vars = tf.trainable_variables()
        self.vars = [var for var in list_vars]
    def get_conv_weight(self, shape, name):
        # Convolution kernel initialized from a truncated normal distribution.
        return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)

    def get_bias(self, shape, name):
        # Bias initialized to zero.
        return tf.Variable(tf.constant(0.0, shape=shape), name=name)

    def get_fc_weight(self, shape, name):
        # Fully connected weight initialized from a truncated normal distribution.
        return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
    def conv_layer(self, x, ks, out_units, name):
        # ks x ks convolution with stride 1 and SAME padding, followed by ReLU.
        with tf.variable_scope(name):
            in_units = x.get_shape().as_list()[-1]
            filt = self.get_conv_weight([ks, ks, in_units, out_units], name='weight')
            bias = self.get_bias([out_units], name='bias')
            out = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, filt, [1, 1, 1, 1], padding='SAME'), bias))
            return out
    def fc_layer(self, x, out_units, name):
        # Flatten the input and apply a fully connected (dense) layer.
        with tf.variable_scope(name):
            in_units = np.prod(x.get_shape().as_list()[1:])
            x_flat = tf.reshape(x, [-1, in_units])
            weight = self.get_fc_weight([in_units, out_units], name='weight')
            biases = self.get_bias([out_units], name='bias')
            out = tf.nn.bias_add(tf.matmul(x_flat, weight), biases)
            return out
    def avg_pool(self, x, name):
        return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, x, name):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    def vgg16(self, x, keep_prob):
        # Block 1: two 64-channel convolutions + max pooling.
        conv1_1 = self.conv_layer(x, ks=3, out_units=64, name='conv1_1')
        conv1_2 = self.conv_layer(conv1_1, ks=3, out_units=64, name='conv1_2')
        pool1 = self.max_pool(conv1_2, 'pool1')
        # Block 2: two 128-channel convolutions + max pooling.
        conv2_1 = self.conv_layer(pool1, ks=3, out_units=128, name='conv2_1')
        conv2_2 = self.conv_layer(conv2_1, ks=3, out_units=128, name='conv2_2')
        pool2 = self.max_pool(conv2_2, 'pool2')
        # Block 3: three 256-channel convolutions + max pooling.
        conv3_1 = self.conv_layer(pool2, ks=3, out_units=256, name='conv3_1')
        conv3_2 = self.conv_layer(conv3_1, ks=3, out_units=256, name='conv3_2')
        conv3_3 = self.conv_layer(conv3_2, ks=3, out_units=256, name='conv3_3')
        pool3 = self.max_pool(conv3_3, 'pool3')
        # Block 4: three 512-channel convolutions + max pooling.
        conv4_1 = self.conv_layer(pool3, ks=3, out_units=512, name='conv4_1')
        conv4_2 = self.conv_layer(conv4_1, ks=3, out_units=512, name='conv4_2')
        conv4_3 = self.conv_layer(conv4_2, ks=3, out_units=512, name='conv4_3')
        pool4 = self.max_pool(conv4_3, 'pool4')
        # Block 5: three 512-channel convolutions + max pooling.
        conv5_1 = self.conv_layer(pool4, ks=3, out_units=512, name='conv5_1')
        conv5_2 = self.conv_layer(conv5_1, ks=3, out_units=512, name='conv5_2')
        conv5_3 = self.conv_layer(conv5_2, ks=3, out_units=512, name='conv5_3')
        pool5 = self.max_pool(conv5_3, 'pool5')
        # Classifier: two 4096-unit fully connected layers with ReLU and dropout,
        # then the output layer (1000 classes as in the ImageNet VGG-16; change to fit your dataset).
        fc6 = self.fc_layer(pool5, out_units=4096, name='fc6')
        fc6_relu = tf.nn.relu(fc6)
        fc6_drop = tf.nn.dropout(fc6_relu, keep_prob, name='fc6_drop')
        fc7 = self.fc_layer(fc6_drop, out_units=4096, name='fc7')
        fc7_relu = tf.nn.relu(fc7)
        fc7_drop = tf.nn.dropout(fc7_relu, keep_prob, name='fc7_drop')
        fc8 = self.fc_layer(fc7_drop, out_units=1000, name='fc8')
        out = tf.nn.softmax(fc8, name='out')
        return out
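To sanity-check the class, it can be wired up roughly as follows. This is a minimal sketch assuming the TensorFlow 1.x graph API (tf.placeholder / tf.Session) and 224×224 RGB inputs as in the original VGG-16 configuration; the batch size, placeholder names, and keep_prob value are illustrative, not part of the original article.

# Minimal usage sketch (assumes TensorFlow 1.x; names and sizes are illustrative).
import numpy as np
import tensorflow as tf

# 224x224 RGB images, the input size used by the standard VGG-16.
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

model = Vgg16(images, keep_prob, name='vgg16')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Forward pass on a random batch; keep_prob=1.0 disables dropout for inference.
    batch = np.random.rand(2, 224, 224, 3).astype(np.float32)
    probs = sess.run(model.output, feed_dict={images: batch, keep_prob: 1.0})
    print(probs.shape)  # (2, 1000)

For training, the collected model.vars list can be handed to an optimizer's var_list argument after defining a cross-entropy loss on fc8's softmax output.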