TensorFlow Learning (11): Convolutional Neural Networks
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# ==================== 1. Load the training data ==============================
mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size  # number of batches per epoch
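
# Aside (not in the original post): this tutorial's MNIST split holds 55,000
# training images, so with batch_size = 100 there are 550 batches per epoch.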

# Initialize the weights
def weight_variable(shape):
    # Generate values from a truncated normal distribution
    initial = tf.truncated_normal(shape=shape, stddev=0.1)
    return tf.Variable(initial)

# Initialize the biases
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolutional layer
def conv2d(x, W):
    # x is the input tensor, shaped [batch, in_height, in_width, in_channels]:
    # in_height and in_width are the image height and width, and in_channels is
    # the number of channels (1 for grayscale, 3 for color).
    # W is the filter/kernel, shaped [filter_height, filter_width, in_channels, out_channels]:
    # the filter height and width, plus the input and output channel counts.
    # strides is the stride; strides[0] and strides[3] are fixed at 1, while
    # strides[1] is the stride along x and strides[2] is the stride along y.
    # padding handles the edges: "SAME" zero-pads, "VALID" does not.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")

# Pooling layer
def max_pool_2x2(x):
    # ksize is the pooling window size, shaped [1, x, y, 1];
    # positions 0 and 3 are fixed at 1.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
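
# Aside (not in the original post): a quick, throwaway check of the two
# helpers above. With SAME padding and stride 1 the convolution keeps the
# spatial size, while the 2x2 pool halves it; `demo` is a hypothetical tensor.
# demo = tf.zeros([1, 28, 28, 1])
# print(conv2d(demo, weight_variable([5, 5, 1, 32])).shape)  # (1, 28, 28, 32)
# print(max_pool_2x2(demo).shape)                            # (1, 14, 14, 1)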

# ==================== 2. Build the network ==============================
# Define the placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# Reshape x back into image form: [batch, in_height, in_width, in_channels]
x_image = tf.reshape(x, [-1, 28, 28, 1])
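
# Aside (not in the original post): the -1 lets tf.reshape infer that
# dimension, so a (100, 784) batch of flattened images becomes (100, 28, 28, 1)
# without hard-coding the batch size.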

# Analysis:
# After the first convolution a 28*28 image is still 28*28: the stride is
# [1,1,1,1] and the padding is SAME, so the spatial size is unchanged.
# After the first pooling it becomes 14*14: the pooling window is [1,2,2,1]
# with stride 2, so each side is halved.
# After the second convolution it is still 14*14; after the second pooling it
# becomes 7*7.
# After these steps we are left with 64 feature maps of size 7*7.

# Initialize the weights and biases of the first convolutional layer.
# Shape [filter_height, filter_width, in_channels, out_channels]: in_channels
# is the single input channel, and out_channels = 32 means 32 kernels.
W_conv1 = weight_variable([5,5,1,32])
# One bias per kernel
b_conv1 = bias_variable([32])

# Convolve, then apply the activation function
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1)+b_conv1)
# Pooling
h_pool1 = max_pool_2x2(h_conv1)

# Initialize the weights and biases of the second convolutional layer
W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])

# Compute the second convolutional layer
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
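
# Aside (not in the original post): the statically inferred shapes should
# match the analysis above; uncomment to verify.
# print(h_conv1.shape)  # (?, 28, 28, 32)
# print(h_pool1.shape)  # (?, 14, 14, 32)
# print(h_conv2.shape)  # (?, 14, 14, 64)
# print(h_pool2.shape)  # (?, 7, 7, 64)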

# -------------------------------------------------
# Define the first fully connected layer: 7*7*64 matches the previous layer's
# output, and 1024 is a size we choose ourselves.
# Initialize
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])

# First flatten the second pooling layer's output [100, 7, 7, 64] to one row
# per example; the -1 here means "any value", similar to None.
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
# Compute the first fully connected layer's output
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Add a Dropout op
keep_pro = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_pro)
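
# Aside (not in the original post): tf.nn.dropout keeps each activation with
# probability keep_pro and scales the survivors by 1/keep_pro, so the expected
# value is unchanged; feeding keep_pro=1.0 at evaluation time disables it.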

# Initialize the second fully connected layer (the output layer)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
prediction = tf.nn.softmax(logits)

# ==================== 3. Loss function ==============================
# Note: softmax_cross_entropy_with_logits expects raw logits rather than
# softmax output, so we pass the pre-softmax `logits` here; the original fed
# `prediction`, which applies softmax twice and trains poorly.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
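
# Aside (not in the original post): for a one-hot label y and softmax
# probabilities p, the per-example cross entropy is -sum(y * log(p)); if the
# true class is given probability 0.9, the loss is -log(0.9) ≈ 0.105.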

# ==================== 4. Minimize the loss ==============================
train = tf.train.AdadeltaOptimizer(learning_rate=0.2).minimize(loss=loss)

# Measure the accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
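
# Aside (not in the original post): tf.argmax(..., 1) picks the class index
# per row, tf.equal compares predicted and true indices, and casting the
# booleans to float32 before averaging turns e.g. [True, False, True] into
# an accuracy of 2/3.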

# ==================== 5. Train ==============================
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(1):  # a single pass over the training set
        for batch in range(n_batch):
            batch_xz, batch_yz = mnist.train.next_batch(batch_size=batch_size)
            sess.run(train, feed_dict={x: batch_xz, y: batch_yz, keep_pro: 0.5})
            # Evaluate with dropout disabled (keep_pro=1.0); the original fed
            # 0.5 here, which randomly drops units at test time too.
            print("====batch====>" + str(batch) + "====accuracy====>" + str(
                sess.run(accuracy, feed_dict={x: mnist.test.images,
                                              y: mnist.test.labels,
                                              keep_pro: 1.0})))
    acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y: mnist.test.labels,
                                        keep_pro: 1.0})
    print("======Final accuracy======>" + str(acc))
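
# Aside (not in the original post): still inside the with-block, a single test
# image could be classified after training; `img` and `digit` are hypothetical
# names.
#     img = mnist.test.images[0:1]
#     digit = sess.run(tf.argmax(prediction, 1),
#                      feed_dict={x: img, keep_pro: 1.0})
#     print("predicted digit:", digit[0])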