TensorFlow CNN MNIST Training Code Example
Preface (feel free to skip):
I first learned TensorFlow by following Mofan (莫烦)'s tutorials, which left a deep impression on me. Now let's look at a CNN classification model.
I developed this on VS2017. For reasons I don't understand, the MNIST resources would not download automatically, so I could only place them in the project folder by hand.
The MNIST folder holds exactly that data; Baidu Cloud resource: click to open the link
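If read_data_sets cannot download the files by itself, the MNIST/ folder next to the script needs the four standard archive files that input_data looks for (this layout is my reading of how the loader works, not something spelled out above):

MNIST/
    train-images-idx3-ubyte.gz
    train-labels-idx1-ubyte.gz
    t10k-images-idx3-ubyte.gz
    t10k-labels-idx1-ubyte.gz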
The code is as follows:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST/', one_hot=True)  # read the dataset from the MNIST/ folder
def calculate_accuracy(v_xs, v_ys):  # measure accuracy on the test data
    global prediction  # refer to the global prediction model
    y_prediction = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})  # run the forward pass on the test inputs
    num_correct = tf.equal(tf.argmax(y_prediction, 1), tf.argmax(v_ys, 1))  # compare predictions with the true labels
    accuracy = tf.reduce_mean(tf.cast(num_correct, tf.float32))  # fraction of correct predictions
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result
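# Note (my addition, not part of the original code): every call to
# calculate_accuracy builds fresh tf.equal / tf.reduce_mean nodes, so the graph
# keeps growing during training. A hedged alternative is to build the accuracy
# ops once, after `prediction` is defined, and reuse them, e.g.:
#     correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
#     accuracy_op = tf.reduce_mean(tf.cast(correct, tf.float32))
#     result = sess.run(accuracy_op, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})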
def convn_layer(batch_x, batch_y, in_size, out_size, in_resource, name):  # convolution and pooling combined in one layer
    # Initialise the layer variables: a batch_x*batch_y kernel with
    # in_size input channels and out_size output channels
    W = tf.Variable(tf.truncated_normal([batch_x, batch_y, in_size, out_size], stddev=0.1, dtype=tf.float32))
    b = tf.Variable(tf.constant(0.1, shape=[out_size]))
    conv = tf.nn.relu(tf.nn.conv2d(in_resource, W, strides=[1, 1, 1, 1], padding='SAME') + b)  # convolution
    pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 2x2 max pooling halves the spatial size
    return pool
def func_layer(in_size, out_size, in_resource, name, if_drop, m_function=None):  # add a fully connected layer
    W = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.1, dtype=tf.float32))
    b = tf.Variable(tf.constant(0.1, shape=[out_size]))
    if m_function is None:
        func = tf.matmul(in_resource, W) + b
    else:
        func = m_function(tf.matmul(in_resource, W) + b)
    if if_drop:
        output = tf.nn.dropout(func, keep_prob)  # apply dropout, controlled by the keep_prob placeholder
    else:
        output = func
    return output
xs = tf.placeholder(tf.float32, [None, 784]) / 255  # each MNIST image is 28*28 = 784 pixels; scaling the values down helps gradient descent converge faster
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32, name='dropout_value')
image_in = tf.reshape(xs, [-1, 28, 28, 1])  # reshape to [batch, height, width, channels]
conv_layer_1 = convn_layer(5, 5, 1, 32, image_in, name='conv_layer_1')  # first convolution + pooling layer
conv_layer_2 = convn_layer(5, 5, 32, 64, conv_layer_1, name='conv_layer_2')  # second convolution + pooling layer
conv_layer_2_flat = tf.reshape(conv_layer_2,[-1,7*7*64])
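# Where 7*7*64 comes from (a quick shape check, not in the original post):
#     image_in     : [batch, 28, 28, 1]
#     conv_layer_1 : 5x5 conv, SAME -> [batch, 28, 28, 32]; 2x2 pool -> [batch, 14, 14, 32]
#     conv_layer_2 : 5x5 conv, SAME -> [batch, 14, 14, 64]; 2x2 pool -> [batch, 7, 7, 64]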
# add the fully connected layers
layer1_out = func_layer(7*7*64, 1024, conv_layer_2_flat, name='function_layer1', if_drop=1, m_function=tf.nn.relu)
prediction = func_layer(1024, 10, layer1_out, name='function_layer_out', if_drop=0, m_function=tf.nn.softmax)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction), reduction_indices=[1]))
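# Side note (my addition): tf.log(prediction) returns NaN if the softmax output
# ever reaches exactly 0. A common hedge is to clip the prediction first, e.g.:
#     cross_entropy = tf.reduce_mean(-tf.reduce_sum(
#         ys * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)), reduction_indices=[1]))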
train = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)  # train with the Adam optimizer
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    for i in range(1000):
        batch_x, batch_y = mnist.train.next_batch(100)  # draw a mini-batch of 100 training examples
        sess.run(train, feed_dict={xs: batch_x, ys: batch_y, keep_prob: 0.5})  # one training step with dropout
        if i % 50 == 0:
            print(calculate_accuracy(mnist.test.images[:1000], mnist.test.labels[:1000]))  # evaluate accuracy on the first 1000 test samples
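One more hedged sketch of my own (not in the original post): feeding all 10,000 test images at once can exhaust GPU memory, so the full test set can be evaluated in batches, placed inside the same Session block after the training loop:

    total = 0.0
    for start in range(0, 10000, 1000):
        total += calculate_accuracy(mnist.test.images[start:start + 1000],
                                    mnist.test.labels[start:start + 1000])
    print('full test set accuracy:', total / 10)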
Training is fairly slow; the accuracy output is shown below: