Implementing MNIST with TensorFlow

import tensorflow as tf

# Load the input data (the tutorials module ships with TensorFlow 1.x)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
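# Note: tensorflow.examples.tutorials.mnist only ships with TensorFlow 1.x. If it
# is unavailable, a minimal alternative loader looks like this (a sketch using
# tf.keras.datasets; the variable names are illustrative, not part of this script):
#
#   import numpy as np
#   (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
#   train_x = train_x.reshape(-1, 784).astype('float32') / 255.0  # flatten to 784
#   train_y = np.eye(10, dtype='float32')[train_y]                # one-hot labels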


# Network hyperparameters
learning_rate = 0.0001
training_iters = 200000  # stop once step * batch_size reaches this many samples
batch_size = 128
display_step = 10        # print training stats every display_step steps



# Network parameters
n_input = 784    # input dimensionality (img shape: 28x28, flattened)
n_classes = 10   # number of label classes (digits 0-9)
dropout = 0.75   # dropout keep probability




x = tf.placeholder(tf.float32, shape=[None, n_input], name='x-input')
y = tf.placeholder(tf.float32, shape=[None, n_classes], name='y-input')

keep_prob = tf.placeholder(tf.float32)  # dropout keep probability


# Build the network model; we will write TensorFlow code in this pattern from now on.
# Next we define the convolution, pooling, and normalization operations the
# AlexNet-style network needs. For simplicity, we wrap them in helper functions:

# Convolution: conv + bias + ReLU
def conv2d(name, x, W, b, strides=1):
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x, name=name)  # ReLU activation


# Max pooling: k x k window with stride k
def maxpool2d(name, x, k=2):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)


# Local response normalization (LRN, as in AlexNet): each activation is divided
# by (bias + alpha * sum of squares over lsize neighboring channels) ** beta
def norm(name, l_input, lsize=4):
  return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0,
                   beta=0.75, name=name)




# Define all network weights
weights = {
  'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),    # 3x3 conv, 1 -> 64 channels
  'wc2': tf.Variable(tf.random_normal([3, 3, 64, 256])),  # 3x3 conv, 64 -> 256 channels
  'wd1': tf.Variable(tf.random_normal([4*4*256, 128])),   # fully connected: 4*4*256 -> 128
  'wd2': tf.Variable(tf.random_normal([128, 64])),
  'wd3': tf.Variable(tf.random_normal([64, 32])),
  'out': tf.Variable(tf.random_normal([32, n_classes]))
}




biases = {
  'bc1': tf.Variable(tf.random_normal([64])),
  'bc2': tf.Variable(tf.random_normal([256])),
  'bd1': tf.Variable(tf.random_normal([128])),
  'bd2': tf.Variable(tf.random_normal([64])),
  'bd3': tf.Variable(tf.random_normal([32])),
  'out': tf.Variable(tf.random_normal([n_classes]))
}
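# A quick shape check (a sketch with a dummy tensor, reusing the helpers above)
# showing where the 4*4*256 flatten size in 'wd1' comes from: SAME padding with
# stride k yields ceil(n/k) outputs, so 28 -> ceil(28/3) = 10 -> ceil(10/3) = 4.
demo = tf.zeros([1, 28, 28, 1])
demo_c1 = conv2d('demo_conv1', demo, weights['wc1'], biases['bc1'])     # 28x28x64
demo_p1 = maxpool2d('demo_pool1', demo_c1, k=3)                         # 10x10x64
demo_c2 = conv2d('demo_conv2', demo_p1, weights['wc2'], biases['bc2'])  # 10x10x256
demo_p2 = maxpool2d('demo_pool2', demo_c2, k=3)                         # 4x4x256
print('demo_p2.shape:', demo_p2.get_shape().as_list())                  # [1, 4, 4, 256]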




def my_model(x, weights, biases, dropout):
  # Reshape the flat input back into an image batch (NHWC)
  x = tf.reshape(x, shape=[-1, 28, 28, 1])

  # Layer 1: convolution
  conv1 = conv2d('conv1', x, weights['wc1'], biases['bc1'])
  conv1 = tf.nn.dropout(conv1, dropout)
  print('conv1.shape: ', conv1.get_shape().as_list())

  # Downsampling (max pooling)
  pool1 = maxpool2d('pool1', conv1, k=3)
  print('pool1.shape: ', pool1.get_shape().as_list())
  # Normalization
  norm1 = norm('norm1', pool1, lsize=4)

  # Layer 2: convolution (fed norm1, so the normalized output is actually used)
  conv2 = conv2d('conv2', norm1, weights['wc2'], biases['bc2'])
  conv2 = tf.nn.dropout(conv2, dropout)
  print('conv2.shape: ', conv2.get_shape().as_list())

  # Max pooling (downsampling)
  pool2 = maxpool2d('pool2', conv2, k=3)
  print('pool2.shape: ', pool2.get_shape().as_list())
  # Normalization
  norm2 = norm('norm2', pool2, lsize=4)

  # Fully connected layer 1: flatten norm2 and project 4*4*256 -> 128
  fc1 = tf.reshape(norm2, [-1, weights['wd1'].get_shape().as_list()[0]])
  fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
  fc1 = tf.nn.relu(fc1)
  # dropout
  fc1 = tf.nn.dropout(fc1, dropout)




  # Fully connected layer 2: 128 -> 64 (fc1 is already flat, so no reshape is needed)
  fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
  fc2 = tf.nn.relu(fc2)
  # dropout
  fc2 = tf.nn.dropout(fc2, dropout)

  # Fully connected layer 3: 64 -> 32
  fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])
  fc3 = tf.nn.relu(fc3)
  # dropout
  fc3 = tf.nn.dropout(fc3, dropout)



  # Output layer: 32 -> n_classes logits (softmax is applied inside the loss)
  out = tf.add(tf.matmul(fc3, weights['out']), biases['out'])



  return out



pred = my_model(x, weights, biases, keep_prob)
print("pred")

print(pred.shape)



# Define the loss function and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluation ops
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
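# A toy illustration (hypothetical values, not from the model) of these ops:
# argmax picks the predicted class per row, tf.equal compares it against the
# label's argmax, and the mean of the casts is the accuracy.
toy_logits = tf.constant([[2.0, 0.1], [0.3, 1.5]])
toy_labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
toy_acc = tf.reduce_mean(tf.cast(
    tf.equal(tf.argmax(toy_logits, 1), tf.argmax(toy_labels, 1)), tf.float32))
# sess.run(toy_acc) would yield 1.0: both rows are predicted correctly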



# Initialize all variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
  sess.run(init)
  step = 1
  # Train until step * batch_size reaches training_iters (i.e. 200000 samples)
  while step * batch_size < training_iters:
    batch_x, batch_y = mnist.train.next_batch(batch_size)


    sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                   keep_prob: dropout})

    if step % display_step == 0:
      # Compute and report the loss and accuracy on the current batch
      loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                        y: batch_y,
                                                        keep_prob: 1.})
      print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
            "{:.6f}".format(loss) + ", Training Accuracy= " + \
            "{:.5f}".format(acc))
    step += 1
  print("Optimization Finished!")

  # Evaluate accuracy on the first 256 test images
  print("Testing Accuracy:", \
     sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                   y: mnist.test.labels[:256],
                                   keep_prob: 1.}))
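# The 256-image slice keeps the feed small. A sketch for scoring the full test
# set in chunks (this would need to sit inside the same `with tf.Session()` block):
#
#   total, n = 0.0, mnist.test.images.shape[0]
#   for i in range(0, n, 256):
#     chunk = len(mnist.test.images[i:i+256])
#     total += chunk * sess.run(accuracy, feed_dict={x: mnist.test.images[i:i+256],
#                                                    y: mnist.test.labels[i:i+256],
#                                                    keep_prob: 1.})
#   print("Full Testing Accuracy:", total / n)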

Reposted from blog.csdn.net/wang263334857/article/details/89067764