import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Fully connected layer: return activation(inputs @ W + b).

    Args:
        inputs: 2-D float tensor of shape [batch, in_size].
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional elementwise activation (e.g. tf.nn.relu);
            None means linear output.

    Bug fix: the original computed Wx_plus_b = tf.nn.softmax(matmul + bias),
    baking a softmax into EVERY layer regardless of activation_function
    (hidden layer became softmax->relu; the "linear" output was silently
    softmax). The pre-activation is now just matmul + bias.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    wx_plus_b = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return wx_plus_b
    return activation_function(wx_plus_b)


# Load MNIST with one-hot labels (downloads to ./MNIST_data on first run).
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28x28 images and one-hot digit labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Hidden ReLU layer, then an explicit softmax output layer (the softmax is
# now requested here instead of being hidden inside add_layer).
l1 = add_layer(x, 784, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 10, activation_function=tf.nn.softmax)

# Quadratic (MSE) cost, as in the original tutorial.
loss = tf.reduce_mean(tf.square(y - prediction))
# Gradient descent. The original rate of 1 was only survivable because the
# hidden softmax crushed the gradients; use a conventional rate now.
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    # 21 epochs (the scraped source said 21000 — ~11.5M steps — which is a
    # corrupted constant; the tutorial this derives from runs 21 epochs).
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + ",Testing Accuracy" + str(acc))
# TensorFlow MNIST handwritten-digit recognition.
# Adapted from blog.csdn.net/qq_39622065/article/details/80056959