Softmax regression (simple)

#Import TensorFlow
import tensorflow as tf
#Import the MNIST tutorial data module
from tensorflow.examples.tutorials.mnist import input_data
#Download/read the MNIST dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
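#(Optional) A quick sanity check of the dataset shapes, not in the original
#post: images come flattened to 784 floats, and with one_hot=True the labels
#are 10-dimensional one-hot vectors.
print(mnist.train.images.shape)  #(55000, 784)
print(mnist.train.labels.shape)  #(55000, 10)
print(mnist.test.images.shape)   #(10000, 784)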

#Create a placeholder for the images to be classified
x = tf.placeholder(tf.float32, [None, 784])

print(x)
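#(Optional) Why 784: each row is a flattened 28x28 grayscale image; reshaping
#one row (a hypothetical check, not in the original post) recovers the pixel grid.
import numpy as np
print(mnist.train.images[0].reshape(28, 28).shape)  #(28, 28)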

#W holds the Softmax weights, mapping a 784-dimensional input to a 10-dimensional output
W = tf.Variable(tf.zeros([784, 10]))  #trainable parameters are declared with tf.Variable
print(W)

#b is the other Softmax parameter, usually called the "bias"
b = tf.Variable(tf.zeros([10]))
print(b)
#y is the model's output; it depends on x, W, and b
y = tf.nn.softmax(tf.matmul(x, W) + b)
print(tf.matmul(x, W))
print(y)
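#(Optional) What tf.nn.softmax computes, sketched in plain NumPy with made-up
#logits: exp(z_i) / sum_j exp(z_j), which turns any vector into a probability
#distribution.
import numpy as np
logits = np.array([2.0, 1.0, 0.1])
probs = np.exp(logits) / np.sum(np.exp(logits))
print(probs)        #roughly [0.659 0.242 0.099]
print(probs.sum())  #1.0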

#y_ holds the true image labels, also represented by a placeholder
y_ = tf.placeholder(tf.float32, [None, 10])
print(y_)

#Build the cross-entropy loss from y and y_
#(sum over the classes for each example, then average over the batch)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
print(cross_entropy)
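#(Optional) Cross-entropy on a single example, sketched with made-up numbers:
#H(y_, y) = -sum_i y_[i] * log(y[i]); with a one-hot label this reduces to
#-log(probability the model assigns to the true class).
import numpy as np
y_true = np.array([0.0, 1.0, 0.0])  #one-hot label: class 1
y_pred = np.array([0.2, 0.7, 0.1])  #hypothetical model output
print(-np.sum(y_true * np.log(y_pred)))  #-log(0.7), about 0.357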

#Optimize with gradient descent; 0.01 is the optimizer's learning rate
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
print(train_step)
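#(Optional) What minimize() does under the hood for this particular model,
#sketched by hand in NumPy. A sketch assuming the mean per-example
#cross-entropy loss above; the function name and arguments are hypothetical.
import numpy as np
def manual_gradient_step(W_val, b_val, xs, ys, lr=0.01):
    logits = xs.dot(W_val) + b_val
    exp = np.exp(logits - logits.max(axis=1, keepdims=True))  #numerically stable softmax
    probs = exp / exp.sum(axis=1, keepdims=True)
    grad_W = xs.T.dot(probs - ys) / len(xs)  #dL/dW for softmax + cross-entropy
    grad_b = (probs - ys).mean(axis=0)       #dL/db
    return W_val - lr * grad_W, b_val - lr * grad_b  #W <- W - lr*grad, likewise b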

#Create a session; optimization steps can only run, and variables only live, inside a Session
sess = tf.InteractiveSession()
print(sess)
#All variables must be initialized (memory allocated) before anything can run
tf.global_variables_initializer().run()

#Start training: 1,000 steps of gradient descent
for step in range(1000):
    #batch_xs holds a batch of images (fed to x), batch_ys the matching labels (fed to y_)
    batch_xs, batch_ys = mnist.train.next_batch(100)

    #Feed the batch into the placeholders and run one optimization step
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
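    #(Optional) A hypothetical addition, not in the original post: print the
    #training loss every 100 steps to watch it decrease.
    if step % 100 == 0:
        print(step, sess.run(cross_entropy, feed_dict={x: batch_xs, y_: batch_ys}))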


#Compare the predicted classes with the true labels
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
print(correct_prediction)
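#(Optional) How the comparison works, with hypothetical numbers: argmax picks
#the index of the largest entry, i.e. the predicted (or true) class.
import numpy as np
print(np.argmax([0.1, 0.05, 0.8, 0.05]))  #2 -- the predicted class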

#Compute the prediction accuracy; these are all Tensors
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy)
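#(Optional) Why cast-then-mean gives accuracy, with hypothetical values:
#booleans become 0.0/1.0 floats, and their mean is the fraction correct.
import numpy as np
print(np.array([True, False, True, True]).astype(np.float32).mean())  #0.75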

#Evaluate the model's accuracy on the test set
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
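On the test set, this simple model typically reaches an accuracy of around 92%, the usual figure for plain softmax regression on MNIST.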

Reposted from blog.csdn.net/weixin_42694291/article/details/81045755