# 《深度学习之TensorFlow:入门、原理与进阶实战》的第一个例子(已适当简化):动态演示训练过程中模型不断逼近函数 y=2x 的过程。
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Training data: 100 evenly spaced points in [-1, 1] on the line y = 2x,
# perturbed by Gaussian noise with standard deviation 0.2.
train_X = np.linspace(-1, 1, 100)
noise = np.random.randn(*train_X.shape) * 0.2
train_Y = train_X * 2 + noise
# Scatter-plot the raw samples before training starts.
plt.grid()
plt.plot(train_X, train_Y, 'ro', label='Original data')
# Build the computation graph (TF1 static-graph style).
# Placeholders for one feature and one target value per feed.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Trainable parameters: weight drawn from a standard normal, bias at zero.
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")
# Forward pass: linear model z = X * W + b
# (the * operator dispatches to tf.multiply for tensors).
z = X * W + b
# Loss: mean squared error between targets and predictions.
cost = tf.reduce_mean(tf.square(Y - z))
# Plain gradient descent on the loss.
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Op that initializes all variables; run once per session.
init = tf.global_variables_initializer()
training_epochs = 100
# Launch the graph: train for `training_epochs` epochs of per-sample SGD,
# redrawing the fitted line after each epoch so the plot animates the
# convergence toward y = 2x.
with tf.Session() as sess:
    sess.run(init)
    # Per-epoch loss history, kept for optional post-hoc inspection.
    plotdata = {"epoch": [], "loss": []}
    # One legend is enough; calling legend() inside the loop rebuilt it
    # every epoch for no benefit.
    plt.legend()
    for epoch in range(training_epochs):
        # Stochastic gradient descent: one optimizer step per (x, y) sample.
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Mean squared error over the whole training set with the current W, b.
        loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
        print("Epoch:", epoch + 1, "Cost:", loss, "W=", sess.run(W), "b=", sess.run(b))
        plotdata["epoch"].append(epoch)
        plotdata["loss"].append(loss)
        # Draw the current fitted line; z depends only on X, so Y need not be fed.
        plt.plot(train_X, sess.run(z, feed_dict={X: train_X}))
        # float() unwraps the shape-[1] arrays: formatting an ndarray with %f
        # is deprecated and raises TypeError on NumPy >= 1.25.
        plt.title("W:%f,b:%f" % (float(sess.run(W)), float(sess.run(b))))
        plt.pause(0.1)
    print("Finished")
    plt.show()
# 该代码请在IDE中运行,Jupyter 中无法显示动态图。