Model
This example uses a linear regression model:

y = Wx + b

The code below is written against the TensorFlow 1.x API (tf.Session, tf.train.GradientDescentOptimizer): it generates noisy sample points around a known line, then learns W and b by gradient descent.
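Training minimizes the mean squared error between the model's prediction and the data, the same quantity computed by tf.reduce_mean(tf.square(y - y_data)) below. Written out as a formula (a standard MSE definition, with N the number of sample points):

\[
L(W, b) = \frac{1}{N} \sum_{i=1}^{N} \left( W x_i + b - y_i \right)^2
\]

Gradient descent repeatedly nudges W and b against the gradient of L until the loss stops decreasing.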
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Generate 1000 sample points scattered around the line y = 0.1x + 0.3
num_points = 1000
vectors_set = []
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]
# Visualize the generated sample points
plt.scatter(x_data, y_data, c='r')
plt.show()
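Before training, it can be useful to check what answer to expect. Since the data was generated around y = 0.1x + 0.3, an ordinary least-squares fit should recover roughly those values; a minimal sanity-check sketch using np.polyfit (not part of the original example):

# Degree-1 least-squares fit: np.polyfit returns [slope, intercept].
# With the data generated above, these should come out near 0.1 and 0.3.
slope, intercept = np.polyfit(x_data, y_data, 1)
print("least-squares slope =", slope, "intercept =", intercept)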
# Initialize W uniformly in [-1, 1) and b to zero, then build the prediction
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
b = tf.Variable(tf.zeros([1]), name='b')
y = W * x_data + b
# Use the mean squared error between the prediction y and the actual y_data as the loss
loss = tf.reduce_mean(tf.square(y - y_data), name='loss')
# Optimize the parameters with gradient descent, learning rate 0.5
optimizer = tf.train.GradientDescentOptimizer(0.5)
# Training is simply the repeated minimization of this loss
train = optimizer.minimize(loss, name='train')
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Print the initial (random) parameters and loss before training
print("W=", sess.run(W), "b=", sess.run(b), "loss=", sess.run(loss))
# Run 20 gradient-descent steps, printing W, b, and the loss after each one
for step in range(20):
    sess.run(train)
    print("W=", sess.run(W), "b=", sess.run(b), "loss=", sess.run(loss))
print("W=", sess.run(W), "b=", sess.run(b))