# -*- coding: utf-8 -*-
"""
Created on Sun May 27 14:02:07 2018
@author:被遗弃的 庸才
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
activation_function=tf.nn.relu
n_hidden_units=50
n_layers=8
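# The script builds the same fully connected net twice, with and without
# batch normalization; the loop at the bottom trains and plots the
# batch-normalized one on noisy sin(x) data.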
def built_net(xs,ys,norm):
    def add_layer(inputs,in_size,out_size,activation_function=None,norm=False):
        Weights=tf.Variable(tf.random_normal([in_size,out_size],mean=0.,stddev=1.))
        biases=tf.Variable(tf.zeros([1,out_size])+0.1)
        Wx_plus_b=tf.matmul(inputs,Weights)+biases
        Wx_plus_b=tf.nn.dropout(Wx_plus_b,keep_prob)
        if norm:  # batch-normalize the pre-activation output
            # mean and variance of this batch, per output unit
            fc_mean,fc_var=tf.nn.moments(Wx_plus_b,axes=[0])
            scale=tf.Variable(tf.ones([out_size]))
            shift=tf.Variable(tf.zeros([out_size]))
            epsilon=0.001
            Wx_plus_b=tf.nn.batch_normalization(Wx_plus_b,fc_mean,fc_var,shift,scale,epsilon)
            # equivalent to these two steps:
            # Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + epsilon)
            # Wx_plus_b = Wx_plus_b * scale + shift  # scale and shift are trained
        if activation_function is None:
            outputs=Wx_plus_b
        else:
            outputs=activation_function(Wx_plus_b)
        return outputs
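    # Note: this hand-rolled batch norm uses only the current batch's
    # statistics; a production version would also keep moving averages of
    # mean/variance for use at inference time (e.g. via
    # tf.layers.batch_normalization with a training flag).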
    # network input
    layers_inputs=[xs]
    if norm:  # normalize the raw inputs as well
        # mean and variance of the input batch
        fc_mean,fc_var=tf.nn.moments(xs,axes=[0])
        scale=tf.Variable(tf.ones([1]))
        shift=tf.Variable(tf.zeros([1]))
        epsilon=0.001
        xs=tf.nn.batch_normalization(xs,fc_mean,fc_var,shift,scale,epsilon)
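    # Normalizing the raw inputs the same way keeps the first layer's
    # pre-activations in a range comparable to the later, normalized layers.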
    for l_n in range(n_layers):
        layers_input=layers_inputs[l_n]  # output of the previous layer
        if l_n==0:
            in_size=1  # input dimension of the first layer
        else:
            in_size=n_hidden_units
        output=add_layer(layers_input,in_size,n_hidden_units,activation_function,norm)
        layers_inputs.append(output)
    prediction=add_layer(layers_inputs[-1],n_hidden_units,1,activation_function=None)
    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),axis=1))
    train_op=tf.train.AdamOptimizer(0.001).minimize(loss)
    return [train_op,loss,layers_inputs,prediction]
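# A quick NumPy sanity check (a minimal sketch, separate from the TF graph)
# of the arithmetic above: with scale=1 and shift=0, batch normalization
# maps each feature column to roughly zero mean and unit variance.
_x=np.random.normal(2.0,3.0,(500,50))
_x_bn=(_x-_x.mean(axis=0))/np.sqrt(_x.var(axis=0)+0.001)
assert abs(_x_bn.mean())<1e-6 and abs(_x_bn.var()-1.0)<1e-2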
x_data=np.linspace(-7,10,500)[:,np.newaxis]  # 500 points from -7 to 10, as a column vector
noise=np.random.normal(0,0.8,x_data.shape)
y_data=np.sin(x_data)-0.5+noise
# TF placeholders
keep_prob=tf.placeholder(tf.float32)
xs=tf.placeholder(tf.float32,[None,1])
ys=tf.placeholder(tf.float32,[None,1])
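# keep_prob is fed as 1 below, which disables dropout for this demo;
# feed a value below 1 to also see dropout regularization in action.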
train_op,loss,layers_inputs,prediction=built_net(xs,ys,norm=False)
train_op_norm,loss_norm,layers_inputs_norm,prediction_norm=built_net(xs,ys,norm=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    ax.scatter(x_data,y_data)
    plt.ion()
    plt.show()
    for i in range(1000):
        try:
            lines[0].remove()  # clear the fit line from the previous frame
        except NameError:
            pass  # no line has been drawn yet on the first iteration
        sess.run(train_op_norm,feed_dict={xs:x_data,ys:y_data,keep_prob:1})
        #print(sess.run(loss_norm,feed_dict={xs:x_data,ys:y_data,keep_prob:1}))
        prediction_value=sess.run(prediction_norm,feed_dict={xs:x_data,keep_prob:1})
        lines=ax.plot(x_data,prediction_value,'r-',lw=5)
        plt.pause(0.1)
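# Keep the final figure open once training finishes (not in the original;
# without this the window closes as soon as the script exits).
plt.ioff()
plt.show()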
"""
Created on Sun May 27 14:02:07 2018
@author:被遗弃的 庸才
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
activation_function=tf.nn.relu
n_hindden_units=50
n_layers=8
def built_net(xs,ys,norm):
def add_layer(inputs,in_size,out_size,activation_function=None,norm=False):
Weights=tf.Variable(tf.random_normal([in_size,out_size],mean=0.,stddev=1.))
biases=tf.Variable(tf.zeros([1, out_size]) +0.1)
Wx_plus_b=tf.matmul(inputs,Weights)+biases
Wx_plus_b=tf.nn.dropout(Wx_plus_b,keep_prob)
if norm:#这个是对w和b进行处理
fc_mean,fc_var=tf.nn.moments(Wx_plus_b, axes=[0])#得到这批数据的均值和方差
scale=tf.Variable(tf.ones([out_size]))
shift=tf.Variable(tf.zeros([out_size]))
epsilon=0.001
Wx_plus_b=tf.nn.batch_normalization(Wx_plus_b,fc_mean,fc_var,shift,scale,epsilon)
# similar with this two steps:这个和上面达到的效果是一样的
# Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + 0.001)
# Wx_plus_b = Wx_plus_b * scale + shift后面的两个参数会自动的训练
if activation_function is None:
outputs=Wx_plus_b
else:
outputs=activation_function(Wx_plus_b)
return outputs
#这里是输入
layers_inputs=[xs]#
if norm:#这个是对w和b进行处理
fc_mean,fc_var=tf.nn.moments(xs, axes=[0])#得到这批数据的均值和方差
scale=tf.Variable(tf.ones([1]))
shift=tf.Variable(tf.zeros([1]))
epsilon=0.001
xs=tf.nn.batch_normalization(xs,fc_mean,fc_var,shift,scale,epsilon)
for l_n in range(n_layers):
layers_input=layers_inputs[l_n]#这个是在之后能够传入值
if l_n==0:
in_size=1#输入的大小
else:
in_size=n_hindden_units#输入的大小
output=add_layer(layers_input,in_size,n_hindden_units,activation_function,norm)
layers_inputs.append(output)
prediction=add_layer(layers_inputs[-1],n_hindden_units,1,activation_function=None)
loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))
train_op=tf.train.AdamOptimizer(0.001).minimize(loss)
return [train_op,loss,layers_inputs,prediction]
x_data=np.linspace(-7,10,500)[:,np.newaxis]#-7到10 一共500个,的列矩阵
noise=np.random.normal(0,0.8,x_data.shape)
y_data=np.sin(x_data)-0.5+noise
#tf的参数设置中
keep_prob=tf.placeholder(tf.float32)
xs=tf.placeholder(tf.float32,[None,1])
ys=tf.placeholder(tf.float32,[None,1])
train_op,loss,layers_inputs,prediction=built_net(xs,ys,norm=False)
train_op_norm,loss_norm,layers_inputs_norm,prediction_nrom=built_net(xs,ys,norm=True)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.scatter(x_data,y_data)
plt.ion()
plt.show()
for i in range(1000):
try:
ax.lines.remove(lines[0])#去除第一条
pass
except Exception:
pass
sess.run(train_op_norm,feed_dict={xs:x_data,ys:y_data,keep_prob:1})
#print(sess.run(loss_norm,feed_dict={xs:x_data,ys:y_data}))
prediction=sess.run(prediction_nrom,feed_dict={xs:x_data,keep_prob:1})
lines=ax.plot(x_data,prediction,'r-',lw=5)
plt.pause(0.1)