[TensorFlow Study Notes] 5 - Building a Neural Network




Code

import tensorflow as tf 
import matplotlib.pyplot as plt 
import numpy as np 

'''
Add layer
Define a function add_layer() that adds one layer to the network. It takes
four parameters: the inputs, the input size, the output size, and the
activation function, which defaults to None.
'''

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix drawn from a normal distribution
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # an all-zero initial value is not recommended for biases
    Wx_plus_b = tf.matmul(inputs, Weights) + biases

    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

x_data = np.linspace(-1,1,300)[:, np.newaxis]

'''
np.newaxis inserts a new dimension. Depending on where it is placed, the
dimension is added on the row side or the column side: an array of shape
(10,) becomes a (1, 10) two-dimensional array when the axis is added in
front, and a (10, 1) two-dimensional array when it is added behind.
See the examples below:

In [1]: np.linspace(1, 10, 10)
Out[1]: array([  1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.,  10.])

In [2]: np.linspace(1, 10, 10)[np.newaxis, :]
Out[2]: array([[  1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.,  10.]])

In [3]: np.linspace(1, 10, 10)[:, np.newaxis]
Out[3]:
array([[  1.],
       [  2.],
       [  3.],
       [  4.],
       [  5.],
       [  6.],
       [  7.],
       [  8.],
       [  9.],
       [ 10.]])

In [4]: np.linspace(1, 10, 10).shape
Out[4]: (10,)

In [5]: np.linspace(1, 10, 10)[np.newaxis, :].shape
Out[5]: (1, 10)

In [6]: np.linspace(1, 10, 10)[:, np.newaxis].shape
Out[6]: (10, 1)
'''

noise = np.random.normal(0, 0.05, x_data.shape) # add Gaussian noise
y_data = np.square(x_data) - 0.5 + noise # y = x^2 - 0.5 + noise

# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None,1])
ys = tf.placeholder(tf.float32, [None,1])
'''
Placeholders define the inputs the network will receive. tf.placeholder()
creates a placeholder; None means any number of samples may be fed in, and
since each sample has a single feature the second dimension is 1.
'''
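# Because the first dimension is None, a feed of any batch size is accepted.
# Illustrative only (a hypothetical call, once the graph and session below exist):
# sess.run(prediction, feed_dict={xs: [[0.5]]})  # one sample, one feature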
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu) # hidden layer
prediction = add_layer(l1, 10, 1, activation_function=None) # output layer

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
'''
tf.reduce_*(
    input_tensor,           # the tensor to reduce
    axis=None,              # which dimension(s) to reduce over
    keep_dims=False,        # whether to keep the reduced dimensions
    name=None,
    reduction_indices=None  # deprecated alias of axis, kept for compatibility
)
'''
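
# For reference: reduction_indices is the deprecated TF 1.x alias of axis,
# so the loss above can equivalently be written as
# loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))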

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for i in range(1000):
    # training
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # to see the step improvement
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))

Result

1.09957
0.00971825
0.00745628
0.00676008
0.00616613
0.00563508
0.00525119
0.004969
0.00473265
0.00450678
0.00431545
0.00412944
0.00399298
0.00388178
0.00376811
0.00367137
0.00359963
0.00353908
0.00348795
0.00344684
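
matplotlib is imported at the top of the script but never used. Below is a minimal sketch of how the fit could be visualized after the training loop; the variable name prediction_value is illustrative, not from the original post:

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)  # the noisy training samples
prediction_value = sess.run(prediction, feed_dict={xs: x_data})  # network output for every x
ax.plot(x_data, prediction_value, 'r-', lw=3)  # fitted curve drawn in red
plt.show()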
