from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os

# Suppress TensorFlow C++ INFO and WARNING log messages (keep only errors).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Build one fully-connected layer of the network.
def add_layer(inputs, size_inputs, size_outputs, act_fun=None):
    """Create a fully-connected layer and return its output tensor.

    Args:
        inputs: input tensor of shape (batch, size_inputs).
        size_inputs: number of input features.
        size_outputs: number of output units.
        act_fun: optional activation function applied to Wx + b;
            when None the raw linear output is returned.

    Returns:
        The layer's output tensor of shape (batch, size_outputs).
    """
    Weight = tf.Variable(tf.random_normal([size_inputs, size_outputs]))
    # Small positive bias init (0.1) avoids dead units at the start.
    biases = tf.Variable(tf.zeros([1, size_outputs]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weight) + biases
    # Use identity-comparison with None (PEP 8); `== None` can invoke
    # overloaded equality and is never the right check for "no activation".
    if act_fun is None:
        output = Wx_plus_b
    else:
        output = act_fun(Wx_plus_b)
    return output
# Compute classification accuracy of the global `prediction` op.
def compute_accuracy(v_xs, v_ys):
    """Return the fraction of samples in (v_xs, v_ys) classified correctly.

    Args:
        v_xs: input images, shape (n, 784).
        v_ys: one-hot labels, shape (n, 10).

    Returns:
        Accuracy as a Python float in [0, 1].
    """
    global prediction
    # One forward pass to get the softmax outputs as a NumPy array.
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # Compare predicted vs. true class indices in NumPy. The original built
    # fresh tf.equal/tf.reduce_mean ops on every call, which grows the TF1
    # graph without bound and makes each evaluation slower than the last.
    correct = np.argmax(y_pre, axis=1) == np.argmax(v_ys, axis=1)
    return float(np.mean(correct))
# Load the MNIST dataset (downloaded on first run); labels are one-hot encoded.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Placeholders: flattened 28x28 images (784 values) and 10-class one-hot labels.
xs = tf.placeholder(tf.float32, [None, 784], name='x_in')
ys = tf.placeholder(tf.float32, [None, 10], name='y_in')
# Single softmax layer: 784 inputs -> 10 class probabilities.
prediction = add_layer(xs, 784, 10, act_fun=tf.nn.softmax)
# Cross-entropy loss averaged over the batch.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
# Gradient descent with learning rate 0.5 (too large a rate can produce NaN).
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Saver used to checkpoint the trained variables.
saver = tf.train.Saver()
# Initialize all variables in the graph.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Train for 5,000 mini-batch steps of 50 samples each.
for i in range(5000):
    batch_xs, batch_ys = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    # Report test-set accuracy every 50 steps.
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
# Save the trained model checkpoint.
# (NOTE: "DigitalRrecognition" is a typo, kept as-is because the restore
# code below and the second listing both use this exact path.)
saver.save(sess, "./model/DigitalRrecognition/DigitalRrecognition.ckpt")
# Path of the test image to classify.
root1 = './Image/t1.png'
# Restore the checkpoint just saved (demonstrates loading; redundant here
# since this session already holds the trained weights).
saver.restore(sess, "./model/DigitalRrecognition/DigitalRrecognition.ckpt")
# Convert the image to grayscale ('L') and resize to 28x28 to match MNIST.
demo = Image.open(root1).convert('L').resize([28, 28])
# Flatten the 28x28 grayscale matrix into shape (1, 28*28) = (1, 784).
demoArray = np.array(demo).reshape(1, 28 * 28)
# Invert and scale pixel values to [0, 1]: MNIST digits are white on black,
# while a typical scanned image is black on white.
demoArray = (255 - demoArray) / 255
# Forward pass; result is a (1, 10) row of class probabilities.
result = sess.run(prediction, feed_dict={xs: demoArray})
# The index of the largest probability is the predicted digit.
print(result)
sess.close()
t1.png:
输出结果:
0.0944
.
.
0.9091
0.9035
[[6.3137204e-12 5.0958712e-12 7.9769625e-06 3.3179897e-01 5.3250981e-16
6.6806221e-01 1.5467061e-09 1.1380750e-20 1.3088244e-04 1.4278804e-08]]
输出的矩阵中,从0开始数,第5个数6.6806221e-01最大,所以结果是5
但是总不能每次预测数字都要训练一个神经网络吧,我们只要把训练好的神经网络保存起来,需要调用的时候直接调用就好了。
上面的程序中
#将模型保存为./model/DigitalRrecognition/DigitalRrecognition.ckpt
saver.save(sess, "./model/DigitalRrecognition/DigitalRrecognition.ckpt")
这里就保存了训练好的神经网络,调用的时候需要声明神经网络中的变量。
调用实例如下:
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os

# Suppress TensorFlow C++ INFO and WARNING log messages (keep only errors).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Build one fully-connected layer of the network.
def add_layer(inputs, size_inputs, size_outputs, act_fun=None):
    """Create a fully-connected layer and return its output tensor.

    Args:
        inputs: input tensor of shape (batch, size_inputs).
        size_inputs: number of input features.
        size_outputs: number of output units.
        act_fun: optional activation function applied to Wx + b;
            when None the raw linear output is returned.

    Returns:
        The layer's output tensor of shape (batch, size_outputs).
    """
    Weight = tf.Variable(tf.random_normal([size_inputs, size_outputs]))
    # Small positive bias init (0.1) avoids dead units at the start.
    biases = tf.Variable(tf.zeros([1, size_outputs]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weight) + biases
    # Use identity-comparison with None (PEP 8); `== None` can invoke
    # overloaded equality and is never the right check for "no activation".
    if act_fun is None:
        output = Wx_plus_b
    else:
        output = act_fun(Wx_plus_b)
    return output
# Compute classification accuracy of the global `prediction` op.
def compute_accuracy(v_xs, v_ys):
    """Return the fraction of samples in (v_xs, v_ys) classified correctly.

    Args:
        v_xs: input images, shape (n, 784).
        v_ys: one-hot labels, shape (n, 10).

    Returns:
        Accuracy as a Python float in [0, 1].
    """
    global prediction
    # One forward pass to get the softmax outputs as a NumPy array.
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # Compare predicted vs. true class indices in NumPy. The original built
    # fresh tf.equal/tf.reduce_mean ops on every call, which grows the TF1
    # graph without bound and makes each evaluation slower than the last.
    correct = np.argmax(y_pre, axis=1) == np.argmax(v_ys, axis=1)
    return float(np.mean(correct))
# Load the MNIST dataset with one-hot labels.
# NOTE(review): the dataset is not actually used in this restore-only
# example — only the graph structure and the checkpoint are needed.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Placeholders must be re-declared with the same shapes as in the
# training script so the restored variables match the graph.
xs = tf.placeholder(tf.float32, [None, 784], name='x_in')
ys = tf.placeholder(tf.float32, [None, 10], name='y_in')
# Same single softmax layer as in the training script.
prediction = add_layer(xs, 784, 10, act_fun=tf.nn.softmax)
# Loss and optimizer are rebuilt for graph parity; they are not run here.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Saver used to restore the previously trained variables.
saver = tf.train.Saver()
# Path of the test image to classify.
root = './Image/t1.png'
with tf.Session() as sess:
    # Restore the trained variable values from the saved checkpoint.
    saver.restore(sess, "./model/DigitalRrecognition/DigitalRrecognition.ckpt")
    # Grayscale + resize to 28x28, then flatten to (1, 784) and invert/scale
    # pixel values to [0, 1] to match the MNIST input convention.
    demo = Image.open(root).convert('L').resize([28, 28])
    demoArray = np.array(demo).reshape(1, 28 * 28)
    demoArray = (255 - demoArray) / 255
    # Forward pass; the index of the largest probability is the predicted digit.
    result = sess.run(prediction, feed_dict={xs: demoArray})
    print(result)