The principle behind deep autoencoders was covered in the previous post; this time we look at a Python implementation, an autoencoder trained on MNIST.
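Concretely, the network squeezes each 784-pixel image down to a 128-dimensional code through two fully connected layers (784 → 256 → 128), then mirrors that path (128 → 256 → 784) to reconstruct the input. Training minimizes the mean squared reconstruction error

    cost = mean((x - x̂)²)

where x is the input image and x̂ is the decoder's output; this corresponds directly to the `tf.reduce_mean(tf.pow(y_true - y_pred, 2))` line in the code below.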
```python
from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Training parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1
examples_to_show = 10

# Network parameters: 784 -> 256 -> 128 -> 256 -> 784
n_hidden_1 = 256  # units in the first hidden layer
n_hidden_2 = 128  # units in the second hidden layer (the code)
n_input = 784     # MNIST images are 28x28 = 784 pixels

X = tf.placeholder("float", [None, n_input])

weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}

def encoder(x):
    # Two sigmoid layers compress the input down to the 128-dim code
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
                                   biases['encoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                                   biases['encoder_b2']))
    return layer_2

def decoder(x):
    # Mirror of the encoder: expand the code back to 784 pixels
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
                                   biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                                   biases['decoder_b2']))
    return layer_2

encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

# The reconstruction is compared against the input itself
y_pred = decoder_op
y_true = X

cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    total_batch = int(mnist.train.num_examples / batch_size)
    for epoch in range(training_epochs):
        for i in range(total_batch):
            # The labels batch_ys are never fed: training is unsupervised
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")

    # Reconstruct a few test images and show them next to the originals
    encode_decode = sess.run(
        y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    f.show()
    plt.draw()
    plt.waitforbuttonpress()
```

After 20 epochs of training, the cost printed at each epoch has clearly decreased:

```
Epoch: 0001 cost= 0.196800113
Epoch: 0002 cost= 0.169325382
Epoch: 0003 cost= 0.155912638
Epoch: 0004 cost= 0.148683071
Epoch: 0005 cost= 0.142708376
Epoch: 0006 cost= 0.136180028
Epoch: 0007 cost= 0.130748138
Epoch: 0008 cost= 0.125925466
Epoch: 0009 cost= 0.122442275
Epoch: 0010 cost= 0.117254384
Epoch: 0011 cost= 0.114797853
Epoch: 0012 cost= 0.112438530
Epoch: 0013 cost= 0.109801762
Epoch: 0014 cost= 0.107820347
Epoch: 0015 cost= 0.105974235
Epoch: 0016 cost= 0.105912112
Epoch: 0017 cost= 0.104165390
Epoch: 0018 cost= 0.100365378
Epoch: 0019 cost= 0.100399643
Epoch: 0020 cost= 0.099709332
Optimization Finished!
```

Looking at the final images, the reconstructions produced by the autoencoder are very similar to the original inputs, just with some extra noise added on top. The cost has also dropped below 0.1, and it can be pushed lower still by tuning the hyperparameters.
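If you want to inspect the learned 128-dimensional codes themselves, not just the reconstructions, you can evaluate `encoder_op` on its own. A minimal sketch, assuming it is placed inside the same `with tf.Session() as sess:` block after training finishes:

```python
# Run only the encoder half of the graph to get the compressed codes.
# Assumes this runs inside the training Session above, so `sess`,
# `encoder_op`, `X`, `mnist`, and `examples_to_show` are in scope.
codes = sess.run(encoder_op,
                 feed_dict={X: mnist.test.images[:examples_to_show]})
print(codes.shape)  # (10, 128): one 128-dimensional code per test image
```

As for tuning, the obvious knobs are `training_epochs`, `learning_rate`, and the layer sizes `n_hidden_1`/`n_hidden_2`; a wider bottleneck reconstructs more faithfully, at the price of less compression.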