tensorflow (3)

'''Chapter 7: Image Data Processing'''
# Convert the MNIST input data to the TFRecord format
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

# Generate an integer (int64) feature
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

# Generate a byte-string feature
def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

mnist = input_data.read_data_sets('D:/BaiduNetdiskDownload/deeplearning/TensorFlow/Data_sets/MNIST_data',dtype=tf.uint8,one_hot=True)
images = mnist.train.images
labels = mnist.train.labels
pixels = images.shape[1]
num_examples = mnist.train.num_examples

# Path for the output TFRecord file
filename = 'D:/BaiduNetdiskDownload/deeplearning/TensorFlow/output.tfrecords'
# Create a writer to write the TFRecord file
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
    # Convert the image matrix to a byte string
    image_raw = images[index].tostring()
    # Convert one example into an Example protocol buffer and fill in all of its information
    example = tf.train.Example(features=tf.train.Features(feature={
        'pixels':_int64_feature(pixels),
        'label': _int64_feature(np.argmax(labels[index])),
        'image_raw': _bytes_feature(image_raw)}))
    
    # Write one Example to the TFRecord file
    writer.write(example.SerializeToString())
writer.close()
Extracting D:/BaiduNetdiskDownload/deeplearning/TensorFlow/Data_sets/MNIST_data\train-images-idx3-ubyte.gz
Extracting D:/BaiduNetdiskDownload/deeplearning/TensorFlow/Data_sets/MNIST_data\train-labels-idx1-ubyte.gz
Extracting D:/BaiduNetdiskDownload/deeplearning/TensorFlow/Data_sets/MNIST_data\t10k-images-idx3-ubyte.gz
Extracting D:/BaiduNetdiskDownload/deeplearning/TensorFlow/Data_sets/MNIST_data\t10k-labels-idx1-ubyte.gz
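
As a quick sanity check, the written file can be iterated over and its records counted. A minimal sketch (tf.python_io.tf_record_iterator is the TF 1.x record reader; the expected count of 55000 assumes the standard train/validation split produced by read_data_sets):

count = 0
for _ in tf.python_io.tf_record_iterator(filename):
    count += 1
print(count)  # should equal num_examples (55000 for mnist.train)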
'''Read data from the TFRecord file'''
import tensorflow as tf
reader = tf.TFRecordReader()
# Create a queue to maintain the list of input files
filename_queue = tf.train.string_input_producer(
    ['D:/BaiduNetdiskDownload/deeplearning/TensorFlow/output.tfrecords'])
# Read one example from the file; the read_up_to function can read multiple examples at once
_,serialized_example = reader.read(filename_queue)
# Parse the example just read; to parse multiple examples at once, use the parse_example function
features = tf.parse_single_example(serialized_example,features={
    'image_raw': tf.FixedLenFeature([],tf.string),
    'pixels': tf.FixedLenFeature([],tf.int64),
    'label': tf.FixedLenFeature([],tf.int64),
})

# tf.decode_raw parses a byte string into the pixel array of the corresponding image
image = tf.decode_raw(features['image_raw'],tf.uint8)
label = tf.cast(features['label'],tf.int32)
pixels = tf.cast(features['pixels'],tf.int32)

sess = tf.Session()
# Start multiple threads to handle the input data
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)

for i in range(1):
    print(sess.run([image,label,pixels]))
[array([0, 0, 0, ..., 0, 0, 0], dtype=uint8), 7, 784]
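
The decoded image comes back as a flat vector of 784 uint8 values, as seen above. A minimal sketch for turning one record back into a viewable 28x28 image and then shutting down the input threads cleanly (reusing the sess, coord, threads, image, and label defined above):

import matplotlib.pyplot as plt
img, lbl = sess.run([image, label])
plt.imshow(img.reshape(28, 28), cmap='gray')
plt.title('label: %d' % lbl)
plt.show()
# Ask the queue-runner threads to stop, then wait for them to finish
coord.request_stop()
coord.join(threads)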
import matplotlib.pyplot as plt

# Read the raw bytes of the image file
image_raw_data = tf.gfile.FastGFile('D:/BaiduNetdiskDownload/deeplearning/TensorFlow/tensorflow-tutorial-master/Deep_Learning_with_TensorFlow/datasets/cat.jpg', 'rb').read()
with tf.Session() as sess:
    # Decode the JPEG image to get the corresponding 3-D matrix
    img_data = tf.image.decode_jpeg(image_raw_data)
    # Convert the 3-D integer array to real numbers (float32 in [0, 1])
    img_data = tf.image.convert_image_dtype(img_data,dtype=tf.float32)
    resized = tf.image.resize_images(img_data,[180,267],method=1)
    # Add a batch dimension to form a 4-D tensor
    batched = tf.expand_dims(resized,0)
    boxes = tf.constant([[[0.05,0.05,0.9,0.7],[0.35,0.47,0.5,0.56]]])
    result = tf.image.draw_bounding_boxes(batched,boxes)
#     print(img_data.eval())
    # Display the image
    plt.imshow(result[0].eval())
    plt.show()
  
    # Save the image (note: encode_jpeg expects a uint8 image, so convert the dtype back first)
#     encoded_image = tf.image.encode_jpeg(tf.image.convert_image_dtype(img_data, dtype=tf.uint8))
#     with tf.gfile.FastGFile('D:/BaiduNetdiskDownload/deeplearning/TensorFlow/output','wb') as f:
#         f.write(encoded_image.eval())
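
Besides resizing and drawing bounding boxes, tf.image also offers random preprocessing ops commonly used for training-time data augmentation. A minimal sketch reusing image_raw_data from above (the max_delta and contrast range are arbitrary illustrative values):

with tf.Session() as sess:
    img_data = tf.image.convert_image_dtype(tf.image.decode_jpeg(image_raw_data), dtype=tf.float32)
    # Flip left-right with probability 0.5
    flipped = tf.image.random_flip_left_right(img_data)
    # Randomly adjust brightness and contrast
    adjusted = tf.image.random_brightness(flipped, max_delta=0.5)
    adjusted = tf.image.random_contrast(adjusted, lower=0.5, upper=1.5)
    # Clip back to [0, 1] for display
    adjusted = tf.clip_by_value(adjusted, 0.0, 1.0)
    plt.imshow(adjusted.eval())
    plt.show()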
'''Queues and multithreading'''
# Initialize a first-in-first-out queue that can hold two 'int32' elements
q = tf.FIFOQueue(2,'int32')
init = q.enqueue_many(([0,10],))
x = q.dequeue()
y = x + 1
q_inc = q.enqueue([y])

with tf.Session() as sess:
    sess.run(init)
    for _ in range(5):
        v,_ = sess.run([x,q_inc])
        print(v)
# Multithreading
import threading
import time

def MyLoop(coord,worker_id):
    while not coord.should_stop():
        if np.random.rand() < 0.1:
            print('Stopping from id: %d\n' % worker_id)
            # Notify the other threads to stop
            coord.request_stop()
        else:
            print('Working on id: %d\n' % worker_id)
        time.sleep(1)

coord = tf.train.Coordinator()
# Create 5 threads
threads = [threading.Thread(target=MyLoop,args=(coord,i)) for i in range(5)]

# Start all threads
for t in threads:
    t.start()
coord.join(threads)
0
10
1
11
2
Working on id: 0
Working on id: 1


Working on id: 2

Working on id: 3

Working on id: 4

Working on id: 1

Stopping from id: 2

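In a real input pipeline, the single examples produced by the queue readers are usually grouped into batches before training. A minimal sketch using tf.train.shuffle_batch on the image and label tensors from the TFRecord-reading code above (batch_size, capacity, and min_after_dequeue are illustrative values):

image.set_shape([784])  # shuffle_batch requires statically known shapes
min_after_dequeue = 1000
batch_size = 32
capacity = min_after_dequeue + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch(
    [image, label], batch_size=batch_size,
    capacity=capacity, min_after_dequeue=min_after_dequeue)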
'''A simple implementation of the LSTM structure'''
lstm_hidden_size = 2
batch_size = 100
# Define an LSTM structure; the variables it uses are created automatically inside this function
lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_hidden_size)

state = lstm.zero_state(batch_size,tf.float32)

# Define the loss
loss = 0

# for i in range(num_steps):
#     if i > 0 :
#         tf.get_variable_scope().reuse_variables()
#     lstm_output,state = lstm(current_input,state)
#     final_output = fully_connected(lstm_output)
#     loss += calc_loss(final_output,expected_output)
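
The commented loop above is only a skeleton: num_steps, current_input, fully_connected, and calc_loss are placeholders. A minimal runnable sketch of the same unrolling pattern, with placeholder inputs and a plain squared-error loss (num_steps, input_size, and the targets are illustrative assumptions):

num_steps = 5
input_size = 4
inputs = tf.placeholder(tf.float32, [batch_size, num_steps, input_size])
targets = tf.placeholder(tf.float32, [batch_size, num_steps, 1])
state = lstm.zero_state(batch_size, tf.float32)
loss = 0
with tf.variable_scope('unrolled_lstm'):
    for i in range(num_steps):
        # Reuse the same variables at every step after the first
        if i > 0:
            tf.get_variable_scope().reuse_variables()
        lstm_output, state = lstm(inputs[:, i, :], state)
        # A simple linear projection from the hidden state to one output value
        w = tf.get_variable('w', [lstm_hidden_size, 1])
        b = tf.get_variable('b', [1])
        final_output = tf.matmul(lstm_output, w) + b
        loss += tf.reduce_mean(tf.square(final_output - targets[:, i, :]))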
'''Using a recurrent neural network to predict the values of sin(x)'''
import tensorflow as tf
import numpy as np
# import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt

hidden_size = 30  # number of hidden units in the LSTM
num_layers = 2    # number of LSTM layers

timesteps = 10   # length of the training sequences for the RNN
training_steps = 10000   # number of training steps
batch_size = 32   # batch size

training_examples = 10000    # number of training examples
testing_examples = 1000    # number of test examples
sample_gap = 0.01          # sampling interval

def generate_data(seq):
    X = []
    Y = []
    for i in range(len(seq) - timesteps):
        X.append([seq[i: i + timesteps]])
        Y.append([seq[i + timesteps]])
    return np.array(X,dtype=np.float32),np.array(Y,dtype=np.float32)

def lstm_model(X,Y,is_training):
    # Use a multi-layer LSTM structure
    cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(hidden_size) for _ in range(num_layers)])
    # Use the TensorFlow interface to connect the stacked LSTM layers into an RNN and compute its forward pass
    outputs,_ = tf.nn.dynamic_rnn(cell,X,dtype=tf.float32)
    # outputs holds the top-layer LSTM output at every step; its shape is [batch_size, time, hidden_size]
    output = outputs[:,-1,:]
    # Add a fully connected layer on top of the LSTM output and compute the loss
    predictions = tf.contrib.layers.fully_connected(output,1,activation_fn=None)
    # Compute the loss and optimization step only during training; at test time, return the predictions directly
    if not is_training:
        return predictions,None,None
    # Compute the loss
    loss = tf.losses.mean_squared_error(labels=Y,predictions=predictions)
    # Create the optimizer and obtain the training op
    train_op = tf.contrib.layers.optimize_loss(loss,tf.train.get_global_step(),
                                               optimizer='Adagrad',learning_rate=0.1)
    return predictions,loss,train_op

def train(sess,train_X,train_Y):
    # Feed the training data to the computation graph as a Dataset
    ds = tf.data.Dataset.from_tensor_slices((train_X,train_Y))
    ds = ds.repeat().shuffle(1000).batch(batch_size)
    X,Y = ds.make_one_shot_iterator().get_next()

    # Call the model to get the predictions, the loss, and the training op
    with tf.variable_scope('model'):
        predictions,loss,train_op = lstm_model(X,Y,True)

    # Initialize variables
    sess.run(tf.global_variables_initializer())
    for i in range(training_steps):
        _,l = sess.run([train_op,loss])
        if i % 100 == 0:
            print('train step: ' + str(i) + ", loss: "+ str(l))

def run_eval(sess,test_X,test_Y):
    # Feed the test data to the computation graph as a Dataset
    ds = tf.data.Dataset.from_tensor_slices((test_X,test_Y))
    ds = ds.batch(1)
    X,Y = ds.make_one_shot_iterator().get_next()

    # Call the model to get the predictions; the real y values are not needed here
    with tf.variable_scope('model',reuse=True):
        prediction,_,_ = lstm_model(X,[0.0],False)
    # Store the predictions in an array
    predictions = []
    labels = []
    for i in range(testing_examples):
        p, l = sess.run([prediction,Y])
        predictions.append(p)
        labels.append(l)

    # Compute RMSE as the evaluation metric
    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions - labels) ** 2).mean(axis=0))
    print('Root Mean Square Error is: %f' % rmse)

    # Plot the predicted sin curve against the real one
    plt.figure()
    plt.plot(predictions,label='predictions')
    plt.plot(labels,label='real_sin')
    plt.legend()
    plt.show()

test_start = (training_examples + timesteps)* sample_gap
test_end = test_start + (testing_examples + timesteps) * sample_gap
train_X,train_Y = generate_data(np.sin(np.linspace(0,test_start,training_examples + timesteps,dtype=np.float32)))
test_X,test_Y = generate_data(np.sin(np.linspace(test_start,test_end,testing_examples + timesteps,dtype=np.float32)))

with tf.Session() as sess:
    # Train the model
    train(sess,train_X,train_Y)
    # Use the trained model to predict on the test data
    run_eval(sess,test_X,test_Y)

    
WARNING:tensorflow:From D:\ruanjianku\Python\anaconda\anaconda\envs\tensorflow\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Use the retry module or similar alternatives.
train step: 0, loss: 0.537163
train step: 100, loss: 0.00581189
train step: 200, loss: 0.00299591
train step: 300, loss: 0.00362103
train step: 400, loss: 0.00159373
train step: 500, loss: 0.00264734
...
train step: 9700, loss: 2.22246e-06
train step: 9800, loss: 2.36514e-06
train step: 9900, loss: 2.02391e-06
Root Mean Square Error is: 0.001656
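
As a quick sanity check on the data pipeline, generate_data can be run on a short sequence to confirm the shapes it produces (a minimal sketch; the sequence length of 20 is arbitrary):

seq = np.sin(np.linspace(0, 1, 20, dtype=np.float32))
X, Y = generate_data(seq)
print(X.shape)  # (10, 1, 10): one [1, timesteps] input per example
print(Y.shape)  # (10, 1): the next value after each window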

Reposted from blog.csdn.net/doulinxi115413/article/details/80700768