11.2 Pytorch
11.2.5 RNN分类姓氏
@Author:By Runsen
数据集下载链接:https://download.pytorch.org/tutorial/data.zip,参考:https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
新建data文件夹,下载数据集,并将其解压缩到当前data文件夹中。
maoli@VM-0-5-ubuntu:~/pytorch/data$ tree
├── eng-fra.txt
└── names
├── Arabic.txt
├── Chinese.txt
├── Czech.txt
├── Dutch.txt
├── English.txt
├── French.txt
├── German.txt
├── Greek.txt
├── Irish.txt
├── Italian.txt
├── Japanese.txt
├── Korean.txt
├── Polish.txt
├── Portuguese.txt
├── Russian.txt
├── Scottish.txt
├── Spanish.txt
└── Vietnamese.txt
在 data/names
目录中包含18个命名为 “[Language].txt” 的文本文件，每个文件包含某一国家的一批姓氏（name）。我们将建立并训练一个基本的字符级RNN对单词进行分类，以最后一步的预测作为输出，即判断出每个姓氏属于哪个国家。
import glob
def findFiles(path):
    """Return all file paths matching the glob pattern *path*."""
    matched = glob.glob(path)
    return matched

# List every per-language surname file under data/names.
print(findFiles('data/names/*.txt'))
####结果如下####
['data/names/Czech.txt', 'data/names/Polish.txt', 'data/names/Greek.txt', 'data/names/Dutch.txt', 'data/names/Irish.txt', 'data/names/English.txt', 'data/names/Chinese.txt', 'data/names/Portuguese.txt', 'data/names/French.txt', 'data/names/Russian.txt', 'data/names/Vietnamese.txt', 'data/names/German.txt', 'data/names/Arabic.txt', 'data/names/Scottish.txt', 'data/names/Japanese.txt', 'data/names/Korean.txt', 'data/names/Italian.txt', 'data/names/Spanish.txt']
在深度学习中，需要将文本数据转化为张量才能参与运算。我们把大小写英文字母共52个字符作为字母表，用 one-hot 方式将每个字母表示成二维张量。
import string
# Alphabet: the 52 ASCII letters (a-z, A-Z). Each name character will be
# one-hot encoded over this alphabet.
all_letters = string.ascii_letters
# Width of the one-hot vector for a single character.
n_letters = len(all_letters)
print(all_letters)
print(n_letters)
####结果如下####
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
52
每个姓氏一行大多是罗马化,我们需要从Unicode转换为ASCII码,构造category_lines
字典,key储存国家名字,value储存姓氏,all_categories列表储存数据集中的所有国家名字。
import unicodedata
def unicodeToAscii(s):
    """Strip accents from *s*, keeping only plain ASCII letters.

    Each character is NFD-decomposed; combining marks (Unicode category
    'Mn') and anything outside ``all_letters`` are dropped.
    """
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) == 'Mn':
            continue
        if ch in all_letters:
            kept.append(ch)
    return ''.join(kept)

# Demo: an accented Polish surname becomes plain ASCII.
print(unicodeToAscii('Ślusàrski'))
####结果如下####
Slusarski
# category_lines: maps language name -> list of surnames in that language.
category_lines = {}
# all_categories: ordered list of language names (list index = class id).
all_categories = []
# Read one names file and return a cleaned (ASCII) surname per line.
def readLines(filename):
    """Read *filename* and return a list of ASCII-normalized names.

    Fixes two defects in the original: the file handle is now closed
    (context manager), and blank lines are skipped — splitting on '\n'
    after a trailing newline otherwise yields an empty "name".
    """
    with open(filename, encoding='utf-8') as f:
        raw_lines = f.read().split('\n')
    return [unicodeToAscii(line) for line in raw_lines if line]
import os

# Build the language -> surnames mapping from every data/names/*.txt file.
for filename in findFiles('data/names/*.txt'):
    # Derive the language name from the file name portably: os.path handles
    # both '/' and '\\' separators, unlike the original filename.split('/').
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    category_lines[category] = readLines(filename)

# Number of output classes (languages).
n_categories = len(all_categories)
print(all_categories[:5])
print(category_lines['Czech'][:10])
print(n_categories)
####结果如下####
['Czech', 'Polish', 'Greek', 'Dutch', 'Irish']
['Abl', 'Adsit', 'Ajdrna', 'Alt', 'Antonowitsch', 'Antonowitz', 'Bacon', 'Ballalatak', 'Ballaltick', 'Bartonova']
18
现在，我们可以将“A”表示成 [1 x 52] 的二维张量，“Albert”这个名字则可以用 [6 x 1 x 52] 的三维张量表示。
import torch
# Look up a character's index in the alphabet ("a" -> 0, ..., "Z" -> 51).
def letterToIndex(letter):
    """Return the position of *letter* in ``all_letters`` (-1 if absent)."""
    position = all_letters.find(letter)
    return position
def letterToTensor(letter):
    """One-hot encode a single character as a [1 x n_letters] tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0, letterToIndex(letter)] = 1
    return one_hot
def lineToTensor(line):
    """One-hot encode a whole name as a <len(line) x 1 x n_letters> tensor."""
    encoded = torch.zeros(len(line), 1, n_letters)
    for position, character in enumerate(line):
        encoded[position, 0, letterToIndex(character)] = 1
    return encoded
# Sanity check: one-hot row for 'A' and the 3-D shape for a 6-letter name.
print(letterToTensor('A'))
print(lineToTensor('Albert').size())
####结果如下####
tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
torch.Size([6, 1, 52])
建立模型,2个Linear线性层,在输入和隐藏状态下运行,输出后是LogSoftmax层。
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
    """Minimal character-level RNN classifier.

    One step consumes a [1 x input_size] one-hot letter together with the
    previous [1 x hidden_size] hidden state; two Linear layers map their
    concatenation to the next hidden state and to log-probabilities over
    the output classes.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers see the concatenated (input, hidden) vector.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        # LogSoftmax pairs with nn.NLLLoss during training.
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """Run one time step; return (log_probs, next_hidden)."""
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.softmax(self.i2o(combined))
        return output, hidden

    def initHidden(self):
        # torch.autograd.Variable is deprecated (merged into Tensor since
        # PyTorch 0.4); a plain zero tensor is the modern equivalent.
        return torch.zeros(1, self.hidden_size)
# Width of the hidden state vector.
n_hidden = 128
# One input unit per letter, one output unit per language.
rnn = RNN(n_letters, n_hidden, n_categories)
我们需要打乱输入的顺序：使用 random 模块从数据集中随机抽取（国家, 姓氏）训练样本。
import random
def randomChoice(l):
    """Return a uniformly random element of the sequence *l*."""
    # random.choice draws from the same underlying _randbelow call as
    # random.randint(0, len(l) - 1), so the result is identical per seed.
    return random.choice(l)
def randomTrainingExample():
    """Sample one (category, name) pair plus the tensors train() consumes."""
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    label = all_categories.index(category)
    category_tensor = torch.tensor([label], dtype=torch.long)
    return category, line, category_tensor, lineToTensor(line)
# Print ten random samples to confirm the data pipeline works.
for i in range(10):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    print('category =', category, '/ line =', line)
####结果如下####
category = Italian / line = Ughi
category = Greek / line = Kanavos
category = German / line = Knochenmus
category = Italian / line = Carracci
category = Arabic / line = Salib
category = Czech / line = Tomasek
category = Korean / line = Yeon
category = German / line = Kruger
category = Arabic / line = Kanaan
category = Italian / line = Traverso
训练模型前，我们先手动执行一次前向传播：传入第一个字母的张量和初始 hidden，返回的 hidden 将作为下一个时间步的输入。
# One manual forward step: feed the first letter of 'Albert' with a fresh
# zero hidden state; next_hidden would feed the following time step.
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input[0], hidden)
print(output)
####结果如下####
tensor([[-2.7340, -3.2297, -3.9142, -2.9320, -3.0394, -2.3856, -2.8909, -3.3923,
-2.8116, -3.0205, -2.5174, -2.6097, -2.9909, -2.9457, -3.2937, -2.1645,
-3.4901, -3.1045]], grad_fn=<LogSoftmaxBackward>)
下面训练模型,损失函数使用LogSoftmax
对应的nn.NLLLoss
,学习率定义0.005,设计categoryFromOutput
函数通过topk
方法计算出all_categories
字典对应的index,从而取出categories分类级别,即国家名。
# NLLLoss expects log-probabilities, matching the model's LogSoftmax output.
criterion = nn.NLLLoss()
# Fixed SGD step size for the manual parameter update in train().
learning_rate = 0.005
def categoryFromOutput(output):
    """Map the network's log-probabilities to (category_name, index)."""
    _, top_index = output.topk(1)
    category_i = top_index[0].item()
    return all_categories[category_i], category_i
def train(category_tensor, line_tensor):
    """Run one training example through the RNN and apply one SGD step.

    Args:
        category_tensor: LongTensor of shape [1] holding the target class.
        line_tensor: one-hot tensor <name_len x 1 x n_letters>.

    Returns:
        (output, loss): the final-step log-probabilities and the loss value.
    """
    hidden = rnn.initHidden()
    # Clear gradients accumulated by the previous example.
    rnn.zero_grad()
    # Feed the name one letter at a time, carrying the hidden state forward.
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    loss = criterion(output, category_tensor)
    loss.backward()
    # Manual SGD: p <- p - lr * grad. The two-argument form
    # p.data.add_(-lr, grad) is deprecated; use the alpha= keyword.
    # Guard against parameters that received no gradient.
    for p in rnn.parameters():
        if p.grad is not None:
            p.data.add_(p.grad.data, alpha=-learning_rate)
    return output, loss.item()
最后,我们需要运行100000次示例。同时将 train
函数返回输出和损失,打印其猜测并绘制损失
import time
import math
n_iters = 100000  # total number of training iterations
print_every = 5000  # log a sample prediction every 5000 iterations
plot_every = 1000  # record the averaged loss every 1000 iterations
# Running loss accumulated between plot points.
current_loss = 0
# Average loss per plot_every window, for the final plot.
all_losses = []
def timeSince(since):
    """Format the wall-clock time elapsed since *since* as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
# Main training loop: sample an example, train on it, and periodically
# log a prediction and record the averaged loss.
start = time.time()
for iter in range(1, n_iters + 1):
    # Draw a random (category, name) training pair.
    category, line, category_tensor, line_tensor = randomTrainingExample()
    # One forward/backward pass plus a manual SGD step.
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss
    # Periodically print a sample prediction and whether it was correct.
    if iter % print_every == 0:
        guess, guess_i = categoryFromOutput(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
    # Record the window-averaged loss for plotting, then reset the window.
    if iter % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
####结果如下####
5000 5% (0m 11s) 2.3777 Nicolai / Italian ✓
10000 10% (0m 22s) 2.9138 Michael / Dutch ✗ (Irish)
15000 15% (0m 35s) 1.6987 Gridchin / Russian ✓
20000 20% (0m 46s) 1.0600 Chung / Chinese ✗ (Vietnamese)
25000 25% (0m 57s) 1.8140 Elston / English ✓
30000 30% (1m 9s) 0.9686 Pappayiorgas / Greek ✓
35000 35% (1m 20s) 2.2312 Grunewald / Irish ✗ (German)
40000 40% (1m 32s) 0.4435 Valikhanov / Russian ✓
45000 45% (1m 43s) 6.6400 Park / Polish ✗ (Korean)
50000 50% (1m 55s) 1.7123 O'Bree / French ✗ (Irish)
55000 55% (2m 7s) 3.1289 Rog / Korean ✗ (Polish)
60000 60% (2m 18s) 0.4017 Jue / Chinese ✓
65000 65% (2m 30s) 0.4370 Koury / Arabic ✓
70000 70% (2m 42s) 1.9272 Buchta / Spanish ✗ (Czech)
75000 75% (2m 53s) 0.5008 Matoke / Japanese ✓
80000 80% (3m 4s) 6.6334 Sak / Korean ✗ (Russian)
85000 85% (3m 16s) 0.9145 Pini / Italian ✓
90000 90% (3m 27s) 1.4701 Fionn / Irish ✓
95000 95% (3m 38s) 2.9261 Vymenets / Dutch ✗ (Russian)
100000 100% (3m 49s) 0.4123 Phan / Vietnamese ✓
下面通过matplotlib绘制all_losses
import matplotlib.pyplot as plt

# Plot the averaged training-loss curve recorded every plot_every steps.
plt.plot(all_losses)
plt.show()
下面定义evaluate
函数预测名字转化的line_tensor
def evaluate(line_tensor):
    """Run a full name tensor through the RNN; return the final log-probs."""
    hidden = rnn.initHidden()
    steps = line_tensor.size()[0]
    for step in range(steps):
        output, hidden = rnn(line_tensor[step], hidden)
    return output
def predict(input_line, n_predictions=3):
    """Print and return the top *n_predictions* [score, language] guesses."""
    print('\n> %s' % input_line)
    with torch.no_grad():
        output = evaluate(lineToTensor(input_line))
        # Highest-n log-probabilities along the class dimension.
        topv, topi = output.topk(n_predictions, 1, True)
        predictions = []
        for rank in range(n_predictions):
            value = topv[0][rank].item()
            category_index = topi[0][rank].item()
            print('(%.2f) %s' % (value, all_categories[category_index]))
            predictions.append([value, all_categories[category_index]])
    return predictions

# Demo prediction.
predict('Dovesky')
> Dovesky
(-0.75) Czech
(-0.82) Russian
(-3.27) English
最后,我们可以使用Flask简单的部署RNN模型,访问127.0.0.1:5000,传递预测名字
import flask  # import the Flask web framework (third-party)

# Minimal HTTP wrapper: GET /<name> returns the top-3 language predictions.
app = flask.Flask(__name__)  # instantiate the application

@app.route('/<input_line>')  # route the URL path segment to index()
def index(input_line):
    # predict() prints to the server console and returns the list of guesses.
    return str(predict(input_line, 3)) + '\n'

if __name__ == '__main__':
    app.run()
####结果如下####
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/Zhang
[[-0.22450284659862518, 'Chinese'],
[-1.7545788288116455, 'Vietnamese'],
[-4.870341777801514, 'Korean']]
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/Badia
curl 127.0.0.1:5000/Badia
[[-1.0556448698043823, 'Arabic'],
[-1.35159170627594, 'Spanish'],
[-2.4573655128479004, 'Italian']]
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/Bill
[[-1.0748096704483032, 'English']]
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/Edison
[[-1.294445514678955, 'English'],
[-1.3679521083831787, 'Scottish'],
[-2.0646870136260986, 'Russian']]
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/Curry
[[-1.7633719444274902, 'English'],
[-2.1153502464294434, 'Scottish'],
[-2.178600311279297, 'Irish']]
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/Kobe
[[-1.3392773866653442, 'Japanese'],
[-1.3548816442489624, 'Scottish'],
[-2.238069534301758, 'English']]
ubuntu@VM-0-5-ubuntu:~$ curl 127.0.0.1:5000/James
[[-1.717521071434021, 'Dutch'],
[-1.8096932172775269, 'Spanish'],
[-1.8986538648605347, 'English']]