In this note, we take a binary-classification example from natural language processing and implement an RNN with the PyTorch framework.
The structure is shown in the figure below:
First, the manual implementation:
import torch
import torch.nn as nn
from torch.autograd import Variable

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        # embedding layer: maps token indices to hidden_size-dimensional vectors
        self.embed = nn.Embedding(input_size, hidden_size)
        # hidden layer; note its input size is 2*hidden_size, because the
        # embedded input and the previous hidden state are concatenated
        self.i2h = nn.Linear(2 * hidden_size, hidden_size)
        # output layer
        self.i2o = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        x = self.embed(input)
        combined = torch.cat((x, hidden), 1)
        hidden = self.i2h(combined)
        output = self.softmax(self.i2o(hidden))
        return output, hidden

    def initHidden(self, batch_size=1):
        # the hidden state has shape (batch_size, hidden_size)
        return Variable(torch.zeros(batch_size, self.hidden_size))
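To make the interface concrete, here is a minimal usage sketch for the hand-written RNN above; the vocabulary size, hidden size, and token ids are made-up placeholders. The sequence is fed in one token at a time, and the hidden state returned by each step is passed into the next:

# minimal usage sketch; vocab_size, hidden_size and the token ids are made up
vocab_size, hidden_size, num_classes = 1000, 64, 2
rnn = RNN(vocab_size, hidden_size, num_classes)

tokens = torch.LongTensor([[2], [45], [7]])    # a 3-token sequence, batch size 1
hidden = rnn.initHidden(batch_size=1)
for t in range(tokens.size(0)):
    output, hidden = rnn(tokens[t], hidden)    # one token per step, hidden is carried over
# output now holds the log-probabilities of the two classes for the whole sequence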
Next, we implement the same model using PyTorch's built-in layers:
class SimpleRNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        super(SimpleRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_size, hidden_size)
        # batch_first=True: input and output tensors are (batch, seq_len, hidden_size)
        self.rnn = nn.RNN(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        x = self.embedding(input)
        output, hidden = self.rnn(x, hidden)
        output = output[:, -1, :]  # keep only the output of the last time step
        output = self.fc(output)
        output = self.softmax(output)
        return output, hidden
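With the built-in nn.RNN the whole (padded) batch is processed in one call. Below is a minimal sketch, again with made-up sizes, token ids, and labels, showing that the initial hidden state must have shape (num_layers, batch, hidden_size) and how the LogSoftmax output pairs with NLLLoss for the binary classification task:

# minimal usage sketch; the sizes, token ids and labels below are made up
vocab_size, hidden_size, num_classes = 1000, 64, 2
model = SimpleRNN(vocab_size, hidden_size, num_classes)

batch = torch.LongTensor([[2, 45, 7, 0],
                          [9, 13, 28, 5]])                 # (batch=2, seq_len=4)
hidden0 = torch.zeros(model.num_layers, batch.size(0), hidden_size)
output, hidden = model(batch, hidden0)                     # output: (batch, num_classes) log-probs

labels = torch.LongTensor([0, 1])                          # one binary label per sequence
loss = nn.NLLLoss()(output, labels)                        # NLLLoss pairs with LogSoftmax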