import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.preprocessing import MinMaxScaler
from torch.autograd import Variable
# Build a toy signal: sin(t) sampled on [0, 100) with step 0.1,
# then rescale it into [0, 1] with sklearn's MinMaxScaler.
T = np.arange(0, 100, 0.1)
dataset = np.sin(T)
# MinMaxScaler expects 2-D input of shape (n_samples, n_features), so
# promote the series to a column vector and flatten back afterwards.
scaler = MinMaxScaler()
dataset = scaler.fit_transform(dataset[:, np.newaxis]).ravel()
# Split into training and test sets
def create_dataset(dataset, look_back=2):
    """Slice a 1-D series into (input, target) pairs.

    Each input is ``dataset[i]`` and its target is the value
    ``look_back`` steps ahead, ``dataset[i + look_back]``.

    Args:
        dataset: 1-D array-like series.
        look_back: horizon between an input sample and its target.

    Returns:
        Tuple ``(dataX, dataY)`` of ``np.ndarray``, each of length
        ``len(dataset) - look_back``.
    """
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i])
        dataY.append(dataset[i + look_back])
    # Fix: the module is imported as np; the original called
    # numpy.array(...), which raised NameError.
    return np.array(dataX), np.array(dataY)
# Split the series: first 70% for training, remaining 30% for testing,
# then build (input, target) pairs for each half with create_dataset.
train_size =int(len(dataset)*0.7)
test_size =len(dataset)- train_size  # the remaining 30% of the samples
train, test = dataset[: train_size], dataset[train_size:]
trainX, trainY = create_dataset(train)
testX, testY = create_dataset(test)
import torch.nn as nn
class Model(nn.Module):
    """A single linear layer mapping input_size features to output_size."""

    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        # Fix: the layer was hard-coded to nn.Linear(1, 1), silently
        # ignoring the constructor arguments; use them instead
        # (identical behavior for the original Model(1, 1) usage).
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        return output
# Start training and print the loss
# Plain manual-SGD training loop: one full pass over the training pairs
# per epoch, squared error accumulated sample by sample.
# NOTE(review): trainX/trainY must be torch tensors here (the loop calls
# .size() and wraps elements in Variable); the numpy -> tensor
# conversion cell appears to be missing from this file — confirm upstream.
learning_rate = 0.02  # SGD step size
for k in range(100):
    loss = 0
    for i in range((trainX.size())[0]):
        x = Variable(trainX[i])
        y = model(x)                  # forward pass through the linear model
        target = Variable(trainY[i])
        loss += (y - target) ** 2     # accumulate squared error
    loss = loss / trainX.size()[0]    # mean over the training set
    loss.backward()                   # backprop through the averaged loss
    for p in model.parameters():
        p.data -= learning_rate * p.grad.data  # manual SGD parameter update
        p.grad.data.zero_()           # reset gradients for the next epoch
    print(loss)
# Prediction: fit the red line (Y) using the blue line (X)
# Plot the model's predictions (red, solid) against the true targets
# (blue, dashed) over the training set.
Y = []
for i in range(trainY.size()[0]):
    # Fix: the original fed the *targets* trainY[i] into the model as
    # inputs; the stated intent is to predict Y from the inputs X.
    x = Variable(trainX[i])
    y = model(x)
    Y.append(y.data.numpy())
plt.figure()
plt.plot(np.array(Y).ravel(), 'r')
plt.ylim(0, 1)
Y_real = [y.numpy() for y in trainY]
Y_real = np.array(Y_real).ravel()
plt.plot(Y_real, 'b--')
plt.show()
def create_dataset(dataset, look_back=1):
    """Pair each sample with the *change* look_back steps ahead.

    Unlike the earlier version, the target is the difference
    ``dataset[i + look_back] - dataset[i]`` rather than the raw value
    (so for a sin input the targets follow a cos-like curve).

    Args:
        dataset: 1-D array-like series supporting element subtraction.
        look_back: horizon over which the difference is taken.

    Returns:
        Tuple ``(dataX, dataY)`` of ``np.ndarray``, each of length
        ``len(dataset) - look_back``.
    """
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i])
        dataY.append(dataset[i + look_back] - dataset[i])  # store the difference
    # Fix: numpy.array -> np.array (module is imported as np).
    return np.array(dataX), np.array(dataY)
# Split into test and training sets and plot: trainX follows a sin
# curve, while trainY (the one-step differences) follows a cos curve
# Rebuild the train/test pairs with the differencing create_dataset:
# inputs are X = t, targets are the change to t + 1.
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# Centre and shrink the inputs so both curves fit on the same axis.
centered_inputs = (trainX - trainX.mean())[:60] * 0.1
plt.plot(centered_inputs, 'b--', label='trainx')  # sin-shaped
plt.plot(trainY[:60], 'r', label='trainy')        # cos-shaped
plt.legend()
class Model(nn.Module):
    """One-hidden-layer network with hand-rolled weight Parameters.

    forward computes ``relu(inputs @ i2h + bh) @ h2o + bo``.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        # Raw weight/bias Parameters instead of nn.Linear, so the
        # training loop below can update them by hand.
        self.i2h = nn.Parameter(torch.randn(input_size, hidden_size))
        self.h2o = nn.Parameter(torch.randn(hidden_size, output_size))
        self.bh = nn.Parameter(torch.randn(1, hidden_size))
        self.bo = nn.Parameter(torch.randn(1, output_size))

    def forward(self, inputs):
        # dims must agree everywhere except the contracted (matmul) one
        hidden = F.relu(inputs.matmul(self.i2h) + self.bh)
        output = hidden.matmul(self.h2o) + self.bo
        return output
model = Model(1,64,1)
# Start training
# Manual SGD on the hand-rolled network: 500 epochs of mean squared
# error over the (input, difference-target) pairs.
# NOTE(review): trainX/trainY must be torch tensors here (.size() call,
# Variable wrapping); the numpy -> tensor conversion cell appears to be
# missing from this file — confirm upstream.
learning_rate = 1e-2
for k in range(500):
    loss = 0
    for i in range(trainX.size()[0]):
        x = Variable(trainX[i])
        y = model(x)
        loss += (y - Variable(trainY[i])) ** 2  # accumulate squared error
    loss = loss / trainX.size()[0]              # mean over the training set
    loss.backward(retain_graph=True)
    for p in model.parameters():
        # Fix: the deprecated two-argument Tensor.add_(scalar, tensor)
        # form was removed in modern PyTorch; use the alpha keyword.
        p.data.add_(p.grad.data, alpha=-learning_rate)
    model.zero_grad()                           # clear all grads each epoch
    print(loss)