Python Implementation of an Extreme Learning Machine (Prediction)

'''This is a supervised learning algorithm for SLFNs (single-hidden-layer feedforward neural networks).
Its main idea: the weights between the input layer and the hidden layer, and the bias vector of the hidden layer, are set once and for all
(they do not need to be iteratively re-adjusted as in gradient-based learning algorithms).
Training is therefore direct: it only requires solving a minimum-norm least-squares problem,
which ultimately reduces to computing the Moore-Penrose generalized inverse of a matrix.'''
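Before the full script, here is a minimal NumPy sketch of that idea (illustrative only, not the hpelm implementation used below): the input weights W and biases b are drawn once at random and never updated, and the output weights beta are the minimum-norm least-squares solution obtained from the Moore-Penrose pseudoinverse of the hidden-layer output matrix H. The function name and parameters are chosen for illustration.

import numpy as np

def elm_fit_predict(X_train, T_train, X_test, n_hidden=20, seed=0):
    """Toy single-hidden-layer ELM: fixed random hidden layer + pseudoinverse readout."""
    rng = np.random.default_rng(seed)
    W = rng.normal(size=(X_train.shape[1], n_hidden))  # input-to-hidden weights, fixed
    b = rng.normal(size=n_hidden)                      # hidden biases, fixed
    H = 1.0 / (1.0 + np.exp(-(X_train @ W + b)))       # sigmoid hidden activations
    beta = np.linalg.pinv(H) @ T_train                 # minimum-norm least-squares solution
    H_test = 1.0 / (1.0 + np.exp(-(X_test @ W + b)))
    return H_test @ beta                               # predictions for X_test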
# coding: utf-8
import csv
import math

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
# Reference: class hpelm.elm.ELM(inputs, outputs, classification='', w=None, batch=1000,
#            accelerator=None, precision='double', norm=None, tprint=5)
from hpelm import ELM

# Let Chinese characters and minus signs render correctly in plots
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
data = []
xback = 24   # length of each input window: 24 hourly values (one day)
with open(r'D:\晴天新.csv') as f:
    reader = csv.reader(f)
    next(reader)             # skip the header row
    for row in reader:
        data.append(row[:])  # keep every column of the row
data = np.array(data)
data = data.astype('float64')   # convert all fields from strings to floats
set1 = data[:, :]    # all feature columns (inputs for each daily window)
set2 = data[:, -1]   # last column: the target series to predict
def create_interval_dataset(dataset1, dataset2, xback):
    dataX, dataY = [], []
    for i in range(0, len(dataset1)-xback,24):
        dataX.append(dataset1[i:i+xback])
        dataY.append(dataset2[i+xback:i+2*xback])
    return np.asarray(dataX), np.asarray(dataY)
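# Each dataX[k] holds 24 consecutive hourly rows of all feature columns (one day),
# and dataY[k] holds the 24 target values of the following day; the step of 24
# makes the daily windows non-overlapping.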
dataX, dataY = create_interval_dataset(set1, set2, 24) 
dataY=np.reshape(dataY, [-1,24])
# First 26 daily windows for training, the remaining windows for testing
X_tr = dataX[:26, :]
X_te = dataX[26:, :]
y_tr = dataY[:26]
y_te = dataY[26:]
# Flatten each input window to one feature vector (168 = 24 hourly steps x 7 columns)
X_tr = np.reshape(X_tr, [-1, 168])
X_te = np.reshape(X_te, [-1, 168])
y_tr = np.reshape(y_tr, [-1, 24])
y_te = np.reshape(y_te, [-1, 24])
# Scale inputs and outputs to [0, 1]; note that the test data get their own scalers here
scaler = MinMaxScaler(feature_range=(0, 1))
scaler1 = MinMaxScaler(feature_range=(0, 1))
X_tr = scaler.fit_transform(X_tr)
X_te = scaler1.fit_transform(X_te)
scaler2 = MinMaxScaler(feature_range=(0, 1))
y_tr = scaler2.fit_transform(y_tr.reshape(-1, 24))
scaler3 = MinMaxScaler(feature_range=(0, 1))
y_te = scaler3.fit_transform(y_te.reshape(-1, 24))
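# Because scaler1/scaler3 are fitted on the test data themselves, information from the
# test set influences its own normalisation. A common alternative (not used in this
# script) is to reuse the scalers fitted on the training data only, e.g.:
#   X_te = scaler.transform(X_te)
#   y_te = scaler2.transform(y_te.reshape(-1, 24))
# (the inverse transforms further down would then also use scaler and scaler2).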
X_tr = X_tr.astype('float64')
X_te = X_te.astype('float64')
y_tr = np.reshape(y_tr, [-1, 24]).astype('float64')
y_te = np.reshape(y_te, [-1, 24]).astype('float64')
# ELM with 168 inputs and 24 outputs; the hidden layer mixes sigmoid and L2-RBF neurons
elm = ELM(168, y_tr.shape[1])
elm.add_neurons(5, "sigm")
elm.add_neurons(6, "rbf_l2")
elm.train(X_tr, y_tr, 'r')   # train as a regression model ('r')
predicted = elm.predict(X_te)    # test-set predictions (still in scaled space)
predicted1 = elm.predict(X_tr)   # training-set predictions
# Undo the min-max scaling before computing errors and plotting
predicted = scaler3.inverse_transform(predicted)
predicted1 = scaler2.inverse_transform(predicted1)
X_te = scaler1.inverse_transform(X_te)
y_tr = scaler2.inverse_transform(y_tr)
y_te = scaler3.inverse_transform(y_te)
predicted = predicted.flatten()
predicted1 = predicted1.flatten()
y_tr = y_tr.flatten()
y_te = y_te.flatten()
# Keep only the time steps whose true value is non-zero, so that MAPE is well defined
fei0 = [i for i in range(len(y_tr)) if y_tr[i] != 0]
xina = [y_tr[i] for i in fei0]          # true training values (non-zero)
xinb = [predicted1[i] for i in fei0]    # corresponding training predictions
fei01 = [i for i in range(len(y_te)) if y_te[i] != 0]
xina1 = [y_te[i] for i in fei01]        # true test values (non-zero)
xinb1 = [predicted[i] for i in fei01]   # corresponding test predictions
print("*****************训练集误差***********************")
v = list(map(lambda x: (abs((x[0] - x[1]) / x[0])), zip(xina, xinb)))
loss = sum(v) * 100 / len(y_tr)
#print("v", v)
print("MAPE loss",loss)
v = list(map(lambda x: ((pow((x[0] - x[1]), 2))), zip(xina, xinb)))
loss = math.sqrt(sum(v) / len(y_tr))
print("the RMSE  is :",loss)
#accuracy = metrics.mean_squared_error(y_true=y_te, y_pred=pred_X)
#print('gcForest accuracy : {}'.format(accuracy))
v = list(map(lambda x: (abs((x[0] - x[1])) ), zip(xina, xinb)))
loss = sum(v)/ len(y_tr)
print("the MABE is :",loss)
file.write('——————————ELM提前一小时测试误差————————————' + '\n')
print("*****************测试集误差***********************")
v = list(map(lambda x: (abs((x[0] - x[1]) / x[0])), zip(xina1, xinb1)))
loss = sum(v) * 100 / len(y_te)
print("the MAPE is :  ",loss)
v = list(map(lambda x: ((pow((x[0] - x[1]), 2))), zip(xina1, xinb1)))
loss = math.sqrt(sum(v)/ len(y_te))
print("RMSE loss",loss)
v = list(map(lambda x: (abs((x[0] - x[1])) ), zip(xina1, xinb1)))
loss = sum(v)/ len(y_te)
print("MABE loss",loss)


plt.plot(y_te[:100], 'g--', lw=2, label='ELM actual values')
plt.plot(predicted[:100], 'r', lw=2, label='ELM predicted values')
plt.title('ELM', fontsize=18)   # chart title
plt.legend()
plt.show()


Reposted from blog.csdn.net/pwtd_huran/article/details/79729913