Multi-stream input, eight branches

This is written in a rather convoluted way…
Pay attention to the details.

import pickle
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Dropout, Dense, Conv2D, Flatten, MaxPooling2D,Input,BatchNormalization,Activation
from keras.callbacks import ModelCheckpoint
from keras import utils  # provides to_categorical for one-hot encoding
from sklearn.model_selection import train_test_split
from keras.layers import concatenate
from keras import regularizers

# Load the pickled images and labels
with open('test_image.p', 'rb') as fp:
    X = pickle.load(fp)

with open('y_test.p', 'rb') as fp:
    y = pickle.load(fp)
Y = np.array(y)

y1 = utils.to_categorical(Y, 8)  # one-hot encode the 8 class labels
X_train, X_test, Y_train, Y_test = train_test_split(X, y1, test_size=0.2, random_state=0)
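The Input layers below expect 64×64 single-channel images, so X is assumed here to have shape (N, 64, 64, 8), one channel per stream. A quick shape check catches mismatches early:

# Assumed shapes: X -> (N, 64, 64, 8), y1 -> (N, 8) one-hot labels
print(np.shape(X), np.shape(y1))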

# Split the 8-channel images into eight single-channel input streams
f_train = []
for i in range(8):
    tmp = X_train[:, :, :, i]
    tmp = np.expand_dims(tmp, axis=-1)
    f_train.append(tmp)

f_test = []
for i in range(8):
    tmp = X_test[:, :, :, i]
    tmp = np.expand_dims(tmp, axis=-1)
    f_test.append(tmp)
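As an aside, the same per-channel split can be done with a list comprehension; slicing with i:i+1 keeps the channel axis, so expand_dims becomes unnecessary:

# Equivalent one-liners for the per-channel split
f_train = [X_train[:, :, :, i:i + 1] for i in range(8)]
f_test = [X_test[:, :, :, i:i + 1] for i in range(8)]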

inputA = Input(shape=(64,64,1))
inputB = Input(shape=(64,64,1))
inputC = Input(shape=(64,64,1))
inputD = Input(shape=(64,64,1))
inputE = Input(shape=(64,64,1))
inputF = Input(shape=(64,64,1))
inputG = Input(shape=(64,64,1))
inputH = Input(shape=(64,64,1))

weight_decay = 0.005 

def tmp_model(input_data):
    # One convolutional branch: four Conv-BN-ReLU-MaxPool blocks, then Flatten.
    # Each call builds a new branch with its own (unshared) weights.
    x = input_data
    for filters in (32, 64, 128, 256):
        x = Conv2D(filters, (3, 3),
                   kernel_regularizer=regularizers.l2(weight_decay))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((2, 2))(x)
    x = Flatten()(x)
    return Model(inputs=input_data, outputs=x)
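With 64×64 inputs and 'valid' 3×3 convolutions, the spatial size shrinks 64→62→31→29→14→12→6→4→2 through the four Conv/MaxPooling blocks, so each branch flattens to 2×2×256 = 1024 features and the concatenated vector below has 8×1024 = 8192 dimensions.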

m1 = tmp_model(inputA)
m2 = tmp_model(inputB)
m3 = tmp_model(inputC)
m4 = tmp_model(inputD)
m5 = tmp_model(inputE)
m6 = tmp_model(inputF)
m7 = tmp_model(inputG)
m8 = tmp_model(inputH)

# Fuse the eight branch feature vectors and classify into 8 classes
com = concatenate([m1.output, m2.output, m3.output, m4.output,
                   m5.output, m6.output, m7.output, m8.output], axis=-1)
z = Dense(1024, activation='relu')(com)
z = Dropout(0.5)(z)
z = Dense(8, activation='softmax')(z)

model = Model(inputs=[m1.input, m2.input, m3.input, m4.input,
                      m5.input, m6.input, m7.input, m8.input], outputs=z)
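Since the eight branches are structurally identical, the whole construction above can be written far more compactly. A sketch of an equivalent loop-based version (the names inputs_list and branches are mine, not from the original post):

# Equivalent, more compact construction of the same eight-branch model
inputs_list = [Input(shape=(64, 64, 1)) for _ in range(8)]
branches = [tmp_model(inp) for inp in inputs_list]
com = concatenate([b.output for b in branches], axis=-1)
z = Dense(1024, activation='relu')(com)
z = Dropout(0.5)(z)
z = Dense(8, activation='softmax')(z)
model = Model(inputs=inputs_list, outputs=z)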

filepath = "best_8.hdf5"
# Save only the weights from the epoch with the highest validation accuracy
checkpoint = ModelCheckpoint(filepath
                             , monitor='val_accuracy'
                             , verbose=0, save_best_only=True, mode='max')

callbacks_list = [checkpoint]
model.compile(loss='categorical_crossentropy'
              , optimizer='adam', metrics=['accuracy'])
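Compiling with the string 'adam' uses Adam at its default learning rate of 0.001. To control the learning rate explicitly, pass an optimizer instance instead; the 1e-4 below is only an illustrative value:

from keras.optimizers import Adam

# Illustrative: explicit learning rate
# (older Keras versions take lr= instead of learning_rate=)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(learning_rate=1e-4),
              metrics=['accuracy'])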

BATCH_SIZE = 64
EPOCHS = 100
history = model.fit(f_train, Y_train, batch_size=BATCH_SIZE
                    , epochs=EPOCHS
                    , shuffle=True
                    , callbacks=callbacks_list
                    , validation_data=(f_test, Y_test), verbose=1)


# Record accuracy and loss on the training and validation sets
# (On Keras versions before 2.3 these keys are 'acc'/'val_acc' instead.)
t_a = history.history['accuracy']
v_a = history.history['val_accuracy']

t_l = history.history['loss']
v_l = history.history['val_loss']

# Save the accuracy curves to a text file
np.savetxt('acc_curve8.txt',(t_a,v_a))
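np.savetxt writes the two curves as two rows of one text file, so they can be read back later with np.loadtxt:

# Reload the saved curves: row 0 is training accuracy, row 1 is validation accuracy
t_a, v_a = np.loadtxt('acc_curve8.txt')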

plt.plot(t_a)
plt.plot(v_a)
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='best')
plt.show()

# summarize history for loss
np.savetxt('loss_curve8.txt',(t_l,v_l))

plt.plot(t_l)
plt.plot(v_l)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='best')
plt.show()
s = model.evaluate(f_test, Y_test)
print(s[1])  # test accuracy
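Note that model.evaluate here scores the weights from the final epoch, not necessarily the best ones saved by ModelCheckpoint. To score the best checkpoint, reload it first; a minimal sketch:

from keras.models import load_model

# Evaluate the best checkpoint saved during training
best_model = load_model('best_8.hdf5')
loss, acc = best_model.evaluate(f_test, Y_test)
print(acc)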

Reposted from blog.csdn.net/weixin_47289438/article/details/112173947