网格搜索

1. Batch Size and Epochs

import numpy as np
from sklearn.model_selection import GridSearchCV
import tensorflow as tf
from tensorflow.keras import Sequential,layers
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.datasets import load_iris

def create_model():
    """Build and compile a small binary-classification MLP.

    The network takes 8 input features (the Pima diabetes columns),
    passes them through one ReLU hidden layer, and emits a single
    sigmoid probability.

    Returns:
        A compiled ``Sequential`` model (binary cross-entropy, Adam).
    """
    net = Sequential([
        layers.Dense(12, input_dim=8, activation='relu'),
        layers.Dense(1, activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net

# Fix the random seed for reproducibility.
seed = 7
np.random.seed(seed)

# Load the Pima Indians diabetes CSV (no header row).
dataset = np.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# Split into input features X (first 8 columns) and binary target Y.
X = dataset[:, 0:8]
Y = dataset[:, 8]

# Wrap the Keras builder so scikit-learn can drive it.
# NOTE(review): tf.keras.wrappers.scikit_learn was removed in TF >= 2.6;
# newer code should use the `scikeras` package's KerasClassifier instead.
model = KerasClassifier(build_fn=create_model, verbose=0)

# Define the grid of batch sizes and epoch counts to search.
batch_size = [10, 20, 40, 60, 80, 100]
epochs = [10, 20, 50, 100]
param_grid = dict(batch_size=batch_size, epochs=epochs)
# n_jobs=1: parallel workers (n_jobs=-1) routinely hang or fail because
# TF/Keras models do not pickle cleanly; the later sections of this file
# already use n_jobs=1, so this makes the three searches consistent.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)

print("Best: %f using %s"%(grid_result.best_score_,grid_result.best_params_))

2. Optimizer

import numpy as np
from sklearn.model_selection import GridSearchCV
import tensorflow as tf
from tensorflow.keras import Sequential,layers
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.datasets import load_iris


# 构建模型的函数
def create_model(optimizer='adam'):
    """Build a compiled binary-classification MLP (8 input features).

    Args:
        optimizer: optimizer name or instance handed to ``compile`` —
            this is the hyperparameter being grid-searched.

    Returns:
        A compiled ``Sequential`` model using binary cross-entropy loss.
    """
    net = Sequential([
        Dense(12, input_dim=8, activation='relu'),
        Dense(1, activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy', optimizer=optimizer,
                metrics=['accuracy'])
    return net

# Seed NumPy so runs are repeatable.
seed = 7
np.random.seed(seed)

# Load the Pima Indians diabetes CSV and split features/target.
dataset = np.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X = dataset[:, :8]
Y = dataset[:, 8]

# Reuse the best epochs / batch size found by the previous search.
model = KerasClassifier(build_fn=create_model, epochs=100, batch_size=80,
                        verbose=0)

# Candidate optimizers to grid-search over.
optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
param_grid = {'optimizer': optimizer}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)

# Report the best cross-validated score and its parameter setting.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

3. Learning Rate and Momentum

import numpy as np
from sklearn.model_selection import GridSearchCV
import tensorflow as tf
from tensorflow.keras import Sequential,layers
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.datasets import load_iris

def create_model(learn_rate=0.01, momentum=0):
    """Build a compiled MLP whose SGD optimizer uses the given settings.

    The two parameters are exactly the hyperparameters grid-searched
    below.

    Args:
        learn_rate: learning rate for SGD.
        momentum: momentum term for SGD.

    Returns:
        A compiled ``Sequential`` binary classifier (8 input features,
        binary cross-entropy loss).
    """
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    # `lr` is a deprecated alias that newer TF versions reject;
    # `learning_rate` is the supported keyword.
    optimizer = tf.keras.optimizers.SGD(learning_rate=learn_rate,
                                        momentum=momentum)
    model.compile(loss=tf.keras.losses.binary_crossentropy,
                  optimizer=optimizer, metrics=['accuracy'])
    return model

# Fix the random seed for reproducibility.
seed = 7
np.random.seed(seed)

# Load the dataset and split into features and binary target.
dataset = np.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X = dataset[:, :8]
Y = dataset[:, 8]

# Wrap the builder with the best epochs / batch size from earlier runs.
model = KerasClassifier(build_fn=create_model, epochs=100, batch_size=10,
                        verbose=0)

# Grid of learning rates and momentum values to evaluate.
param_grid = {
    'learn_rate': [0.001, 0.01, 0.1, 0.2, 0.3],
    'momentum': [0.0, 0.2, 0.4, 0.6, 0.8, 0.9],
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)

# Report the best cross-validated score and its parameters.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

猜你喜欢

转载自blog.csdn.net/weixin_41988545/article/details/112845538