Using an example from the Keras Tuner website, I wrote simple tuning code:
# Frozen VGG16 feature extractor: ImageNet weights, classification head
# removed (include_top=False) so we can attach our own tunable head.
base_model = tf.keras.applications.vgg16.VGG16(
    input_shape=IMG_SHAPE,
    include_top=False,
    weights='imagenet')
# Freeze the pretrained weights so only the new head is trained while tuning.
base_model.trainable = False
def build_model(hp):
    """Build and compile a VGG16-based classifier with a tunable head.

    Args:
        hp: ``HyperParameters`` instance supplied by the Keras Tuner oracle.

    Returns:
        A compiled ``tf.keras.Sequential`` model.

    NOTE(review): ``Conv2D_i`` hyperparameters are registered the first time
    a trial reaches that ``i``.  Once registered they stay in the search
    space and are printed in every later trial summary, even when a smaller
    ``num_layers`` makes them inactive — presumably that is why the summary
    can show ``Conv2D_2`` alongside ``num_layers: 1``; confirm against the
    Keras Tuner docs on conditional hyperparameters.
    """
    model = tf.keras.Sequential()
    model.add(base_model)

    # One tunable Conv2D block per layer; each layer gets its own
    # independently tuned filter count.
    for i in range(hp.Int('num_layers', 1, 2)):
        model.add(tf.keras.layers.Conv2D(
            filters=hp.Int('Conv2D_' + str(i),
                           min_value=32,
                           max_value=512,
                           step=32),
            kernel_size=3,
            activation='relu'))

    model.add(tf.keras.layers.Dropout(hp.Choice('rate', [0.3, 0.5])))
    model.add(tf.keras.layers.GlobalAveragePooling2D())
    # NOTE(review): Flatten after GlobalAveragePooling2D is a no-op
    # (pooling already yields a 2-D tensor); kept to preserve behavior.
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(5, activation='softmax'))

    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(
            hp.Choice('learning_rate', [1e-4, 1e-5])),
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    return model
epochs = 2
# NOTE(review): patience=3 exceeds epochs=2, so this callback can never
# trigger in the current run; it only matters once epochs is raised.
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)

# Random search over the space declared inside build_model.
tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=24,
    executions_per_trial=1,
    directory=LOG_DIR)
tuner.search_space_summary()

tuner.search(train_generator,
             callbacks=[callback],
             epochs=epochs,
             steps_per_epoch=train_generator.samples // BATCH_SIZE,
             validation_data=valid_generator,
             validation_steps=valid_generator.samples // BATCH_SIZE,
             verbose=1)

tuner.results_summary()
models = tuner.get_best_models(num_models=2)
However, when I run it with a varying number of layers, it shows a mismatch between the number of Conv2D hyperparameters reported and the value of num_layers. For example, the trial summary below reports three Conv2D entries and yet shows num_layers as 1. Why?
[Trial summary]
|-Trial ID: 79cd7bb6146b4c243eb2bc51f19985de
|-Score: 0.8444444537162781
|-Best step: 0
> Hyperparameters:
|-Conv2D_0: 448
|-Conv2D_1: 448
|-Conv2D_2: 512
|-learning_rate: 0.0001
|-num_layers: 1
|-rate: 0.5