When I fine-tune a model based on ResNet-50 in Keras (TensorFlow backend) on my own dataset, I find it very odd that after each epoch, validation is slower than training. I don't know why; is it because my GPU does not have enough memory? My GPU is a Quadro K2200, which has 4 GB of memory. Or am I misunderstanding what the parameters mean?
I have 35,946 training images, so I use:
samples_per_epoch=35946,
I have 8,986 validation images, so I use:
nb_val_samples=8986,
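(For what it's worth, newer Keras versions replaced these sample-count arguments with step counts. If that mapping is part of the confusion, a minimal sketch of the equivalence, assuming batch_size is the same variable used in the code below:)

# Keras 2 counts batches per epoch instead of samples per epoch
steps_per_epoch = 35946 // batch_size    # training batches per epoch
validation_steps = 8986 // batch_size    # validation batches per epoch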
The following is part of my code:
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False,
    zoom_range=0.1,
    channel_shift_range=0.,
    fill_mode='nearest',
    cval=0.,
)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    'data/train',
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    'data/val',
    batch_size=batch_size,
    class_mode='categorical')
model.fit_generator(train_generator,
                    # steps_per_epoch=X_train.shape[0] // batch_size,
                    samples_per_epoch=35946,
                    epochs=epochs,
                    validation_data=validation_generator,
                    verbose=1,
                    nb_val_samples=8986,
                    callbacks=[earlyStopping, saveBestModel, tensorboard])
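To see where the epoch time actually goes, one option is to measure the gap between the last training batch and the end of the epoch, which is roughly the validation pass (validation runs after the final batch, before on_epoch_end fires). A minimal sketch using the standard Callback hooks; TimingCallback is a hypothetical name of my own:

import time
from keras.callbacks import Callback

class TimingCallback(Callback):
    """Rough split of each epoch into training time and validation time."""
    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_start = time.time()
    def on_batch_end(self, batch, logs=None):
        # validation starts after the last training batch of the epoch
        self.last_batch_end = time.time()
    def on_epoch_end(self, epoch, logs=None):
        train_time = self.last_batch_end - self.epoch_start
        val_time = time.time() - self.last_batch_end
        print('epoch %d: train ~%.1fs, val ~%.1fs' % (epoch, train_time, val_time))

Appending an instance of this to the callbacks list would show whether validation really dominates the epoch, rather than, say, the augmentation on the training side.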
def data_generator_task():
    while not self._stop_event.is_set():
        try:
            if self._pickle_safe or self.queue.qsize() < max_q_size:
                generator_output = next(self._generator)
                self.queue.put(generator_output)
            else:
                time.sleep(wait_time)
        except Exception:
            self._stop_event.set()
            raise

– Boanergeslogs
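That snippet is from Keras's internal generator enqueuer: worker threads keep pulling batches from the generator until the queue holds max_q_size items. The queue depth and worker count are exposed on fit_generator itself, so one thing worth trying, purely as an experiment, is raising them. A sketch of the same call as above with those knobs added (Keras 1.x argument names shown; Keras 2 renamed them max_queue_size and workers):

model.fit_generator(train_generator,
                    samples_per_epoch=35946,
                    epochs=epochs,
                    validation_data=validation_generator,
                    verbose=1,
                    nb_val_samples=8986,
                    max_q_size=20,  # default 10: how many batches the queue buffers
                    nb_worker=4,    # default 1: threads filling the queue
                    callbacks=[earlyStopping, saveBestModel, tensorboard])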