
python - Keras: accuracy drops while fine-tuning Inception


I am having trouble fine-tuning an Inception model with Keras.

Using the tutorials and documentation, I have successfully produced a fully-connected top model that classifies my dataset into the correct categories with over 99% accuracy, using bottleneck features from Inception.

import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications


# dimensions of our images.
img_width, img_height = 150, 150

#paths for saving weights and finding datasets
top_model_weights_path = 'Inception_fc_model_v0.h5'
train_data_dir = '../data/train2'
validation_data_dir = '../data/train2'

#training related parameters?
inclusive_images = 1424
nb_train_samples = 1424
nb_validation_samples = 1424
epochs = 50
batch_size = 16


def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build bottleneck features
    model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(img_width, img_height, 3))

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False)

    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples // batch_size)

    np.save('bottleneck_features_train', bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False)

    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples // batch_size)

    np.save('bottleneck_features_validation', bottleneck_features_validation)


def train_top_model():
    train_data = np.load('bottleneck_features_train.npy')
    train_labels = np.array(range(inclusive_images))

    validation_data = np.load('bottleneck_features_validation.npy')
    validation_labels = np.array(range(inclusive_images))

    print('base size ', train_data.shape[1:])

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(inclusive_images, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    proceed = True

    # model.load_weights(top_model_weights_path)

    while proceed:
        history = model.fit(train_data, train_labels,
                            epochs=epochs,
                            batch_size=batch_size)  # ,
        # validation_data=(validation_data, validation_labels), verbose=1)
        if history.history['acc'][-1] > .99:
            proceed = False

    model.save_weights(top_model_weights_path)


save_bottlebeck_features()
train_top_model()

Epoch 50/50 1424/1424 [==============================] - 17s 12ms/step - loss: 0.0398 - acc: 0.9909
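
Note that train_labels = np.array(range(inclusive_images)) only lines up with the bottleneck features because flow_from_directory assigns class indices in alphabetical order of the subdirectory names and shuffle=False preserves that order. A quick sanity check of this assumption (a sketch; check_gen is just an illustrative name):

# Sketch: verify that generator order matches the assumed range(inclusive_images) labels
check_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)

# classes holds the integer label of each sample in generator order;
# with one image per class this should be exactly 0..inclusive_images-1
print(list(check_gen.classes) == list(range(inclusive_images)))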

I was also able to stack this model on top of Inception to create my full model, and successfully classify my training set with that full model.

from keras import Model
from keras import optimizers
from keras.callbacks import EarlyStopping

img_width, img_height = 150, 150

top_model_weights_path = 'Inception_fc_model_v0.h5'
train_data_dir = '../data/train2'
validation_data_dir = '../data/train2'

#how many inclusive examples do we have?
inclusive_images = 1424
nb_train_samples = 1424
nb_validation_samples = 1424
epochs = 50
batch_size = 16

# build the complete network for evaluation
base_model = applications.inception_v3.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width,img_height,3))

top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(1000, activation='relu'))
top_model.add(Dense(inclusive_images, activation='softmax'))

top_model.load_weights(top_model_weights_path)

#combine base and top model
fullModel = Model(input= base_model.input, output= top_model(base_model.output))

#predict with the full training dataset
results = fullModel.predict_generator(ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False))

Inspecting the results of running this full model confirms that its accuracy matches that of the fully-connected model built on the bottleneck features.

import matplotlib.pyplot as plt
import operator

#retrieve what the softmax based class assignments would be from results
resultMaxClassIDs = [ max(enumerate(result), key=operator.itemgetter(1))[0] for result in results]

#resultMaxClassIDs should be equal to range(inclusive_images) so we subtract the two and plot the log of the absolute value
#looking for spikes that indicate the values aren't equal
plt.plot([np.log(np.abs(x)+10) for x in (np.array(resultMaxClassIDs) - np.array(range(inclusive_images)))])

Result: spikes are misclassifications
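
Equivalently, the agreement can be checked directly with argmax instead of the plot (a minimal sketch over the same results array):

# Sketch: fraction of samples whose predicted class matches the expected label
predicted = np.argmax(results, axis=1)
expected = np.arange(inclusive_images)
print('full-model accuracy:', np.mean(predicted == expected))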

The problem is this: when I take this full model and try to train it, accuracy drops to 0 even though validation accuracy remains above 99%.

model2 = fullModel

for layer in model2.layers[:-2]:
    layer.trainable = False

# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
#model.compile(loss='binary_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'])

model2.compile(loss='categorical_crossentropy',
               optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
               metrics=['accuracy'])

train_datagen = ImageDataGenerator(rescale=1. / 255)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')

callback = [EarlyStopping(monitor='acc', min_delta=0, patience=3, verbose=0, mode='auto', baseline=None)]
# fine-tune the model
model2.fit_generator(
    #train_generator,
    validation_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    validation_steps=nb_validation_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator)

Epoch 1/50 89/89 [==============================] - 388s 4s/step - loss: 13.5787 - acc: 0.0000e+00 - val_loss: 0.0353 - val_acc: 0.9937

And it only gets worse as training progresses:

Epoch 21/50 89/89 [==============================] - 372s 4s/step - loss: 7.3850 - acc: 0.0035 - val_loss: 0.5813 - val_acc: 0.8272

The only thing I can think of is that the training labels are somehow being assigned improperly on this last training run, but I have successfully done this before with similar code using VGG16.

I have searched through the code trying to find a discrepancy that would explain why a model that makes accurate predictions over 99% of the time would drop its training accuracy while maintaining validation accuracy during fine-tuning, but I can't figure it out. Any help would be appreciated.

Information about the code and environment:

Things that look odd but are intentional:

  • There is only 1 image per class. This NN is intended to classify objects whose environmental and orientation conditions are controlled, so there is only one acceptable image for each class, corresponding to the correct environmental and rotational situation.
  • The test and validation sets are the same. This NN is only ever meant to be used on the classes it is being trained on, and the images it will process will be copies of the class examples. It is my intent to overfit the model to these classes.

I am using:

  • Windows 10
  • Python 3.5.6 under Anaconda client 1.6.14
  • Keras 2.2.2
  • TensorFlow 1.10.0 as the backend
  • CUDA 9.0
  • CuDNN 8.0

I have already checked out:

  1. Keras accuracy discrepancy in fine-tuned model
  2. VGG16 Keras fine tuning: low accuracy
  3. Keras: model accuracy drops after reaching 99 percent accuracy and loss 0.01
  4. Keras inception v3 retraining and finetuning error
  5. How to find which version of TensorFlow is installed in my system?

but they don't seem to be relevant.

Best Answer

Note: since your problem is a bit strange and hard to debug without your trained model and dataset, this answer is just a (best) guess after considering the many things that could have gone wrong. Please provide your feedback, and I will delete this answer if it does not work for you.

Since InceptionV3 contains BatchNormalization layers, the problem may stem from the (somewhat ambiguous or unexpected) behavior of that layer when its trainable parameter is set to False (1, 2, 3, 4).
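
To get a feel for how much of the network this affects, you can count the BatchNormalization layers in the frozen base (a sketch, using the base_model defined in the question):

from keras.layers import BatchNormalization

# Sketch: InceptionV3 pairs a BatchNormalization layer with every convolution,
# so a large fraction of the "frozen" layers show this behavior
bn_layers = [l for l in base_model.layers if isinstance(l, BatchNormalization)]
print(len(bn_layers), 'of', len(base_model.layers), 'layers are BatchNormalization')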

Now, let's see if this is the root of the problem: as suggested by @fchollet, set the learning phase when defining the model for fine-tuning:

from keras import backend as K

K.set_learning_phase(0)

base_model = applications.inception_v3.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width,img_height,3))

for layer in base_model.layers:
    layer.trainable = False

K.set_learning_phase(1)

top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(1000, activation='relu'))
top_model.add(Dense(inclusive_images, activation='softmax'))

top_model.load_weights(top_model_weights_path)

#combine base and top model
fullModel = Model(input= base_model.input, output= top_model(base_model.output))

fullModel.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])


#####################################################################
# Here, define the generators and then fit the model same as before #
#####################################################################
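
For completeness, a minimal sketch of that last step, reusing the generator definitions from the question:

train_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')

# fit as before, but on the model built with the learning phase set explicitly
fullModel.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs)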

Side note: this does not cause any problem in your case, but keep in mind that when you use top_model(base_model.output), the whole Sequential model (i.e. top_model) is stored as a single layer of fullModel. You can verify this with fullModel.summary() or print(fullModel.layers[-1]). So when you use:

for layer in model2.layers[:-2]:
    layer.trainable = False

you are not actually freezing the last layer of base_model. However, since that layer is a Concatenate layer and has no trainable parameters, no problem arises and it behaves as you intended.
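
A quick way to see this nesting (a sketch; the layer names are what I would expect from Keras' InceptionV3, not something verified against your model):

# Sketch: the tail of fullModel should show the Concatenate layer (mixed10)
# followed by the entire top model wrapped as one Sequential layer
for layer in fullModel.layers[-2:]:
    print(type(layer).__name__, layer.name)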

For more on python - Keras: accuracy drops while fine-tuning Inception, see the original question on Stack Overflow: https://stackoverflow.com/questions/52282108/
