我正在尝试实现GoogleNet的Inception网络,用于对我的分类项目中的图像进行分类。我之前用相同的代码配合AlexNet网络训练,一切正常;但一旦把网络改为GoogleNet架构,代码就不断抛出以下错误:
ValueError: Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 3 array(s), but instead got the following list of 1 arrays: [array([[0., 1.], [1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.], [0., 1.], [1., 0.], [0., 1.], [0., 1.], [0., 1.], [0., ...
这是我的完整代码:
import kerasfrom keras.optimizers import SGDfrom keras.preprocessing.image import ImageDataGeneratorfrom keras.models import Modelfrom keras.layers import Input, Dropout, Dense, Flatten, Conv2Dfrom keras.layers import GlobalAveragePooling2Dfrom keras.layers import AveragePooling2D, MaxPool2D, concatenateData = 'data/train/'batch_size = 64NUM_EPOCHS = 20def inception_module(x, filters_1x1, filters_3x3_reduce, filters_3x3, filters_5x5_reduce, filters_5x5, filters_pool_proj, name=None):conv_1x1 = Conv2D(filters_1x1, (1, 1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(x)conv_3x3 = Conv2D(filters_3x3_reduce, (1, 1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(x)conv_3x3 = Conv2D(filters_3x3, (3, 3), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_3x3)conv_5x5 = Conv2D(filters_5x5_reduce, (1, 1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(x)conv_5x5 = Conv2D(filters_5x5, (5, 5), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_5x5)pool_proj = MaxPool2D((3, 3), strides=(1, 1), padding='same')(x)pool_proj = Conv2D(filters_pool_proj, (1, 1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(pool_proj)output = concatenate([conv_1x1, conv_3x3, conv_5x5, pool_proj], axis=3, name=name)return outputkernel_init = keras.initializers.glorot_uniform()bias_init = keras.initializers.Constant(value=0.2)input_layer = Input(shape=(224, 224, 3))x = Conv2D(64, (7, 7), padding='same', strides=(2, 2), activation='relu', name='conv_1_7x7/2',)(input_layer)x = MaxPool2D((3, 3), padding='same', strides=(2, 2), name='max_pool_1_3x3/2')(x)x = Conv2D(64, (1, 1), padding='same', strides=(1, 1), activation='relu', name='conv_2a_3x3/1')(x)x = Conv2D(192, (3, 3), padding='same', strides=(1, 1), 
activation='relu', name='conv_2b_3x3/1')(x)x = MaxPool2D((3, 3), padding='same', strides=(2, 2), name='max_pool_2_3x3/2')(x)x = inception_module(x, filters_1x1=64, filters_3x3_reduce=96, filters_3x3=128, filters_5x5_reduce=16, filters_5x5=32, filters_pool_proj=32, name='inception_3a')x = inception_module(x, filters_1x1=128, filters_3x3_reduce=128, filters_3x3=192, filters_5x5_reduce=32, filters_5x5=96, filters_pool_proj=64, name='inception_3b')x = MaxPool2D((3, 3), padding='same', strides=(2, 2), name='max_pool_3_3x3/2')(x)x = inception_module(x, filters_1x1=192, filters_3x3_reduce=96, filters_3x3=208, filters_5x5_reduce=16, filters_5x5=48, filters_pool_proj=64, name='inception_4a')x1 = AveragePooling2D((5, 5), strides=3)(x)x1 = Conv2D(128, (1, 1), padding='same', activation='relu')(x1)x1 = Flatten()(x1)x1 = Dense(1024, activation='relu')(x1)x1 = Dropout(0.7)(x1)x1 = Dense(10, activation='softmax', name='auxilliary_output_1')(x1)x = inception_module(x, filters_1x1=160, filters_3x3_reduce=112, filters_3x3=224, filters_5x5_reduce=24, filters_5x5=64, filters_pool_proj=64, name='inception_4b')x = inception_module(x, filters_1x1=128, filters_3x3_reduce=128, filters_3x3=256, filters_5x5_reduce=24, filters_5x5=64, filters_pool_proj=64, name='inception_4c')x = inception_module(x, filters_1x1=112, filters_3x3_reduce=144, filters_3x3=288, filters_5x5_reduce=32, filters_5x5=64, filters_pool_proj=64, name='inception_4d')x2 = AveragePooling2D((5, 5), strides=3)(x)x2 = Conv2D(128, (1, 1), padding='same', activation='relu')(x2)x2 = Flatten()(x2)x2 = Dense(1024, activation='relu')(x2)x2 = Dropout(0.7)(x2)x2 = Dense(10, activation='softmax', name='auxilliary_output_2')(x2)x = inception_module(x, filters_1x1=256, filters_3x3_reduce=160, filters_3x3=320, filters_5x5_reduce=32, filters_5x5=128, filters_pool_proj=128, name='inception_4e')x = MaxPool2D((3, 3), padding='same', strides=(2, 2), name='max_pool_4_3x3/2')(x)x = inception_module(x, filters_1x1=256, filters_3x3_reduce=160, 
filters_3x3=320, filters_5x5_reduce=32, filters_5x5=128, filters_pool_proj=128, name='inception_5a')x = inception_module(x, filters_1x1=384, filters_3x3_reduce=192, filters_3x3=384, filters_5x5_reduce=48, filters_5x5=128, filters_pool_proj=128, name='inception_5b')x = GlobalAveragePooling2D(name='avg_pool_5_3x3/1')(x)x = Dropout(0.4)(x)x = Dense(10, activation='softmax', name='output')(x)model = Model(input_layer, [x, x1, x2], name='inception_v1')model.compile(SGD(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy'])train_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2, dtype=None,)train_generator = train_datagen.flow_from_directory(Data, target_size=(224, 224), batch_size=batch_size, class_mode='categorical', subset='training' )validation_generator = train_datagen.flow_from_directory(Data, target_size=(224, 224), batch_size=batch_size, class_mode='categorical', subset='validation' )Model = model.fit_generator(generator=train_generator, steps_per_epoch=train_generator.samples // batch_size, epochs=NUM_EPOCHS, verbose=1, shuffle=True, validation_data=validation_generator, validation_steps=validation_generator.samples // batch_size)
回答:
GoogLeNet与AlexNet不同:在GoogLeNet中,您的模型有3个输出——一个主输出,以及两个在训练期间连接到中间层的辅助输出:
outputs = [main, aux1, aux2]
例如:
model = Model(inputs = X_input, outputs = [main, aux1, aux2])model.compile(loss='categorical_crossentropy', loss_weights={'main': 1.0, 'aux1': 0.3, 'aux2': 0.3}, optimizer='sgd', metrics=['accuracy'])
您的模型结构本身没有问题,问题出在数据处理上:生成器每个批次只产生一份标签,而模型需要三份。要解决这个问题,您可以创建自己的自定义生成器:
def Custom_generator(generator, image_dir, batch_size, image_size, subset):My_Data= generator.flow_from_directory( image_dir, target_size=(image_size, image_size), batch_size=batch_size, class_mode='categorical', subset=subset)while True: My_Data_next = My_Data.next() # this returns image batch and 3 sets one-hot vectors of lables yield My_Data_next[0], [My_Data_next[1], My_Data_next[1], My_Data_next[1]]
您的情况下的数据处理代码:
image_size = (224, 224)image_dir= 'data/train/'batch_size = 64NUM_EPOCHS = 20# Get the Data and split 20% for validation.train_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)# Your Custom Generatordef custom_generator(generator, image_dir, batch_size, image_size, subset):My_Data= generator.flow_from_directory( image_dir, target_size=image_size, batch_size=batch_size, class_mode='categorical', subset=subset)while True: My_Data_next = My_Data.next() # this returns image batch and 3 sets one-hot vectors of lables yield My_Data_next[0], [My_Data_next[1], My_Data_next[1], My_Data_next[1]] train_generator = custom_generator( train_datagen, image_dir=image_dir, batch_size=batch_size, image_size=image_size, subset='training') validation_generator = custom_generator( train_datagen, image_dir=image_dir, batch_size=batch_size, image_size=image_size, subset='validation')
希望这能解决错误。