### Keras image classification model configuration

I am trying to learn image classification with Keras and TensorFlow, working from the https://www.kaggle.com/c/dogs-vs-cats problem.

My code is as follows:

```python
#!/usr/bin/env python
# coding: utf-8

# # Cats and Dogs Identification Kaggle
# # https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html

# ## Tensorflow and GPU Memory Config

# In[14]:

import tensorflow as tf
from tensorflow.keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.layers import Dense, Flatten, Activation, Conv2D, MaxPooling2D, Reshape, BatchNormalization
from keras import backend as K

# tf.reset_default_graph()
# tf.set_random_seed(42)
# tf.config.set_per_process_memory_growth(True)
# tf.config.gpu.set_per_process_memory_fraction(0.4)

tf.debugging.set_log_device_placement(True)
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
tf.config.experimental.list_physical_devices('GPU')

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Restrict TensorFlow to only allocate 1GB of memory on the first GPU
    try:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Virtual devices must be set before GPUs have been initialized
        print(e)

print('Keras image_data_format {}'.format(K.image_data_format()))
print('if keras image_data_format is channel_last means data should be loaded in 32X32X3 dimensions')


# In[2]:

import os
import shutil
from shutil import copyfile
from random import seed
from random import random


def createPathIfNoExists(path):
    if not os.path.exists(path):
        os.mkdir(path)


cats_dir = '/cats'
dogs_dir = '/dogs'
source_images = '/home/user/Desktop/dogs_cat_keras'
data_dir = source_images + '/data'
train_dir = data_dir + '/train'
train_cats_dir = train_dir + cats_dir
train_dogs_dir = train_dir + dogs_dir
validation_dir = data_dir + '/validation'
validation_cats_dir = validation_dir + cats_dir
validation_dogs_dir = validation_dir + dogs_dir

createPathIfNoExists(data_dir)
createPathIfNoExists(train_dir)
createPathIfNoExists(train_cats_dir)
createPathIfNoExists(train_dogs_dir)
createPathIfNoExists(validation_dir)
createPathIfNoExists(validation_cats_dir)
createPathIfNoExists(validation_dogs_dir)

print('Source directory {}'.format(source_images))
print('Data directory {}'.format(data_dir))
print('train directory {}'.format(train_dir))
print('train cats directory {}'.format(train_cats_dir))
print('train dogs directory {}'.format(train_dogs_dir))
print('validation directory {}'.format(validation_dir))
print('validation directory {}'.format(validation_cats_dir))
print('validation directory {}'.format(validation_dogs_dir))

# inputFiles = source_images + '/train'
# cats = [inputFiles+'/' + d for d in os.listdir(inputFiles) if d.startswith('cat')]
# print('All cats count {}'.format(len(cats)))
# dogs = [inputFiles+'/' + d for d in os.listdir(inputFiles) if d.startswith('dog')]
# print('All dogs count {}'.format(len(dogs)))
# data_split_70 = 8400
# train_cats = cats[:data_split_70]
# validation_cats = cats[data_split_70:]
# train_dogs = dogs[:data_split_70]
# validation_dogs = dogs[data_split_70:]
# print('Total train cats {} and validation cats {}'.format(len(train_cats), len(validation_cats)))
# print('Total train dogs {} and validation dogs {}'.format(len(train_dogs), len(validation_dogs)))

# # Put train cats data in train directory
# for item in train_cats:
#     if os.path.isfile(item):
#         shutil.copyfile(item, train_cats_dir + '/' + os.path.basename(item))

# # Put validation cats data in validation directory
# for item in validation_cats:
#     if os.path.isfile(item):
#         shutil.copyfile(item, validation_cats_dir + '/' + os.path.basename(item))

# # Put train dogs data in train directory
# for item in train_dogs:
#     if os.path.isfile(item):
#         shutil.copyfile(item, train_dogs_dir + '/' + os.path.basename(item))

# # Put validation dogs data in validation directory
# for item in validation_dogs:
#     if os.path.isfile(item):
#         shutil.copyfile(item, validation_dogs_dir + '/' + os.path.basename(item))


# ## General imports

# In[15]:

import datetime
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.model_selection import KFold, GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
import h5py


# ## Data Import

# In[16]:

dataGenerator = ImageDataGenerator(rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')


# ### Image Display

# In[17]:

img = load_img('/home/user/Desktop/dogs_cat_keras/data/train/cats/cat.12467.jpg')  # this is a PIL image
x = img_to_array(img)  # this is a Numpy array with shape (width X height X 3)
print(x.shape)
# print(type(img))
# print(img.size)
# print(img.mode)
plt.imshow(x/255)
plt.show()

print('Shape before reshape {}'.format(x.shape))
## Generate data = 0
x = x.reshape((1,) + x.shape)  # this is a Numpy array with shape (1, width, height, 3)
print('Shape after reshape {}'.format(x.shape))


# ### Generate augumented data using image generator

# In[6]:

# import os
# augumented_data_path = data_path + '/preview'
# if not os.path.exists(augumented_data_path):
#     os.mkdir(augumented_data_path)
# i = 0
# for batch in dataGenerator.flow(x, batch_size=1,
#                           save_to_dir=augumented_data_path, save_prefix='cat', save_format='jpeg'):
#     i += 1
#     if i > 20:
#         break  # otherwise the generator would loop indefinitely


# ### Building Graph

# In[23]:

img_width, img_height = 150, 150
input_shape = (img_width, img_height, 3)

nnModel = Sequential()
nnModel.add(Flatten(input_shape=(input_shape)))
nnModel.add(Dense(32, activation='relu'))
nnModel.add(Dense(16, activation='relu'))
nnModel.add(Dense(2, activation='softmax'))


# ### Compile function

# In[26]:

nnModel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])


# ### Train Generator

# In[20]:

batch_size = 32
train_data_gen = ImageDataGenerator(rescale=1/255,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True)

train_generator = train_data_gen.flow_from_directory(
        train_dir,  # this is the target directory
        target_size=(img_width, img_height),  # all images will be resized to 150x150
        batch_size=batch_size,
        class_mode='binary')
print('Train path {}'.format(train_dir))

test_data_gen = ImageDataGenerator(rescale=1/255)
print('Test path {}'.format(validation_dir))
validation_generator = test_data_gen.flow_from_directory(
        validation_dir,  # this is the target directory
        target_size=(img_width, img_height),  # all images will be resized to 150x150
        batch_size=batch_size,
        class_mode='binary')


# In[24]:

nnModel.summary()


# ### Train

# In[27]:

nnModel.fit_generator(train_generator,
                      steps_per_epoch=2048,
                      epochs=64,
                      validation_data=validation_generator,
                      validation_steps=1024)
```

I am getting the following exception:

```
<ipython-input-27-59bf3c75cdc4> in <module>
      3                       epochs=64,
      4                       validation_data=validation_generator,
----> 5                       validation_steps=1024)
...
A target array with shape (32, 1) was passed for an output of shape (None, 2) while using as loss `binary_crossentropy`. This loss expects targets to have the same shape as the output.
```
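To make the two shapes in the message concrete, a quick check along these lines (a diagnostic sketch, assuming the `train_generator` and `nnModel` built above) shows where they disagree:

```python
# With class_mode='binary', the generator yields one scalar label per image,
# while the Dense(2, activation='softmax') head produces two values per image.
images, labels = next(train_generator)
print('batch images shape:', images.shape)          # e.g. (32, 150, 150, 3)
print('batch labels shape:', labels.shape)           # e.g. (32,), standardized by Keras to (32, 1)
print('model output shape:', nnModel.output_shape)   # (None, 2)
```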

There are many notebook solutions to this problem statement, but here I do not want to use a CNN for learning. Please help me resolve this issue.


Answer:
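The exception is a target/output shape mismatch rather than a training problem: with `class_mode='binary'` the generators yield one scalar label per image (which Keras standardizes to shape `(batch_size, 1)`), while the model ends in `Dense(2, activation='softmax')`, whose output has shape `(None, 2)`, and `binary_crossentropy` requires targets and outputs to have the same shape. Since a CNN is not wanted here, the dense-only model can be kept and only its head changed to match the binary labels. A minimal sketch of that change, reusing the names from the script above, is:

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten

img_width, img_height = 150, 150
input_shape = (img_width, img_height, 3)

nnModel = Sequential()
nnModel.add(Flatten(input_shape=input_shape))
nnModel.add(Dense(32, activation='relu'))
nnModel.add(Dense(16, activation='relu'))
# A single sigmoid unit gives an output of shape (None, 1), which matches the
# (batch_size, 1) labels produced by class_mode='binary', so binary_crossentropy applies.
nnModel.add(Dense(1, activation='sigmoid'))

nnModel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
```

Alternatively, the two-unit softmax head can be kept, in which case the generators should be built with `class_mode='categorical'` (one-hot labels of shape `(batch_size, 2)`) and the model compiled with `loss='categorical_crossentropy'`. Either pairing makes the target shape agree with the output shape; mixing a binary generator with a two-unit softmax head is what triggers the error above.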
