TensorFlow: incompatible dimensions error in a convolutional network

import tensorflow as tf
import numpy as np
import scipy as sci
import cv2
import input_data_conv

# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20
n_classes = 101  # number of classes

# Input data and classes
global train_data, train_class, test_data, test_classs, train_i, test_i
test_i, train_i = 0, 0
train_data = input_data_conv.train_list_file
train_class = input_data_conv.train_single_classes
test_data = input_data_conv.test_single_frames
test_classs = input_data_conv.test_single_classes

# Network parameters
n_input = [227, 227, 3]  # data input (image shape: 227*227*3)
dropout = 0.5  # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def conv2d(name, l_input, w, b, s):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, s, s, 1], padding='SAME'), b), name=name)

def max_pool(name, l_input, k, s):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME', name=name)

def norm(name, l_input, lsize):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.0001 / 9.0, beta=0.75, name=name)

def vgg_single_frame(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 227, 227, 3])

    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'], s=2)
    pool1 = max_pool('pool1', conv1, k=3, s=2)
    norm1 = norm('norm1', pool1, lsize=5)

    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'], s=2)
    pool2 = max_pool('pool2', conv2, k=3, s=2)
    norm2 = norm('norm2', pool2, lsize=5)

    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'], s=2)
    conv4 = conv2d('conv4', conv3, _weights['wc4'], _biases['bc4'], s=2)
    conv5 = conv2d('conv4', conv4, _weights['wc5'], _biases['bc5'], s=2)
    pool5 = max_pool('pool5', conv5, k=3, s=2)

    # Fully connected layers
    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])  # Reshape conv output to fit dense layer input
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc6')  # Relu activation
    dense1 = tf.nn.dropout(dense1, _dropout)

    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc7')  # Relu activation
    dense2 = tf.nn.dropout(dense2, _dropout)

    # Output, class prediction
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out

weights = {
    'wc1': tf.Variable(tf.random_normal([7, 7, 1, 96])),    # 7x7 conv, 1 input, 96 outputs, stride 2
    'wc2': tf.Variable(tf.random_normal([5, 5, 96, 384])),  # 5x5 conv, 32 inputs, 64 outputs
    'wc3': tf.Variable(tf.random_normal([3, 3, 384, 512])), # s 2, p a
    'wc4': tf.Variable(tf.random_normal([3, 3, 512, 512])), # s 2, p 1
    'wc5': tf.Variable(tf.random_normal([3, 3, 512, 384])), # s 2, p 1
    'wd1': tf.Variable(tf.random_normal([7*7*64, 4096])),   # fully connected, 7*7*64 inputs, 1024 outputs
    'wd2': tf.Variable(tf.random_normal([4096, 4096])),     # fully connected, 7*7*64 inputs, 1024 outputs
    'out': tf.Variable(tf.random_normal([4096, n_classes])) # 1024 inputs, 10 outputs (class prediction)
}
biases = {
    'bc1': tf.Variable(tf.random_normal([96])),
    'bc2': tf.Variable(tf.random_normal([384])),
    'bc3': tf.Variable(tf.random_normal([512])),
    'bc4': tf.Variable(tf.random_normal([512])),
    'bc5': tf.Variable(tf.random_normal([384])),
    'bd1': tf.Variable(tf.random_normal([4096])),
    'bd2': tf.Variable(tf.random_normal([4096])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def train_next_batch(batch_size):
    temp_data = np.ndarray(shape=(batch_size, 227, 227, 3), dtype=float)
    temp_data = np.ndarray(shape=(batch_size, n_classes), dtype=float)
    for num, x in train_data[train_i:train_i+batch_size]:
        temp_data[num, :, :, :] = cv2.imread(x, 1)

pred = vgg_single_frame(x, weights, biases, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reaching the maximum number of iterations
    while step * batch_size < training_iters:
        batch_xs, batch_ys = train_next_batch(batch_size)
        # Fit training using batch data
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    # Calculate accuracy for 256 test images
    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})

I want to run the code above and feed images of size [227, 227, 3] into this network. However, when I try to build the network, I get the following error:

I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcublas.so locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcudnn.so locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcufft.so locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcuda.so.1 locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcurand.so locally
Traceback (most recent call last):
  File "/home/anilil/projects/pycharm-community-5.0.4/helpers/pydev/pydevd.py", line 2411, in <module>
    globals = debugger.run(setup['file'], None, None, is_module)
  File "/home/anilil/projects/pycharm-community-5.0.4/helpers/pydev/pydevd.py", line 1802, in run
    launch(file, globals, locals)  # execute the script
  File "/media/anilil/Data/charm/Cnn/build_vgg_model.py", line 104, in <module>
    pred = vgg_single_frame(x, weights, biases, keep_prob)
  File "/media/anilil/Data/charm/Cnn/build_vgg_model.py", line 50, in vgg_single_frame
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'],s=2)
  File "/media/anilil/Data/charm/Cnn/build_vgg_model.py", line 38, in conv2d
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, s, s, 1], padding='SAME'),b), name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_nn_ops.py", line 211, in conv2d
    use_cudnn_on_gpu=use_cudnn_on_gpu, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 655, in apply_op
    op_def=op_def)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2042, in create_op
    set_shapes_for_outputs(ret)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1528, in set_shapes_for_outputs
    shapes = shape_func(op)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/common_shapes.py", line 187, in conv2d_shape
    input_shape[3].assert_is_compatible_with(filter_shape[2])
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_shape.py", line 94, in assert_is_compatible_with
    % (self, other))
ValueError: Dimensions Dimension(3) and Dimension(1) are not compatible

I suspect the shape of the weights['wc1'] variable is wrong, but I don't know what the correct shape should be.


Answer:

The problem is that your input images (in _X) have 3 channels (presumably red, green, and blue), whereas the convolution filter for the conv1 layer (in _weights['wc1']) expects 1 input channel.
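
The shape check happens at graph-construction time: tf.nn.conv2d requires the filter's third dimension (in_channels) to match the last dimension of the input. As a minimal sketch, assuming the same TF 0.x/1.x graph API used in the question, this is enough to reproduce the error:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 227, 227, 3])  # NHWC input with 3 channels
    w = tf.Variable(tf.random_normal([7, 7, 1, 96]))      # filter declared with 1 input channel
    # Raises while building the graph:
    # ValueError: Dimensions Dimension(3) and Dimension(1) are not compatible
    conv = tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='SAME')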

There are at least two ways to solve this:

  1. Redefine _weights['wc1'] so that it accepts 3 input channels (a quick shape check for this option is sketched after the list):

    weights = {
        'wc1': tf.Variable(tf.random_normal([7, 7, 3, 96])), # ...
        # ...
    }
  2. Convert your input image _X to a single input channel using the tf.image.rgb_to_grayscale() op:

    _X = tf.image.rgb_to_grayscale(_X)
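
As a sanity check for option 1, here is a minimal sketch (again assuming the TF 0.x/1.x API from the question) showing that a [7, 7, 3, 96] filter lets the first layer build and produce the expected output shape:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 227, 227, 3])
    wc1 = tf.Variable(tf.random_normal([7, 7, 3, 96]))   # 3 input channels now match the image
    bc1 = tf.Variable(tf.random_normal([96]))
    conv1 = tf.nn.relu(tf.nn.bias_add(
        tf.nn.conv2d(x, wc1, strides=[1, 2, 2, 1], padding='SAME'), bc1))
    print conv1.get_shape()  # (?, 114, 114, 96) with 'SAME' padding and stride 2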

