TensorFlow Image Segmentation via Linear Regression

Previously I built a network that performs binary image segmentation of foreground and background. I did this by setting up two classes. Now, instead of a binary classification, I want to run a linear regression on each pixel.

Suppose there is a 3D surface in the image view. I want to segment the exact middle of that surface and give it the linear value 10. The edge of the surface gets the value 5. The voxels in between, of course, take values between 5 and 10. Then, as voxels move away from the surface, the values quickly fall off to zero.
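
(For concreteness, here is a minimal NumPy sketch of how such a ground truth could be built. It is not from the original post: dist is a hypothetical per-pixel array holding each voxel's distance to the surface centre, and edge_width and falloff are illustrative parameters.)

import numpy as np

def make_ground_truth(dist, edge_width=3.0, falloff=1.0):
    # 10 at the surface centre, ramping linearly down to 5 at the edge...
    inside = 10.0 - 5.0 * dist / edge_width
    # ...then decaying quickly from 5 towards 0 beyond the edge
    outside = 5.0 * np.exp(-(dist - edge_width) / falloff)
    return np.where(dist <= edge_width, inside, outside).astype(np.float32)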

With the binary classification I got one image with 1s at the foreground and another image with 1s at the background; in other words, a classification 🙂 Now I want a single ground-truth image whose values look something like the following…

[image: example of the desired ground-truth values]

For this linear-regression example, I assumed I could simply change the cost function to a least-squares one: cost = tf.square(y - pred). And of course I would change the ground truth as well.

However, when I do this, my predictions come out as NaN. My last layer is a linear sum of matrix weight values multiplied by the final output. I am guessing this has something to do with it? I cannot make it a tf.nn.softmax() function, because that would normalize the values to between 0 and 1.
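
(For reference, a regression output layer is normally left linear, i.e. an identity activation, exactly so the values are not squashed. A one-line sketch, with hypothetical names last_hidden, w_out and b_out:)

# Raw affine output; no softmax/sigmoid, so values are not forced into [0, 1]
output = tf.matmul(last_hidden, w_out) + b_out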

So I believe cost = tf.square(y - pred) is the source of the problem. I tried this next… cost = tf.reduce_sum(tf.square(y - pred)), but that did not work.

I then tried this (recommended here): cost = tf.reduce_sum(tf.pow(pred - y, 2)) / (2 * batch_size), but that did not work either.
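
(One variant worth noting, not from the original post: tf.reduce_mean averages over all pixels, so the loss and the gradient magnitudes stay independent of the 396 * 396 output size, whereas reduce_sum scales them up by the pixel count.)

# Mean squared error per pixel; gradients no longer scale with 396 * 396 = 156816
cost = tf.reduce_mean(tf.square(pred - y))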

Should I be initializing the weights differently? Should I normalize them?

The full code is below:


import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
from tensorflow.python.ops import rnn, rnn_cell
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
# Parameters
learning_rate = 0.001
training_iters = 1000000
batch_size = 2
display_step = 1
# Network parameters
n_input_x = 396 # input image x dimension
n_input_y = 396 # input image y dimension
n_classes = 1 # binary classification: on the surface or not
n_steps = 396
n_hidden = 128
n_output = n_input_y * n_classes
dropout = 0.75 # dropout, probability to keep units
# tf graph input
x = tf.placeholder(tf.float32, [None, n_input_x, n_input_y])
y = tf.placeholder(tf.float32, [None, n_input_x * n_input_y], name="ground_truth")
keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)
def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')
def deconv2d(prev_layer, w, b, output_shape, strides):
    # Deconvolution layer
    deconv = tf.nn.conv2d_transpose(prev_layer, w, output_shape=output_shape, strides=strides, padding="VALID")
    deconv = tf.nn.bias_add(deconv, b)
    deconv = tf.nn.relu(deconv)
    return deconv
# Create model
def net(x, cnn_weights, cnn_biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 396, 396, 1])
    with tf.name_scope("conv1") as scope:
    # Convolution layer
        conv1 = conv2d(x, cnn_weights['wc1'], cnn_biases['bc1'])
        # Max pooling (down-sampling)
        #conv1 = tf.nn.local_response_normalization(conv1)
        conv1 = maxpool2d(conv1, k=2)
    # Convolution layer
    with tf.name_scope("conv2") as scope:
        conv2 = conv2d(conv1, cnn_weights['wc2'], cnn_biases['bc2'])
        # Max pooling (down-sampling)
        # conv2 = tf.nn.local_response_normalization(conv2)
        conv2 = maxpool2d(conv2, k=2)
    # Convolution layer
    with tf.name_scope("conv3") as scope:
        conv3 = conv2d(conv2, cnn_weights['wc3'], cnn_biases['bc3'])
        # Max pooling (down-sampling)
        # conv3 = tf.nn.local_response_normalization(conv3)
        conv3 = maxpool2d(conv3, k=2)
    temp_batch_size = tf.shape(x)[0] # batch_size shape
    with tf.name_scope("deconv1") as scope:
        output_shape = [temp_batch_size, 99, 99, 64]
        strides = [1,2,2,1]
        # conv4 = deconv2d(conv3, weights['wdc1'], biases['bdc1'], output_shape, strides)
        deconv = tf.nn.conv2d_transpose(conv3, cnn_weights['wdc1'], output_shape=output_shape, strides=strides, padding="SAME")
        deconv = tf.nn.bias_add(deconv, cnn_biases['bdc1'])
        conv4 = tf.nn.relu(deconv)
        # conv4 = tf.nn.local_response_normalization(conv4)
    with tf.name_scope("deconv2") as scope:
        output_shape = [temp_batch_size, 198, 198, 32]
        strides = [1,2,2,1]
        conv5 = deconv2d(conv4, cnn_weights['wdc2'], cnn_biases['bdc2'], output_shape, strides)
        # conv5 = tf.nn.local_response_normalization(conv5)
    with tf.name_scope("deconv3") as scope:
        output_shape = [temp_batch_size, 396, 396, 1]
        # No ReLU this time; this is the output layer
        conv6 = tf.nn.conv2d_transpose(conv5, cnn_weights['wdc3'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
        x = tf.nn.bias_add(conv6, cnn_biases['bdc3'])
    # Include dropout
    #conv6 = tf.nn.dropout(conv6, dropout)
    x = tf.reshape(x, [-1, n_input_x, n_input_y])
    # Prepare data shape to match the `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permute batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input_x])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)
    # This input shape is required by the `rnn` function
    x = tf.split(0, n_steps, x)
    # Define an lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True, activation=tf.nn.relu)
    # lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 12, state_is_tuple=True)
    # lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.8)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using the rnn inner loop's last output
    # pdb.set_trace()
    output = []
    for i in xrange(396):
        output.append(tf.matmul(outputs[i], lstm_weights[i]) + lstm_biases[i])
    return output
cnn_weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1' : tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2' : tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # 5x5 conv, 64 inputs, 128 outputs
    'wc3' : tf.Variable(tf.random_normal([5, 5, 64, 128])),
    'wdc1' : tf.Variable(tf.random_normal([2, 2, 64, 128])),
    'wdc2' : tf.Variable(tf.random_normal([2, 2, 32, 64])),
    'wdc3' : tf.Variable(tf.random_normal([2, 2, 1, 32])),
}
cnn_biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bdc1': tf.Variable(tf.random_normal([64])),
    'bdc2': tf.Variable(tf.random_normal([32])),
    'bdc3': tf.Variable(tf.random_normal([1])),
}
lstm_weights = {}
lstm_biases = {}
for i in xrange(396):
    lstm_weights[i] = tf.Variable(tf.random_normal([n_hidden, n_output]))
    lstm_biases[i] = tf.Variable(tf.random_normal([n_output]))
# Construct model
# with tf.name_scope("net") as scope:
pred = net(x, cnn_weights, cnn_biases, keep_prob)
# pdb.set_trace()
pred = tf.pack(pred)
pred = tf.transpose(pred, [1,0,2])
pred = tf.reshape(pred, [-1, n_input_x * n_input_y])
with tf.name_scope("opt") as scope:
    # cost = tf.reduce_sum(tf.square(y-pred))
    cost = tf.reduce_sum(tf.pow((pred-y),2)) / (2*batch_size)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
with tf.name_scope("acc") as scope:
    # accuracy is the difference between the prediction and the ground-truth matrices
    correct_pred = tf.equal(0,tf.cast(tf.sub(cost,y), tf.int32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph) # initialize the graph for tensorboard
    step = 1
    # Import data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until max iterations reached
    while step * batch_size < training_iters:
        batch_x, batch_y = data.train.next_batch(batch_size)
        # Run optimization op (backprop)
        # pdb.set_trace()
        batch_x = batch_x.reshape((batch_size, n_input_x, n_input_y))
        batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        step = step + 1
        if step % display_step == 0:
            batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y})
            # Make a prediction
            im = Image.open('/home/kendall/Desktop/cropped/temp data0001.tif')
            batch_x = np.array(im)
            batch_x = batch_x.reshape((1, n_input_x, n_input_y))
            batch_x = batch_x.astype(float)
            prediction = sess.run(pred, feed_dict={x: batch_x})
            prediction = prediction.reshape((1, n_input_x * n_input_y))
            prediction = tf.nn.softmax(prediction)
            prediction = prediction.eval()
            prediction = prediction.reshape((n_input_x, n_input_y))
            # my_accuracy = accuracy_custom(temp_arr1,batch_y[0,:,:,0])
            #
            # print "Step = " + str(step) + "  |  Accuracy = " + str(my_accuracy)
            print "Step = " + str(step) + "  |  Accuracy = " + str(acc)
            # csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(my_accuracy) + ".csv"
            csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(acc) + ".csv"
            np.savetxt(csv_file, prediction, delimiter=",")

Answer:

As said in the comments, a good weight initialization is key to the success of a model:

  • Too high: the model will not learn and may produce NaN values
  • Too low: the model will learn very slowly, because the gradients will be too small (see vanishing gradients)

Good initializations are already provided in TensorFlow here (as a contrib module), feel free to use them.
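
(As a sketch of what that could look like with the question's weight dictionary, using the contrib Xavier initializers available in the TF 0.x releases this code targets; the variable names are only illustrative:)

import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer_conv2d

# Xavier scales the initial weights by the fan-in/fan-out of each layer,
# keeping activation variance roughly constant from layer to layer
cnn_weights = {
    'wc1' : tf.get_variable('wc1', shape=[5, 5, 1, 32],
                            initializer=xavier_initializer_conv2d()),
    # ... same pattern for 'wc2', 'wc3', 'wdc1', 'wdc2', 'wdc3'
}

# Alternative: keep tf.random_normal but shrink its stddev (the default is 1.0,
# which is far too large for a six-layer network like this one)
wc1_alt = tf.Variable(tf.random_normal([5, 5, 1, 32], stddev=0.1))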
