I am training CaffeNet with multi-label data, but the loss does not decrease during training. I am now trying to check whether backward() works correctly, using the following code to verify that gradients exist.
    import sys  # needed for sys.path.append below
    import os   # needed for os.path.isfile / os.makedirs below
    import os.path as osp
    import numpy as np
    import matplotlib.pyplot as plt
    from pprint import pprint
    from copy import copy

    %matplotlib inline
    plt.rcParams['figure.figsize'] = (6, 6)

    caffe_root = '../'  # this file should be run from {caffe_root}/examples
    sys.path.append(caffe_root + 'python')
    import caffe  # If you see "No module named _caffe", either you have not built pycaffe or your path is wrong.
    from caffe import layers as L, params as P  # shortcuts for defining the net prototxt

    sys.path.append("pycaffe/layers")  # the data layers we will use are in this directory
    sys.path.append("pycaffe")         # the tools file is in this folder
    import tools  # this contains some tools that we need

    # set the data root directory, e.g.:
    peta_root = osp.join('/root/data/PETA/')

    # these are the PASCAL classes, we'll need them later.
    #classes = np.asarray(['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
    #                      'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
    #                      'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'])

    # make sure we have the CaffeNet weights downloaded.
    if not os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
        print("Downloading pre-trained CaffeNet model...")
        !../scripts/download_model_binary.py ../models/bvlc_reference_caffenet

    # initialize caffe in GPU mode
    caffe.set_mode_gpu()
    caffe.set_device(1)

    # helper function for common structures
    def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
        conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                             num_output=nout, pad=pad, group=group)  # , weight_filler=dict(type='xavier'))
        return conv, L.ReLU(conv, in_place=True)

    # another helper function
    def fc_relu(bottom, nout):
        fc = L.InnerProduct(bottom, num_output=nout)
        return fc, L.ReLU(fc, in_place=True)

    # yet another helper function
    def max_pool(bottom, ks, stride=1):
        return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

    # main netspec wrapper
    def caffenet_multilabel(data_layer_params, datalayer):
        # set up the python data layer
        n = caffe.NetSpec()
        n.data, n.label = L.Python(module='peta_multilabel_datalayers', layer=datalayer,
                                   ntop=2, param_str=str(data_layer_params))

        # the net itself
        n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4)
        n.pool1 = max_pool(n.relu1, 3, stride=2)
        n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
        n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2)
        n.pool2 = max_pool(n.relu2, 3, stride=2)
        n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
        n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1)
        n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2)
        n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2)
        n.pool5 = max_pool(n.relu5, 3, stride=2)
        n.fc6, n.relu6 = fc_relu(n.pool5, 4096)
        n.drop6 = L.Dropout(n.relu6, in_place=True)
        n.fc7, n.relu7 = fc_relu(n.drop6, 4096)
        n.drop7 = L.Dropout(n.relu7, in_place=True)
        n.score = L.InnerProduct(n.drop7, num_output=2)
        n.loss = L.SigmoidCrossEntropyLoss(n.score, n.label)

        return str(n.to_proto())

    workdir = './peta_multilabel_with_datalayer'
    if not os.path.isdir(workdir):
        os.makedirs(workdir)

    solverprototxt = tools.CaffeSolver(trainnet_prototxt_path=osp.join(workdir, "trainnet.prototxt"),
                                       testnet_prototxt_path=osp.join(workdir, "valnet.prototxt"))
    solverprototxt.sp['display'] = "1"
    solverprototxt.sp['base_lr'] = "0.0001"
    solverprototxt.write(osp.join(workdir, 'solver.prototxt'))

    # write the train net.
    with open(osp.join(workdir, 'trainnet.prototxt'), 'w') as f:
        # provide parameters to the data layer as a python dictionary. Easy as pie!
        data_layer_params = dict(batch_size=128, im_shape=[227, 227], split='train', peta_root=peta_root)
        f.write(caffenet_multilabel(data_layer_params, 'PetaMultilabelDataLayerSync'))

    # write the validation net.
    with open(osp.join(workdir, 'valnet.prototxt'), 'w') as f:
        data_layer_params = dict(batch_size=128, im_shape=[227, 227], split='val', peta_root=peta_root)
        f.write(caffenet_multilabel(data_layer_params, 'PetaMultilabelDataLayerSync'))

    solver = caffe.SGDSolver(osp.join(workdir, 'solver.prototxt'))
    #solver.net.copy_from(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
    solver.test_nets[0].share_with(solver.net)
    #solver.step(1)
    solver.net.top_names
    solver.net.backward()
    solver.step(1)
    print solver.net.params['fc6'][0].data[...]
    print solver.net.blobs['fc6'].data[...]
    print solver.net.blobs['fc6'].diff[...]
However, the printed gradients are all zeros, and the weights are not updated at all:
    [[ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     ...,
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]]
    [[ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     ...,
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]]
    [[ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     ...,
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]
     [ 0.  0.  0. ...,  0.  0.  0.]]
Does anyone know what is going on here?
Answer:
Look at the values of your params: they are all zeros. You did not define a filler for your layers, so the weights were left at zero.
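One quick way to confirm this (a minimal diagnostic sketch, reusing the solver object from your script) is to print the largest absolute weight of each layer; with no filler defined, every layer reports 0.0:

    # hypothetical check: largest absolute weight per layer (all 0.0 without a filler)
    for name, param in solver.net.params.items():
        print name, abs(param[0].data).max()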
Define a random initializer for the weights and run again.
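The commented-out weight_filler in your conv_relu helper hints at exactly this. Here is a sketch of the two helpers with explicit fillers; the specific choices (xavier for the convolutions, a small gaussian for the fully connected layers, roughly following the reference CaffeNet settings) are my assumption:

    # helpers with explicit fillers, so weights start at non-zero random values
    def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
        conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                             num_output=nout, pad=pad, group=group,
                             weight_filler=dict(type='xavier'),
                             bias_filler=dict(type='constant', value=0.1))
        return conv, L.ReLU(conv, in_place=True)

    def fc_relu(bottom, nout):
        fc = L.InnerProduct(bottom, num_output=nout,
                            weight_filler=dict(type='gaussian', std=0.005),
                            bias_filler=dict(type='constant', value=0.1))
        return fc, L.ReLU(fc, in_place=True)

Alternatively, un-commenting the solver.net.copy_from(...) line in your script would initialize the weights from the pre-trained CaffeNet model instead.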
Also, running backward() on its own is meaningless: the loss is computed during forward(), so without it there is no loss information for the backward pass to propagate through the net. Call forward() before backward() to complete a full forward-backward pass.
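A minimal sketch of the corrected check (again assuming the solver object from your script):

    # run a complete forward-backward pass before inspecting gradients
    solver.net.forward()                      # computes the loss at n.loss
    solver.net.backward()                     # propagates gradients back through the net
    print solver.net.blobs['fc6'].diff[...]   # non-zero once fillers are in place

Note that solver.step(1) already performs a forward pass, a backward pass, and a weight update in one call, so it is also a valid way to exercise the gradients.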