I am trying to train a CNN on my own data. The data has shape (1224, 15, 23): 1224 is the number of samples, and each sample has shape (15, 23). The CNN is built with PyTorch.
I don't think there is a logic error, because Conv2d expects a 4-dimensional tensor and I feed the input in (batch, channel, x, y) format.
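(For clarity, a minimal sketch of how the channel dimension is added before training; the zero array is only a placeholder standing in for my real data:)

import numpy as np

# Placeholder standing in for the real (1224, 15, 23) data array.
train_img = np.zeros((1224, 15, 23), dtype=np.float32)

# Insert a channel axis so each sample becomes (1, 15, 23),
# giving the full array the (batch, channel, x, y) layout Conv2d expects.
train_img = train_img[:, None, :, :]
print(train_img.shape)  # (1224, 1, 15, 23)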
When I create an instance of the Net class, I get this error:
TypeError: argument 0 is not a Variable
I have been using PyTorch for half a year, but this is the first time I have seen this error and I am still confused.
Here is my code.
class Net(nn.Module):
    def __init__(self, n):
        super(Net, self).__init__()
        self.conv = nn.Sequential(nn.Conv2d(1, 32, kernel_size=3, stride=1),
                                  nn.ReLU(),
                                  nn.Conv2d(32, 64, kernel_size=3, stride=1),
                                  nn.ReLU(),
                                  nn.Conv2d(64, 64, kernel_size=3, stride=1),  # 64 x 9 x 17
                                  nn.ReLU()
                                  )
        conv_out_size = self._get_conv_out(input_shape)
        self.fc = nn.Sequential(nn.Linear(64 * 9 * 17, 128),
                                nn.ReLU(),
                                nn.Linear(128, n)
                                )

    def _get_conv_out(self, shape):
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def forward(self, x):
        conv_out = self.conv(x).view(x.size()[0], -1)
        return self.fc(conv_out)
if __name__ == '__main__':
    num_epochs = 1
    num_classes = 2
    input_shape = train_img[0].shape  # 1, 15, 23
    net = Net(num_classes)
    iteration = 51
    BATCH_SIZE = 24
    LEARNING_RATE = 0.0001
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    loss_list = []
    batch_index = 0

    # train
    for epoch in range(num_epochs):
        for i in range(iteration):
            input_img = torch.FloatTensor(train_img[batch_index: batch_index + BATCH_SIZE])
            print(input_img.size())  # 24, 1, 15, 23
            outputs = net(input_img)
            loss = criterion(outputs, labels)
            loss_list.append(loss.item())

            # Backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
The error message is as follows:
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-179-0f6bc7588c29> in <module>
      4 input_shape = train_img[0].shape # 1, 15, 23
      5 
----> 6 net = Net(num_classes)
      7 iteration = 51
      8 BATCH_SIZE = 24

<ipython-input-178-8a68d4a0dc4a> in __init__(self, n)
     11                                   )
     12 
---> 13         conv_out_size = self._get_conv_out(input_shape)
     14         self.fc = nn.Sequential(nn.Linear(64 * 9 * 17, 128),
     15                                 nn.ReLU(),

<ipython-input-178-8a68d4a0dc4a> in _get_conv_out(self, shape)
     18 
     19     def _get_conv_out(self, shape):
---> 20         o = self.conv(torch.zeros(1, *shape))
     21         return int(np.prod(o.size()))
     22 

C:\DTools\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    355             result = self._slow_forward(*input, **kwargs)
    356         else:
--> 357             result = self.forward(*input, **kwargs)
    358         for hook in self._forward_hooks.values():
    359             hook_result = hook(self, input, result)

C:\DTools\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\modules\container.py in forward(self, input)
     65     def forward(self, input):
     66         for module in self._modules.values():
---> 67             input = module(input)
     68         return input
     69 

C:\DTools\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    355             result = self._slow_forward(*input, **kwargs)
    356         else:
--> 357             result = self.forward(*input, **kwargs)
    358         for hook in self._forward_hooks.values():
    359             hook_result = hook(self, input, result)

C:\DTools\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
    280     def forward(self, input):
    281         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 282                         self.padding, self.dilation, self.groups)
    283 
    284 

C:\DTools\Anaconda3\envs\tensorflow\lib\site-packages\torch\nn\functional.py in conv2d(input, weight, bias, stride, padding, dilation, groups)
     88                 _pair(0), groups, torch.backends.cudnn.benchmark,
     89                 torch.backends.cudnn.deterministic, torch.backends.cudnn.enabled)
---> 90     return f(input, weight, bias)
     91 
     92 

TypeError: argument 0 is not a Variable
Answer:
Your code actually runs as-is on PyTorch >= 0.4.1. My guess is that your PyTorch version is lower than 0.4, so you need to pass a Variable in the following line:

o = self.conv(torch.autograd.Variable(torch.zeros(1, *shape)))

In PyTorch >= 0.4.1 the concept of a Variable no longer exists, so a torch.FloatTensor can be passed directly to the network layers.
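For completeness, one way to make _get_conv_out run on both old and new versions (my own sketch, not part of the original fix) is to always wrap the dummy input in Variable; on >= 0.4 the wrapper simply returns an ordinary Tensor, so it is harmless there:

import numpy as np
import torch
from torch.autograd import Variable  # still importable on >= 0.4, where it is effectively a no-op

def _get_conv_out(self, shape):
    # shape is one sample's shape, e.g. (1, 15, 23) for this data.
    dummy = Variable(torch.zeros(1, *shape))  # Variable on < 0.4, plain Tensor on >= 0.4
    o = self.conv(dummy)
    return int(np.prod(o.size()))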