Using the code below, I create 10 instances of training data, each with shape (3, 100, 10): a 100 x 10 grid of pixels with 3 channels per pixel, to simulate RGB values.
I have set up this model to classify only between the labels 1 and 0.
When I apply the softmax layer, I get the following error:
RuntimeError: Expected tensor for argument #1 'input' to have the same dimension as tensor for 'result'; but 4 does not equal 3 (while checking arguments for cudnn_convolution)
I am on version 0.4.0 (checked via print(torch.__version__)). How do I set up the dimensions for the softmax layer correctly? I thought my dimensions were already correct?
%reset -f
import os
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from sklearn.preprocessing import scale
from random import randint

batch_size_value = 10
train_dataset = []
num_instances = 10

# Class 1: draw 3000 values per instance and reshape to (3, 100, 10),
# i.e. a 100 x 10 grid of pixels with 3 channels to simulate RGB values
mu, sigma = 0, 0.1  # mean and standard deviation
for i in range(num_instances):
    image_x = np.random.normal(mu, sigma, 3000).reshape((3, 100, 10))
    train_dataset.append(image_x)

# Class 0: same shape, drawn from a clearly separated distribution
mu, sigma = 100, 0.80  # mean and standard deviation
for i in range(num_instances):
    image_x = np.random.normal(mu, sigma, 3000).reshape((3, 100, 10))
    train_dataset.append(image_x)

labels_1 = [1 for i in range(num_instances)]
labels_0 = [0 for i in range(num_instances)]
labels = labels_1 + labels_0
print(labels)

x2 = torch.tensor(train_dataset).float()
y2 = torch.tensor(labels).long()
my_train2 = data_utils.TensorDataset(x2, y2)
train_loader2 = data_utils.DataLoader(my_train2, batch_size=batch_size_value, shuffle=False)

# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)

# Hyperparameters
num_epochs = 10
num_classes = 2
batch_size = 5
learning_rate = 0.001

# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.fc1 = nn.Linear(864, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)

model = ConvNet().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader2)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader2):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i % 10) == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
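For reference, nn.Conv2d only accepts 4-dimensional (N, C, H, W) input, which matches the "4 does not equal 3" wording of the error. A minimal diagnostic sketch (hypothetical, reusing the train_loader2 and x2 defined above) to check what the model actually receives:

images, labels = next(iter(train_loader2))
print(images.shape)            # expected: torch.Size([10, 3, 100, 10]), i.e. (N, C, H, W)

# If a single 3D sample is ever passed directly, add a batch dimension first:
single = x2[0]                 # shape (3, 100, 10) -- 3D, rejected by Conv2d
single = single.unsqueeze(0)   # shape (1, 3, 100, 10) -- valid 4D input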
Update:
Removing the following lines:
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
solved the problem of the dimension being smaller than the kernel.
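The size arithmetic confirms this. For a stride-1 convolution the output size is input - kernel + 1, and each 2 x 2 max-pool halves it (rounding down). A short sketch tracing the original layers on one (3, 100, 10) sample, reusing the imports from the code above:

x = torch.randn(1, 3, 100, 10)        # one sample as (N, C, H, W)
out = F.relu(nn.Conv2d(3, 6, 5)(x))   # (1, 6, 96, 6): 100-5+1=96, 10-5+1=6
out = F.max_pool2d(out, 2)            # (1, 6, 48, 3): 96/2=48, 6/2=3
print(out.shape)
# A second 5x5 convolution cannot fit here: the width is now 3, smaller than
# the kernel, which is why deleting conv2/max_pool2d made the error disappear.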
Answer:
Your code has one structural problem and one bug. The structural problem: after the first convolution and pooling, the feature map is too small for a second 5 x 5 kernel. The bug: forward() never returns its output. Here is the fix.
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=1)  # kernel_size changed from 5 to 1
        self.fc1 = nn.Linear(384, 120)                # fully connected input changed from 864 to 384
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out  # do not forget to return the output
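The 384 comes from the same size arithmetic: (3, 100, 10) -> conv1 (5 x 5) -> (6, 96, 6) -> pool -> (6, 48, 3) -> conv2 (1 x 1) -> (16, 48, 3) -> pool -> (16, 24, 1), and 16 * 24 * 1 = 384. A quick smoke test of the fixed model (a hypothetical check, reusing the imports and num_classes from above):

model = ConvNet()
dummy = torch.randn(10, 3, 100, 10)   # one batch of 10 samples, (N, C, H, W)
logits = model(dummy)
print(logits.shape)                   # torch.Size([10, 2]): one score per class

Note that nn.CrossEntropyLoss already applies log-softmax internally, so the model should return raw logits; no explicit softmax layer is needed during training.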