RuntimeError: expected scalar type Long but found Float (PyTorch)

I have tried many times to fix this, and I also used the example code from functional.py, but I got the same error at the loss computation. How can I fix it?

My imports and code are as follows:

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
import matplotlib
import pandas as pd
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
import warnings
import os
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# the whole CSV (including the label column) is read as float32
train = pd.read_csv("train.csv", dtype=np.float32)

targets_numpy = train.label.values
features_numpy = train.loc[:, train.columns != "label"].values / 255  # normalization

features_train, features_test, targets_train, targets_test = train_test_split(
    features_numpy, targets_numpy, test_size=0.2, random_state=42)

featuresTrain = torch.from_numpy(features_train)
targetsTrain = torch.from_numpy(targets_train)

featuresTest = torch.from_numpy(features_test)
targetsTest = torch.from_numpy(targets_test)

batch_size = 100
n_iterations = 10000
num_epochs = n_iterations / (len(features_train) / batch_size)
num_epochs = int(num_epochs)

train = torch.utils.data.TensorDataset(featuresTrain, targetsTrain)
test = torch.utils.data.TensorDataset(featuresTest, targetsTest)
print(type(train))

train_loader = DataLoader(train, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test, batch_size=batch_size, shuffle=False)
print(type(train_loader))

plt.imshow(features_numpy[226].reshape(28, 28))
plt.axis("off")
plt.title(str(targets_numpy[226]))
plt.show()

class ANNModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(ANNModel, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.tanh2 = nn.Tanh()
        self.fc4 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):  # the layers are connected in forward
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.tanh2(out)
        out = self.fc4(out)
        return out

input_dim = 28 * 28
hidden_dim = 150
output_dim = 10

model = ANNModel(input_dim, hidden_dim, output_dim)

error = nn.CrossEntropyLoss()

learning_rate = 0.02
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

count = 0
loss_list = []
iteration_list = []
accuracy_list = []

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        train = Variable(images.view(-1, 28 * 28))
        labels = Variable(labels)
        #print(labels)
        #print(outputs)
        optimizer.zero_grad()
        # forward propagation
        outputs = model(train)
        #outputs=torch.randn(784,10,requires_grad=True)
        ##labels=torch.randn(784,10).softmax(dim=1)
        loss = error(outputs, labels)
        loss.backward()
        optimizer.step()
        count += 1
        if count % 50 == 0:
            correct = 0
            total = 0
            for images, labels in test_loader:
                test = Variable(images.view(-1, 28 * 28))
                outputs = model(test)
                predicted = torch.max(outputs.data, 1)[1]  # logic??? -> index of the max logit, i.e. the predicted class
                total += len(labels)
                correct += (predicted == labels).sum()
            accuracy = 100 * correct / float(total)
            loss_list.append(loss.data)
            iteration_list.append(count)
            accuracy_list.append(accuracy)
            if count % 500 == 0:
                print('Iteration: {}  Loss: {}  Accuracy: {} %'.format(count, loss.data, accuracy))

The error message is:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-9-9e53988ad250> in <module>()
     26     #outputs=torch.randn(784,10,requires_grad=True)
     27     ##labels=torch.randn(784,10).softmax(dim=1)
---> 28     loss=error(outputs,labels)
     29
     30

2 frames
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
   2844     if size_average is not None or reduce is not None:
   2845         reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2846     return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
   2847
   2848

RuntimeError: expected scalar type Long but found Float

Answer:

It looks like the tensor `labels` is a FloatTensor, while nn.CrossEntropyLoss expects its target to be a LongTensor. This means you should check the dtype of `labels`, and if that is the case, cast it from FloatTensor to LongTensor like this:

loss=error(outputs,labels.long())
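
The labels end up as float32 because the entire CSV is read with pd.read_csv("train.csv", dtype=np.float32), so the label column is converted along with the pixel values. An alternative, sketched below under the assumption that the rest of the question's code stays unchanged, is to cast the targets to int64 once when the tensors are built, so every batch from the DataLoader already carries LongTensor labels:

# Alternative: cast the targets once, before building the TensorDatasets,
# so nn.CrossEntropyLoss always receives int64 (Long) class indices.
targetsTrain = torch.from_numpy(targets_train).long()  # was float32 from the CSV
targetsTest = torch.from_numpy(targets_test).long()

train = TensorDataset(featuresTrain, targetsTrain)
test = TensorDataset(featuresTest, targetsTest)

# Quick check inside the training loop:
# print(labels.dtype)  # should now show torch.int64 instead of torch.float32

Either way, the input features stay float32; only the class-index targets need to be int64, which is what the cross-entropy call in the traceback expects.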

