Accuracy per epoch in PyTorch

I have built a chatbot with PyTorch and want to display the accuracy at every training epoch. I am able to display the loss, but I don't know how to display the accuracy.

Here is my code:

from nltk_utils import tokenize, stem, bag_of_words
import json
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from model import NeuralNet
from torch.autograd import Variable

all_words = []
tags = []
xy = []
questionsP1 = []
questionsP2 = []
questionsP3 = []
questionsP4 = []
questionTag = {}

with open('new.json', encoding="utf8") as file:
    data = json.load(file)

for intent in data["intents"]:
    for proficiency in intent["proficiency"]:
        for questions in proficiency["questions"]:
            for responses in questions["responses"]:
                wrds = tokenize(responses)
                all_words.extend(wrds)
                xy.append((wrds, questions["tag"]))

            if questions["tag"] in tags:
                print(questions["tag"])
            if questions["tag"] not in tags:
                tags.append(questions["tag"])

            if proficiency["level"] == "P1":
                questionsP1.append(questions["question"])
                questionTag[questions["question"]] = questions["tag"]
            if proficiency["level"] == "P2":
                questionsP2.append(questions["question"])
                questionTag[questions["question"]] = questions["tag"]
            if proficiency["level"] == "P3":
                questionsP3.append(questions["question"])
                questionTag[questions["question"]] = questions["tag"]
            if proficiency["level"] == "P4":
                questionsP4.append(questions["question"])
                questionTag[questions["question"]] = questions["tag"]

ignore_words = ['?', '!', '.', ',']
all_words = [stem(x) for x in all_words if x not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))

X_train = []
y_train = []
for tokenized_response, tag in xy:
    bag = bag_of_words(tokenized_response, all_words)
    print(bag)
    X_train.append(bag)
    label = tags.index(tag)
    y_train.append(label)

print(y_train)

X_train = np.array(X_train)
y_train = np.array(y_train)

class ChatDataset(Dataset):
    def __init__(self):
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = y_train

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.n_samples

# Hyperparameters
batch_size = 8
hidden_size = 8
output_size = len(tags)
input_size = len(X_train[0])
learning_rate = 0.001
num_epochs = 994

dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=2)

device = 'cpu'
model = NeuralNet(input_size, hidden_size, output_size).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        # Forward
        outputs = model(words)
        loss = criterion(outputs, labels)

        # Backward and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f'epoch {epoch + 1}/{num_epochs}, loss={loss.item():.4f}')

print(f'final loss, loss={loss.item():.4f}')

data = {
    "model_state": model.state_dict(),
    "input_size": input_size,
    "output_size": output_size,
    "hidden_size": hidden_size,
    "all_words": all_words,
    "tags": tags,
}

FILE = "data.pth"
torch.save(data, FILE)

with open('new.json', 'r') as f:
    intents = json.load(f)

bot_name = "Sam"

while True:
    sentence = input("You: ")
    if sentence == 'quit':
        break

    sentence = tokenize(sentence)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X)

    output = model(X)

    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    print(tag)

    probs = torch.softmax(output, dim=1)
    probs = probs[0][predicted.item()]
    print(probs.item())

    if probs.item() > 0.75:
        for intent in intents["intents"]:
            for proficiency in intent["proficiency"]:
                for questions in proficiency["questions"]:
                    if questions["tag"] == tag:
                        print(f'{bot_name}: {questions["question"]}')
    else:
        print(f'{bot_name}: Probability Too Low')

print(f'Training Complete. File saved to {FILE}')

My chatbot behaves somewhat erratically… I am trying to map the answers to the correct questions, and any help would be appreciated.


Answer:

According to your code, labels contains the indices that should have the largest values in outputs for a sample to be counted as a correct prediction.
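To make that relationship concrete, here is a small, self-contained illustration (the numbers are made up purely for demonstration): each row of outputs scores every class, and a prediction counts as correct when the argmax of that row equals the class index stored in labels.

import torch

# Two samples, three classes; the values are arbitrary illustration data.
outputs = torch.tensor([[0.1, 2.0, 0.3],
                        [1.5, 0.2, 0.1]])
labels = torch.tensor([1, 0])                 # ground-truth class indices

predictions = outputs.argmax(dim=-1)          # tensor([1, 0])
print((predictions == labels).sum().item())   # 2 -> both predictions are correct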

So, to compute the validation accuracy:

correct = 0
total = 0

model.eval()  # switch to evaluation mode (disables dropout, etc.)
with torch.no_grad():  # no gradients needed for evaluation
    for (words, labels) in validation_loader:
        words = words.to(device)
        labels = labels.to(device)
        total += labels.shape[0]

        outputs = model(words)
        # a sample is correct when the highest-scoring class matches its label
        correct += torch.sum(labels == outputs.argmax(dim=-1)).item()

accuracy = correct / total
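The loop above assumes a separate validation_loader, which the posted script does not define. Since the question asks for accuracy per training epoch, a minimal sketch of how the same bookkeeping could be folded into the existing training loop (reusing train_loader, model, criterion, optimizer, device and num_epochs from the question) might look like this; note that it measures training accuracy, which is usually optimistic compared to accuracy on held-out data:

for epoch in range(num_epochs):
    correct = 0
    total = 0
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        # Forward
        outputs = model(words)
        loss = criterion(outputs, labels)

        # Backward and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate per-epoch accuracy statistics
        total += labels.shape[0]
        correct += torch.sum(labels == outputs.argmax(dim=-1)).item()

    accuracy = correct / total
    print(f'epoch {epoch + 1}/{num_epochs}, loss={loss.item():.4f}, accuracy={accuracy:.4f}')

Because the counts are collected while the weights are still being updated, this figure lags slightly behind the model's end-of-epoch accuracy; rerunning the evaluation loop above on a held-out split after each epoch gives a more faithful estimate.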

