Music classification model always gives the same prediction

I am trying to do music classification with a CNN model in Keras, but it always returns the same wrong prediction. I am using songs from the GTZAN dataset.

import librosa
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import csv
# Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
# Keras
import keras
from keras import models
from keras import layers

# generating a dataset
header = 'filename chroma_stft rmse spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'
for i in range(1, 21):
    header += f' mfcc{i}'
header += ' label'
header = header.split()

file = open('data.csv', 'w', newline='')
with file:
    writer = csv.writer(file)
    writer.writerow(header)

genres = 'blues classical country disco hiphop jazz metal pop reggae rock'.split()
for g in genres:
    for filename in os.listdir(f'C:/Users/USER/Desktop/sem8/AI/project/gtzan.keras/data/genres/{g}'):
        songname = f'C:/Users/USER/Desktop/sem8/AI/project/gtzan.keras/data/genres/{g}/{filename}'
        y, sr = librosa.load(songname, mono=True, duration=30)
        chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
        rmse = librosa.feature.rms(y=y)
        spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
        spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
        rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
        zcr = librosa.feature.zero_crossing_rate(y)
        mfcc = librosa.feature.mfcc(y=y, sr=sr)
        to_append = f'{filename} {np.mean(chroma_stft)} {np.mean(rmse)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
        for e in mfcc:
            to_append += f' {np.mean(e)}'
        to_append += f' {g}'
        file = open('datatest3.csv', 'a', newline='')
        with file:
            writer = csv.writer(file)
            writer.writerow(to_append.split())

# reading dataset from csv
data = pd.read_csv('datatest3.csv')
data.head()
# Dropping unneccesary columns
data = data.drop(['filename'], axis=1)
data.head()
genre_list = data.iloc[:, -1]
encoder = LabelEncoder()
y = encoder.fit_transform(genre_list)
print(y)
# normalizing
scaler = StandardScaler()
X = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype=float))
# spliting of dataset into train and test dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# creating a model
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(X_train,
                    y_train,
                    epochs=20,
                    batch_size=128)
# calculate accuracy
test_loss, test_acc = model.evaluate(X_test, y_test)
print('test_acc: ', test_acc)
# predictions
predictions = model.predict(X_test)
np.argmax(predictions[0])

This is how I created the CSV file.

data = pd.read_csv('C:\\Users\\USER\\Desktop\\sem8\\AI\\project\\try\\datatest.csv')
data.head()
genre_list = data.iloc[:, -1]
encoder = LabelEncoder()
y = encoder.fit_transform(genre_list)
print(y)
# normalizing
scaler = StandardScaler()
X = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype=float))
# spliting of dataset into train and test dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# creating a model
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
history = model.fit(X_train,
                    y_train,
                    epochs=20,
                    batch_size=128)
# calculate accuracy
test_loss, test_acc = model.evaluate(X_test, y_test)
print('test_acc: ', test_acc)
# predictions
print(X_test[0])
predictions = model.predict(X_test)
print(np.argmax(predictions[0]))
# model.summary()
model.save("C:\\Users\\USER\\Desktop\\sem8\\AI\\project\\try\\mymodel.h5")
print("Saved model to disk")

Then I created and saved the model.

genres = {
    'metal': 0, 'disco': 1, 'classical': 2, 'hiphop': 3, 'jazz': 4,
    'country': 5, 'pop': 6, 'blues': 7, 'reggae': 8, 'rock': 9
}

def majority_voting(scores, dict_genres):
    preds = np.argmax(scores, axis=1)
    values, counts = np.unique(preds, return_counts=True)
    counts = np.round(counts / np.sum(counts), 2)
    votes = {k: v for k, v in zip(values, counts)}
    votes = {k: v for k, v in sorted(votes.items(), key=lambda item: item[1], reverse=True)}
    return [(get_genres(x, dict_genres), prob) for x, prob in votes.items()]

def get_genres(key, dict_genres):
    # Transforming data to help on transformation
    labels = []
    tmp_genre = {v: k for k, v in dict_genres.items()}
    return tmp_genre[key]

def prepare(filename):
    # y, sr = librosa.load(filename, mono=True, duration=30)
    # return y
    # signal, sr = librosa.load(filename, sr=None)
    y, sr = librosa.load(filename, mono=True, duration=30)
    chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
    rmse = librosa.feature.rms(y=y)
    spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
    spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
    rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
    zcr = librosa.feature.zero_crossing_rate(y)
    mfcc = librosa.feature.mfcc(y=y, sr=sr)
    to_append = f'{np.mean(chroma_stft)} {np.mean(rmse)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
    for e in mfcc:
        to_append += f' {np.mean(e)}'
    # Append the result to the data structure
    # features = get_features(signal, sr)
    song = pd.DataFrame([to_append.split()])
    return song

model = models.load_model("C:\\Users\\USER\\Desktop\\sem8\\AI\\project\\try\\mymodel.h5")
print(prepare("C:\\Users\\USER\\Desktop\\sem8\\AI\\project\\try\\song1.mp3"))
newsong = prepare("C:\\Users\\USER\\Desktop\\sem8\\AI\\project\\try\\song1.mp3")
song = (np.array(newsong)).reshape(26)
print(song.shape)
print(song)
prediction = model.predict(song)
votes = majority_voting(prediction, genres)
print("This song is a {} song".format(votes[0][0]))
print("most likely genres are: {}".format(votes[:3]))
print(prediction)

This is how I try to make a prediction, but I always get the same result, and as far as I can tell everything I am doing is correct.

This song is a hiphop song
most likely genres are: [('hiphop', 1.0)]
[[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]

Answer:

The problem is how the data is normalized at prediction time: the features of a new song must be scaled with the same StandardScaler that was fitted on the training data. As written, prepare() feeds the raw feature values straight into the network, while the network was trained on standardized inputs; the raw values sit far outside the training distribution, so the softmax saturates and every song is assigned the same class.
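A minimal sketch of one way to wire this in, assuming the scaler fitted in the training script is persisted next to the model with joblib (the scaler.pkl filename and the shortened paths are illustrative, not from the original post; prepare() is the feature-extraction function shown above):

import joblib
import numpy as np
from keras import models

# --- in the training script: fit the scaler once and keep it ---
# scaler = StandardScaler()
# X = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype=float))
# joblib.dump(scaler, 'scaler.pkl')   # save it next to mymodel.h5
# model.save('mymodel.h5')

# --- at prediction time: reuse the *same* scaler ---
model = models.load_model('mymodel.h5')
scaler = joblib.load('scaler.pkl')

newsong = prepare('song1.mp3')                         # prepare() from the question: 26 raw features
song = np.array(newsong, dtype=float).reshape(1, -1)   # shape (1, 26): one sample, 26 features
song = scaler.transform(song)                          # standardize with the training statistics

prediction = model.predict(song)
print(np.argmax(prediction[0]))

If the fitted scaler was not saved, an alternative is to rebuild it at prediction time by calling fit_transform on the same training CSV and then transform on the new song's features; what matters is that training and prediction use identical scaling statistics.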
