So I built a small example with scikit-learn's support vector classifier (svm.SVC), combined with a Pipeline and GridSearchCV. After fitting and evaluating, I get a rather curious ROC curve: it bends exactly once.
I had expected something that looks more like an actual curve. Can anyone explain this behavior? Here is a minimal working example:
# Imports
import sklearn as skl
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import metrics
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.externals.joblib import Memory  # on scikit-learn >= 0.23: from joblib import Memory


def plot_roc(y_test, y_pred):
    fpr, tpr, thresholds = skl.metrics.roc_curve(y_test, y_pred, pos_label=1)
    roc_auc = skl.metrics.auc(fpr, tpr)
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange', lw=lw,
             label='ROC curve (area = {0:.2f})'.format(roc_auc))
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()


# Generate a random dataset
X, y = skl.datasets.make_classification(n_samples=1400, n_features=11, n_informative=5,
                                        n_classes=2, weights=[0.94, 0.06], flip_y=0.05,
                                        random_state=42)
X_train, X_test, y_train, y_test = skl.model_selection.train_test_split(
    X, y, test_size=0.3, random_state=42)

# Instantiate the classifier
normer = preprocessing.Normalizer()
svm1 = svm.SVC(probability=True, class_weight={1: 10})

cached = mkdtemp()
memory = Memory(cachedir=cached, verbose=3)
pipe_1 = Pipeline(steps=[('normalization', normer), ('svm', svm1)], memory=memory)

cv = skl.model_selection.KFold(n_splits=5, shuffle=True, random_state=42)
param_grid = [
    {"svm__kernel": ["linear"], "svm__C": [1, 10, 100, 1000]},
    {"svm__kernel": ["rbf"], "svm__C": [1, 10, 100, 1000],
     "svm__gamma": [0.001, 0.0001]},
]
grd = GridSearchCV(pipe_1, param_grid, scoring='roc_auc', cv=cv)

# Train
y_pred = grd.fit(X_train, y_train).predict(X_test)
rmtree(cached)

# Evaluate
confmatrix = skl.metrics.confusion_matrix(y_test, y_pred)
print(confmatrix)
plot_roc(y_test, y_pred)
Answer:
Your plot_roc(y_test, y_pred) function internally calls roc_curve.
From the documentation of roc_curve:
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers).
So it works best when y_pred holds probabilities of the positive class rather than hard predicted classes. With hard 0/1 labels there is only one possible threshold between the two values, which is exactly why your curve shows a single bend.
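To see where the single bend comes from, here is a minimal sketch with made-up toy labels and scores (both arrays are hypothetical, not taken from your pipeline): with hard predictions, roc_curve can only place one threshold between the two distinct values and therefore returns just three points, while continuous scores yield one point per useful threshold.

# Toy example (hypothetical data) contrasting hard predictions with continuous scores
import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1, 0, 1])
hard_pred = np.array([0, 1, 1, 0, 0, 1])               # what predict() returns
score_pred = np.array([0.1, 0.6, 0.8, 0.4, 0.2, 0.9])  # what predict_proba()[:, 1] returns

fpr_h, tpr_h, thr_h = roc_curve(y_true, hard_pred)
fpr_s, tpr_s, thr_s = roc_curve(y_true, score_pred)

print(thr_h)  # only 3 thresholds -> (0,0), one corner, (1,1): the single bend
print(thr_s)  # one threshold per distinct useful score -> a proper curve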
Try the following instead:
y_pred = grd.fit(X_train, y_train).predict_proba(X_test)[:,1]
and then pass that y_pred to your plotting function.
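For completeness, here is a sketch of the evaluation step with that change applied. It reuses grd, X_test, y_test and skl from your example and assumes grd has already been fitted; the commented-out decision_function line is an alternative score that also works with roc_curve (and does not require probability=True).

# Continuous scores for the ROC curve
y_score = grd.predict_proba(X_test)[:, 1]   # probability of the positive class
# y_score = grd.decision_function(X_test)   # alternative: SVM margin scores

plot_roc(y_test, y_score)

# Hard class predictions remain the right input for the confusion matrix
y_pred = grd.predict(X_test)
print(skl.metrics.confusion_matrix(y_test, y_pred))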