I am running a benchmark experiment on a task, using a nested resampling strategy (https://mlr-org.github.io/mlr-tutorial/devel/html/nested_resampling/index.html): each learner is built with an inner resampling strategy for tuning. For example, here is a bare-bones example for C50:
### C50 #############################################################
classif_c50 = makeLearner("classif.C50", predict.type = "prob")

## The wrappers are presented in reverse order of application
### One-Hot Encoding
classif_c50 = makeDummyFeaturesWrapper(classif_c50, method = "1-of-n")

### Missing Data Imputation
classif_c50 = makeImputeWrapper(classif_c50,
  classes = list(numeric = imputeConstant(-99999),
                 integer = imputeConstant(-99999),
                 factor  = imputeConstant("==Missing==")),
  dummy.type = "numeric", dummy.classes = c("numeric", "integer"))

##### Tuning #####
inner_resamp = makeResampleDesc("CV", iters = 3)
ctrl = makeTuneControlRandom(maxit = 3L)
hypss = makeParamSet(
  makeIntegerParam("trials", lower = 1, upper = 30),
  makeNumericParam("CF", lower = 0, upper = 1)
)
classif_c50 = makeTuneWrapper(classif_c50, resampling = inner_resamp,
  par.set = hypss, control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE)
### C50 #############################################################
I then create the benchmark experiment with an outer resampling strategy, as follows (bench_data is my data frame):
outer_resampling = makeFixedHoldoutInstance(train_indices, valid_indices, nrow(bench_data))
trainTask = makeClassifTask(id = training_task_name, data = bench_data,
  target = target_feature, positive = 1, fixup.data = "warn", check.data = TRUE)
res = benchmark(tasks = trainTask, learners = lrns, resampling = outer_resampling,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE)
I cannot find a way to extract the inner resampling results using the getBMR<> family of functions. Is there a method I am missing?
EDIT: Reproducible example
# Load required packages
library(mlr)
#library(dplyr)
library(parallelMap)
library(parallel)

# Tuning budget
iterations = 10L
cv_iters = 2

### classif.gamboost #################################################
classif_gamboost = makeLearner("classif.gamboost", predict.type = "prob")

## The wrappers are presented in reverse order of application
### One-Hot Encoding
classif_gamboost = makeDummyFeaturesWrapper(classif_gamboost, method = "1-of-n")

### Missing Data Imputation
classif_gamboost = makeImputeWrapper(classif_gamboost,
  classes = list(numeric = imputeConstant(-99999),
                 integer = imputeConstant(-99999),
                 factor  = imputeConstant("==Missing==")),
  dummy.type = "numeric", dummy.classes = c("numeric", "integer"))

##### Tuning #####
inner_resamp = makeResampleDesc("CV", iters = cv_iters)
ctrl = makeTuneControlRandom(maxit = iterations)
hypss = makeParamSet(
  makeDiscreteParam("baselearner", values = c("btree")), # "bols","btree","bbs"
  makeIntegerParam("dfbase", lower = 1, upper = 5),
  makeDiscreteParam("family", values = c("Binomial")),
  makeDiscreteParam("mstop", values = c(10, 50, 100, 250, 500, 1000))
)
classif_gamboost = makeTuneWrapper(classif_gamboost, resampling = inner_resamp,
  par.set = hypss, control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE)
### classif.gamboost #################################################

### Random Forest ####################################################
classif_rforest = makeLearner("classif.randomForestSRC", predict.type = "prob")

## The wrappers are presented in reverse order of application
### One-Hot Encoding
classif_rforest = makeDummyFeaturesWrapper(classif_rforest, method = "1-of-n")

### Missing Data Imputation
classif_rforest = makeImputeWrapper(classif_rforest,
  classes = list(numeric = imputeConstant(-99999),
                 integer = imputeConstant(-99999),
                 factor  = imputeConstant("==Missing==")),
  dummy.type = "numeric", dummy.classes = c("numeric", "integer"))

##### Tuning #####
inner_resamp = makeResampleDesc("CV", iters = cv_iters)
ctrl = makeTuneControlRandom(maxit = iterations)
hypss = makeParamSet(
  makeIntegerParam("mtry", lower = 1, upper = 30),
  makeIntegerParam("ntree", lower = 100, upper = 500),
  makeIntegerParam("nodesize", lower = 1, upper = 100)
)
classif_rforest = makeTuneWrapper(classif_rforest, resampling = inner_resamp,
  par.set = hypss, control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE)
### Random Forest ####################################################

trainData = mtcars
target_feature = "am"
training_task_name = "trainingTask"
trainData[[target_feature]] = as.factor(trainData[[target_feature]])
trainTask = makeClassifTask(id = training_task_name, data = trainData,
  target = target_feature, positive = 1, fixup.data = "warn", check.data = TRUE)

train_indices = 1:25
valid_indices = 26:32
outer_resampling = makeFixedHoldoutInstance(train_indices, valid_indices, nrow(trainData))

no_of_cores = detectCores()
parallelStartSocket(no_of_cores, level = c("mlr.tuneParams"), logging = TRUE)
lrns = list(classif_gamboost, classif_rforest)
res = benchmark(tasks = trainTask, learners = lrns, resampling = outer_resampling,
  measures = list(logloss, auc, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE)
parallelStop()

getBMRPerformances(res, as.df = TRUE)
Answer:
There are two ways to extract the optimization paths from the benchmark object:
By getting the benchmark tune results:
z <- getBMRTuneResults(res)
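For reference, the return value is a nested list keyed first by task id and then by wrapped learner id, with one TuneResult per outer resampling iteration (here exactly one, since the outer resampling is a fixed holdout). A quick way to inspect it, assuming the task id trainingTask from the reproducible example:

str(z, max.level = 2)      # task -> learner -> list of TuneResult objects
names(z$trainingTask)      # ids of the wrapped learners
z$trainingTask[[1]][[1]]   # TuneResult: chosen hyperparameters and inner-CV scores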
Then loop over the optimization path of each tune result and extract the hyperparameter effects with generateHyperParsEffectData:
lapply(z$trainingTask, function(x) generateHyperParsEffectData(x[[1]], partial.dep = TRUE))
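The resulting HyperParsEffectData object stores one row per tuning iteration in its $data slot, holding the sampled hyperparameter values plus the inner resampling measures (columns follow mlr's <measure>.test.mean naming). A small sketch for locating the best inner iteration of, say, the first learner:

hd = generateHyperParsEffectData(z$trainingTask[[1]][[1]], partial.dep = TRUE)
head(hd$data)                                # one row per random-search iteration
hd$data[which.max(hd$data$auc.test.mean), ]  # iteration with the best inner AUC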
Or, to get just the data:
lapply(z$trainingTask, function(x) generateHyperParsEffectData(x[[1]], partial.dep = TRUE)$data)
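If a single data frame across all learners is more convenient, you can tag and stack the traces. Since the hyperparameter columns differ between learners, only the shared columns can be stacked; the measure column names below are assumptions based on the measures used in the example:

paths = lapply(z$trainingTask, function(x)
  generateHyperParsEffectData(x[[1]], partial.dep = TRUE)$data)
shared = c("iteration", "auc.test.mean", "logloss.test.mean")  # assumed columns
all_paths = do.call(rbind, lapply(names(paths), function(id)
  cbind(learner = id, paths[[id]][, shared])))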
Or, slightly modifying @Giuseppe's suggestion from the comments, by getting the BMR models and then extracting the tune results from them:
models <- getBMRModels(res, drop = TRUE)
tune.result = lapply(models, function(x) getTuneResult(x[[1]]))
lapply(tune.result, function(x) as.data.frame(x$opt.path))
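Note that a TuneResult also exposes the winning configuration directly via $x (the selected hyperparameters) and $y (their inner-CV performance), so if you only need the chosen settings rather than the full search trace, a shorter route is:

lapply(tune.result, function(x) x$x)  # best hyperparameter setting per learner
lapply(tune.result, function(x) x$y)  # inner-CV performance of that setting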