I have trained a LightGBM model and I would like to plot its learning curves. How can I do that? In Keras, for example, the History object returned after training holds the metrics, so I can plot them once training has finished. How is this task handled here?
My code is as follows:
import lightgbm as lgb
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support

def f_lgboost(data, params):
    model = lgb.LGBMClassifier(**params)
    X_train = data['X_train']
    y_train = data['y_train']
    X_dev = data['X_dev']
    y_dev = data['y_dev']
    X_test = data['X_test']
    # mark the categorical columns so LightGBM handles them natively
    categorical_feature = ['Ticker_code', 'Category_code']
    X_train[categorical_feature] = X_train[categorical_feature].astype('category')
    X_dev[categorical_feature] = X_dev[categorical_feature].astype('category')
    X_test[categorical_feature] = X_test[categorical_feature].astype('category')
    feature_name = X_train.columns.to_list()
    # evaluate on the dev set during training; stop if AUC has not improved for 20 rounds
    model.fit(X_train, y_train, eval_set=[(X_dev, y_dev)], eval_metric='auc',
              early_stopping_rounds=20, categorical_feature=categorical_feature,
              feature_name=feature_name)
    y_pred_train = model.predict_proba(X_train)[:, 1].ravel()
    y_pred_dev = model.predict_proba(X_dev)[:, 1].ravel()
    auc_train = roc_auc_score(y_train, y_pred_train)
    auc_dev = roc_auc_score(y_dev, y_pred_dev)
    precision, recall, fscore, support = precision_recall_fscore_support(
        y_dev, (y_pred_dev > 0.5).astype(int), beta=0.5)
    y_pred_test = model.predict_proba(X_test)[:, 1].ravel()
    print(f'auc_train: {auc_train}, auc_dev: {auc_dev}, precision: {precision}, recall: {recall}, fscore: {fscore}')
    Results = {
        'params': params,
        'data': data,
        'lg_boost_model': model,
        'y_pred_train': y_pred_train,
        'y_pred_dev': y_pred_dev,
        'y_pred_test': y_pred_test,
        'auc_train': auc_train,
        'auc_dev': auc_dev,
        'precision_dev': precision,
        'recall_dev': recall,
        'fscore_dev': fscore,
        'support_dev': support
    }
    return Results
Posted on 2020-03-07 08:10:31
With the scikit-learn API, the learning curves are available through the attribute lightgbm.LGBMModel.evals_result_. It contains the metrics computed on the datasets specified in the eval_set argument of the fit method (so you usually want to pass both the training and the validation set there). There is also a built-in plotting function, lightgbm.plot_metric, which accepts either model.evals_result_ or the model itself.
Here is a complete minimal example:
import lightgbm as lgb
import sklearn.datasets, sklearn.model_selection

# toy regression dataset, split into training and validation sets
X, y = sklearn.datasets.load_boston(return_X_y=True)
X_train, X_val, y_train, y_val = sklearn.model_selection.train_test_split(X, y, random_state=7054)

model = lgb.LGBMRegressor(objective='mse', seed=8798, num_threads=1)
# evaluating on both sets populates model.evals_result_ with one curve per set
model.fit(X_train, y_train, eval_set=[(X_val, y_val), (X_train, y_train)], verbose=10)

lgb.plot_metric(model)
This is the resulting plot:
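If you would rather build the figure yourself, much like plotting a Keras History object, you can read model.evals_result_ directly and plot it with matplotlib. Below is a minimal sketch, assuming model has been fitted with an eval_set as above; the dataset keys (such as 'training' or 'valid_0') depend on eval_names and your LightGBM version, so it simply iterates over whatever is present:

import matplotlib.pyplot as plt

# model.evals_result_ is a nested dict:
# {eval_set_name: {metric_name: [value per boosting round]}}
evals_result = model.evals_result_

fig, ax = plt.subplots()
for dataset_name, metrics in evals_result.items():
    for metric_name, values in metrics.items():
        ax.plot(range(1, len(values) + 1), values, label=f'{dataset_name} {metric_name}')
ax.set_xlabel('boosting round')
ax.set_ylabel('metric value')
ax.set_title('LightGBM learning curves')
ax.legend()
plt.show()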
https://stackoverflow.com/questions/60132246