python - Different result metrics from evaluation and prediction with hyperopt


This is my first time tuning the hyperparameters of XGBoost. My plan is to use hyperopt to find the best hyperparameters.

def obj(params):
    xgb_model = xgb.XGBRegressor(
        n_estimator=params['n_estimator'],
        learning_rate=params['learning_rate'],
        booster=params['booster'],
        gamma=params['gamma'],
        max_depth=int(params['max_depth']),
        min_child_weight=int(params['min_child_weight']),
        colsample_bytree=int(params['colsample_bytree']),
        reg_lambda=params['reg_lambda'],
        reg_alpha=params['reg_alpha']
    )
    evaluation = [(X_train, Y_train), (X_test, Y_test)]
    xgb_model.fit(X_train, Y_train,
                  eval_set=evaluation,
                  verbose=False)
    pred = xgb_model.predict(X_test)
    r2_value = r2_score(y_true=Y_test, y_pred=pred)
    mape = MAPE(pred, Y_test)
    print('R2-Value:', r2_value)
    print('MAPE Value :', mape)
    print(xgb_model.get_params())
    return {'loss': -r2_value, 'status': STATUS_OK, 'model': xgb_model}

params = {'n_estimator': 450,
          'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(1)),
          'booster': hp.choice('booster', ['gbtree', 'dart', 'gblinear']),
          'reg_lambda': hp.uniform('reg_lambda', 0, 2.5),
          'reg_alpha': hp.uniform('reg_alpha', 0, 2.5),
          'colsample_bytree': hp.uniform('colsample_bytree', 0, 1),
          'gamma': hp.uniform('gamma', 0, 10),
          'max_depth': hp.quniform('max_depth', 3, 10, 1),
          'min_child_weight': hp.quniform('min_child_weight', 0, 10, 1),
          'seed': 0}

trials = Trials()
best_hyperparams = fmin(fn=obj,
                        space=params,
                        algo=tpe.suggest,
                        max_evals=100,
                        trials=trials)
I display the loss based on the R2 score, along with the MAPE. After running the code I got the best loss value.
But when I use those hyperparameters in a new model, I get different MAPE and R2 results than before.
model = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
                         colsample_bynode=1, colsample_bytree=0, gamma=4.478273315667381,
                         importance_type='gain', learning_rate=0.49914654574533074,
                         max_delta_step=0, max_depth=8, min_child_weight=4, missing=None,
                         n_estimator=450, n_estimators=100, n_jobs=1, nthread=None,
                         objective='reg:linear', random_state=0,
                         reg_alpha=1.4575139694808485, reg_lambda=1.7326686243254332,
                         scale_pos_weight=1, seed=None, silent=None, subsample=1,
                         verbosity=1)

model.fit(X_train, Y_train)
model.predict(X_test)
Can you give me an explanation of why this happens?

Best Answer

To make the XGBoost results reproducible, you need to set n_jobs=1 in addition to fixing the random seed; see this answer and the code below.

import numpy as np
import xgboost as xgb
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_percentage_error
from hyperopt import hp, fmin, tpe, Trials, STATUS_OK

# generate the data
X, y = make_regression(random_state=0)

# split the data
X_train, X_test, Y_train, Y_test = train_test_split(X, y, random_state=0)

# define the model
def XGBModel(params):

    return xgb.XGBRegressor(
        n_estimator=params['n_estimator'],
        learning_rate=params['learning_rate'],
        booster=params['booster'],
        gamma=params['gamma'],
        max_depth=int(params['max_depth']),
        min_child_weight=int(params['min_child_weight']),
        colsample_bytree=int(params['colsample_bytree']),
        reg_lambda=params['reg_lambda'],
        reg_alpha=params['reg_alpha'],
        random_state=0,  # fix the random seed
        n_jobs=1,  # set the number of parallel jobs equal to one
    )

# define the objective function
def obj(params):

    # fit the model
    xgb_model = XGBModel(params)
    xgb_model.fit(X_train, Y_train, eval_set=[(X_train, Y_train), (X_test, Y_test)], verbose=False)
    pred = xgb_model.predict(X_test)

    # score the model
    r2_value = r2_score(y_true=Y_test, y_pred=pred)
    mape = mean_absolute_percentage_error(y_true=Y_test, y_pred=pred)

    return {'loss': - r2_value, 'mape': mape, 'status': STATUS_OK, 'model': xgb_model}

# define the hyperparameter space
params = {
    'n_estimator': 1000,
    'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(1)),
    'booster': hp.choice('booster', ['gbtree', 'dart', 'gblinear']),
    'reg_lambda': hp.uniform('reg_lambda', 0, 2.5),
    'reg_alpha': hp.uniform('reg_alpha', 0, 2.5),
    'colsample_bytree': hp.uniform('colsample_bytree', 0, 1),
    'gamma': hp.uniform('gamma', 0, 10),
    'max_depth': hp.quniform('max_depth', 3, 10, 1),
    'min_child_weight': hp.quniform('min_child_weight', 0, 10, 1),
}

# tune the hyperparameters
trials = Trials()
best_hyperparams = fmin(fn=obj, space=params, algo=tpe.suggest, max_evals=10, trials=trials, rstate=np.random.RandomState(0))
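# note: newer hyperopt releases (0.2.7 and later) expect a NumPy Generator for
# rstate, e.g. rstate=np.random.default_rng(0); RandomState works on older versions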

# extract the best scores
print('R2-Value:', - trials.best_trial['result']['loss'])
print('MAPE Value :', trials.best_trial['result']['mape'])
# R2-Value: 0.5388751508268976
# MAPE Value : 4.700583518398514

# extract the best model
best_model = trials.best_trial['result']['model']

# fit the best model
best_model.fit(X_train, Y_train, eval_set=[(X_train, Y_train), (X_test, Y_test)], verbose=False)
pred = best_model.predict(X_test)

# score the best model
r2_value = r2_score(y_true=Y_test, y_pred=pred)
mape = mean_absolute_percentage_error(y_true=Y_test, y_pred=pred)

print('R2-Value:', r2_value)
print('MAPE Value :', mape)
# R2-Value: 0.5388751508268976
# MAPE Value : 4.700583518398514
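One more pitfall when copying the tuned hyperparameters into a new model by hand: fmin returns raw search-space points rather than finished values. An hp.choice entry comes back as the index of the chosen option (e.g. 0 instead of 'gbtree'), and hp.quniform comes back as a float. Below is a minimal sketch, assuming the params space and XGBModel helper defined above, that resolves the point with hyperopt's space_eval before rebuilding the model:

from hyperopt import space_eval

# map the raw point returned by fmin back to actual parameter values:
# hp.choice entries are returned as indices, hp.quniform entries as floats
resolved_params = space_eval(params, best_hyperparams)
print(resolved_params)

# rebuild the model with exactly the same casts as in the objective function
manual_model = XGBModel(resolved_params)
manual_model.fit(X_train, Y_train)

Rebuilding through the same helper keeps the int() casts and the fixed random_state/n_jobs settings consistent with what the tuner actually evaluated.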

Regarding "python - Different result metrics from evaluation and prediction with hyperopt", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/69521240/
