Goal: compare several models
In this comparison, XGBoost decisively beats RandomForest, GBDT, plain decision trees, and SVM, and stacking LR on top (XGBoost + LR) raises accuracy further still. XGBoost is currently the ceiling among tree models: of all the tree-based methods tested here it achieves the highest accuracy, and its runtime is quite acceptable, which is why it is the default choice for structured-data competitions. The experiment below also shows the additional accuracy gain from XGBoost + LR.
What we compare:

1. AUC on the test set
2. End-to-end time to train and predict
3. The strengths and weaknesses of each algorithm
Final results (on 400,000 samples):
| Model | Test AUC | Runtime (s) |
| --- | --- | --- |
| XGBoost | 0.9688 | 7.5972 |
| XGBoost + LR | 0.9724 | 13.1655 |
| RF + LR | 0.92432 | 2.3115 |
| GBDT + LR | 0.96249 | 18.1669 |
| LR | 0.9337 | 0.3479 |
| SVM | 0.8703 | 1104.25 |
Observations:
- XGBoost is more accurate than GBDT and the other tree models, with acceptable runtime. If you want still higher accuracy, the XGBoost + LR stacking adds a further gain and is worth trying in data-mining competitions (a minimal sketch of the stacking recipe follows this list).
- LR trains and predicts remarkably fast, handling 400,000 samples in about 0.35 s, but its accuracy is clearly below the boosted trees.
- SVM is by far the most expensive (over 1,100 s here), which is a large part of why kernel SVMs are gradually being abandoned on datasets of this size.
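Before the full script, here is a minimal, self-contained sketch of the tree+LR stacking recipe that RF+LR, GBDT+LR, and XGBoost+LR all share: train trees on one slice of data, one-hot encode the leaf index each sample lands in, and fit an LR on those sparse leaf features using a second, disjoint slice. The sizes and hyperparameters below are illustrative toy values, not the benchmark settings.

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
# Disjoint slices: trees on one, LR on another, evaluation on a third,
# so the LR never sees leaf indices the trees already memorized.
X_tree, X_rest, y_tree, y_rest = train_test_split(X, y, test_size=0.5, random_state=0)
X_lr, X_test, y_lr, y_test = train_test_split(X_rest, y_rest, test_size=0.5, random_state=0)

gbdt = GradientBoostingClassifier(n_estimators=10).fit(X_tree, y_tree)
enc = OneHotEncoder().fit(gbdt.apply(X_tree)[:, :, 0])   # leaf index -> one-hot
lr = LogisticRegression(max_iter=1000).fit(
    enc.transform(gbdt.apply(X_lr)[:, :, 0]), y_lr)

proba = lr.predict_proba(enc.transform(gbdt.apply(X_test)[:, :, 0]))[:, 1]
print("GBDT+LR AUC:", roc_auc_score(y_test, proba))
```

Each sample is thus represented purely by which leaves it falls into, and the LR learns one weight per leaf; the benchmark functions below apply this same construction to each tree model.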
Code:

The script below uses a decorator to time each model function; wrapping the timing logic once avoids repeating the same boilerplate in every function.
```python
import functools
import time

import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn import svm
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
def runtime_decorator(function):
    """Print the wall-clock runtime of the wrapped function."""
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = function(*args, **kwargs)
        end = time.time()
        print(f"{function.__name__} runtime: {end - start:.4f} s")
        return result
    return wrapper
np.random.seed(10000)
n_estimator = 10

X, y = make_classification(n_samples=400000, n_features=100, shuffle=True)
# Hold out 80% as the test set; split the rest in half so the trees and
# the downstream LR are trained on disjoint data.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8)
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train, y_train, test_size=0.5)
@runtime_decorator
def RandomForestLR():
    rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
    rf_enc = OneHotEncoder()
    rf_lr = LogisticRegression()
    rf.fit(X_train, y_train)
    # rf.apply() returns each sample's leaf index per tree; one-hot encode them.
    rf_enc.fit(rf.apply(X_train))
    rf_lr.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
    y_pred_rf_lr = rf_lr.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
    fpr_rf_lr, tpr_rf_lr, _ = roc_curve(y_test, y_pred_rf_lr)
    auc = roc_auc_score(y_test, y_pred_rf_lr)
    print("RF+LR:", auc)
    return fpr_rf_lr, tpr_rf_lr
@runtime_decorator
def GbdtLR():
    grd = GradientBoostingClassifier(n_estimators=n_estimator)
    grd_enc = OneHotEncoder()
    grd_lr = LogisticRegression()
    grd.fit(X_train, y_train)
    # GradientBoostingClassifier.apply() returns shape
    # (n_samples, n_estimators, n_classes); n_classes is 1 for binary
    # problems, hence the [:, :, 0] slice.
    grd_enc.fit(grd.apply(X_train)[:, :, 0])
    grd_lr.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
    y_pred_grd_lr = grd_lr.predict_proba(grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
    fpr_grd_lr, tpr_grd_lr, _ = roc_curve(y_test, y_pred_grd_lr)
    auc = roc_auc_score(y_test, y_pred_grd_lr)
    print("GBDT+LR:", auc)
    return fpr_grd_lr, tpr_grd_lr
@runtime_decorator
def Gbdt():
    grd = GradientBoostingClassifier(n_estimators=n_estimator)
    grd.fit(X_train, y_train)
    # Use the positive-class probability; roc_curve needs a 1-D score array.
    y_pred_grd = grd.predict_proba(X_test)[:, 1]
    fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
    auc = roc_auc_score(y_test, y_pred_grd)
    print("GBDT:", auc)
    return fpr_grd, tpr_grd
@runtime_decorator
def Xgboost():
    xgboost = xgb.XGBClassifier(nthread=4, learning_rate=0.08,
                                n_estimators=50, max_depth=5, gamma=0,
                                subsample=0.9, colsample_bytree=0.5)
    xgboost.fit(X_train, y_train)
    y_xgboost_test = xgboost.predict_proba(X_test)[:, 1]
    fpr_xgboost, tpr_xgboost, _ = roc_curve(y_test, y_xgboost_test)
    auc = roc_auc_score(y_test, y_xgboost_test)
    print("Xgboost:", auc)
    return fpr_xgboost, tpr_xgboost
@runtime_decorator
def Lr():
    lm = LogisticRegression()
    lm.fit(X_train, y_train)
    y_lr_test = lm.predict_proba(X_test)[:, 1]
    fpr_lr, tpr_lr, _ = roc_curve(y_test, y_lr_test)
    auc = roc_auc_score(y_test, y_lr_test)
    print("LR:", auc)
    return fpr_lr, tpr_lr
@runtime_decorator
def XgboostLr():
    xgboost = xgb.XGBClassifier(nthread=4, learning_rate=0.08,
                                n_estimators=50, max_depth=5, gamma=0,
                                subsample=0.9, colsample_bytree=0.5)
    xgb_enc = OneHotEncoder()
    xgb_lr = LogisticRegression(n_jobs=4, C=0.1, penalty='l2')
    xgboost.fit(X_train, y_train)
    # XGBClassifier.apply() is already 2-D: (n_samples, n_trees).
    xgb_enc.fit(xgboost.apply(X_train))
    xgb_lr.fit(xgb_enc.transform(xgboost.apply(X_train_lr)), y_train_lr)
    y_xgb_lr_test = xgb_lr.predict_proba(xgb_enc.transform(xgboost.apply(X_test)))[:, 1]
    fpr_xgb_lr, tpr_xgb_lr, _ = roc_curve(y_test, y_xgb_lr_test)
    auc = roc_auc_score(y_test, y_xgb_lr_test)
    print("Xgboost + LR:", auc)
    return fpr_xgb_lr, tpr_xgb_lr
@runtime_decorator
def Svm():
    svc = svm.SVC()
    svc.fit(X_train, y_train)
    # Use continuous decision scores rather than hard 0/1 predictions;
    # an ROC curve built from hard labels degenerates to a single point.
    y_svm_test = svc.decision_function(X_test)
    fpr_svm, tpr_svm, _ = roc_curve(y_test, y_svm_test)
    auc = roc_auc_score(y_test, y_svm_test)
    print("SVM:", auc)
    return fpr_svm, tpr_svm
if __name__ == '__main__':
    fpr_rf_lr, tpr_rf_lr = RandomForestLR()
    fpr_grd_lr, tpr_grd_lr = GbdtLR()
    fpr_xgboost, tpr_xgboost = Xgboost()
    fpr_lr, tpr_lr = Lr()
    fpr_xgb_lr, tpr_xgb_lr = XgboostLr()
    fpr_svm, tpr_svm = Svm()

    # Full ROC curves.
    plt.figure(1)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr_rf_lr, tpr_rf_lr, label='RF + LR')
    plt.plot(fpr_grd_lr, tpr_grd_lr, label='GBDT + LR')
    plt.plot(fpr_xgboost, tpr_xgboost, label='XGB')
    plt.plot(fpr_lr, tpr_lr, label='LR')
    plt.plot(fpr_xgb_lr, tpr_xgb_lr, label='XGB + LR')
    plt.plot(fpr_svm, tpr_svm, label='SVM')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend(loc='best')
    plt.show()

    # Zoom in on the top-left corner, where the strong models separate.
    plt.figure(2)
    plt.xlim(0, 0.2)
    plt.ylim(0.8, 1)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr_rf_lr, tpr_rf_lr, label='RF + LR')
    plt.plot(fpr_grd_lr, tpr_grd_lr, label='GBDT + LR')
    plt.plot(fpr_xgboost, tpr_xgboost, label='XGB')
    plt.plot(fpr_lr, tpr_lr, label='LR')
    plt.plot(fpr_xgb_lr, tpr_xgb_lr, label='XGB + LR')
    plt.plot(fpr_svm, tpr_svm, label='SVM')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve (zoomed in at top left)')
    plt.legend(loc='best')
    plt.show()
```
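One detail that often trips people up in this script is the shape returned by each model's apply(). A quick check on toy data (sizes here are arbitrary, purely for illustration) shows why only the GBDT variant needs the [:, :, 0] indexing:

```python
import xgboost as xgb
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

rf = RandomForestClassifier(n_estimators=5).fit(X, y)
print(rf.apply(X).shape)    # (200, 5): one leaf index per tree

grd = GradientBoostingClassifier(n_estimators=5).fit(X, y)
print(grd.apply(X).shape)   # (200, 5, 1): extra per-class axis (1 for binary),
                            # hence the [:, :, 0] slice in GbdtLR

xgbc = xgb.XGBClassifier(n_estimators=5).fit(X, y)
print(xgbc.apply(X).shape)  # (200, 5): like the random forest, already 2-D
```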