导入数据
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.ensemble import (
    AdaBoostRegressor,
    ExtraTreesRegressor,
    GradientBoostingRegressor,
    RandomForestRegressor,
)
from sklearn.linear_model import LinearRegression,Lasso,Ridge,ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler,StandardScaler,PolynomialFeatures
from sklearn.svm import SVR

from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# Load the train/test splits of the steam-volume dataset (tab-separated text).
# NOTE(review): absolute Windows paths — adjust for your own environment.
train = pd.read_csv(r"C:\Users\dream\Documents\Tencent Files\1799785728\FileRecv\zhengqi_train的副本.txt",sep = "\t")
test = pd.read_csv(r"C:\Users\dream\Documents\Tencent Files\1799785728\FileRecv\zhengqi_test的副本.txt",sep = "\t")
# Tag each row with its origin so the two frames can be separated again after
# joint preprocessing on the concatenated frame.
train["origin"] = "train"
test["origin"] = "test"
# test has no "target" column, so the concatenated frame holds NaN targets for
# the test rows; the shape is printed to confirm the merge.
data_all = pd.concat([train,test])
print(data_all.shape)
data_all.head()
结果:
(4813, 40)
特征分布做特征选择?
# Feature exploration: for each of the 38 feature columns (the last two
# columns are the "origin"/"target" bookkeeping columns), overlay the kernel
# density estimate of the training rows and the test rows.  Features whose
# train/test distributions disagree badly are dropped in the next cell.
# (The loop body had lost its indentation in the original paste.)
plt.figure(figsize=(9, 38 * 6))
for i, col in enumerate(data_all.columns[:-2]):
    train_col = data_all[col][data_all["origin"] == "train"]  # training rows
    test_col = data_all[col][data_all["origin"] == "test"]    # test rows
    axes = plt.subplot(38, 1, i + 1)
    # NOTE(review): `shade=` is deprecated in newer seaborn (use `fill=`) —
    # kept as-is to match the environment this notebook was written for.
    ax = sns.kdeplot(train_col, shade=True, ax=axes)
    sns.kdeplot(test_col, shade=True, ax=ax)
# Drop the features whose train/test distributions diverge (judged from the
# KDE plots above).
drop_labels = ["V11", "V17", "V5", "V22"]
data_all.drop(columns=drop_labels, inplace=True)
按相关性进行特征选择?
# Correlation-based selection: list features with |corr(target)| < 0.1 as
# candidates for removal.  Restrict to numeric columns first — "origin" is a
# string column and would make DataFrame.corr() raise on modern pandas.
corr = data_all.select_dtypes("number").corr()
cond = corr.loc["target"].abs() < 0.1
drop_labels = corr.loc["target"].index[cond]
drop_labels
# After also inspecting the per-feature distributions, only these two of the
# weakly correlated candidates are actually dropped.
drop_labels = ['V14', 'V21']
data_all.drop(drop_labels, axis=1, inplace=True)
# Heat map of pairwise correlations on the raw training frame.
plt.figure(figsize = (20,16))
# Use numeric columns only ("origin" is a string tag column and would make
# corr() raise on modern pandas).
mcorr = train.select_dtypes("number").corr()
# np.bool was removed in NumPy 1.24 — use the builtin bool dtype instead.
mask = np.zeros_like(mcorr, dtype = bool)
mask[np.triu_indices_from(mask)] = True  # hide the upper triangle (mirror image)
cmap = sns.diverging_palette(220, 10, as_cmap = True)  # diverging color palette
g = sns.heatmap(mcorr, mask = mask, cmap = cmap, square = True, annot = True, fmt = "0.2f")  # annot: write values in cells
归一化操作
# Z-score standardization of the feature columns; the trailing two columns
# ("origin", "target") are bookkeeping and are excluded.
data = data_all.iloc[:, :-2]
stand = StandardScaler()
stand.fit(data)
data2 = stand.transform(data)
data2
结果:
array([[ 0.56597193, 0.01591954, -0.14297122, ..., -5.1011359 ,
-2.60811143, -3.50838833],
[ 0.96800866, 0.43695666, 0.06604898, ..., 0.36409451,
-0.33491851, -0.7301463 ],
[ 1.01301277, 0.56796822, 0.23506531, ..., 0.36409451,
0.76517485, -0.58913402],
...,
[-2.61531873, -3.56439613, -3.4022862 , ..., -1.53698564,
-2.54410599, 1.61205774],
[-2.66132294, -3.64640336, -3.27127354, ..., -1.53698564,
-2.54910642, 1.43104197],
[-2.32129187, -3.03734966, -3.21426803, ..., -1.53698564,
-1.12298539, 1.9880905 ]])
# Rebuild a DataFrame from the scaled array, then re-attach the two
# bookkeeping columns ("origin", "target") by row index.
cols = data_all.columns
data_all_std = pd.DataFrame(data2,columns = cols[:-2])
# Reset to a 0..n-1 RangeIndex so the index-wise merge below aligns rows
# correctly (concat left duplicate indices from the two source frames).
# Generalized from the hard-coded row count 4813.
data_all.index = np.arange(len(data_all))
data_all_std = pd.merge(data_all_std, data_all.iloc[:,-2:], right_index=True,left_index=True)
data_all_std
异常值处理
# Outlier handling: fit a cross-validated ridge model on the training rows and
# flag samples whose prediction error is unusually large.
from sklearn.linear_model import RidgeCV
data_all_std.head()
# Wide alpha grid; RidgeCV selects the best regularization strength internally.
ridge = RidgeCV(alphas = [0.0001,0.001,0.01,0.1,0.2,0.5,1,2,3,4,5,10,20,30,50])
cond = data_all_std["origin"] == "train"
X_train = data_all_std[cond].iloc[:,:-2]
y_train = data_all_std[cond]["target"]
ridge.fit(X_train,y_train)
# Predictions always deviate somewhat from the truth; rows whose deviation is
# especially large are treated as outliers.
y_ = ridge.predict(X_train)
# NOTE(review): display() is the IPython/Jupyter builtin — this cell assumes a
# notebook environment.
display(y_train[:100])
display(y_[:100])
# Flag rows whose absolute residual exceeds 0.8 standard deviations of the
# target; cond.sum() reports how many rows were flagged.
cond = abs((y_train - y_)) > y_train.std()*0.8
cond.sum()
结果:
0 0.175
1 0.676
2 0.633
3 0.206
4 0.384
...
95 0.752
96 0.669
97 -0.144
98 0.900
99 0.194
Name: target, Length: 100, dtype: float64
array([ 0.75989853, 0.67404957, 0.698052 , 0.26892304, 0.49745972,
0.34479599, 0.56407434, 0.65300985, 0.87863292, 1.10631617,
1.14381793, 1.12698163, -0.95261698, -1.14030135, -0.34147371,
-1.18688819, -0.3570815 , -1.460894 , -0.46738849, 0.20482328,
0.35996288, 0.13898213, 0.62397264, -0.03227538, -1.86061414,
-1.98766287, 0.42573393, 1.0067669 , 1.20290325, 0.58591695,
0.37433565, -0.28182672, -0.48744524, -0.51845319, 0.39441366,
0.80716124, 0.55676449, 0.31542392, -0.06445492, -1.0259794 ,
-0.02866304, -0.56256517, -0.10068991, 0.30875929, 0.27981951,
0.61037409, 0.857841 , 0.73098524, 0.84739155, 0.64799208,
0.30178805, 0.32478358, 0.15008424, -0.10947188, -0.06684293,
0.00231872, 0.05027197, -0.52516486, -0.56590262, -0.37154893,
0.3188893 , -0.11236033, -0.23167792, 0.72161844, 0.99240655,
0.58891927, 0.5994198 , 0.49652549, 0.44206633, 0.13839513,
0.52453052, 0.25111694, -0.74958371, -0.32670514, -0.43136215,
-0.15506445, -0.65928348, -0.26046392, 0.1737138 , 0.15865807,
0.26452441, 0.00243442, 0.49720354, 0.21106626, 0.81243771,
0.95389209, 0.97652435, 0.5122789 , 0.98314069, 0.88664648,
1.05821294, 1.02201547, 1.15680039, 0.35648533, 0.64549981,
0.48716165, 0.50820392, -0.30307674, 0.58959149, 0.2912567 ])
88
# Visualize the outlier flags: prediction vs. truth, residuals vs. truth, and
# a residual histogram, with flagged rows overplotted in red.
plt.figure(figsize=(12,6))
axes = plt.subplot(1,3,1)
# predicted vs. actual target values
axes.scatter(y_train,y_)
axes.scatter(y_train[cond],y_[cond],c = "r", s = 20)
axes = plt.subplot(1,3,2)
# residual (actual - predicted) against the actual value
axes.scatter(y_train,y_train - y_)
axes.scatter(y_train[cond],(y_train - y_)[cond],c = "r", s = 20)
axes = plt.subplot(1,3,3)
# residual distribution; red bars are the flagged outliers
(y_train - y_).hist(bins = 50,ax = axes)
(y_train - y_)[cond].hist(bins = 50,ax = axes ,color = "r")
# Outlier filtering: remove the rows flagged by `cond` from the combined
# frame, printing the shape before and after for a quick sanity check.
drop_index = cond.index[cond]
print(data_all_std.shape)
data_all_std.drop(index=drop_index, inplace=True)
data_all_std.shape
结果:
(4813, 34)
(4725, 34)
机器学习模型
def detect_model(estimators, data):
    """Fit each estimator on the train split and report test MSE and R^2.

    Parameters
    ----------
    estimators : dict[str, estimator]
        Name -> unfitted sklearn-style regressor.
    data : tuple
        (X_train, X_test, y_train, y_test) as returned by train_test_split.
    """
    # (The function body had lost its indentation in the original paste.)
    for key, estimator in estimators.items():
        estimator.fit(data[0], data[2])
        y_ = estimator.predict(data[1])
        mse = mean_squared_error(data[3], y_)
        print("-----------------mse%s" % (key), mse)
        r2 = estimator.score(data[1], data[3])
        print("-----------------r2%s" % (key), r2)
        print("\n")
# Benchmark the candidate model zoo on an 80/20 split of the training portion
# of the preprocessed data.  (A stray '?' artifact was removed from this cell.)
cond = data_all_std["origin"] == "train"
X = data_all_std[cond].iloc[:, :-2]
y = data_all_std[cond]["target"]
data = train_test_split(X, y, test_size=0.2)

estimators = {}
estimators["knn"] = KNeighborsRegressor()
estimators["linear"] = LinearRegression()
estimators["ridge"] = Ridge()
estimators["lasso"] = Lasso()
estimators["elasticnet"] = ElasticNet()
estimators["forest"] = RandomForestRegressor()
estimators["gbdt"] = GradientBoostingRegressor()
estimators["ada"] = AdaBoostRegressor()
estimators["extreme"] = ExtraTreesRegressor()
estimators["svm_rbf"] = SVR(kernel="rbf")
estimators["svm_poly"] = SVR(kernel="poly")
estimators["xgb"] = XGBRegressor()
detect_model(estimators, data)
结果:
-----------------mseknn 0.15662542121428571
-----------------r2knn 0.8337471706629949
-----------------mselinear 0.08535934118139471
-----------------r2linear 0.909393814415771
-----------------mseridge 0.0854724544808449
-----------------r2ridge 0.9092737482992783
-----------------mselasso 0.9420917733705356
-----------------r2lasso -1.0238966341447053e-08
-----------------mseelasticnet 0.5575013990170281
-----------------r2elasticnet 0.40823025900047394
-----------------mseforest 0.09774924294
-----------------r2forest 0.8962423335986341
-----------------msegbdt 0.09536685168518329
-----------------r2gbdt 0.8987711650209556
-----------------mseada 0.11436047541876887
-----------------r2ada 0.8786100464707883
-----------------mseextreme 0.09095247737375
-----------------r2extreme 0.9034568808730732
-----------------msesvm_rbf 0.10915159147460973
-----------------r2svm_rbf 0.8841391086542493
-----------------msesvm_poly 0.26247224325213714
-----------------r2svm_poly 0.7213941854087838
-----------------msexgb 0.0951415171478096
-----------------r2xgb 0.8990103503594229
# Final ensemble: keep only the stronger models.
estimators = {}
#estimators["linear"] = LinearRegression()
#estimators["ridge"] = Ridge()  # the two linear models overfit easily
estimators["forest"] = RandomForestRegressor()
estimators["gbdt"] = GradientBoostingRegressor()
estimators["ada"] = AdaBoostRegressor()
estimators["extreme"] = ExtraTreesRegressor()
estimators["svm_rbf"] = SVR(kernel="rbf")
estimators["xgb"] = XGBRegressor()
cond = data_all_std["origin"] == "train"
X_train = data_all_std[cond].iloc[:, :-2]
y_train = data_all_std[cond]["target"]
cond = data_all_std["origin"] == "test"
X_test = data_all_std[cond].iloc[:, :-2]
# One prediction per model; average them for the ensemble prediction.
# (The loop body had lost its indentation in the original paste.)
y_pred = []
for key, model in estimators.items():
    model.fit(X_train, y_train)
    y_ = model.predict(X_test)
    y_pred.append(y_)
y_ = np.mean(y_pred, axis=0)
# Refit and predict once more, then save the averaged ensemble prediction.
# NOTE(review): this cell duplicates the one above — kept to preserve the
# notebook's original flow.  (Loop indentation restored.)
y_pred = []
for key, model in estimators.items():
    model.fit(X_train, y_train)
    y_ = model.predict(X_test)
    y_pred.append(y_)
y_ = np.mean(y_pred, axis=0)
pd.Series(y_).to_csv("./emsemble.txt", index=False)
# Second pass: append each model's predictions as new feature columns so the
# models can re-learn on them, then average the new predictions and save.
# NOTE(review): the train-side prediction columns come from models fit on the
# same rows, so these features leak target information — confirm intended.
# (Loop indentation restored from the original paste.)
for key, model in estimators.items():
    model.fit(X_train, y_train)
    y_ = model.predict(X_train)
    X_train[key] = y_
    y_ = model.predict(X_test)
    X_test[key] = y_
y_pred = []
for key, model in estimators.items():
    model.fit(X_train, y_train)
    y_ = model.predict(X_test)
    y_pred.append(y_)
y_ = np.mean(y_pred, axis=0)
pd.Series(y_).to_csv("./emsemble2.txt", index=False)
数据归一化
# Min-max normalized variant of the features for the Box-Cox study below.
# `data3` was never defined in the original notebook (NameError); build it with
# the MinMaxScaler that is imported at the top of the file but otherwise unused.
data3 = MinMaxScaler().fit_transform(data_all.iloc[:, :-2])
data_all_norm = pd.DataFrame(data3, columns=data_all.columns[:-2])
data_all_norm
# Re-attach the "origin"/"target" bookkeeping columns by row index.
data_all_norm = pd.merge(data_all_norm, data_all.iloc[:, -2:], left_index=True, right_index=True)
特征工程——box-cox变换
# Extra imports for the Box-Cox transform study; warnings are silenced to hide
# deprecation chatter from seaborn/scipy in the plotting loop below.
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
def scale_minmax(data):
    """Linearly rescale `data` onto [0, 1] (column-wise for a DataFrame)."""
    lo = data.min()
    hi = data.max()
    return (data - lo) / (hi - lo)
# Six diagnostic plots per feature: original distribution, Q-Q plot, and a
# scatter vs. target — then the same three after a Box-Cox transform.
# (The loop body had lost its indentation in the original paste.)
fcols = 6
frows = len(data_all_norm.columns[:-2])
plt.figure(figsize=(4*fcols,4*frows))
i=0
for var in data_all_norm.columns[:-2]:
    dat = data_all_norm[[var, 'target']].dropna()
    # 1) original distribution with a fitted normal curve
    # NOTE(review): sns.distplot is deprecated/removed in newer seaborn —
    # kept to match the environment this notebook was written for.
    i+=1
    plt.subplot(frows,fcols,i)
    sns.distplot(dat[var] , fit=stats.norm);
    plt.title(var+' Original')
    plt.xlabel('')
    # 2) Q-Q plot; skewness shrinks toward 0 as the data gets more normal
    i+=1
    plt.subplot(frows,fcols,i)
    _=stats.probplot(dat[var], plot=plt)
    plt.title('skew='+'{:.4f}'.format(stats.skew(dat[var])))
    plt.xlabel('')
    plt.ylabel('')
    # 3) scatter against the target with the linear correlation in the title
    i+=1
    plt.subplot(frows,fcols,i)
    plt.plot(dat[var], dat['target'],'.',alpha=0.5)
    plt.title('corr='+'{:.2f}'.format(np.corrcoef(dat[var], dat['target'])[0][1]))
    # 4) Box-Cox transformed distribution (+1 shift keeps all values positive,
    # as boxcox requires strictly positive input)
    i+=1
    plt.subplot(frows,fcols,i)
    trans_var, lambda_var = stats.boxcox(dat[var].dropna()+1)
    trans_var = scale_minmax(trans_var)
    sns.distplot(trans_var , fit=stats.norm);
    plt.title(var+' Tramsformed')
    plt.xlabel('')
    # 5) Q-Q plot of the transformed values
    i+=1
    plt.subplot(frows,fcols,i)
    _=stats.probplot(trans_var, plot=plt)
    plt.title('skew='+'{:.4f}'.format(stats.skew(trans_var)))
    plt.xlabel('')
    plt.ylabel('')
    # 6) transformed scatter against the target
    i+=1
    plt.subplot(frows,fcols,i)
    plt.plot(trans_var, dat['target'],'.',alpha=0.5)
    plt.title('corr='+'{:.2f}'.format(np.corrcoef(trans_var,dat['target'])[0][1]))