
[AI] Tianchi Industrial Steam Volume Prediction

Importing the data

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression,Lasso,Ridge,ElasticNet
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor,AdaBoostRegressor,ExtraTreesRegressor

from xgboost import XGBRegressor
from lightgbm import LGBMRegressor

from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,StandardScaler,PolynomialFeatures
train = pd.read_csv(r"C:\Users\dream\Documents\Tencent Files\1799785728\FileRecv\zhengqi_train的副本.txt",sep = "\t")
test = pd.read_csv(r"C:\Users\dream\Documents\Tencent Files\1799785728\FileRecv\zhengqi_test的副本.txt",sep = "\t")

train["origin"] = "train"
test["origin"] = "test"

data_all = pd.concat([train,test])
print(data_all.shape)
data_all.head()

Result:

(4813, 40)

Feature selection based on feature distributions

#Feature exploration
#There are 38 features; drop the ones that are not useful
#Compare each feature's distribution in train vs. test and drop features whose distributions differ (a quantitative KS-test sketch follows this block)
plt.figure(figsize=(9,38*6))
for i,col in enumerate(data_all.columns[:-2]):
    cond = data_all["origin"] == "train"
    train_col = data_all[col][cond]    #training rows
    cond = data_all["origin"] == "test"
    test_col = data_all[col][cond]    #test rows
    axes = plt.subplot(38,1,i+1)
    ax = sns.kdeplot(train_col,shade = True,ax = axes)
    sns.kdeplot(test_col,shade = True,ax = ax)


drop_labels = ["V11","V17","V5","V22"]
data_all.drop(drop_labels,axis = 1,inplace = True)
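
The features above were chosen by visually comparing the KDE plots. As a complementary, quantitative check (a sketch, not part of the original notebook), a two-sample Kolmogorov-Smirnov test can flag features whose train and test distributions differ; the 0.05 p-value cutoff below is an assumed threshold.

#Sketch: flag train/test distribution shift with a two-sample KS test (assumed 0.05 cutoff)
from scipy.stats import ks_2samp

shifted = []
for col in data_all.columns[:-2]:                    #skip "target" and "origin"
    train_vals = data_all.loc[data_all["origin"] == "train", col]
    test_vals = data_all.loc[data_all["origin"] == "test", col]
    stat, p = ks_2samp(train_vals, test_vals)
    if p < 0.05:                                     #small p-value: distributions likely differ
        shifted.append((col, round(stat, 3)))
print(shifted)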

Feature selection based on correlation

#Correlation coefficients: find the 7 features that are only weakly correlated with the target
corr = data_all.corr()
cond = corr.loc["target"].abs() < 0.1
drop_labels = corr.loc["target"].index[cond]
drop_labels
#After inspecting their distributions, drop the poorly distributed ones
drop_labels = ['V14', 'V21']
data_all.drop(drop_labels,axis = 1,inplace = True)
#Plot a correlation heatmap
plt.figure(figsize = (20,16))
mcorr = train.corr()
mask = np.zeros_like(mcorr, dtype = bool)

mask[np.triu_indices_from(mask)] = True     #mask the cells above the diagonal
cmap = sns.diverging_palette(220, 10, as_cmap = True)      #colour palette
g = sns.heatmap(mcorr, mask = mask, cmap = cmap, square = True, annot = True, fmt = "0.2f")      #annot writes each coefficient in its cell

Standardizing the features

#Standardize the features (z-score scaling with StandardScaler)
data = data_all.iloc[:,:-2]
stand = StandardScaler()
data2 = stand.fit_transform(data)
data2

Result:

array([[ 0.56597193,  0.01591954, -0.14297122, ..., -5.1011359 ,
        -2.60811143, -3.50838833],
       [ 0.96800866,  0.43695666,  0.06604898, ...,  0.36409451,
        -0.33491851, -0.7301463 ],
       [ 1.01301277,  0.56796822,  0.23506531, ...,  0.36409451,
         0.76517485, -0.58913402],
       ...,
       [-2.61531873, -3.56439613, -3.4022862 , ..., -1.53698564,
        -2.54410599,  1.61205774],
       [-2.66132294, -3.64640336, -3.27127354, ..., -1.53698564,
        -2.54910642,  1.43104197],
       [-2.32129187, -3.03734966, -3.21426803, ..., -1.53698564,
        -1.12298539,  1.9880905 ]])
cols = data_all.columns
data_all_std = pd.DataFrame(data2,columns = cols[:-2])
data_all.index = np.arange(4813)
data_all_std = pd.merge(data_all_std, data_all.iloc[:,-2:], right_index=True,left_index=True)
data_all_std
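
A note on the scaling step above: the StandardScaler is fit on the concatenated train and test rows. A leakage-safer variant (a sketch, not the original approach) fits the scaler on the training rows only and reuses those statistics for the test rows.

#Sketch: fit the scaler on training rows only, then apply the same statistics to the test rows
train_mask = data_all["origin"] == "train"
features = data_all.columns[:-2]

scaler = StandardScaler().fit(data_all.loc[train_mask, features])
train_std = scaler.transform(data_all.loc[train_mask, features])
test_std = scaler.transform(data_all.loc[~train_mask, features])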

Outlier handling

#Outlier handling
from sklearn.linear_model import RidgeCV
data_all_std.head()

ridge = RidgeCV(alphas = [0.0001,0.001,0.01,0.1,0.2,0.5,1,2,3,4,5,10,20,30,50])
cond = data_all_std["origin"] == "train"
X_train = data_all_std[cond].iloc[:,:-2]
y_train = data_all_std[cond]["target"]

ridge.fit(X_train,y_train)
#Predict on the training data; the predictions will always deviate somewhat from the true values, and rows with especially large deviations are treated as outliers
y_ = ridge.predict(X_train)
display(y_train[:100])
display(y_[:100])

cond = abs((y_train - y_)) > y_train.std()*0.8
cond.sum()

Result:

0     0.175
1     0.676
2     0.633
3     0.206
4     0.384
      ...  
95    0.752
96    0.669
97   -0.144
98    0.900
99    0.194
Name: target, Length: 100, dtype: float64
array([ 0.75989853,  0.67404957,  0.698052  ,  0.26892304,  0.49745972,
        0.34479599,  0.56407434,  0.65300985,  0.87863292,  1.10631617,
        1.14381793,  1.12698163, -0.95261698, -1.14030135, -0.34147371,
       -1.18688819, -0.3570815 , -1.460894  , -0.46738849,  0.20482328,
        0.35996288,  0.13898213,  0.62397264, -0.03227538, -1.86061414,
       -1.98766287,  0.42573393,  1.0067669 ,  1.20290325,  0.58591695,
        0.37433565, -0.28182672, -0.48744524, -0.51845319,  0.39441366,
        0.80716124,  0.55676449,  0.31542392, -0.06445492, -1.0259794 ,
       -0.02866304, -0.56256517, -0.10068991,  0.30875929,  0.27981951,
        0.61037409,  0.857841  ,  0.73098524,  0.84739155,  0.64799208,
        0.30178805,  0.32478358,  0.15008424, -0.10947188, -0.06684293,
        0.00231872,  0.05027197, -0.52516486, -0.56590262, -0.37154893,
        0.3188893 , -0.11236033, -0.23167792,  0.72161844,  0.99240655,
        0.58891927,  0.5994198 ,  0.49652549,  0.44206633,  0.13839513,
        0.52453052,  0.25111694, -0.74958371, -0.32670514, -0.43136215,
       -0.15506445, -0.65928348, -0.26046392,  0.1737138 ,  0.15865807,
        0.26452441,  0.00243442,  0.49720354,  0.21106626,  0.81243771,
        0.95389209,  0.97652435,  0.5122789 ,  0.98314069,  0.88664648,
        1.05821294,  1.02201547,  1.15680039,  0.35648533,  0.64549981,
        0.48716165,  0.50820392, -0.30307674,  0.58959149,  0.2912567 ])
88
#Plot predictions, residuals and the flagged outliers
plt.figure(figsize=(12,6))
axes = plt.subplot(1,3,1)
axes.scatter(y_train,y_)
axes.scatter(y_train[cond],y_[cond],c = "r", s = 20)

axes = plt.subplot(1,3,2)
axes.scatter(y_train,y_train - y_)
axes.scatter(y_train[cond],(y_train - y_)[cond],c = "r", s = 20)

axes = plt.subplot(1,3,3)
(y_train - y_).hist(bins = 50,ax = axes)
(y_train - y_)[cond].hist(bins = 50,ax = axes ,color = "r")

#Filter out the outlier rows
drop_index = cond[cond].index
print(data_all_std.shape)
data_all_std.drop(drop_index,axis = 0,inplace = True)
data_all_std.shape

Result:

(4813, 34)
(4725, 34)
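
The residuals used above come from predicting on the same rows the ridge model was fit on. A variant (a sketch, not from the original) uses out-of-fold predictions via cross_val_predict, so every residual is computed on data the model did not see during fitting; the 5-fold setting and the reuse of the 0.8*std cutoff are assumptions.

#Sketch: out-of-fold residuals for outlier flagging
from sklearn.model_selection import cross_val_predict

y_oof = cross_val_predict(ridge, X_train, y_train, cv=5)   #each prediction comes from a fold that excluded that row
cond_oof = abs(y_train - y_oof) > y_train.std() * 0.8
print(cond_oof.sum(), "rows flagged as outliers")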

Machine learning models

def detect_model(estimators,data):
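    #data = (X_train, X_test, y_train, y_test), as returned by train_test_split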
    for key,estimator in estimators.items():
        estimator.fit(data[0],data[2])
        y_ = estimator.predict(data[1])
        mse = mean_squared_error(data[3],y_)
        print("-----------------mse%s"%(key),mse)
        r2 = estimator.score(data[1],data[3])
        print("-----------------r2%s"%(key),r2)
        print("\n")

cond = data_all_std["origin"] == "train"
X = data_all_std[cond].iloc[:,:-2]
y = data_all_std[cond]["target"]
data = train_test_split(X,y,test_size = 0.2)

estimators = {}
estimators["knn"] = KNeighborsRegressor()
estimators["linear"] = LinearRegression()
estimators["ridge"] = Ridge()
estimators["lasso"] = Lasso()
estimators["elasticnet"] = ElasticNet()
estimators["forest"] = RandomForestRegressor()
estimators["gbdt"] = GradientBoostingRegressor()
estimators["ada"] = AdaBoostRegressor()
estimators["extreme"] = ExtraTreesRegressor()
estimators["svm_rbf"] = SVR(kernel="rbf")
estimators["svm_poly"] = SVR(kernel="poly")
estimators["xgb"] = XGBRegressor()

detect_model(estimators,data)


Result:

-----------------mseknn 0.15662542121428571
-----------------r2knn 0.8337471706629949


-----------------mselinear 0.08535934118139471
-----------------r2linear 0.909393814415771


-----------------mseridge 0.0854724544808449
-----------------r2ridge 0.9092737482992783


-----------------mselasso 0.9420917733705356
-----------------r2lasso -1.0238966341447053e-08


-----------------mseelasticnet 0.5575013990170281
-----------------r2elasticnet 0.40823025900047394


-----------------mseforest 0.09774924294
-----------------r2forest 0.8962423335986341


-----------------msegbdt 0.09536685168518329
-----------------r2gbdt 0.8987711650209556


-----------------mseada 0.11436047541876887
-----------------r2ada 0.8786100464707883


-----------------mseextreme 0.09095247737375
-----------------r2extreme 0.9034568808730732


-----------------msesvm_rbf 0.10915159147460973
-----------------r2svm_rbf 0.8841391086542493


-----------------msesvm_poly 0.26247224325213714
-----------------r2svm_poly 0.7213941854087838


-----------------msexgb 0.0951415171478096
-----------------r2xgb 0.8990103503594229
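
detect_model scores each estimator on a single random train/test split, so the numbers above move around from run to run. A more stable comparison (a sketch, not in the original) averages the MSE over K folds with cross_val_score; 5 folds is an assumed setting.

#Sketch: K-fold comparison of the same estimators
from sklearn.model_selection import cross_val_score

for key, estimator in estimators.items():
    scores = cross_val_score(estimator, X, y, cv=5, scoring="neg_mean_squared_error")
    print("%s  mean mse: %.4f  (+/- %.4f)" % (key, -scores.mean(), scores.std()))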

estimators = {}
#estimators["linear"] = LinearRegression()
#estimators["ridge"] = Ridge()    #the two linear regressors are left out: they tend to overfit here
estimators["forest"] = RandomForestRegressor()
estimators["gbdt"] = GradientBoostingRegressor()
estimators["ada"] = AdaBoostRegressor()
estimators["extreme"] = ExtraTreesRegressor()
estimators["svm_rbf"] = SVR(kernel="rbf")
estimators["xgb"] = XGBRegressor()

cond = data_all_std["origin"] == "train"
X_train = data_all_std[cond].iloc[:,:-2]
y_train = data_all_std[cond]["target"]
cond = data_all_std["origin"] == "test"
X_test = data_all_std[cond].iloc[:,:-2]

#Each model produces its own prediction; average the predictions
y_pred = []
for key,model in estimators.items():
    model.fit(X_train,y_train)
    y_ = model.predict(X_test)
    y_pred.append(y_)
    
y_ = np.mean(y_pred,axis = 0)
pd.Series(y_).to_csv("./emsemble.txt",index = False)
    
# There is still a gap between these predictions and the true values; append each model's predictions to the features as new columns and let the models learn again
for key,model in estimators.items():
    model.fit(X_train,y_train)
    y_ = model.predict(X_train)
    X_train[key] = y_
    y_ = model.predict(X_test)
    X_test[key] = y_

y_pred = []
for key,model in estimators.items():
    model.fit(X_train,y_train)
    y_ = model.predict(X_test)
    y_pred.append(y_)
y_ = np.mean(y_pred,axis = 0)

pd.Series(y_).to_csv("./emsemble2.txt",index = False)
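
The second-stage step above appends each model's in-sample predictions to X_train, which lets information about y_train leak into the new features. scikit-learn's StackingRegressor builds the second-stage features from out-of-fold predictions instead; below is a sketch of that alternative (not part of the original), where the Ridge final estimator, cv=5 and the output file name are assumptions.

#Sketch: stacking with out-of-fold predictions instead of in-sample predictions
from sklearn.ensemble import StackingRegressor

#rebuild the plain feature matrices, without the prediction columns added above
cond = data_all_std["origin"] == "train"
X_tr = data_all_std[cond].iloc[:,:-2]
y_tr = data_all_std[cond]["target"]
X_te = data_all_std[data_all_std["origin"] == "test"].iloc[:,:-2]

stack = StackingRegressor(
    estimators = list(estimators.items()),   #reuse the (name, model) pairs defined above
    final_estimator = Ridge(),
    cv = 5,                                  #base-model predictions are generated out-of-fold
)
stack.fit(X_tr, y_tr)
pd.Series(stack.predict(X_te)).to_csv("./stacking.txt", index = False)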

Data normalization (min-max scaling)

#Min-max scale the feature columns; data3 is assumed here to be the MinMaxScaler output (the scaler was imported above)
minmax = MinMaxScaler()
data3 = minmax.fit_transform(data_all.iloc[:,:-2])
data_all_norm = pd.DataFrame(data3,columns = data_all.columns[:-2])
data_all_norm

data_all_norm = pd.merge(data_all_norm,data_all.iloc[:,-2:],left_index=True,right_index=True)

Feature engineering: Box-Cox transform

import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
def scale_minmax(data):
    return (data - data.min())/(data.max()-data.min())

fcols = 6
frows = len(data_all_norm.columns[:-2])
plt.figure(figsize=(4*fcols,4*frows))
i = 0

for var in data_all_norm.columns[:-2]:
    dat = data_all_norm[[var, 'target']].dropna()

    #1. Original distribution (dist = distribution), with a fitted normal curve
    i += 1
    plt.subplot(frows,fcols,i)
    sns.distplot(dat[var], fit=stats.norm)
    plt.title(var + ' Original')
    plt.xlabel('')

    #2. Q-Q plot; skew measures asymmetry - the closer the data is to normal, the smaller it is
    i += 1
    plt.subplot(frows,fcols,i)
    _ = stats.probplot(dat[var], plot=plt)
    plt.title('skew=' + '{:.4f}'.format(stats.skew(dat[var])))
    plt.xlabel('')
    plt.ylabel('')

    #3. Scatter plot of the feature against the target
    i += 1
    plt.subplot(frows,fcols,i)
    plt.plot(dat[var], dat['target'], '.', alpha=0.5)
    plt.title('corr=' + '{:.2f}'.format(np.corrcoef(dat[var], dat['target'])[0][1]))

    #4. Distribution after the Box-Cox transform (+1 keeps the input strictly positive)
    i += 1
    plt.subplot(frows,fcols,i)
    trans_var, lambda_var = stats.boxcox(dat[var].dropna() + 1)
    trans_var = scale_minmax(trans_var)
    sns.distplot(trans_var, fit=stats.norm)
    plt.title(var + ' Transformed')
    plt.xlabel('')

    #5. Skewness after the transform
    i += 1
    plt.subplot(frows,fcols,i)
    _ = stats.probplot(trans_var, plot=plt)
    plt.title('skew=' + '{:.4f}'.format(stats.skew(trans_var)))
    plt.xlabel('')
    plt.ylabel('')

    #6. Scatter plot of the transformed feature against the target
    i += 1
    plt.subplot(frows,fcols,i)
    plt.plot(trans_var, dat['target'], '.', alpha=0.5)
    plt.title('corr=' + '{:.2f}'.format(np.corrcoef(trans_var, dat['target'])[0][1]))
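
The loop above only visualizes the effect of the transform. A sketch of actually applying it (an assumed next step, not code from the original): Box-Cox-transform each feature column of data_all_norm in place, again shifting by +1 so every value is strictly positive, then rescale to [0, 1].

#Sketch: apply the Box-Cox transform to every feature column of data_all_norm
for col in data_all_norm.columns[:-2]:
    transformed, _lmbda = stats.boxcox(data_all_norm[col] + 1)
    data_all_norm[col] = scale_minmax(transformed)   #rescale back to [0, 1]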
