import numpy
import urllib.request
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn.metrics import mean_squared_error
import pylab as plot
# Download the red-wine-quality dataset (semicolon-separated CSV with one
# header row) and parse it into a feature list, a label list, and the
# column names.
url = ("http://archive.ics.uci.edu/ml/machine-learning-databases/"
       "wine-quality/winequality-red.csv")
data = urllib.request.urlopen(url)

xlist = []    # feature rows (all columns except the last)
labels = []   # target: wine quality score (last column)
names = []    # column names from the header row
firstline = True
for line in data:
    # The response yields bytes; decode before splitting so the header
    # names are plain strings usable as plot tick labels later.
    fields = line.decode("utf-8").strip().split(";")
    if firstline:
        # Header values are quoted in this file; strip the quotes.
        names = [field.strip('"') for field in fields]
        firstline = False
    else:
        labels.append(float(fields[-1]))  # last column is the label
        xlist.append([float(value) for value in fields[:-1]])

nrows = len(xlist)
ncols = len(xlist[0])  # was len(xlist[1]): wrong row, IndexError on 1-row data

x = numpy.array(xlist)
y = numpy.array(labels)
winenames = numpy.array(names)
# Hold out 30% of the data for out-of-sample evaluation; fixed seed for
# reproducibility.
xtrain, xtest, ytrain, ytest = train_test_split(
    x, y, test_size=0.30, random_state=531)

# Sweep the ensemble size and record the held-out MSE at each setting.
mseoos = []
ntreelist = range(50, 500, 10)
depth = None   # grow trees fully (loop-invariant; hoisted out of the loop)
maxfeat = 4    # features considered per split (~sqrt of the 11 predictors)
for itrees in ntreelist:
    winerandomforestmodel = ensemble.RandomForestRegressor(
        n_estimators=itrees, max_depth=depth, max_features=maxfeat,
        oob_score=False, random_state=531)
    winerandomforestmodel.fit(xtrain, ytrain)
    prediction = winerandomforestmodel.predict(xtest)
    mseoos.append(mean_squared_error(ytest, prediction))

# Report the MSE for the largest ensemble in the sweep.
print("MSE")
print(mseoos[-1])

# Plot held-out MSE versus ensemble size.
plot.plot(ntreelist, mseoos)
plot.xlabel("number of trees")
plot.ylabel("Mean Squared Error")  # was pinyin "fang cha" ("variance") — the axis shows MSE
plot.show()
# Variable-importance bar chart from the last forest fitted in the sweep.
featureimportance = winerandomforestmodel.feature_importances_
# Scale importances relative to the strongest predictor (max becomes 1.0).
featureimportance = featureimportance / featureimportance.max()
sorted_idx = numpy.argsort(featureimportance)  # least -> most important
barpos = numpy.arange(sorted_idx.shape[0]) + .5
plot.barh(barpos, featureimportance[sorted_idx], align='center')
plot.yticks(barpos, winenames[sorted_idx])
plot.xlabel("variable importance")
plot.show()
# NOTE: removed a stray trailing "|" character that made the file a SyntaxError.