# 1. Load the data (either download it or read a local copy)
import pandas as pd
# Download directly:
# df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
#                  header=None, sep=',')
# Read the local copy (iris.data has no header row, so pass header=None)
df = pd.read_csv("iris.data", header=None)
# Assign column names
feature_dict = {i: label for i, label in zip(range(4),
                ('sepal length in cm',
                 'sepal width in cm',
                 'petal length in cm',
                 'petal width in cm'))}
df.columns = [l for i, l in sorted(feature_dict.items())] + ['class label']
print(df.head())
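# Quick sanity check (an optional aside; assumes the standard 150-row Iris
# file with three classes of 50 samples each):
print(df.shape)                          # expected: (150, 5)
print(df['class label'].value_counts())  # expected: 50 per class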
# 2. Reduce four dimensions to two. The features are already numeric and
# need no preprocessing, but the string class labels must be encoded.
from sklearn.preprocessing import LabelEncoder
X = df[['sepal length in cm','sepal width in cm','petal length in cm','petal width in cm']].values
y = df['class label'].values
enc = LabelEncoder()
label_encoder = enc.fit(y)
y = label_encoder.transform(y) + 1  # encode the three classes as 1, 2, 3
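# Optional check of the encoding: LabelEncoder sorts class names
# alphabetically, so with the standard Iris labels the mapping should be
# Iris-setosa -> 1, Iris-versicolor -> 2, Iris-virginica -> 3.
print(dict(zip(label_encoder.classes_,
               label_encoder.transform(label_encoder.classes_) + 1)))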
import numpy as np
# Limit printed floats to 4 decimal places
np.set_printoptions(precision=4)
# Collect the mean vector of each class
mean_vectors = []
for yi in range(1, 4):  # class labels y = 1, 2, 3
    # Mean of every feature within the current class
    # (axis=0 averages over the samples, i.e. column-wise)
    mean_vectors.append(np.mean(X[y == yi], axis=0))
    print('Mean vector for class %s: %s\n' % (yi, mean_vectors[yi - 1]))
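# Cross-check (optional): the same per-class means via pandas, assuming the
# column names assigned above.
print(df.groupby('class label').mean())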
# 3. Compute the scatter matrices
# Within-class scatter matrix over the four original features
S_W = np.zeros((4, 4))
# Accumulate the scatter matrix of each of the three classes
for cl, mv in zip(range(1, 4), mean_vectors):
    class_sc_mat = np.zeros((4, 4))  # scatter matrix of this class
    # Iterate over the samples belonging to the current class
    for row in X[y == cl]:  # cl takes the values 1, 2, 3
        # Turn the sample and its class mean into column vectors
        row, mv = row.reshape(4, 1), mv.reshape(4, 1)
        class_sc_mat += (row - mv).dot((row - mv).T)
    S_W += class_sc_mat  # sum the class scatter matrices
print('Within-class scatter matrix S_W:\n', S_W)
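# Optional verification: each class scatter matrix equals (n_c - 1) times the
# class covariance matrix, so S_W can be cross-checked against np.cov.
S_W_check = np.zeros((4, 4))
for cl in range(1, 4):
    X_c = X[y == cl]
    S_W_check += (X_c.shape[0] - 1) * np.cov(X_c.T)
assert np.allclose(S_W, S_W_check)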
# Overall mean of the whole data set, as a column vector
overall_mean = np.mean(X, axis=0).reshape(4, 1)
# Between-class scatter matrix
S_B = np.zeros((4, 4))
# Accumulate the contribution of each class
for i, mean_vec in enumerate(mean_vectors):
    # Number of samples in the current class
    n = X[y == i + 1, :].shape[0]
    mean_vec = mean_vec.reshape(4, 1)  # as a column vector
    # Apply the formula S_B += n_c * (m_c - m)(m_c - m)^T
    S_B += n * (mean_vec - overall_mean).dot((mean_vec - overall_mean).T)
print('Between-class scatter matrix S_B:\n', S_B)
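# Optional verification of the identity S_T = S_W + S_B, where the total
# scatter S_T sums (x - overall_mean)(x - overall_mean)^T over all samples.
S_T = (X.shape[0] - 1) * np.cov(X.T)
assert np.allclose(S_T, S_W + S_B)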
# With both scatter matrices in hand, combine them and solve the
# eigenvalue problem for inv(S_W).dot(S_B)
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
# Print every eigenvalue together with its eigenvector
for i in range(len(eig_vals)):
    eigvec_sc = eig_vecs[:, i].reshape(4, 1)
    print('\nEigenvector {}: \n{}'.format(i + 1, eigvec_sc.real))
    print('Eigenvalue {:}: {:.2e}'.format(i + 1, eig_vals[i].real))
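# Optional check that each pair really solves the eigenvalue problem for
# A = inv(S_W).dot(S_B), i.e. A.dot(v) == eigenvalue * v up to rounding.
A = np.linalg.inv(S_W).dot(S_B)
for i in range(len(eig_vals)):
    v = eig_vecs[:, i].reshape(4, 1)
    np.testing.assert_array_almost_equal(A.dot(v), eig_vals[i] * v, decimal=6)
print('eigenvalue/eigenvector check passed')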
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is sorted by decreasing eigenvalue
print('Eigenvalues in decreasing order:\n')
for pair in eig_pairs:
    print(pair[0])
print('Share of each eigenvalue in the total:\n')
eigv_sum = sum(eig_vals)
for i, pair in enumerate(eig_pairs):
    # :.2% formats the ratio as a percentage with two decimal places
    print('Eigenvalue {0:}: {1:.2%}'.format(i + 1, (pair[0] / eigv_sum).real))
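# Note: with c = 3 classes, rank(S_B) <= c - 1 = 2, so at most two
# eigenvalues are non-zero; the other two are zero up to floating-point
# noise. Keeping only the top two eigenvectors therefore loses nothing.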
# Keep the top two eigenvectors as the projection matrix
W = np.hstack((eig_pairs[0][1].reshape(4, 1), eig_pairs[1][1].reshape(4, 1)))
print('Matrix W:\n', W.real)
# Project the data down to two dimensions
X_lda = X.dot(W)
assert X_lda.shape == (150, 2), "The matrix is not 150x2 dimensional."
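# Note: np.linalg.eig returns a complex dtype for general matrices even when
# the eigenvalues happen to be real, which is why the plotting code below
# takes .real before drawing.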
# Visualize the results
from matplotlib import pyplot as plt
label_dict = {1: 'Setosa', 2: 'Versicolor', 3:'Virginica'}
# For comparison, first plot two of the original feature dimensions
def plot_step_():
ax = plt.subplot(111)
for label,marker,color in zip(
range(1,4),('^', 's', 'o'),('blue', 'red', 'green')):
plt.scatter(x=X[:,0].real[y == label],
y=X[:,1].real[y == label],
marker=marker,
color=color,
alpha=0.5,
label=label_dict[label]
)
    plt.xlabel('sepal length in cm')
    plt.ylabel('sepal width in cm')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
    plt.title('Two of the original feature dimensions (before LDA)')
# hide axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.grid()
    plt.tight_layout()
plt.show()
plot_step_()
def plot_step_lda():
ax = plt.subplot(111)
for label,marker,color in zip(
range(1,4),('^', 's', 'o'),('blue', 'red', 'green')):
plt.scatter(x=X_lda[:,0].real[y == label],
y=X_lda[:,1].real[y == label],
marker=marker,
color=color,
alpha=0.5,
label=label_dict[label]
)
plt.xlabel('LD1')
plt.ylabel('LD2')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title('LDA: Iris projection onto the first 2 linear discriminants')
# hide axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.grid()
    plt.tight_layout()
plt.show()
def plot_scikit_lda(X, title):
ax = plt.subplot(111)
for label,marker,color in zip(
range(1,4),('^', 's', 'o'),('blue', 'red', 'green')):
plt.scatter(x=X[:,0][y == label],
y=X[:,1][y == label] * -1, # flip the figure
marker=marker,
color=color,
alpha=0.5,
label=label_dict[label])
plt.xlabel('LD1')
plt.ylabel('LD2')
leg = plt.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.title(title)
# hide axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.grid()
    plt.tight_layout()
plt.show()
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# scikit-learn's LDA performs all of the above steps directly
sklearn_lda = LDA(n_components=2)
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
plot_step_lda()
plot_scikit_lda(X_lda_sklearn, title='Default LDA via scikit-learn')
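# Optional comparison: scikit-learn also exposes the between-class variance
# ratio of each discriminant, which should roughly match the eigenvalue
# percentages computed by hand above.
print(sklearn_lda.explained_variance_ratio_)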
# Python 3 input and output notes
# Basic use of str.format():
print('{} website: "{}!"'.format('菜鸟教程', 'www.runoob.com'))
# 菜鸟教程 website: "www.runoob.com!"
# A number inside the braces refers to the position of the argument passed to format()
print('{0} and {1}'.format('Google', 'Runoob'))
# Google and Runoob
print('{1} and {0}'.format('Google', 'Runoob'))
# Runoob and Google
# Keyword arguments:
print('{name} website: {site}'.format(name='菜鸟教程', site='www.runoob.com'))
# 菜鸟教程 website: www.runoob.com
# Positional and keyword arguments can be combined freely:
print('Site list: {0}, {1}, and {other}.'.format('Google', 'Runoob', other='Taobao'))
# Site list: Google, Runoob, and Taobao.
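# The two format specs used earlier in this script, shown in isolation:
print('{:.2e}'.format(12345.6789))  # 1.23e+04  (scientific notation, 2 decimals)
print('{:.2%}'.format(0.9915))      # 99.15%    (percentage, 2 decimals)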