1. Install the packages required for the experiment (the code below also relies on numpy, OpenCV, dlib and joblib):
pip install scikit-image
pip install playsound
pip install pandas
pip install scikit-learn
pip install numpy
pip install opencv-python
pip install dlib
pip install joblib
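Before moving on, a minimal sanity check (assuming the installs above succeeded) is to import each library and print its version:

# quick environment check: confirm the main libraries import and report their versions
import cv2, dlib, numpy, sklearn, skimage, pandas

for name, mod in [("opencv", cv2), ("dlib", dlib), ("numpy", numpy),
                  ("scikit-learn", sklearn), ("scikit-image", skimage), ("pandas", pandas)]:
    print(name, getattr(mod, "__version__", "unknown"))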
2. Image preprocessing:
Detect the face in each image and crop it out.
import dlib          # dlib face detection library
import numpy as np   # numpy for array handling
import cv2           # OpenCV for image processing
import os

# dlib frontal face detector and 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('D:\\shape_predictor_68_face_landmarks.dat')

# directory containing the source images
path_read = "C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files"
num = 0
for file_name in os.listdir(path_read):
    # aa is the full path of the image
    aa = (path_read + "/" + file_name)
    # the path may contain non-ASCII characters, so read the bytes and decode with imdecode
    img = cv2.imdecode(np.fromfile(aa, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    # image height and width
    img_shape = img.shape
    img_height = img_shape[0]
    img_width = img_shape[1]

    # directory used to store the cropped single-face images
    path_save = "C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files1"
    # dlib face detection
    dets = detector(img, 1)
    print("Number of faces:", len(dets))
    for k, d in enumerate(dets):
        # skip images in which more than one face was detected
        if len(dets) > 1:
            continue
        num = num + 1
        # rectangle corners: (x, y) of the top-left and bottom-right points
        pos_start = tuple([d.left(), d.top()])
        pos_end = tuple([d.right(), d.bottom()])
        # size of the bounding rectangle
        height = d.bottom() - d.top()
        width = d.right() - d.left()
        # blank image the size of the detected face
        img_blank = np.zeros((height, width, 3), np.uint8)
        for i in range(height):
            if d.top() + i >= img_height:  # avoid going past the bottom edge
                continue
            for j in range(width):
                if d.left() + j >= img_width:  # avoid going past the right edge
                    continue
                img_blank[i][j] = img[d.top() + i][d.left() + j]
        img_blank = cv2.resize(img_blank, (200, 200), interpolation=cv2.INTER_CUBIC)
        # imencode + tofile handles non-ASCII characters in the output path
        cv2.imencode('.jpg', img_blank)[1].tofile(path_save + "\\" + "file" + str(num) + ".jpg")
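The per-pixel copy above is easy to follow but slow. A hedged alternative, assuming the same variables from the loop (img, d, img_height, img_width), is to clamp the rectangle to the image borders and crop with numpy slicing:

# sketch of an equivalent crop using numpy slicing with boundary clamping
top = max(d.top(), 0)
left = max(d.left(), 0)
bottom = min(d.bottom(), img_height)
right = min(d.right(), img_width)
face_crop = img[top:bottom, left:right]
face_crop = cv2.resize(face_crop, (200, 200), interpolation=cv2.INTER_CUBIC)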
3. Splitting the dataset:
import os, shutil

# directory with the cropped faces produced in step 2
original_dataset_dir = 'C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files1'
# root of the new, split dataset
base_dir = 'C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files2'
os.mkdir(base_dir)

# directories for the training, validation and test images
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# smile / unsmile sub-directories inside each split
train_smile_dir = os.path.join(train_dir, 'smile')
os.mkdir(train_smile_dir)
train_unsmile_dir = os.path.join(train_dir, 'unsmile')
os.mkdir(train_unsmile_dir)
validation_smile_dir = os.path.join(validation_dir, 'smile')
os.mkdir(validation_smile_dir)
validation_unsmile_dir = os.path.join(validation_dir, 'unsmile')
os.mkdir(validation_unsmile_dir)
test_smile_dir = os.path.join(test_dir, 'smile')
os.mkdir(test_smile_dir)
test_unsmile_dir = os.path.join(test_dir, 'unsmile')
os.mkdir(test_unsmile_dir)

# copy smile images 1-899 to train_smile_dir
fnames = ['file{}.jpg'.format(i) for i in range(1, 900)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_smile_dir, fname)
    shutil.copyfile(src, dst)

# copy smile images 900-1349 to validation_smile_dir
fnames = ['file{}.jpg'.format(i) for i in range(900, 1350)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_smile_dir, fname)
    shutil.copyfile(src, dst)

# copy smile images 1350-1799 to test_smile_dir
fnames = ['file{}.jpg'.format(i) for i in range(1350, 1800)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_smile_dir, fname)
    shutil.copyfile(src, dst)
# copy unsmile images 2127-2999 to train_unsmile_dir
fnames = ['file{}.jpg'.format(i) for i in range(2127, 3000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_unsmile_dir, fname)
    shutil.copyfile(src, dst)

# copy unsmile images 3000-3438 to validation_unsmile_dir
# (the remaining unsmile images are split so that validation and test do not overlap)
fnames = ['file{}.jpg'.format(i) for i in range(3000, 3439)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_unsmile_dir, fname)
    shutil.copyfile(src, dst)

# copy unsmile images 3439-3877 to test_unsmile_dir
fnames = ['file{}.jpg'.format(i) for i in range(3439, 3878)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_unsmile_dir, fname)
    shutil.copyfile(src, dst)
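As a quick sanity check (a small sketch, assuming the directory variables defined above), count the files that ended up in each split:

# print how many images landed in each split directory
for name, d in [("train/smile", train_smile_dir), ("train/unsmile", train_unsmile_dir),
                ("validation/smile", validation_smile_dir), ("validation/unsmile", validation_unsmile_dir),
                ("test/smile", test_smile_dir), ("test/unsmile", test_unsmile_dir)]:
    print(name, len(os.listdir(d)))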
4. Extracting face features with Dlib:
# Extract face features from images and save them into a CSV
# return_128d_features()          -- get the 128D feature vector of one image
# return_features_mean_personX()  -- compute the mean of the 128D features for one folder
import os
import csv
import cv2
import dlib
import numpy as np
from skimage import io

# directory containing the face images to read
path_images_from_camera = "D:/myworkspace/JupyterNotebook/Smile/files2/test/"

# Dlib frontal face detector
detector = dlib.get_frontal_face_detector()
# Dlib 68-point landmark predictor
predictor = dlib.shape_predictor("D:/shape_predictor_68_face_landmarks.dat")
# Dlib face recognition model: maps a human face to a 128D vector
face_rec = dlib.face_recognition_model_v1("D:/dlib_face_recognition_resnet_model_v1.dat")

# return the 128D feature vector of a single image
def return_128d_features(path_img):
    # skimage's io.imread already returns an RGB image, which is what dlib expects
    img_rd = io.imread(path_img)
    faces = detector(img_rd, 1)
    print("%-40s %-20s" % ("Image with faces detected:", path_img), '\n')
    # a cropped face may fail re-detection, so only compute features
    # for images in which a face is actually found
    if len(faces) != 0:
        shape = predictor(img_rd, faces[0])
        face_descriptor = face_rec.compute_face_descriptor(img_rd, shape)
    else:
        face_descriptor = 0
        print("no face")
    return face_descriptor

# extract the features of every photo in a folder and write them to CSV files
def return_features_mean_personX(path_faces_personX):
    features_list_personX = []
    photos_list = os.listdir(path_faces_personX)
    if photos_list:
        for i in range(len(photos_list)):
            # call return_128d_features() to get the 128D features
            print("%-40s %-20s" % ("Image being read:", path_faces_personX + "/" + photos_list[i]))
            features_128d = return_128d_features(path_faces_personX + "/" + photos_list[i])
            # skip photos in which no face was detected
            if features_128d == 0:
                continue
            features_list_personX.append(features_128d)
            i1 = str(i + 1)
            add = "D:/myworkspace/JupyterNotebook/Smile/feature/face_feature" + i1 + ".csv"
            print(add)
            with open(add, "w", newline="") as csvfile:
                writer1 = csv.writer(csvfile)
                writer1.writerow(features_128d)
    else:
        print("Warning: no images in " + path_faces_personX + '/', '\n')

    # mean of the 128D features: N x 128D -> 1 x 128D
    if features_list_personX:
        features_mean_personX = np.array(features_list_personX).mean(axis=0)
    else:
        features_mean_personX = '0'
    return features_mean_personX

# read the face images of every class (one sub-directory per class)
people = os.listdir(path_images_from_camera)
people.sort()

with open("D:/myworkspace/JupyterNotebook/Smile/feature/features2_all.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    for person in people:
        print("##### " + person + " #####")
        # the mean features of this class form a list of length 128
        features_mean_personX = return_features_mean_personX(path_images_from_camera + person)
        writer.writerow(features_mean_personX)
        print("The mean of features:", list(features_mean_personX))
        print('\n')
    print("Saved all registered face features into: D:/myworkspace/JupyterNotebook/Smile/feature/features2_all.csv")
Results: (screenshots of the smile and nosmile extraction runs omitted)
5. Smile recognition:
import pandas as pd                                    # pandas for reading the CSV
from sklearn.model_selection import train_test_split  # splitting the data
from sklearn.preprocessing import StandardScaler      # feature standardization
from sklearn.linear_model import LogisticRegression   # logistic regression (linear model)
from sklearn.neural_network import MLPClassifier      # multi-layer perceptron (neural network)
from sklearn.svm import LinearSVC                     # linear SVC (SVM)
from sklearn.linear_model import SGDClassifier        # stochastic gradient descent classifier (linear model)
import joblib

# read the data from the CSV
def pre_data():
    # 41 column names: 40 features plus the label
    column_names = []
    for i in range(0, 40):
        column_names.append("feature_" + str(i + 1))
    column_names.append("output")
    # read csv
    rd_csv = pd.read_csv("D:/myworkspace/JupyterNotebook/Smile/data/data_csvs/data.csv", names=column_names)
    # print the dimensions of the csv file
    # print("shape:", rd_csv.shape)
    X_train, X_test, y_train, y_test = train_test_split(
        # columns 0-39 are the input features, column 40 is the label
        rd_csv[column_names[0:40]],
        rd_csv[column_names[40]],
        # 25% for testing, 75% for training
        test_size=0.25,
        random_state=33)
    return X_train, X_test, y_train, y_test
path_models = "D:/myworkspace/JupyterNotebook/Smile/data/data_models/"

# LR: logistic regression classifier (linear model)
def model_LR():
    # get data
    X_train_LR, X_test_LR, y_train_LR, y_test_LR = pre_data()
    # preprocessing: standardize each feature to zero mean and unit variance,
    # so no single dimension with large values dominates the prediction
    ss_LR = StandardScaler()
    X_train_LR = ss_LR.fit_transform(X_train_LR)
    X_test_LR = ss_LR.transform(X_test_LR)
    # initialize LogisticRegression
    LR = LogisticRegression()
    # fit() trains the model parameters
    LR.fit(X_train_LR, y_train_LR)
    # save the LR model
    joblib.dump(LR, path_models + "model_LR.m")
    # accuracy on the test set
    score_LR = LR.score(X_test_LR, y_test_LR)
    print("The accuracy of LR:", score_LR)
    # print(type(ss_LR))
    return (ss_LR)

model_LR()
# MLPC: multi-layer perceptron classifier (neural network)
def model_MLPC():
    # get data
    X_train_MLPC, X_test_MLPC, y_train_MLPC, y_test_MLPC = pre_data()
    # preprocessing: standardize the features
    ss_MLPC = StandardScaler()
    X_train_MLPC = ss_MLPC.fit_transform(X_train_MLPC)
    X_test_MLPC = ss_MLPC.transform(X_test_MLPC)
    # initialize MLPC
    MLPC = MLPClassifier(hidden_layer_sizes=(13, 13, 13), max_iter=500)
    # fit() trains the model parameters
    MLPC.fit(X_train_MLPC, y_train_MLPC)
    # save the MLPC model
    joblib.dump(MLPC, path_models + "model_MLPC.m")
    # accuracy on the test set
    score_MLPC = MLPC.score(X_test_MLPC, y_test_MLPC)
    print("The accuracy of MLPC:", score_MLPC)
    return (ss_MLPC)

model_MLPC()
# Linear SVC: linear support vector classifier (SVM)
def model_LSVC():
    # get data
    X_train_LSVC, X_test_LSVC, y_train_LSVC, y_test_LSVC = pre_data()
    # preprocessing: standardize the features
    ss_LSVC = StandardScaler()
    X_train_LSVC = ss_LSVC.fit_transform(X_train_LSVC)
    X_test_LSVC = ss_LSVC.transform(X_test_LSVC)
    # initialize LSVC
    LSVC = LinearSVC()
    # fit() trains the model parameters
    LSVC.fit(X_train_LSVC, y_train_LSVC)
    # save the LSVC model
    joblib.dump(LSVC, path_models + "model_LSVC.m")
    # accuracy on the test set
    score_LSVC = LSVC.score(X_test_LSVC, y_test_LSVC)
    print("The accuracy of LSVC:", score_LSVC)
    return ss_LSVC

model_LSVC()
# SGDC: stochastic gradient descent classifier (linear model)
def model_SGDC():
    # get data
    X_train_SGDC, X_test_SGDC, y_train_SGDC, y_test_SGDC = pre_data()
    # preprocessing: standardize the features
    ss_SGDC = StandardScaler()
    X_train_SGDC = ss_SGDC.fit_transform(X_train_SGDC)
    X_test_SGDC = ss_SGDC.transform(X_test_SGDC)
    # initialize SGDC
    SGDC = SGDClassifier(max_iter=5)
    # fit() trains the model parameters
    SGDC.fit(X_train_SGDC, y_train_SGDC)
    # save the SGDC model
    joblib.dump(SGDC, path_models + "model_SGDC.m")
    # accuracy on the test set
    score_SGDC = SGDC.score(X_test_SGDC, y_test_SGDC)
    print("The accuracy of SGDC:", score_SGDC)
    return ss_SGDC

model_SGDC()
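Each model is persisted with joblib.dump, so it can be reused without retraining. A minimal sketch (assuming pre_data() and path_models from above, and the same random_state so the split matches) reloads one saved model and scores it:

# reload a persisted model and score it on the held-out test split
import joblib

X_train, X_test, y_train, y_test = pre_data()
ss = StandardScaler().fit(X_train)          # refit a scaler on the training split
LR_loaded = joblib.load(path_models + "model_LR.m")
print("Reloaded LR accuracy:", LR_loaded.score(ss.transform(X_test), y_test))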
Image detection with the saved models: check whether the person in a picture is smiling:
# use the saved models on a single test image
import joblib
from smile_dlib_tezhengdian import get_features
import smile_test1
import cv2

# path of the test image
path_test_img = "C:/Users/28205/Documents/Tencent Files/2820535964/FileRecv/test_nosmile.jpg"

# extract the 40-dimensional feature vector of this single image
positions_lip_test = get_features(path_test_img)

# directory containing the saved models
path_models = "D:/myworkspace/JupyterNotebook/Smile/data/data_models/"

print("The result of " + path_test_img + ":")
print('\n')

# ######### LR ###########
LR = joblib.load(path_models + "model_LR.m")
ss_LR = smile_test1.model_LR()
X_test_LR = ss_LR.transform([positions_lip_test])
y_predict_LR = str(LR.predict(X_test_LR)[0]).replace('0', "no smile").replace('1', "with smile")
print("LR:", y_predict_LR)

# ######### LSVC ###########
LSVC = joblib.load(path_models + "model_LSVC.m")
ss_LSVC = smile_test1.model_LSVC()
X_test_LSVC = ss_LSVC.transform([positions_lip_test])
y_predict_LSVC = str(LSVC.predict(X_test_LSVC)[0]).replace('0', "no smile").replace('1', "with smile")
print("LSVC:", y_predict_LSVC)

# ######### MLPC ###########
MLPC = joblib.load(path_models + "model_MLPC.m")
ss_MLPC = smile_test1.model_MLPC()
X_test_MLPC = ss_MLPC.transform([positions_lip_test])
y_predict_MLPC = str(MLPC.predict(X_test_MLPC)[0]).replace('0', "no smile").replace('1', "with smile")
print("MLPC:", y_predict_MLPC)

# ######### SGDC ###########
SGDC = joblib.load(path_models + "model_SGDC.m")
ss_SGDC = smile_test1.model_SGDC()
X_test_SGDC = ss_SGDC.transform([positions_lip_test])
y_predict_SGDC = str(SGDC.predict(X_test_SGDC)[0]).replace('0', "no smile").replace('1', "with smile")
print("SGDC:", y_predict_SGDC)

# draw the four predictions onto the image
img_test = cv2.imread(path_test_img)
img_height = int(img_test.shape[0])
img_width = int(img_test.shape[1])
font = cv2.FONT_HERSHEY_SIMPLEX
# putText expects (x, y): x steps in from the left edge, y steps down the image
cv2.putText(img_test, "LR: " + y_predict_LR, (int(img_width/10), int(img_height/10)), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.putText(img_test, "LSVC: " + y_predict_LSVC, (int(img_width/10), int(img_height/10)*2), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.putText(img_test, "MLPC: " + y_predict_MLPC, (int(img_width/10), int(img_height/10)*3), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.putText(img_test, "SGDC: " + y_predict_SGDC, (int(img_width/10), int(img_height/10)*4), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)

cv2.namedWindow("img", 2)
cv2.imshow("img", img_test)
cv2.waitKey(0)
cv2.destroyAllWindows()
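If the script runs on a machine without a display, a small variation (the output filename here is just an example) saves the annotated image instead of opening a window:

# save the annotated image to disk instead of showing it
cv2.imwrite("result_annotated.jpg", img_test)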
Result: (screenshot of the annotated test image omitted)