Extract the feature points from an image and display them on the original image.
import cv2
img_path = r'../image/paojie.jpg'
img = cv2.imread(img_path)
cv2.imshow("original", img)
# OpenCV loads images in BGR order, so convert with COLOR_BGR2GRAY
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create()
# Detect keypoints and compute their 128-dimensional descriptors in one call
keypoints, descriptor = sift.detectAndCompute(gray, None)
# Draw the keypoints directly onto the original image;
# DRAW_RICH_KEYPOINTS also shows each keypoint's size and orientation
cv2.drawKeypoints(image=img,
                  outImage=img,
                  keypoints=keypoints,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                  color=(51, 163, 236))
cv2.imshow("SIFT", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
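Each cv2.KeyPoint returned above carries the detection metadata (location, scale, orientation, response). A minimal sketch of inspecting the results, reusing the keypoints and descriptor variables from the code above:

# Inspect the first few detected keypoints (uses `keypoints` and `descriptor` from above)
for kp in keypoints[:5]:
    # pt: (x, y) position, size: neighbourhood diameter,
    # angle: dominant orientation in degrees, response: detector strength
    print(kp.pt, kp.size, kp.angle, kp.response)
print("descriptor shape:", descriptor.shape)  # (number of keypoints, 128)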
Extract the SIFT feature points from the image, then use PCA (Principal Component Analysis) to reduce the dimensionality and extract the principal components. Note: each SIFT descriptor is a 128-dimensional vector; here PCA is used to reduce the descriptor matrix to 100 dimensions.
import cv2
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
img_path = r'../image/paojie.jpg'
img = cv2.imread(img_path)
# OpenCV loads images in BGR order, so convert with COLOR_BGR2GRAY
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.SIFT_create()
keypoints, descriptor = sift.detectAndCompute(gray, None)
# Standardize each descriptor dimension before PCA
descriptor = StandardScaler().fit_transform(descriptor)
# Reduce the 128-dimensional descriptors to 100 dimensions
pca = PCA(n_components=100)
descriptor_reduced = pca.fit_transform(descriptor)
print(pca.singular_values_)
print(pca.components_)
cv2.drawKeypoints(image=img,
                  outImage=img,
                  keypoints=keypoints,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                  color=(255, 0, 255))
cv2.imshow("SIFT", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
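To confirm the reduction worked, you can check the shape of the transformed descriptors and how much variance the 100 components retain; a short sketch building on the variables above:

# Uses descriptor_reduced and pca from the code above
print("reduced shape:", descriptor_reduced.shape)            # (number of keypoints, 100)
print("variance retained:", pca.explained_variance_ratio_.sum())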
This example uses a different flags value: flags selects the keypoint drawing mode. Here no flags argument is passed, so the default mode is used, which marks only the keypoint locations instead of the rich circles above.
import cv2
img = cv2.imread('../image/paojie.jpg', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('origin', img)
detector = cv2.SIFT_create()
# detect() only finds keypoints; descriptors are not computed here
keypoints = detector.detect(gray, None)
# With no explicit flags argument the default drawing mode is used:
# each keypoint is marked with a small circle, without size or orientation
img = cv2.drawKeypoints(image=gray,
                        keypoints=keypoints,
                        outImage=None,
                        color=(255, 0, 255))
cv2.imshow('test', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
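The available drawing modes are the cv2.DRAW_MATCHES_FLAGS_* constants. A minimal sketch comparing the default mode with DRAW_RICH_KEYPOINTS, reusing the gray image and keypoints from the code above:

# Side-by-side comparison of two drawing modes (uses gray and keypoints from above)
plain = cv2.drawKeypoints(gray, keypoints, None, color=(255, 0, 255),
                          flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
rich = cv2.drawKeypoints(gray, keypoints, None, color=(255, 0, 255),
                         flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('default flags', plain)
cv2.imshow('rich keypoints', rich)
cv2.waitKey(0)
cv2.destroyAllWindows()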
Because of the SIFT patent, the pre-built opencv-python / opencv-contrib-python packages from 3.4.3 up to 4.3 ship without SIFT; since OpenCV 4.4.0 the patent has expired and SIFT is available again in the main module as cv2.SIFT_create(). If you need the older cv2.xfeatures2d.SIFT_create() interface, downgrade OpenCV to 3.4.2.17; those wheels were not built for Python 3.8, so the Python version also has to drop (e.g. to 3.6):
pip uninstall opencv-python
pip install opencv-python==3.4.2.17
pip install opencv-contrib-python==3.4.2.17
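To make the later examples run on either setup, you can pick the constructor based on what the installed OpenCV exposes; a small sketch:

import cv2
# Choose whichever SIFT constructor the installed OpenCV provides
if hasattr(cv2, 'SIFT_create'):        # OpenCV >= 4.4 (patent expired)
    sift = cv2.SIFT_create()
else:                                  # older builds with the contrib module
    sift = cv2.xfeatures2d.SIFT_create()
print('cv version:', cv2.__version__)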
Given two images, compute their SIFT feature matching result.
from matplotlib import pyplot as plt
from imagedt.decorator import time_cost
import cv2
print('cv version: ', cv2.__version__)

def bgr_rgb(img):
    # Swap OpenCV's BGR channel order to RGB for matplotlib display
    (b, g, r) = cv2.split(img)
    return cv2.merge([r, g, b])

def orb_detect(image_a, image_b):
    # ORB alternative: binary descriptors matched with Hamming distance
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(image_a, None)
    kp2, des2 = orb.detectAndCompute(image_b, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    img3 = cv2.drawMatches(image_a, kp1, image_b, kp2, matches[:100], None, flags=2)
    return bgr_rgb(img3)

@time_cost
def sift_detect(img1, img2, detector='sift'):
    if detector.startswith('si'):
        print("sift detector......")
        sift = cv2.xfeatures2d.SIFT_create()
    else:
        print("surf detector......")
        sift = cv2.xfeatures2d.SURF_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher with Lowe's ratio test to keep only distinctive matches
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = [[m] for m, n in matches if m.distance < 0.5 * n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
    return bgr_rgb(img3)

if __name__ == "__main__":
    # The two images to match
    image_a = cv2.imread('../image/tower2.jpg')
    image_b = cv2.imread('../image/tower2.jpg')
    img = sift_detect(image_a, image_b)
    plt.imshow(img)
    plt.show()
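orb_detect is defined above but never called; if SIFT/SURF are unavailable in your build, it can be used the same way. A short sketch reusing the images from the main block:

# Alternative: match with ORB instead of SIFT/SURF (uses image_a, image_b from above)
img_orb = orb_detect(image_a, image_b)
plt.imshow(img_orb)
plt.show()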
First, compute the feature data for all of the images.
import cv2
import numpy as np
from os import walk
from os.path import join

def create_descriptors(folder):
    # Collect every file name under the folder
    files = []
    for (dirpath, dirnames, filenames) in walk(folder):
        files.extend(filenames)
    # Compute and save SIFT descriptors for each jpg image
    for f in files:
        if '.jpg' in f:
            save_descriptor(folder, f, cv2.xfeatures2d.SIFT_create())

def save_descriptor(folder, image_path, feature_detector):
    # Skip descriptor files generated by a previous run
    if image_path.endswith("npy"):
        return
    img = cv2.imread(join(folder, image_path), 0)  # read as grayscale
    keypoints, descriptors = feature_detector.detectAndCompute(img, None)
    # Save the descriptor matrix next to the image as <name>.npy
    descriptor_file = image_path.replace("jpg", "npy")
    np.save(join(folder, descriptor_file), descriptors)

if __name__ == '__main__':
    path = 'D://PycharmProjects//pythonProject//image'
    create_descriptors(path)
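To confirm the descriptor files were written correctly, you can load one back and check its shape; a small sketch (the file name here is only an illustration):

import numpy as np
from os.path import join
# Example check: load one saved descriptor file (file name is illustrative)
d = np.load(join('D://PycharmProjects//pythonProject//image', 'example.npy'))
print(d.shape)  # (number of keypoints, 128)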
This saves each image's feature data to an .npy file. The next step is to match the chosen query image against these descriptor files and find the best-matching image.
from os.path import join
from os import walk
import numpy as np
import cv2

# Query image (grayscale) and the folder containing the saved .npy descriptors
query = cv2.imread('C://Users//Garfield//Desktop//007.png', 0)
folder = 'C://Users//Garfield//Desktop//dataunion'

# Collect the names of all saved descriptor files
descriptors = []
for (dirpath, dirnames, filenames) in walk(folder):
    for f in filenames:
        if f.endswith("npy"):
            descriptors.append(f)
print(descriptors)

# Compute SIFT descriptors for the query image
sift = cv2.xfeatures2d.SIFT_create()
query_kp, query_ds = sift.detectAndCompute(query, None)

# FLANN matcher parameters
index_params = dict(algorithm=0, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)

# Match the query against every stored descriptor file and count the good matches
potential_culprits = {}
for d in descriptors:
    matches = flann.knnMatch(query_ds, np.load(join(folder, d)), k=2)
    # Lowe's ratio test: keep a match only if it is clearly better than the runner-up
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    print("img is %s ! number of good matches is %d" % (d, len(good)))
    potential_culprits[d] = len(good)

# The image with the most good matches is the best candidate
max_matches = None
potential_suspect = None
for culprit, matches in potential_culprits.items():
    if max_matches is None or matches > max_matches:
        max_matches = matches
        potential_suspect = culprit
print("potential suspect is %s" % potential_suspect.replace("npy", "").upper())