Someone else's version
import cv2 as cv
import numpy as np

# Template: the button cropped from the reference image; target: the device interface.
buttons = cv.imread('/home/pi/Desktop/myfile/python/surf/buttons.jpg')
interface = cv.imread('/home/pi/Desktop/myfile/python/surf/device1.png')
button1 = buttons[26:86, 66:134, :]

# SURF keypoints and descriptors (requires opencv-contrib with xfeatures2d).
surf = cv.xfeatures2d.SURF_create(100)
kp1, des1 = surf.detectAndCompute(button1, None)
kp2, des2 = surf.detectAndCompute(interface, None)

# Brute-force matching with Lowe's ratio test.
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

# findHomography needs at least 4 point pairs.
if len(good) < 4:
    print('not enough good matches: %d' % len(good))
    exit()

src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()

# Project the template corners into the interface image and draw the outline.
h, w, _ = button1.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv.perspectiveTransform(pts, M)
img2 = cv.polylines(interface, [np.int32(dst)], True, 255, 3, cv.LINE_AA)

cv.imshow('sp', img2)
cv.waitKey(0)
cv.destroyAllWindows()
exit()
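If the goal is to act on the button once it has been located (for example to press it), a minimal sketch of how its centre could be derived from the projected corners; this is an addition, not part of the original script, and it reuses the `dst` array produced by perspectiveTransform above.

# Sketch: centre of the located button in interface coordinates,
# taken as the mean of the four projected corners in `dst`.
center = dst.reshape(-1, 2).mean(axis=0)
cx, cy = int(round(center[0])), int(round(center[1]))
print('button centre:', (cx, cy))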
My own version (incomplete)
import numpy as np
import cv2 as cv

def GuassianKernel(sigma, dim):
    # dim x dim Gaussian kernel, centred on the middle element and normalized to sum to 1.
    coords = np.arange(dim) - dim // 2
    x, y = np.meshgrid(coords, coords)
    kernel = np.exp(-(x ** 2 + y ** 2) / (2.0 * sigma * sigma)) / (2.0 * np.pi * sigma * sigma)
    return kernel / kernel.sum()

def get_keypoints(img):
    # Builds the Gaussian pyramid; keypoint extraction itself is not implemented yet.
    if img.ndim == 3:
        img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    sigma0 = 1.52
    n = 3                      # scales per octave
    Stack = n + 3              # images per octave
    Octave = int(np.log2(min(img.shape[:2]))) - 3
    k = 2 ** (1 / n)
    sigma = [[(k ** s) * sigma0 * (1 << o) for s in range(Stack)] for o in range(Octave)]
    samplePyramid = [img[::(1 << o), ::(1 << o)] for o in range(Octave)]
    GuassianPyramid = []
    for i in range(Octave):
        GuassianPyramid.append([])
        for j in range(Stack):
            dim = 2 * int(3 * sigma[i][j] + 0.5) + 1
            kernel = GuassianKernel(sigma[i][j], dim)
            # The original code called a custom convolve(); cv.filter2D is used here instead
            # (correlation, but equivalent to convolution for a symmetric Gaussian kernel).
            conv = cv.filter2D(samplePyramid[i].astype(np.float32), -1,
                               kernel.astype(np.float32), borderType=cv.BORDER_REFLECT)
            GuassianPyramid[i].append(conv)
    return GuassianPyramid

def surfmatch(img1, img2, Hessian):
    # Unfinished: so far it only normalizes the first input to grayscale.
    if len(img1.shape) == 3:
        img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
    pass
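A quick way to sanity-check GuassianKernel is to compare it against the outer product of OpenCV's separable cv.getGaussianKernel. This is only a verification sketch assuming the fixed kernel above; both sides are normalized to sum to 1 before comparing.

import numpy as np
import cv2 as cv

sigma = 1.52
dim = 2 * int(3 * sigma + 0.5) + 1
g1d = cv.getGaussianKernel(dim, sigma)             # (dim, 1) column vector, coefficients sum to 1
k2d = GuassianKernel(sigma, dim)
print(np.allclose(g1d @ g1d.T, k2d / k2d.sum()))   # expect True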
SURF test: test.cpp
#include <iostream>
#include "opencv2/opencv.hpp"
#include "surfmatch.hpp"
using namespace std;
using namespace cv;

int main()
{
    cout << "Locate 3D test." << CV_VERSION << endl;
    Mat imgshow;
    Mat box = imread("/home/pi/Desktop/myfile/hand.png");
    Mat scene = imread("/home/pi/Desktop/myfile/camera.png");
    if (box.empty() || scene.empty()) return 1;   // bail out if either image failed to load
    Detector detector(box);
    scene = scene(Rect(640, 0, 640, 480));        // right half (640x480) of the stereo frame
    detector.find_known_object(scene, imgshow);
    imshow("test1", imgshow);
    waitKey(0);
    return 0;
}
Camera detection: camera.cpp
#include <iostream>
#include "opencv2/opencv.hpp"
#include "surfmatch.hpp"
using namespace std;
using namespace cv;

int main()
{
    cout << "Locate 3D test." << CV_VERSION << endl;
    Mat frame, Lcap, Rcap, imgshowL, imgshowR;
    Mat device = imread("/home/pi/Desktop/myfile/device2.png");
    if (device.empty()) return 1;                 // template image failed to load
    Mat box = device(Rect(275, 108, 340, 222));   // region of the device used as the template
    Detector detector(box);

    // The stereo camera delivers a single 1280x480 frame: left view | right view.
    VideoCapture cap(0);
    cap.set(CAP_PROP_FRAME_WIDTH, 1280);
    cap.set(CAP_PROP_FRAME_HEIGHT, 480);
    if (!cap.isOpened()) return 1;

    char ch;
    while (1) {
        if (!cap.read(frame)) break;
        Lcap = frame(Rect(0, 0, 640, 480));
        Rcap = frame(Rect(640, 0, 640, 480));
        // find_known_object() returns non-zero on failure; skip the frame in that case.
        if (detector.find_known_object(Lcap, imgshowL)) continue;
        if (detector.find_known_object(Rcap, imgshowR)) continue;
        imshow("test1", imgshowL);
        imshow("test2", imgshowR);
        ch = (char)waitKey(10);
        if (ch == 'q') break;
    }
    cap.release();
    return 0;
}
Detector class: detector.hpp (included above as "surfmatch.hpp")
#pragma once
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/xfeatures2d.hpp"

// Detects a known planar object (the "box" template) in a scene image and draws
// its outline into dst. find_known_object() returns 0 on success, non-zero on failure.
class Detector {
public:
    Detector(const cv::Mat& _box);
    int find_known_object(const cv::Mat& src, cv::Mat& dst);
private:
    cv::Mat box;
    cv::Ptr<cv::xfeatures2d::SURF> detector;
    std::vector<cv::KeyPoint> objKeypoints;
    cv::Mat objDescriptors;
};

Detector::Detector(const cv::Mat& _box)
{
    box = _box.clone();
    detector = cv::xfeatures2d::SURF::create();
    detector->setHessianThreshold(400);
    // Compute the template's keypoints and descriptors once, up front.
    detector->detectAndCompute(box, cv::Mat(), objKeypoints, objDescriptors);
}

int Detector::find_known_object(const cv::Mat& src, cv::Mat& dst)
{
    using namespace cv;
    using namespace cv::xfeatures2d;
    using namespace std;

    vector<KeyPoint> keypoints;
    Mat descriptors;
    FlannBasedMatcher matcher;
    vector<DMatch> matches, goodMatches;
    dst = src.clone();

    detector->detectAndCompute(src, Mat(), keypoints, descriptors);
    try { matcher.match(objDescriptors, descriptors, matches); }
    catch (const exception&) { return 1; }

    // Keep only matches close to the best (smallest) descriptor distance.
    double dist, mindist = 100;
    for (int i = 0; i < objDescriptors.rows; i++) {
        dist = matches[i].distance;
        if (dist < mindist)
            mindist = dist;
    }
    for (int i = 0; i < objDescriptors.rows; i++)
        if (matches[i].distance <= max(3 * mindist, 0.08))
            goodMatches.push_back(matches[i]);

    vector<Point2f> vecobj, vecsce;
    for (size_t i = 0; i < goodMatches.size(); i++) {
        vecobj.push_back(objKeypoints[goodMatches[i].queryIdx].pt);
        vecsce.push_back(keypoints[goodMatches[i].trainIdx].pt);
    }
    if (vecobj.size() < 4) return 2;   // findHomography needs at least 4 point pairs

    cv::Mat H;
    try { H = findHomography(vecobj, vecsce, RHO); }
    catch (const exception&) { return 2; }
    if (H.empty()) return 2;

    // Project the template corners into the scene and draw the quadrilateral.
    vector<Point2f> objCorners(4);
    objCorners[0] = Point2f(0, 0);
    objCorners[1] = Point2f((float)box.cols, 0);
    objCorners[2] = Point2f((float)box.cols, (float)box.rows);
    objCorners[3] = Point2f(0, (float)box.rows);
    vector<Point2f> sceneCorners(4);
    try { perspectiveTransform(objCorners, sceneCorners, H); }
    catch (const exception&) { return 3; }

    line(dst, sceneCorners[0], sceneCorners[1], Scalar(0, 255, 0), 1);
    line(dst, sceneCorners[1], sceneCorners[2], Scalar(0, 255, 0), 1);
    line(dst, sceneCorners[2], sceneCorners[3], Scalar(0, 255, 0), 1);
    line(dst, sceneCorners[3], sceneCorners[0], Scalar(0, 255, 0), 1);
    return 0;
}