前言
对于特征提取算法的理论学习以及代码实现有很多,本文主要对自己用到的部分进行总结。主要包括特征点的提取,以及图像匹配和融合。
- 这里将特征提取单独拿出来,是为了更清楚地学习如何实现特征提取,同时统计特征点的数量。
- 另外总结基于SIFT、ORB的特征匹配的实现方式。
- 针对误匹配对的情况,用RANSAC算法进行剔除。
SIFT特征点提取
- 该部分我们可以选择在彩色图或者灰度图的基础上进行特征点提取。
- 也可以选择具体特征点数目或者自动生成特征点个数
- KeyPoint是SIFT算法里面的关键点,包含了多种属性:坐标、特征点邻域直径、特征点的方向、特征点的强度、特征点所在的图像金字塔的组、用于聚类的id
vector<KeyPoint>keypoints;
- src图像中检测到的SIFT特征点存储到keypoints中。
detector->detect(src, keypoints, Mat());
整体代码:
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
#include <opencv2/highgui/highgui_c.h>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<vector>
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
// Input image, shared as a file-scope global (matches the other listings in this post).
Mat src;

// SIFT keypoint extraction demo: load a grayscale image, detect up to
// `numfeature` SIFT keypoints, draw them and save the visualization.
// Returns 0 on success, -1 if the image cannot be loaded.
int main(int argc, char** argv)
{
    src = imread("./data2/101.png", IMREAD_GRAYSCALE);
    // imread() returns an empty Mat on failure; empty() is the idiomatic check.
    if (src.empty())
    {
        cout << "图片加载失败" << endl;
        return -1;
    }
    imshow("加载的灰度图像", src);
    int numfeature = 400;  // upper bound on retained keypoints
    Ptr<SIFT> detector = SIFT::create(numfeature);
    vector<KeyPoint> keypoints;
    detector->detect(src, keypoints, Mat());  // empty Mat() = no mask
    // keypoints.size() is size_t: %zu is the matching specifier (%d is UB on 64-bit).
    printf("所有的特征点个数:%zu\n", keypoints.size());
    Mat resultImg;
    drawKeypoints(src, keypoints, resultImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imshow("SIFT特征点提取", resultImg);
    imwrite("./效果图/SIFT特征点提取.jpg", resultImg);
    waitKey(0);
    return 0;
}
ORB特征点提取
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
#include <opencv2/highgui/highgui_c.h>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<vector>
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
Mat src;
int main(int argc, char** argv)
{
src = imread("./data2/101.png", IMREAD_GRAYSCALE);
if (!src.data)
{
cout << "图片加载失败" << endl;
return -1;
}
namedWindow("加载的灰度图像", CV_WINDOW_NORMAL);
imshow("加载的灰度图像", src);
int numfeature = 400;
Ptr<ORB>detector = ORB::create(numfeature);
vector<KeyPoint>keypoints;
detector->detect(src, keypoints, Mat());
printf("所有的特征点个数:%d", keypoints.size());
Mat resultImg;
drawKeypoints(src, keypoints, resultImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
imshow("特征点提取", resultImg);
imwrite("./效果图/特征点提取.jpg", resultImg);
waitKey(0);
return 0;
}
FAST角点检测且阈值可调节
- 阈值可自动调节,首先给予一个初值为40的阈值。
- 将特征点个数以及阈值打印出来。
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
#include <opencv2/highgui/highgui_c.h>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<vector>
using namespace std;
using namespace cv;
int thre = 40;
Mat src;
void trackBar(int, void*);
int main(int argc, char** argv)
{
src = imread("./data2/88.jpg", IMREAD_GRAYSCALE);
if (src.empty())
{
printf("无图像加载 \n");
return -1;
}
namedWindow("input", WINDOW_NORMAL);
imshow("input", src);
namedWindow("output", WINDOW_NORMAL);
createTrackbar("threshould", "output", &thre, 255, trackBar);
waitKey(0);
return 0;
}
// Trackbar callback: re-runs FAST corner detection on the global `src` with
// the current global threshold `thre`, then draws, shows and saves the result.
void trackBar(int, void*)
{
    std::vector<KeyPoint> keypoints;
    Mat dst = src.clone();
    Ptr<FastFeatureDetector> detector = FastFeatureDetector::create(thre);
    // Newline added so successive callback invocations don't run together.
    printf("阈值:%d\n", thre);
    detector->detect(src, keypoints);
    // keypoints.size() is size_t: %zu is the matching specifier (%d is UB on 64-bit).
    printf("检测到的所有的特征点个数:%zu\n", keypoints.size());
    drawKeypoints(dst, keypoints, dst, Scalar::all(-1), DrawMatchesFlags::DRAW_OVER_OUTIMG);
    imshow("角点检测图", dst);
    imwrite("./效果图/角点检测图.jpg", dst);
}
如下是设置阈值为40的角点检测图:
SIFT特征匹配
- 该代码可打印检测到的特征点的数量,对于数量采用两种方式来设置,一是设置具体的特征点个数,二是自动统计特征点的个数,可根据自己的实验情况来选择。
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
#include <opencv2/highgui/highgui_c.h>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<vector>
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
using namespace std;
using namespace cv;
int main(int* argv, int** argc)
{
Mat imgRight = imread("./data/2.jpg");
Mat imgLeft = imread("./data/1.jpg");
int numfeature = 400;
Ptr<SIFT>Detector = SIFT::create(numfeature);
vector<KeyPoint> kpCat, kpSmallCat;
Mat descriptorCat, descriptorSmallCat;
Detector->detectAndCompute(imgRight, Mat(), kpCat, descriptorCat);
printf("检测到的左图所有的特征点个数:%d", kpCat.size());
Detector->detectAndCompute(imgLeft, Mat(), kpSmallCat, descriptorSmallCat);
printf("检测到的右图所有的特征点个数:%d", kpCat.size());
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE);
std::vector<DMatch> matchers;
matcher->match(descriptorCat, descriptorSmallCat, matchers);
Mat imgMatches;
drawMatches(imgRight, kpCat, imgLeft, kpSmallCat, matchers, imgMatches);
namedWindow("特征点匹配图", CV_WINDOW_NORMAL);
imshow("特征点匹配图", imgMatches);
imwrite("./效果图/特征点匹配图.jpg", imgMatches);
waitKey(0);
return true;
}
ORB特征匹配
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
#include <opencv2/highgui/highgui_c.h>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<vector>
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
using namespace std;
using namespace cv;
int main(int* argv, int** argc)
{
Mat imgRight = imread("./data/2.jpg");
Mat imgLeft = imread("./data/1.jpg");
int numfeature = 400;
Ptr<ORB>Detector = ORB::create(numfeature);
vector<KeyPoint> kpCat, kpSmallCat;
Mat descriptorCat, descriptorSmallCat;
Detector->detectAndCompute(imgRight, Mat(), kpCat, descriptorCat);
printf("检测到的左图所有的特征点个数:%d", kpCat.size());
Detector->detectAndCompute(imgLeft, Mat(), kpSmallCat, descriptorSmallCat);
printf("检测到的右图所有的特征点个数:%d", kpCat.size());
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE);
std::vector<DMatch> matchers;
matcher->match(descriptorCat, descriptorSmallCat, matchers);
Mat imgMatches;
drawMatches(imgRight, kpCat, imgLeft, kpSmallCat, matchers, imgMatches);
namedWindow("特征点匹配图", CV_WINDOW_NORMAL);
imshow("特征点匹配图", imgMatches);
imwrite("./效果图/特征点匹配图.jpg", imgMatches);
waitKey(0);
return true;
}
SURF图像匹配并进行加权平均融合
几个重要的步骤:
surf = SURF::create(500);
auto surf = SURF::create(); //自动生成特征点的个数
- 计算特征点间距离,并进行排序,便于后续误匹配对剔除
sort(matches.begin(), matches.end()); //根据match里面特征对的距离从小到大排序
warpPerspective(a, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), b.rows));
- 图像融合,采用加权平均融合方式,重点在于权值的计算
alpha = (processWidth - (j - start)) / processWidth;
完整代码:
可以根据需要来选择不同的特征提取方法
#include <iostream>
#include <stdio.h>
#include <string>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include"opencv2/flann.hpp"
#include"opencv2/xfeatures2d.hpp"
#include"opencv2/ml.hpp"
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
using namespace cv::ml;
// Forward declaration: blends the seam region of the stitched result (defined below).
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);
// Projected positions of the four corners of the warped (right) image after
// applying the homography; filled in by CalcCorners().
typedef struct
{
Point2f left_top;
Point2f left_bottom;
Point2f right_top;
Point2f right_bottom;
}four_corners_t;
// Global holding the most recent CalcCorners() result; read by main() and OptimizeSeam().
four_corners_t corners;
// Maps the four corners of `src` through the homography H (3x3, CV_64F) and
// stores the resulting Cartesian image coordinates in the global `corners`.
// NOTE(review): the code reads v1[] after `V1 = H * V2`, which only works if
// OpenCV evaluates the matrix-expression product into the existing buffer `v1`
// that V1 wraps (no reallocation since size/type match). This is how the
// widely-copied snippet behaves in practice, but it is fragile — confirm if
// the OpenCV version changes.
void CalcCorners(const Mat& H, const Mat& src)
{
double v2[] = { 0, 0, 1 };  // homogeneous source corner (x, y, 1); starts at left-top (0, 0)
double v1[3];               // receives the transformed homogeneous point
Mat V2 = Mat(3, 1, CV_64FC1, v2);  // column vector wrapping v2 (no data copy)
Mat V1 = Mat(3, 1, CV_64FC1, v1);  // column vector wrapping v1 (no data copy)
V1 = H * V2;  // product written into v1's storage (see note above)
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
// Perspective divide by the homogeneous coordinate v1[2].
corners.left_top.x = v1[0] / v1[2];
corners.left_top.y = v1[1] / v1[2];
// Left-bottom corner (0, rows).
v2[0] = 0;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);
V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
corners.left_bottom.x = v1[0] / v1[2];
corners.left_bottom.y = v1[1] / v1[2];
// Right-top corner (cols, 0).
v2[0] = src.cols;
v2[1] = 0;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);
V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
corners.right_top.x = v1[0] / v1[2];
corners.right_top.y = v1[1] / v1[2];
// Right-bottom corner (cols, rows).
v2[0] = src.cols;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);
V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
corners.right_bottom.x = v1[0] / v1[2];
corners.right_bottom.y = v1[1] / v1[2];
}
// SURF-based image stitching pipeline: detect and match features, estimate
// the homography with RANSAC, warp the right image onto the left image's
// plane, overlay them, then blend the seam with OptimizeSeam().
int main()
{
    Mat a = imread("./data2/n.png", 1);  // right image (flag 1 = color)
    Mat b = imread("./data2/m.png", 1);  // left image
    // Guard against missing input files before running detection.
    if (a.empty() || b.empty())
    {
        cout << "图片加载失败" << endl;
        return -1;
    }
    namedWindow("右图a", WINDOW_NORMAL);
    namedWindow("左图b", WINDOW_NORMAL);
    imshow("右图a", a);
    imshow("左图b", b);
    // Quick per-image keypoint counts (descriptors are computed again below).
    Ptr<SURF> surf = SURF::create(500);  // 500 = Hessian response threshold
    vector<KeyPoint> keypoints;
    surf->detect(a, keypoints, Mat());
    // size() is size_t: %zu is the matching specifier (%d is UB on 64-bit).
    printf("a图中所有的特征点个数:%zu\n", keypoints.size());
    surf->detect(b, keypoints, Mat());
    printf("b图中所有的特征点个数:%zu\n", keypoints.size());
    BFMatcher matcher;  // default NORM_L2 is correct for SURF's float descriptors
    Mat c, d;  // descriptors for images a and b respectively
    vector<KeyPoint> key1, key2;
    vector<DMatch> matches;
    surf->detectAndCompute(a, Mat(), key1, c);
    surf->detectAndCompute(b, Mat(), key2, d);
    matcher.match(d, c, matches);  // query = left (b), train = right (a)
    // DMatch::operator< compares distance, so this sorts best matches first.
    sort(matches.begin(), matches.end());
    vector<DMatch> good_matches;
    // Keep the best half of the matches, capped at 100 pairs.
    int ptsPairs = std::min(100, (int)(matches.size() * 0.5));
    cout << "匹配点对:" << ptsPairs << endl;
    for (int i = 0; i < ptsPairs; i++)
    {
        good_matches.push_back(matches[i]);
    }
    Mat outimg;
    drawMatches(b, key2, a, key1, good_matches, outimg, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    namedWindow("特征点匹配", WINDOW_NORMAL);
    imshow("特征点匹配", outimg);
    imwrite("特征点匹配.jpg", outimg);
    vector<Point2f> imagePoints1, imagePoints2;
    for (int i = 0; i < (int)good_matches.size(); i++)
    {
        imagePoints2.push_back(key2[good_matches[i].queryIdx].pt);  // points in left image b
        imagePoints1.push_back(key1[good_matches[i].trainIdx].pt);  // points in right image a
    }
    // Homography mapping right-image points onto the left image's plane,
    // with RANSAC rejecting outlier correspondences.
    Mat homo = findHomography(imagePoints1, imagePoints2, RANSAC);
    cout << "采用了RANSAC算法后的homo变换矩阵为:\n" << homo << endl << endl;
    CalcCorners(homo, a);
    cout << "输出配准图的左上左下以及右上右下角四个顶点的坐标\n"<< endl;
    cout << "left_top:" << corners.left_top << endl;
    cout << "left_bottom:" << corners.left_bottom << endl;
    cout << "right_top:" << corners.right_top << endl;
    cout << "right_bottom:" << corners.right_bottom << endl;
    Mat imageTransform1, imageTransform2;
    // Canvas wide enough to hold the warped right image's rightmost corner.
    warpPerspective(a, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), b.rows));
    namedWindow("右图经过透视矩阵变换图", WINDOW_NORMAL);
    imshow("右图经过透视矩阵变换图", imageTransform1);
    imwrite("右图经过透视矩阵变换图.jpg", imageTransform1);
    int dst_width = imageTransform1.cols;
    int dst_height = b.rows;
    Mat dst(dst_height, dst_width, CV_8UC3);
    dst.setTo(0);
    imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
    b.copyTo(dst(Rect(0, 0, b.cols, b.rows)));
    // BUG FIX: the two labels below were swapped in the original. At this
    // point dst is the plain overlay (no blending applied yet).
    namedWindow("未融合拼接图片", WINDOW_NORMAL);
    imshow("未融合拼接图片", dst);
    imwrite("未融合拼接图片.jpg", dst);
    OptimizeSeam(b, imageTransform1, dst);  // weighted-average seam blending
    namedWindow("融合后的拼接图", WINDOW_NORMAL);
    imshow("融合后的拼接图", dst);
    imwrite("融合后的拼接图.jpg", dst);
    waitKey();
    return 0;
}
// Blends the overlap between the left image (img1) and the warped right
// image (trans) inside dst using a linear weighted-average ramp: the left
// image's weight falls from 1 at the overlap's left edge to 0 at img1's
// right edge. All three Mats are assumed CV_8UC3 (3 bytes per pixel, as
// created in main()).
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
    // Left boundary of the overlap: the smaller x of the warped image's
    // projected left corners.
    int start = MIN(corners.left_top.x, corners.left_bottom.x);
    // BUG FIX: the homography can project corners to negative x, which would
    // make `j` start negative and index out of bounds below; clamp to 0.
    if (start < 0) start = 0;
    cout << "重合区域的左边界:" << start << endl << endl;
    double processWidth = img1.cols - start;
    cout << "重合区域的宽度:" << processWidth << endl << endl;
    // Guard the division in the alpha ramp: no overlap means nothing to blend.
    if (processWidth <= 0) return;
    int rows = dst.rows;
    int cols = img1.cols;
    double alpha = 1;  // weight of the left image's pixel
    for (int i = 0; i < rows; i++)
    {
        uchar* p = img1.ptr<uchar>(i);
        uchar* t = trans.ptr<uchar>(i);
        uchar* d = dst.ptr<uchar>(i);
        for (int j = start; j < cols; j++)
        {
            // Pure-black pixels in the warped image lie outside its footprint;
            // take those entirely from the left image.
            if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
            {
                alpha = 1;
            }
            else
            {
                // Linear ramp across the overlap width.
                alpha = (processWidth - (j - start)) / processWidth;
            }
            d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
            d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
            d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
        }
    }
}
补充:后续我们要进行软硬件分别实现算法并进行时间的对比,因此下面提供运行时间的获取方法
获取运行时间
代码:
用clock()函数,得到程序启动以来占用的处理器时钟计时单元数(并非系统启动后的毫秒数),除以CLOCKS_PER_SEC,就可以换算成“秒”,clock()是标准c函数。
clock_t clock ( void );
#include <time.h>
clock_t t = clock();
long sec = t / CLOCKS_PER_SEC;
记录时钟周期
融入代码中必须加入头文件 #include<ctime>
#include<iostream>
#include<ctime>
using namespace std;
// Minimal timing template: clock() returns processor ticks consumed since
// program start; dividing the tick delta by CLOCKS_PER_SEC yields seconds.
int main()
{
    // BUG FIX: clock() returns clock_t, not time_t; use the matching type.
    clock_t begin, end;
    double ret;
    begin = clock();
    // ... code to be timed goes here ...
    end = clock();
    ret = double(end - begin) / CLOCKS_PER_SEC;
    cout << "runtime: " << ret << endl;
    return 0;
}
例如:对sift特征点检测的时间获取
#include<opencv2/opencv.hpp>
#include<iostream>
#include<opencv2/xfeatures2d.hpp>
#include <opencv2/highgui/highgui_c.h>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<vector>
#include<ctime>
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
Mat src;
int main(int argc, char** argv)
{
time_t begin, end;
double ret;
begin = clock();
src = imread("./data2/101.png");
if (!src.data)
{
cout << "图片加载失败" << endl;
return -1;
}
namedWindow("加载的灰度图像", CV_WINDOW_NORMAL);
imshow("加载的灰度图像", src);
int numfeature = 400;
Ptr<SIFT>detector = SIFT::create(numfeature);
vector<KeyPoint>keypoints;
detector->detect(src, keypoints, Mat());
printf("所有的特征点个数:%zd\n", keypoints.size());
Mat resultImg;
drawKeypoints(src, keypoints, resultImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
imshow("SIFT特征点提取", resultImg);
imwrite("./效果图/SIFT特征点提取.jpg", resultImg);
end = clock();
ret = double(end - begin) / CLOCKS_PER_SEC;
cout << "runtime:\n " << ret << endl;
waitKey(0);
return 0;
}
因此对分辨率为1000*566图像进行SIFT特征提取,提取数量为400,耗时0.667s
|