Using the SIFT Feature Detector for Image Matching in OpenCV 2.4.4
Notes on some related OpenCV structures:
The keypoint class:
- class KeyPoint
- {
- Point2f pt; //coordinates of the keypoint
- float size; //diameter of the keypoint neighborhood
- float angle; //keypoint orientation in [0,360); a negative value means it is not used
- float response; //response (strength) by which the most salient keypoints are selected
- int octave; //octave (pyramid level) in which the keypoint was detected
- int class_id; //id used to cluster keypoints
- };
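For illustration only (not part of the original post; "test.jpg" is a placeholder path), a minimal sketch that detects SIFT keypoints and prints the fields of the first KeyPoint:
- //Minimal sketch: detect SIFT keypoints and print the fields of the first one
- #include "opencv2/highgui/highgui.hpp"
- #include "opencv2/features2d/features2d.hpp"
- #include "opencv2/nonfree/nonfree.hpp"
- #include <iostream>
- #include <vector>
- using namespace cv;
- using namespace std;
- int main()
- {
- initModule_nonfree();//required before creating "SIFT" by name
- Mat img = imread("test.jpg");//placeholder image path
- vector<KeyPoint> keypoints;
- Ptr<FeatureDetector> detector = FeatureDetector::create("SIFT");
- detector->detect(img, keypoints);
- if(!keypoints.empty())
- {
- const KeyPoint &kp = keypoints[0];
- cout<<"pt=("<<kp.pt.x<<","<<kp.pt.y<<") size="<<kp.size<<" angle="<<kp.angle
- <<" response="<<kp.response<<" octave="<<kp.octave<<endl;
- }
- return 0;
- }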
The structure that stores match results:
- struct DMatch
- {
- //three constructors
- DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(std::numeric_limits<float>::max()) {}
- DMatch(int _queryIdx, int _trainIdx, float _distance) :
- queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}
- DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance) :
- queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}
- int queryIdx; //index of the descriptor in the query image for this match
- int trainIdx; //index of the descriptor in the train (template) image for this match
- int imgIdx; //index of the train image (when matching against several images)
- float distance; //Euclidean distance between the two feature vectors; smaller means a better match
- bool operator < (const DMatch &m) const;
- };
Note: the Euclidean distance between two keypoint descriptors (feature vectors) is used as the similarity criterion for keypoint matching. If the descriptors of keypoints p and q are Desp and Desq, their Euclidean distance is defined as
d(p, q) = sqrt( sum_{i=1..n} (Desp_i - Desq_i)^2 )
So each match pairs one feature descriptor (feature vector) from the train image with one from the query image, as illustrated by the sketch below.
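For illustration (not part of the original post), a small helper that recomputes this Euclidean distance with cv::norm; for a "BruteForce" (L2) matcher the result should agree with the distance field of the corresponding DMatch:
- //Sketch: recompute the Euclidean distance of a match from the two descriptor matrices.
- //descriptors1/descriptors2 are the descriptor matrices of the query and train images.
- #include "opencv2/core/core.hpp"
- #include "opencv2/features2d/features2d.hpp"
- double matchDistanceL2(const cv::Mat &descriptors1, const cv::Mat &descriptors2, const cv::DMatch &m)
- {
- //row queryIdx of descriptors1 and row trainIdx of descriptors2 are the two matched feature vectors
- return cv::norm(descriptors1.row(m.queryIdx), descriptors2.row(m.trainIdx), cv::NORM_L2);
- }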
- #include "opencv2/highgui/highgui.hpp"
- #include "opencv2/imgproc/imgproc.hpp"
- #include "opencv2/nonfree/nonfree.hpp"
- #include "opencv2/nonfree/features2d.hpp"
- #include <iostream>
- #include <stdio.h>
- #include <stdlib.h>
- using namespace cv;
- using namespace std;
- int main()
- {
- initModule_nonfree();//initialize the nonfree module; required when using SIFT or SURF
- Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );//create the SIFT feature detector
- Ptr<DescriptorExtractor> descriptor_extractor = DescriptorExtractor::create( "SIFT" );//create the descriptor (feature vector) extractor
- Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create( "BruteForce" );//create the descriptor matcher
- if( detector.empty() || descriptor_extractor.empty() || descriptor_matcher.empty() )
- {
- cout<<"fail to create detector/extractor/matcher!"<<endl;
- return -1;
- }
- //Read the input images
- Mat img1 = imread("desk.jpg");
- Mat img2 = imread("desk_glue.jpg");
- if( img1.empty() || img2.empty() )
- { cout<<"fail to load images!"<<endl; return -1; }
- //Detect keypoints
- double t = getTickCount();//current tick count, used to time the SIFT stage
- vector<KeyPoint> keypoints1,keypoints2;
- detector->detect( img1, keypoints1 );//detect SIFT keypoints in img1 and store them in keypoints1
- detector->detect( img2, keypoints2 );
- cout<<"Number of keypoints in image 1: "<<keypoints1.size()<<endl;
- cout<<"Number of keypoints in image 2: "<<keypoints2.size()<<endl;
- //Compute the descriptor (feature vector) matrices from the keypoints
- Mat descriptors1,descriptors2;
- descriptor_extractor->compute( img1, keypoints1, descriptors1 );
- descriptor_extractor->compute( img2, keypoints2, descriptors2 );
- t = ((double)getTickCount() - t)/getTickFrequency();
- cout<<"SIFT算法用时:"<<t<<"秒"<<endl;
- cout<<"图像1特征描述矩阵大小:"<<descriptors1.size()
- <<",特征向量个数:"<<descriptors1.rows<<",维数:"<<descriptors1.cols<<endl;
- cout<<"图像2特征描述矩阵大小:"<<descriptors2.size()
- <<",特征向量个数:"<<descriptors2.rows<<",维数:"<<descriptors2.cols<<endl;
- //画出特征点
- Mat img_keypoints1,img_keypoints2;
- drawKeypoints(img1,keypoints1,img_keypoints1,Scalar::all(-1),0);
- drawKeypoints(img2,keypoints2,img_keypoints2,Scalar::all(-1),0);
- //imshow("Src1",img_keypoints1);
- //imshow("Src2",img_keypoints2);
- //Match the descriptors
- vector<DMatch> matches;//match results
- descriptor_matcher->match( descriptors1, descriptors2, matches );//match the descriptor matrices of the two images
- cout<<"Number of matches: "<<matches.size()<<endl;
- //Find the maximum and minimum distances among the matches
- //The distance is the Euclidean distance between two feature vectors; a smaller value means the two keypoints are more similar
- double max_dist = 0;
- double min_dist = 1e9;//start from a very large value; the loop below shrinks it
- for(size_t i=0; i<matches.size(); i++)
- {
- double dist = matches[i].distance;
- if(dist < min_dist) min_dist = dist;
- if(dist > max_dist) max_dist = dist;
- }
- cout<<"最大距离:"<<max_dist<<endl;
- cout<<"最小距离:"<<min_dist<<endl;
- //筛选出较好的匹配点
- vector<DMatch> goodMatches;
- for(int i=0; i<matches.size(); i++)
- {
- if(matches[i].distance < 0.31 * max_dist)
- {
- goodMatches.push_back(matches[i]);
- }
- }
- cout<<"goodMatch个数:"<<goodMatches.size()<<endl;
- //Draw the match results
- Mat img_matches;
- //Matched keypoint pairs are connected with random colors (Scalar::all(-1)); swap in CV_RGB(255,0,0) for red.
- //Unmatched keypoints would be drawn in green, but flag 2 (NOT_DRAW_SINGLE_POINTS) omits them.
- drawMatches(img1,keypoints1,img2,keypoints2,goodMatches,img_matches,
- Scalar::all(-1)/*CV_RGB(255,0,0)*/,CV_RGB(0,255,0),Mat(),2);
- imshow("MatchSIFT",img_matches);
- waitKey(0);
- return 0;
- }
Result:
Output image:
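The listing above keeps a match when its distance is below 0.31 * max_dist, an empirical threshold. A commonly used alternative (shown here only as a sketch, not part of the original code) is Lowe's ratio test based on knnMatch, which keeps a query descriptor's best match only when it is clearly better than the second-best match:
- //Sketch: filter matches with Lowe's ratio test instead of a distance threshold.
- #include "opencv2/features2d/features2d.hpp"
- #include <vector>
- using namespace cv;
- using namespace std;
- vector<DMatch> ratioTestFilter(const Mat &descriptors1, const Mat &descriptors2,
- const Ptr<DescriptorMatcher> &matcher, float ratio = 0.8f)
- {
- vector<vector<DMatch> > knnMatches;
- matcher->knnMatch(descriptors1, descriptors2, knnMatches, 2);//two nearest neighbours per query descriptor
- vector<DMatch> good;
- for(size_t i=0; i<knnMatches.size(); i++)
- {
- //keep the best match only when it is much closer than the second-best one
- if(knnMatches[i].size() == 2 && knnMatches[i][0].distance < ratio * knnMatches[i][1].distance)
- good.push_back(knnMatches[i][0]);
- }
- return good;
- }
In the program above this could replace the threshold loop, e.g. vector<DMatch> goodMatches = ratioTestFilter(descriptors1, descriptors2, descriptor_matcher);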