欢迎您访问程序员文章站本站旨在为大家提供分享程序员计算机编程知识!
您现在的位置是: 首页

OpenCV中feature2D学习——SIFT和SURF算法实现目标检测

程序员文章站 2022-06-11 15:39:46
...

当前使用版本opencv3.4.0,需要安装opencv_contrib

surf特征点检测

  • surf算法为每个检测到的特征定义了位置和尺度,尺度值可以用于定义围绕特征点的窗口大小。不论物体在图像中的尺度如何变化,该窗口都将包含相同的视觉信息,这些信息用于表示特征点,使它们彼此区分、与众不同。

SURF 算法,全称是 Speeded-Up Robust Features

#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <opencv2/calib3d/calib3d_c.h>


using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    // Load both input images in grayscale.
    Mat srcImage1 = imread( "/home/oceanstar/桌面/1.png", CV_LOAD_IMAGE_GRAYSCALE );
    Mat srcImage2 = imread( "/home/oceanstar/桌面/2.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !srcImage1.data || !srcImage2.data )  // verify both reads succeeded
    {
        printf("读取图片错误,请确定目录下是否有imread函数指定名称的图片存在~! \n");
        // BUG FIX: the original `return false;` converts to 0, which signals
        // SUCCESS to the shell even though loading failed. Return -1 instead.
        return -1;
    }

    // Hessian threshold for the SURF detector: only keypoints whose Hessian
    // response exceeds this value are kept (larger => fewer, stronger points).
    int minHessian = 400;
    // SurfFeatureDetector is an alias of SURF; create() returns a Ptr<> handle.
    Ptr<cv::xfeatures2d::SurfFeatureDetector> detector = cv::xfeatures2d::SurfFeatureDetector::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;  // detected keypoints per image

    // [3] Detect SURF keypoints in both images.
    detector->detect( srcImage1, keypoints_1 );
    detector->detect( srcImage2, keypoints_2 );

    // [4] Draw the keypoints (Scalar::all(-1) = random color per point).
    Mat img_keypoints_1; Mat img_keypoints_2;
    drawKeypoints( srcImage1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
    // drawKeypoints( srcImage1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    drawKeypoints( srcImage2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    // [5] Show the results and wait for a key press before exiting.
    imshow("特征点检测效果图1", img_keypoints_1 );
    imshow("特征点检测效果图2", img_keypoints_2 );

    waitKey(0);
    return 0;
}

OpenCV中feature2D学习——SIFT和SURF算法实现目标检测

绘制关键点:drawKeypoints

CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                               const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );

SURF特征描述

#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <opencv2/calib3d/calib3d_c.h>


using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    // Load both input images in grayscale.
    Mat srcImage1 = imread( "/home/oceanstar/桌面/1.png", CV_LOAD_IMAGE_GRAYSCALE );
    Mat srcImage2 = imread( "/home/oceanstar/桌面/2.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !srcImage1.data || !srcImage2.data )  // verify both reads succeeded
    {
        printf("读取图片错误,请确定目录下是否有imread函数指定名称的图片存在~! \n");
        // BUG FIX: the original `return false;` converts to 0, which signals
        // SUCCESS to the shell even though loading failed. Return -1 instead.
        return -1;
    }

    // Hessian threshold for the SURF detector (higher => fewer keypoints).
    int minHessian = 3000;
    Ptr<cv::xfeatures2d::SurfFeatureDetector> detector = cv::xfeatures2d::SurfFeatureDetector::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;  // detected keypoints per image

    // [3] Detect SURF keypoints in both images.
    detector->detect( srcImage1, keypoints_1 );
    detector->detect( srcImage2, keypoints_2 );

    // [4] Compute SURF descriptors for the detected keypoints.
    //     (The original comment said "SIFT", but SurfDescriptorExtractor is SURF.)
    Ptr<xfeatures2d::SurfDescriptorExtractor> extractor = xfeatures2d::SurfDescriptorExtractor::create();
    Mat descriptors1, descriptors2;
    extractor->compute( srcImage1, keypoints_1, descriptors1 );
    extractor->compute( srcImage2, keypoints_2, descriptors2 );

    // [5] Match descriptors with a brute-force matcher (L2 distance for SURF).
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    std::vector< DMatch > matches;
    matcher->match( descriptors1, descriptors2, matches );

    // [6] Draw the matched keypoint pairs side by side.
    Mat imgMatches;
    drawMatches( srcImage1, keypoints_1, srcImage2, keypoints_2, matches, imgMatches );

    // [7] Show the result and wait for a key press before exiting.
    imshow("匹配图", imgMatches );
    waitKey(0);
    return 0;
}

OpenCV中feature2D学习——SIFT和SURF算法实现目标检测

使用FLANN进行特征点匹配

#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <opencv2/calib3d/calib3d_c.h>


using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    //【1】载入源图片
    Mat srcImage_1 = imread( "/home/oceanstar/桌面/1.png", CV_LOAD_IMAGE_GRAYSCALE );
    Mat srcImage_2 = imread( "/home/oceanstar/桌面/2.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !srcImage_1.data || !srcImage_2.data )//检测是否读取成功
    { printf("读取图片错误,请确定目录下是否有imread函数指定名称的图片存在~! \n"); return false; }


    //【2】利用SURF检测器检测的关键点
    int minHessian = 3000;
    Ptr<SURF>detector = SURF::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector->detect( srcImage_1, keypoints_1 );
    detector->detect( srcImage_2, keypoints_2 );

    ///【4】 使用SIFT算子提取特征(计算特征向量)
    Ptr<SURF> extractor = SurfDescriptorExtractor::create();
    Mat descriptors_1, descriptors_2;
    extractor->compute( srcImage_1, keypoints_1, descriptors_1 );
    extractor->compute( srcImage_2, keypoints_2, descriptors_2 );

    //【4】采用FLANN算法匹配描述符向量
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    std::vector< DMatch > matches;
    matcher->match( descriptors_1, descriptors_2, matches );

    //【5】快速计算关键点之间的最大和最小距离
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("> 最大距离(Max dist) : %f \n", max_dist );
    printf("> 最小距离(Min dist) : %f \n", min_dist );

    //【6】存下符合条件的匹配结果(即其距离小于2* min_dist的),使用radiusMatch同样可行
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < 2*min_dist )
        { good_matches.push_back( matches[i]); }
    }



    //【7】绘制出符合条件的匹配点
    Mat img_matches;
    drawMatches( srcImage_1, keypoints_1, srcImage_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //【8】输出相关匹配点信息
    for( int i = 0; i < good_matches.size(); i++ )
    { printf( ">符合条件的匹配点 [%d] 特征点1: %d  -- 特征点2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }


    //【9】显示效果图
    imshow( "匹配效果图", img_matches );


    //按任意键退出程序
    waitKey(0);
    return 0;
}

OpenCV中feature2D学习——SIFT和SURF算法实现目标检测

寻找已知物体

#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include <iostream>
#include <opencv2/calib3d/calib3d_c.h>
#include <cv.hpp>


using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    // [1] Load both input images in grayscale.
    Mat srcImage_1 = imread( "/home/oceanstar/桌面/1.png", CV_LOAD_IMAGE_GRAYSCALE );
    Mat srcImage_2 = imread( "/home/oceanstar/桌面/2.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !srcImage_1.data || !srcImage_2.data )  // verify both reads succeeded
    {
        printf("读取图片错误,请确定目录下是否有imread函数指定名称的图片存在~! \n");
        // BUG FIX: the original `return false;` converts to 0, which signals
        // SUCCESS to the shell even though loading failed. Return -1 instead.
        return -1;
    }

    // [2] Detect keypoints with a SURF detector (Hessian threshold 300).
    int minHessian = 300;
    Ptr<SURF> detector = SURF::create(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector->detect( srcImage_1, keypoints_1 );
    detector->detect( srcImage_2, keypoints_2 );

    // [3] Compute SURF descriptors for the detected keypoints.
    //     (The original comment said "SIFT", but SurfDescriptorExtractor is SURF.)
    Ptr<SURF> extractor = SurfDescriptorExtractor::create();
    Mat descriptors_1, descriptors_2;
    extractor->compute( srcImage_1, keypoints_1, descriptors_1 );
    extractor->compute( srcImage_2, keypoints_2, descriptors_2 );

    // [4] Match the descriptor vectors with the FLANN-based matcher.
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    std::vector< DMatch > matches;
    matcher->match( descriptors_1, descriptors_2, matches );

    // [5] Find the minimum and maximum match distances.
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("> 最大距离(Max dist) : %f \n", max_dist );
    printf("> 最小距离(Min dist) : %f \n", min_dist );

    // [6] Keep only matches whose distance is below 3*min_dist.
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i]); }
    }

    // [7] Draw only the filtered matches; single (unmatched) points are hidden.
    Mat img_matches;
    drawMatches( srcImage_1, keypoints_1, srcImage_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    // ROBUSTNESS FIX: findHomography requires at least 4 point pairs and
    // raises a CV assertion otherwise. Bail out gracefully instead of crashing.
    if( good_matches.size() < 4 )
    {
        printf("匹配点不足4对,无法计算透视变换! \n");
        return -1;
    }

    // Gather the matched point coordinates from both images.
    vector<Point2f> obj;
    vector<Point2f> scene;
    for( unsigned int i = 0; i < good_matches.size(); i++ )
    {
        obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
    }

    // Estimate the homography with RANSAC to reject outlier matches.
    Mat H = findHomography( obj, scene, CV_RANSAC );

    // The four corners of the object image (legacy cvPoint replaced by
    // Point2f with explicit casts — identical values, no narrowing warning).
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0.f, 0.f );
    obj_corners[1] = Point2f( static_cast<float>(srcImage_1.cols), 0.f );
    obj_corners[2] = Point2f( static_cast<float>(srcImage_1.cols), static_cast<float>(srcImage_1.rows) );
    obj_corners[3] = Point2f( 0.f, static_cast<float>(srcImage_1.rows) );
    vector<Point2f> scene_corners(4);

    // Project the object corners into the scene image.
    perspectiveTransform( obj_corners, scene_corners, H);

    // Draw the projected quadrilateral (offset by the object image width,
    // because img_matches shows the two images side by side).
    line( img_matches, scene_corners[0] + Point2f( static_cast<float>(srcImage_1.cols), 0), scene_corners[1] + Point2f( static_cast<float>(srcImage_1.cols), 0), Scalar(255, 0, 123), 4 );
    line( img_matches, scene_corners[1] + Point2f( static_cast<float>(srcImage_1.cols), 0), scene_corners[2] + Point2f( static_cast<float>(srcImage_1.cols), 0), Scalar( 255, 0, 123), 4 );
    line( img_matches, scene_corners[2] + Point2f( static_cast<float>(srcImage_1.cols), 0), scene_corners[3] + Point2f( static_cast<float>(srcImage_1.cols), 0), Scalar( 255, 0, 123), 4 );
    line( img_matches, scene_corners[3] + Point2f( static_cast<float>(srcImage_1.cols), 0), scene_corners[0] + Point2f( static_cast<float>(srcImage_1.cols), 0), Scalar( 255, 0, 123), 4 );

    // Show the final result and wait for a key press before exiting.
    imshow( "效果图", img_matches );

    waitKey(0);
    return 0;
}

使用SIFT算法实现目标检测(检测目标并框出其在场景中的位置)

#include "opencv2/highgui.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/line_descriptor.hpp"
#include <iostream>
#include <opencv2/calib3d/calib3d_c.h>
#include <cv.hpp>

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    Mat imgObject = imread( "/home/oceanstar/桌面/1.png", CV_LOAD_IMAGE_GRAYSCALE );
    Mat imgScene = imread( "/home/oceanstar/桌面/2.png", CV_LOAD_IMAGE_GRAYSCALE );

    if( !imgObject.data || !imgScene.data )
    {
        cout<< " --(!) Error reading images "<<endl;
        return -1;
    }



    double begin = clock();

    int minHessian = 700;
    //SiftFeatureDetector detector;
    Ptr<cv::xfeatures2d::SIFT>detector = cv::xfeatures2d::SIFT::create(minHessian);
    vector<KeyPoint> keypointsObject, keypointsScene;

    detector->detect( imgObject, keypointsObject );
    detector->detect( imgScene, keypointsScene );
    cout<<"object--number of keypoints: "<<keypointsObject.size()<<endl;
    cout<<"scene--number of keypoints: "<<keypointsScene.size()<<endl;

///-- Step 2: 使用SIFT算子提取特征(计算特征向量)
    Ptr<xfeatures2d::SiftDescriptorExtractor> extractor = xfeatures2d::SiftDescriptorExtractor::create();
    Mat descriptorsObject, descriptorsScene;
    extractor->compute( imgObject, keypointsObject, descriptorsObject );
    extractor->compute( imgScene, keypointsScene, descriptorsScene );

    ///-- Step 3: 使用FLANN法进行匹配
    FlannBasedMatcher matcher;
    vector< DMatch > allMatches;
    matcher.match( descriptorsObject, descriptorsScene, allMatches );
    cout<<"number of matches before filtering: "<<allMatches.size()<<endl;

    //-- 计算关键点间的最大最小距离
    double maxDist = 0;
    double minDist = 100;
    for( int i = 0; i < descriptorsObject.rows; i++ )
    {
        double dist = allMatches[i].distance;
        if( dist < minDist )
            minDist = dist;
        if( dist > maxDist )
            maxDist = dist;
    }
    printf("	max dist : %f \n", maxDist );
    printf("	min dist : %f \n", minDist );

    //-- 过滤匹配点,保留好的匹配点(这里采用的标准:distance<3*minDist)
    vector< DMatch > goodMatches;
    for( int i = 0; i < descriptorsObject.rows; i++ )
    {
        if( allMatches[i].distance < 2*minDist )
            goodMatches.push_back( allMatches[i]);
    }
    cout<<"number of matches after filtering: "<<goodMatches.size()<<endl;

    //-- 显示匹配结果
    Mat resultImg;
    drawMatches( imgObject, keypointsObject, imgScene, keypointsScene,
                 goodMatches, resultImg, Scalar::all(-1), Scalar::all(-1), vector<char>(),
                 DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //不显示未匹配的点
    );
    //-- 输出匹配点的对应关系
    for( int i = 0; i < goodMatches.size(); i++ )
        printf( "	good match %d: keypointsObject [%d]  -- keypointsScene [%d]\n", i,
                goodMatches[i].queryIdx, goodMatches[i].trainIdx );

    ///-- Step 4: 使用findHomography找出相应的透视变换
    vector<Point2f> object;
    vector<Point2f> scene;
    for( size_t i = 0; i < goodMatches.size(); i++ )
    {
        //-- 从好的匹配中获取关键点: 匹配关系是关键点间具有的一 一对应关系,可以从匹配关系获得关键点的索引
        //-- e.g. 这里的goodMatches[i].queryIdx和goodMatches[i].trainIdx是匹配中一对关键点的索引
        object.push_back( keypointsObject[ goodMatches[i].queryIdx ].pt );
        scene.push_back( keypointsScene[ goodMatches[i].trainIdx ].pt );
    }
    Mat H = findHomography( object, scene, CV_RANSAC );

    ///-- Step 5: 使用perspectiveTransform映射点群,在场景中获取目标位置
    std::vector<Point2f> objCorners(4);
    objCorners[0] = cvPoint(0,0);
    objCorners[1] = cvPoint( imgObject.cols, 0 );
    objCorners[2] = cvPoint( imgObject.cols, imgObject.rows );
    objCorners[3] = cvPoint( 0, imgObject.rows );
    std::vector<Point2f> sceneCorners(4);
    perspectiveTransform( objCorners, sceneCorners, H);

    //-- 在被检测到的目标四个角之间划线
    line( resultImg, sceneCorners[0] + Point2f( imgObject.cols, 0), sceneCorners[1] + Point2f( imgObject.cols, 0), Scalar(0, 255, 0), 4 );
    line( resultImg, sceneCorners[1] + Point2f( imgObject.cols, 0), sceneCorners[2] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );
    line( resultImg, sceneCorners[2] + Point2f( imgObject.cols, 0), sceneCorners[3] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );
    line( resultImg, sceneCorners[3] + Point2f( imgObject.cols, 0), sceneCorners[0] + Point2f( imgObject.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- 显示检测结果
    imshow("detection result", resultImg );

    double end = clock();
    cout<<"\nSIFT--elapsed time: "<<(end - begin)/CLOCKS_PER_SEC*1000<<" ms\n";


    waitKey(0);
    return 0;
}

OpenCV中feature2D学习——SIFT和SURF算法实现目标检测

相关标签: # C++