若該文為原創文章,未經允許不得轉載
原博主部落格地址:https://blog.csdn.net/qq21497936
原博主部落格導航:https://blog.csdn.net/qq21497936/article/details/102478062
本文章部落格地址:https://blog.csdn.net/qq21497936/article/details/107837715
各位讀者,知識無窮而人力有窮,要麼改需求,要麼找專業人士,要麼自己研究
紅胖子(紅模仿)的博文大全:開發技術集合(包含Qt實用技術、樹莓派、三維、OpenCV、OpenGL、ffmpeg、OSG、微控制器、軟硬結合等等)持續更新中…(點選傳送門)
上一篇:《OpenCV開發筆記(六十八):紅胖子8分鐘帶你使用特徵點Flann最鄰近差值匹配識別(圖文並茂+淺顯易懂+程式原始碼)》
下一篇:持續補充中…
紅胖子,來也!
特徵點、匹配,那麼如何使用特徵點和匹配來識別已有的物體,也就剩最關鍵的最後一步:尋找已知的物體了。
尺度不變特徵變換(Scale-invariant feature transform,SIFT),是用於影像處理領域的一種描述。這種描述具有尺度不變性,可在影像中檢測出關鍵點,是一種區域性特徵描述子。
SURF演算法採用了很多方法來對每一步進行優化從而提高速度。分析顯示在結果效果相當的情況下SURF的速度是SIFT的3倍。SURF善於處理具有模糊和旋轉的影像,但是不善於處理視角變化和光照變化。(SIFT特徵是區域性特徵,其對旋轉、尺度縮放、亮度變化保持不變性,對視角變化、仿射變換、噪聲也保持一定程度的穩定性)。
針對影像場景的特點,選擇不同的特徵點,列出之前特徵點相關的博文:
《OpenCV開發筆記(六十三):紅胖子8分鐘帶你深入瞭解SIFT特徵點(圖文並茂+淺顯易懂+程式原始碼)》
《OpenCV開發筆記(六十四):紅胖子8分鐘帶你深入瞭解SURF特徵點(圖文並茂+淺顯易懂+程式原始碼)》
《OpenCV開發筆記(六十五):紅胖子8分鐘帶你深入瞭解ORB特徵點(圖文並茂+淺顯易懂+程式原始碼)》
最佳特徵匹配總是嘗試所有可能的匹配,從而使得它總能夠找到最佳匹配,這也是BruteForce(暴力法)的原始含義,涉及到的類為BFMatcher類。
《OpenCV開發筆記(六十七):紅胖子8分鐘帶你深入瞭解特徵點暴力匹配(圖文並茂+淺顯易懂+程式原始碼)》
一種近似法,演算法更快但是找到的是最近鄰近似匹配,所以當我們需要找到一個相對好的匹配但是不需要最佳匹配的時候往往使用FlannBasedMatcher。
《OpenCV開發筆記(六十八):紅胖子8分鐘帶你使用特徵點Flann最鄰近差值匹配識別(圖文並茂+淺顯易懂+程式原始碼)》
對已知物體:過濾、去噪後、提取已知物體的特徵點;
對場景:過濾、去噪後、提取場景的特徵點;
對已知物體特徵點集合和場景中的特徵點集合去匹配,計算投影矩陣;
若成功計算變換矩陣就表示識別到物體;
通過原始的四個點位置進行變換矩陣計算,即可得到場景中的已知物體的四個頂點,該四個頂點連線起來就是已知物體的位置。
Mat findHomography(InputArray srcPoints,
InputArray dstPoints,
int method = 0,
double ransacReprojThreshold = 3,
OutputArray mask=noArray(),
const int maxIters = 2000,
const double confidence = 0.995);
- 引數一:InputArray型別的srcPoints,源平面上的對應點,可以是CV_32FC2的矩陣型別或者vector;
- 引數二:InputArray型別的dstPoints,目標平面上的對應點,可以是CV_32FC2的矩陣型別或者vector;
- 引數三:int型別的method,用於計算單應矩陣的方法,如下圖:
- 引數四:double型別的ransacReprojThreshold,將點對視為內點(inlier)的最大允許重投影誤差(僅用於RANSAC和RHO方法);
- 引數五:OutputArray型別的mask,由魯棒方法(RANSAC或LMEDS)設定的可選輸出掩碼,注意輸入掩碼值被忽略;
- 引數六:const int型別的maxIters,RANSAC迭代的最大數量;
- 引數七:const double型別的confidence,置信水平,介於0和1之間;
void perspectiveTransform( InputArray src,
OutputArray dst,
InputArray m);
- 引數一:InputArray型別的src,輸入兩通道或三通道浮點陣列;每個元素是要轉換的二維/三維向量。
- 引數二:OutputArray型別的dst,與src大小和型別相同的輸出陣列;
- 引數三:InputArray型別的m,3x3或4x4浮點轉換矩陣。
void OpenCVManager::testFindKnownObject()
{
QString fileName1 = "21.jpg";
QString fileName2 = "24.jpg";
int width = 400;
int height = 300;
cv::Mat srcMat = cv::imread(fileName1.toStdString());
cv::Mat srcMat3 = cv::imread(fileName2.toStdString());
cv::resize(srcMat, srcMat, cv::Size(width, height));
cv::resize(srcMat3, srcMat3, cv::Size(width, height));
cv::String windowName = _windowTitle.toStdString();
cvui::init(windowName);
cv::Mat windowMat = cv::Mat(cv::Size(srcMat.cols * 2, srcMat.rows * 3),
srcMat.type());
cv::Ptr<cv::xfeatures2d::SIFT> _pSift = cv::xfeatures2d::SiftFeatureDetector::create();
cv::Ptr<cv::xfeatures2d::SURF> _pSurf = cv::xfeatures2d::SurfFeatureDetector::create(450, 10, 10, true, true);
cv::Ptr<cv::Feature2D> _pFeature2D;
cv::Ptr<cv::DescriptorMatcher> _pDescriptorMatcher;
int type = 0;
int findType = 0;
int k1x = 25;
int k1y = 25;
int k2x = 75;
int k2y = 25;
int k3x = 75;
int k3y = 75;
int k4x = 25;
int k4y = 75;
// 定義匹配器
cv::Ptr<cv::FlannBasedMatcher> pFlannBasedMatcher = cv::FlannBasedMatcher::create();
cv::Ptr<cv::BFMatcher> pBFMatcher = cv::BFMatcher::create();
// 定義結果存放
std::vector<cv::DMatch> listDMatch;
// 儲存特徵點檢測器檢測特徵後的描述字
cv::Mat descriptor1;
cv::Mat descriptor2;
bool moveFlag = true; // 移動的標誌,不用每次都匹配
std::vector<cv::Point2f> obj_corners(4);
std::vector<cv::Point2f> scene_corners(4);
windowMat = cv::Scalar(0, 0, 0);
while(true)
{
cv::Mat mat;
{
std::vector<cv::KeyPoint> keyPoints1;
std::vector<cv::KeyPoint> keyPoints2;
int typeOld = type;
int findTypeOld = findType;
int k1xOld = k1x;
int k1yOld = k1y;
int k2xOld = k2x;
int k2yOld = k2y;
int k3xOld = k3x;
int k3yOld = k3y;
int k4xOld = k4x;
int k4yOld = k4y;
mat = windowMat(cv::Range(srcMat.rows * 0, srcMat.rows * 1),
cv::Range(srcMat.cols * 0, srcMat.cols * 1));
mat = cv::Scalar(0);
cvui::trackbar(windowMat, 0 + width * 0, 0 + height * 0, 165, &type, 0, 1);
cv::String str;
switch(type)
{
case 0:
str = "sift";
_pFeature2D = _pSift;
break;
case 1:
str = "surf";
_pFeature2D = _pSurf;
break;
default:
break;
}
cvui::printf(windowMat, width / 4 + width * 0 - 20, 40 + height * 0, str.c_str());
cvui::trackbar(windowMat, width / 2 + width * 0, 0 + height * 0, 165, &findType, 0, 1);
switch(findType)
{
case 0:
str = "BFMatcher";
_pDescriptorMatcher = pBFMatcher;
break;
case 1:
str = "FlannBasedMatcher";
_pDescriptorMatcher = pFlannBasedMatcher;
break;
default:
break;
}
cvui::printf(windowMat, width / 4 * 3 + width * 0 - 20, 40 + height * 0, str.c_str());
cvui::printf(windowMat, 0 + width * 0, 60 + height * 0, "k1x");
cvui::trackbar(windowMat, 0 + width * 0, 70 + height * 0, 165, &k1x, 0, 100);
cvui::printf(windowMat, 0 + width * 0, 120 + height * 0, "k1y");
cvui::trackbar(windowMat, 0 + width * 0, 130 + height * 0, 165, &k1y, 0, 100);
cvui::printf(windowMat, width / 2 + width * 0, 60 + height * 0, "k2x");
cvui::trackbar(windowMat, width / 2 + width * 0, 70 + height * 0, 165, &k2x, 0, 100);
cvui::printf(windowMat, width / 2 + width * 0, 120 + height * 0, "k2y");
cvui::trackbar(windowMat, width / 2 + width * 0, 130 + height * 0, 165, &k2y, 0, 100);
cvui::printf(windowMat, 0 + width * 0, 30 + height * 0 + height / 2, "k3x");
cvui::trackbar(windowMat, 0 + width * 0, 40 + height * 0 + height / 2, 165, &k3x, 0, 100);
cvui::printf(windowMat, 0 + width * 0, 90 + height * 0 + height / 2, "k3y");
cvui::trackbar(windowMat, 0 + width * 0, 100 + height * 0 + height / 2, 165, &k3y, 0, 100);
cvui::printf(windowMat, width / 2 + width * 0, 30 + height * 0 + height / 2, "k4x");
cvui::trackbar(windowMat, width / 2 + width * 0, 40 + height * 0 + height / 2, 165, &k4x, 0, 100);
cvui::printf(windowMat, width / 2 + width * 0, 90 + height * 0 + height / 2, "k4y");
cvui::trackbar(windowMat, width / 2 + width * 0, 100 + height * 0 + height / 2, 165, &k4y, 0, 100);
if( k1xOld != k1x || k1yOld != k1y
|| k2xOld != k2x || k2yOld != k2y
|| k3xOld != k3x || k3yOld != k3y
|| k4xOld != k4x || k4yOld != k4y
|| typeOld != type || findTypeOld != findType)
{
typeOld = type;
findTypeOld = findType;
moveFlag = true;
}
std::vector<cv::Point2f> srcPoints;
std::vector<cv::Point2f> dstPoints;
srcPoints.push_back(cv::Point2f(0.0f, 0.0f));
srcPoints.push_back(cv::Point2f(srcMat.cols - 1, 0.0f));
srcPoints.push_back(cv::Point2f(srcMat.cols - 1, srcMat.rows - 1));
srcPoints.push_back(cv::Point2f(0.0f, srcMat.rows - 1));
dstPoints.push_back(cv::Point2f(srcMat.cols * k1x / 100.0f, srcMat.rows * k1y / 100.0f));
dstPoints.push_back(cv::Point2f(srcMat.cols * k2x / 100.0f, srcMat.rows * k2y / 100.0f));
dstPoints.push_back(cv::Point2f(srcMat.cols * k3x / 100.0f, srcMat.rows * k3y / 100.0f));
dstPoints.push_back(cv::Point2f(srcMat.cols * k4x / 100.0f, srcMat.rows * k4y / 100.0f));
cv::Mat M = cv::getPerspectiveTransform(srcPoints, dstPoints);
cv::Mat srcMat2;
cv::warpPerspective(srcMat3,
srcMat2,
M,
cv::Size(srcMat.cols, srcMat.rows),
cv::INTER_LINEAR,
cv::BORDER_CONSTANT,
cv::Scalar::all(0));
mat = windowMat(cv::Range(srcMat.rows * 0, srcMat.rows * 1),
cv::Range(srcMat.cols * 1, srcMat.cols * 2));
cv::addWeighted(mat, 0.0f, srcMat2, 1.0f, 0.0f, mat);
if(moveFlag)
{
moveFlag = false;
//特徵點檢測
// _pSift->detect(srcMat, keyPoints1);
_pFeature2D->detectAndCompute(srcMat, cv::Mat(), keyPoints1, descriptor1);
//繪製特徵點(關鍵點)
cv::Mat resultShowMat;
cv::drawKeypoints(srcMat,
keyPoints1,
resultShowMat,
cv::Scalar(0, 0, 255),
cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
mat = windowMat(cv::Range(srcMat.rows * 1, srcMat.rows * 2),
cv::Range(srcMat.cols * 0, srcMat.cols * 1));
cv::addWeighted(mat, 0.0f, resultShowMat, 1.0f, 0.0f, mat);
//特徵點檢測
// _pSift->detect(srcMat2, keyPoints2);
_pFeature2D->detectAndCompute(srcMat2, cv::Mat(), keyPoints2, descriptor2);
//繪製特徵點(關鍵點)
cv::Mat resultShowMat2;
cv::drawKeypoints(srcMat2,
keyPoints2,
resultShowMat2,
cv::Scalar(0, 0, 255),
cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
mat = windowMat(cv::Range(srcMat.rows * 1, srcMat.rows * 2),
cv::Range(srcMat.cols * 1, srcMat.cols * 2));
cv::addWeighted(mat, 0.0f, resultShowMat2, 1.0f, 0.0f, mat);
// FlannBasedMatcher最近鄰匹配
_pDescriptorMatcher->match(descriptor1, descriptor2, listDMatch);
// drawMatch繪製出來,並排顯示了,高度一樣,寬度累加(因為兩個寬度相同,所以是兩倍了)
cv::Mat matchesMat;
cv::drawMatches(srcMat,
keyPoints1,
srcMat2,
keyPoints2,
listDMatch,
matchesMat);
mat = windowMat(cv::Range(srcMat.rows * 2, srcMat.rows * 3),
cv::Range(srcMat.cols * 0, srcMat.cols * 2));
cv::addWeighted(mat, 0.0f, matchesMat, 1.0f, 0.0f, mat);
// 定義兩個區域性變數
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
// 從匹配成功的匹配對中獲取關鍵點
for(int index = 0; index < listDMatch.size(); index++)
{
obj.push_back(keyPoints1[listDMatch[index].queryIdx].pt);
scene.push_back(keyPoints2[listDMatch[index].trainIdx].pt);
}
// 計算透視變換
cv::Mat H = cv::findHomography(obj, scene, CV_RANSAC);
// 從待測圖片中獲取角點
obj_corners[0] = cv::Point2f(0,0);
obj_corners[1] = cv::Point2f(srcMat.cols,0);
obj_corners[2] = cv::Point2f(srcMat.cols, srcMat.rows);
obj_corners[3] = cv::Point2f(0, srcMat.rows);
// 進行透視變換
cv::perspectiveTransform(obj_corners, scene_corners, H);
}
// 繪製出角點之間的線
qDebug() << __FILE__ << __LINE__
<< scene_corners[0].x
<< scene_corners[0].y
<< scene_corners[1].x
<< scene_corners[1].y;
cv::line(windowMat,
scene_corners[0] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
scene_corners[1] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
cv::Scalar(0, 0, 255), 2);
cv::line(windowMat,
scene_corners[1] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
scene_corners[2] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
cv::Scalar(0, 0, 255), 2);
cv::line(windowMat,
scene_corners[2] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
scene_corners[3] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
cv::Scalar(0, 0, 255), 2);
cv::line(windowMat,
scene_corners[3] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
scene_corners[0] + cv::Point2f(srcMat.cols * 1, srcMat.rows * 0),
cv::Scalar(0, 0, 255), 2);
}
cv::imshow(windowName, windowMat);
// 更新
cvui::update();
// 顯示
// esc鍵退出
if(cv::waitKey(25) == 27)
{
break;
}
}
}
對應版本號v1.63.0
上一篇:《OpenCV開發筆記(六十八):紅胖子8分鐘帶你使用特徵點Flann最鄰近差值匹配識別(圖文並茂+淺顯易懂+程式原始碼)》
下一篇:持續補充中…