stitching.cpp: source code analysis of fisheye image stitching and blending
When I ran the official OpenCV C++ samples earlier, stitching.cpp produced a decent panorama, so I tried it on my own data. I had corrected three fisheye images in MATLAB with the longitude-latitude mapping method I implemented before, but then hit the question of how to save the corrected results: Save / Save As exports the whole figure, white border included, which is useless for stitching, so I simply took screenshots and saved them as JPEGs, which have no white border. (MATLAB's imwrite would also write the image matrix directly, without any figure border.)
The corrected images: [corrected fisheye images]
These three MATLAB outputs were then fed to OpenCV's stitching.cpp, which produced: [stitched panorama]
Now let's walk through the stitching and blending source code, starting with stitching.cpp itself:
#include <iostream>
#include <fstream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"
using namespace std;
using namespace cv;
bool try_use_gpu = false;
vector<Mat> imgs;
string result_name = "result3.jpg"; // file name of the resulting panorama, written to the working directory
void printUsage();
int parseCmdArgs(int argc, char** argv);
int main(int argc, char* argv[])
{
    int retval = parseCmdArgs(argc, argv);
    if (retval) return -1;

    Mat pano;
    Stitcher stitcher = Stitcher::createDefault(try_use_gpu); // createDefault() does not use the GPU by default!
    Stitcher::Status status = stitcher.stitch(imgs, pano); // estimates the rotations between the input images and builds the final panorama; this function is the real subject of this analysis!

    if (status != Stitcher::OK) // make sure stitching succeeded
    {
        cout << "Can't stitch images, error code = " << int(status) << endl;
        return -1;
    }

    namedWindow("stitching result");
    imshow("stitching result", pano);
    waitKey(0);
    imwrite(result_name, pano);
    return 0;
}
void printUsage() // the GPU is off by default
{
    cout <<
        "Rotation model images stitcher.\n\n"
        "stitching img1 img2 [...imgN]\n\n"
        "Flags:\n"
        "  --try_use_gpu (yes|no)\n"
        "      Try to use GPU. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n";
}
int parseCmdArgs(int argc, char** argv) // reads the input images and pushes them into the imgs vector
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage();
            return -1;
        }
        else if (string(argv[i]) == "--try_use_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_use_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_use_gpu = true;
            else
            {
                cout << "Bad --try_use_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
        {
            Mat img = imread(argv[i]);
            if (img.empty())
            {
                cout << "Can't read image '" << argv[i] << "'\n";
                return -1;
            }
            imgs.push_back(img);
        }
    }
    return 0;
}
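That is the whole sample. One thing worth expanding before we dive into the library: Stitcher::createDefault(), called in main(), is what wires up every component the pipeline uses, so all the defaults mentioned later (registration resolution, confidence threshold, warper, blender, ...) are set there. A paraphrased sketch of the OpenCV 2.4 version (GPU branches omitted; the exact component choices can differ between builds, e.g. SURF needs the nonfree module, otherwise ORB is used):
Stitcher Stitcher::createDefault(bool /*try_use_gpu*/)
{
    Stitcher stitcher;
    stitcher.setRegistrationResol(0.6);        // match features at ~0.6 Mpx
    stitcher.setSeamEstimationResol(0.1);      // find seams at ~0.1 Mpx
    stitcher.setCompositingResol(ORIG_RESOL);  // compose at the original resolution
    stitcher.setPanoConfidenceThresh(1);
    stitcher.setWaveCorrection(true);
    stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
    stitcher.setFeaturesFinder(new detail::SurfFeaturesFinder()); // ORB when nonfree is absent
    stitcher.setFeaturesMatcher(new detail::BestOf2NearestMatcher(false));
    stitcher.setBundleAdjuster(new detail::BundleAdjusterRay());
    stitcher.setWarper(new SphericalWarper()); // spherical projection by default
    stitcher.setExposureCompensator(new detail::BlocksGainCompensator());
    stitcher.setSeamFinder(new detail::GraphCutSeamFinder(detail::GraphCutSeamFinderBase::COST_COLOR));
    stitcher.setBlender(new detail::MultiBandBlender(false));
    return stitcher;
}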
The stitching.cpp sample itself is easy to follow; a typical run is `stitching img1.jpg img2.jpg img3.jpg --output result3.jpg`. Now for the source behind the actual stitching and blending, i.e. the line Stitcher::Status status = stitcher.stitch(imgs, pano);. Configure the OpenCV sources with CMake and open them to browse the definitions in the Stitcher class. stitch() is overloaded, and the program uses the first form, whose definition is trivial: it simply chains the two functions we really need to study, estimateTransform() (estimating the geometric relations between the input images, i.e. the rotations and camera intrinsics) and composePanorama() (warping, blending and compositing the final panorama).
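Both stitch() overloads are thin wrappers; paraphrased from the 2.4 sources, the form used by our program is roughly:
Stitcher::Status Stitcher::stitch(InputArray images, OutputArray pano)
{
    Status status = estimateTransform(images); // forwards to the ROI overload with empty ROIs
    if (status != OK)
        return status;
    return composePanorama(pano);
}
estimateTransform() itself is also short: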
Stitcher::Status Stitcher::estimateTransform(InputArray images, const vector<vector<Rect> > &rois)
{
    images.getMatVector(imgs_);
    rois_ = rois;

    Status status;
    if ((status = matchImages()) != OK) // feature detection + pairwise matching; source below
        return status;

    estimateCameraParams(); // camera parameter estimation; its source follows matchImages()
    return OK;
}
Which leads us to matchImages():
Stitcher::Status Stitcher::matchImages()
{
    if ((int)imgs_.size() < 2)
    {
        LOGLN("Need more images");
        return ERR_NEED_MORE_IMGS; // at least two input images are required
    }

    work_scale_ = 1;
    seam_work_aspect_ = 1;
    seam_scale_ = 1;
    bool is_work_scale_set = false;
    bool is_seam_scale_set = false;
    Mat full_img, img;
    features_.resize(imgs_.size());       // one ImageFeatures (keypoints + descriptors) per input image
    seam_est_imgs_.resize(imgs_.size());  // downscaled copies used later for seam estimation
    full_img_sizes_.resize(imgs_.size()); // original sizes of the inputs

    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount(); // time the feature-detection stage
#endif

    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        full_img = imgs_[i];                  // take each input image in turn
        full_img_sizes_[i] = full_img.size(); // remember its original m x n size

        // registr_resol_ is the registration (matching) resolution in megapixels:
        // images are downscaled so their area is at most registr_resol_ * 10^6 pixels
        if (registr_resol_ < 0) // negative means: match at the original resolution
        {
            img = full_img;
            work_scale_ = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                // full_img.size().area() is the pixel count m*n; compute work_scale_
                // so the resized area stays below registr_resol_ * 10^6 pixels
                work_scale_ = min(1.0, sqrt(registr_resol_ * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            // downscale full_img by work_scale_ into img (full_img itself is untouched)
            resize(full_img, img, Size(), work_scale_, work_scale_);
        }
        if (!is_seam_scale_set)
        {
            // seam_est_resol_ is the seam-estimation resolution in megapixels;
            // like registr_resol_ it has a default, set in createDefault()
            seam_scale_ = min(1.0, sqrt(seam_est_resol_ * 1e6 / full_img.size().area()));
            seam_work_aspect_ = seam_scale_ / work_scale_; // ratio between seam scale and work scale
            is_seam_scale_set = true;
        }

        if (rois_.empty())
            // no ROIs given: detect features over the whole image into features_[i]
            (*features_finder_)(img, features_[i]);
        else
        {
            // ROIs were given in full-image coordinates; rescale them by work_scale_
            vector<Rect> rois(rois_[i].size());
            for (size_t j = 0; j < rois_[i].size(); ++j)
            {
                Point tl(cvRound(rois_[i][j].x * work_scale_), cvRound(rois_[i][j].y * work_scale_));
                Point br(cvRound(rois_[i][j].br().x * work_scale_), cvRound(rois_[i][j].br().y * work_scale_));
                rois[j] = Rect(tl, br);
            }
            (*features_finder_)(img, features_[i], rois);
        }
        features_[i].img_idx = (int)i; // record which image these features belong to
        LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());

        // downscale the source image by seam_scale_ and keep it in seam_est_imgs_[]
        resize(full_img, img, Size(), seam_scale_, seam_scale_);
        seam_est_imgs_[i] = img.clone();
    }
    // Do it to save memory
    features_finder_->collectGarbage(); // frees the finder's internal buffers
    full_img.release(); // release the loop temporaries
    img.release();

    // feature detection done; report the elapsed time
    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount(); // time the matching stage
#endif
    (*features_matcher_)(features_, pairwise_matches_, matching_mask_);
    features_matcher_->collectGarbage(); // same idea: free the matcher's buffers
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec"); // matching done

    // Leave only images we are sure are from the same panorama
    // conf_thresh_ is the confidence threshold for deciding that two images
    // belong to the same panorama; images below it are discarded
    indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);

    vector<Mat> seam_est_imgs_subset;
    vector<Mat> imgs_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices_.size(); ++i)
    {
        imgs_subset.push_back(imgs_[indices_[i]]);
        seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
    }
    seam_est_imgs_ = seam_est_imgs_subset;
    imgs_ = imgs_subset;
    full_img_sizes_ = full_img_sizes_subset;

    // check that at least two images survived the same-panorama filtering
    if ((int)imgs_.size() < 2)
    {
        LOGLN("Need more images");
        return ERR_NEED_MORE_IMGS;
    }

    return OK;
}
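To make the scale arithmetic above concrete, here is a minimal standalone sketch. The input size is hypothetical; the values registr_resol_ = 0.6 Mpx and seam_est_resol_ = 0.1 Mpx are the defaults createDefault() sets in OpenCV 2.4:
#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    const double registr_resol  = 0.6;   // Mpx, the 2.4 default for matching
    const double seam_est_resol = 0.1;   // Mpx, the 2.4 default for seam estimation
    const double area = 4000.0 * 3000.0; // a hypothetical 12 Mpx input image

    // Same formulas as in matchImages()
    double work_scale = std::min(1.0, std::sqrt(registr_resol * 1e6 / area));
    double seam_scale = std::min(1.0, std::sqrt(seam_est_resol * 1e6 / area));
    double seam_work_aspect = seam_scale / work_scale;

    // Prints roughly 0.224, 0.091 and 0.408: features are found on a ~0.22x copy,
    // seams on a ~0.09x copy, and seam_work_aspect_ relates the two scales.
    std::printf("work_scale=%.3f seam_scale=%.3f seam_work_aspect=%.3f\n",
                work_scale, seam_scale, seam_work_aspect);
    return 0;
}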
The analysis above also draws on these two write-ups: http://blog.csdn.net/hanshuning/article/details/41960401 and http://www.geekcome.com/content-10-8390-1.html.
With matchImages() covered, the next function to analyze is estimateCameraParams():
void Stitcher::estimateCameraParams()
{
    // Initial camera parameters, estimated from the pairwise homographies
    detail::HomographyBasedEstimator estimator;
    estimator(features_, pairwise_matches_, cameras_);

    for (size_t i = 0; i < cameras_.size(); ++i)
    {
        Mat R;
        cameras_[i].R.convertTo(R, CV_32F);
        cameras_[i].R = R;
        LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());
    }

    // Jointly refine all camera parameters by bundle adjustment
    bundle_adjuster_->setConfThresh(conf_thresh_);
    (*bundle_adjuster_)(features_, pairwise_matches_, cameras_);

    // Find median focal length and use it as final image scale
    vector<double> focals;
    for (size_t i = 0; i < cameras_.size(); ++i)
    {
        LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());
        focals.push_back(cameras_[i].focal);
    }

    std::sort(focals.begin(), focals.end());
    if (focals.size() % 2 == 1)
        warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

    // Wave correction straightens the panorama (keeps the horizon level)
    if (do_wave_correct_)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras_.size(); ++i)
            rmats.push_back(cameras_[i].R);
        detail::waveCorrect(rmats, wave_correct_kind_);
        for (size_t i = 0; i < cameras_.size(); ++i)
            cameras_[i].R = rmats[i];
    }
}
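A quick check of the median rule above: with three cameras whose refined focal lengths come out as, say, 520, 535 and 610 px (hypothetical values), the sorted middle element 535 becomes warped_image_scale_; with an even count the two middle values are averaged. Taking the median rather than the mean keeps the warping scale robust against a single badly estimated camera.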
Next comes generating the panorama (warping, blending and so on): composePanorama(pano);. Its source:
Stitcher::Status Stitcher::composePanorama(OutputArray pano)
{
    return composePanorama(vector<Mat>(), pano); // a single line: forward to the two-argument overload, traced below
}
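This one-line wrapper exists because the two-argument overload lets you re-compose from new images without re-running registration, which is useful when the cameras are fixed and only the scene content changes. A minimal sketch of that pattern (hypothetical helper; it assumes both frame sets come from the same rig, in the same order, so the geometry estimated on the first set stays valid):
#include "opencv2/core/core.hpp"
#include "opencv2/stitching/stitcher.hpp"
using namespace cv;

// Hypothetical helper: estimate the geometry once on first_frames, then
// composite a panorama from next_frames using the same transforms.
Stitcher::Status stitchWithReusedRegistration(const std::vector<Mat> &first_frames,
                                              const std::vector<Mat> &next_frames,
                                              Mat &pano)
{
    Stitcher stitcher = Stitcher::createDefault(false);
    Stitcher::Status status = stitcher.estimateTransform(first_frames);
    if (status != Stitcher::OK)
        return status;
    // New pixels, previously estimated rotations/intrinsics
    return stitcher.composePanorama(next_frames, pano);
}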
The two-argument overload itself:
Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
{
    LOGLN("Warping images (auxiliary)... ");

    vector<Mat> imgs;
    images.getMatVector(imgs);
    if (!imgs.empty())
    {
        // New images were supplied: replace the stored ones (and their seam-scale
        // copies), keeping only the subset selected during registration
        CV_Assert(imgs.size() == imgs_.size());

        Mat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i)
        {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        vector<Mat> seam_est_imgs_subset;
        vector<Mat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }
    Mat &pano_ = pano.getMatRef();

#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    vector<Point> corners(imgs_.size()); // top-left corner of each warped image in panorama coordinates
    vector<Mat> masks_warped(imgs_.size());
    vector<Mat> images_warped(imgs_.size());
    vector<Size> sizes(imgs_.size());
    vector<Mat> masks(imgs_.size());

    // Prepare image masks (all pixels valid before warping)
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks at seam-estimation scale; the intrinsics K are
    // rescaled by seam_work_aspect_ because the cameras were estimated at work scale
    Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
        K(0,0) *= (float)seam_work_aspect_; // fx
        K(0,2) *= (float)seam_work_aspect_; // cx
        K(1,1) *= (float)seam_work_aspect_; // fy
        K(1,2) *= (float)seam_work_aspect_; // cy

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    // Find seams: feed the exposure compensator first, then run the seam finder
    // on the float images to carve per-image seam masks
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    Mat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
    {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);

        // Read image and resize it if necessary
        full_img = imgs_[img_idx];
        if (!is_compose_scale_set)
        {
            if (compose_resol_ > 0)
                compose_scale = min(1.0, sqrt(compose_resol_ * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale_;
            compose_work_aspect = compose_scale / work_scale_;

            // Update warped image scale
            warped_image_scale_ *= static_cast<float>(compose_work_aspect);
            w = warper_->create((float)warped_image_scale_);

            // Update corners and sizes
            for (size_t i = 0; i < imgs_.size(); ++i)
            {
                // Update intrinsics
                cameras_[i].focal *= compose_work_aspect;
                cameras_[i].ppx *= compose_work_aspect;
                cameras_[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes_[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
                }

                Mat K;
                cameras_[i].K().convertTo(K, CV_32F);
                Rect roi = w->warpRoi(sz, K, cameras_[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (std::abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras_[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S); // the blender works on CV_16S images
        img_warped.release();
        img.release();
        mask.release();

        // Make sure seam mask has proper size: dilate the seam-scale mask, resize it
        // up to compositing scale, then AND it with the warped mask
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;

        if (!is_blender_prepared)
        {
            blender_->prepare(corners, sizes);
            is_blender_prepared = true;
        }

        // Blend the current image
        blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender_->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Preliminary result is in CV_16SC3 format, but all values are in [0, 255] range,
    // so convert it to avoid confusing the user
    result.convertTo(pano_, CV_8U);

    return OK;
}
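To isolate just the final blending step, here is a minimal standalone sketch of the detail::Blender API used above (the file names and the 50% overlap are hypothetical; it assumes two already-warped, equally sized images with their valid-pixel masks):
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
using namespace cv;

int main()
{
    // Hypothetical inputs: two pre-warped images plus their masks
    Mat img1 = imread("warped1.jpg"), img2 = imread("warped2.jpg");
    Mat mask1 = imread("mask1.png", 0), mask2 = imread("mask2.png", 0);
    if (img1.empty() || img2.empty() || mask1.empty() || mask2.empty())
        return -1;

    detail::MultiBandBlender blender(false /*try_gpu*/, 5 /*num_bands*/);

    // prepare() needs each image's top-left corner and size in panorama coordinates
    std::vector<Point> corners;
    corners.push_back(Point(0, 0));
    corners.push_back(Point(img1.cols / 2, 0)); // hypothetical 50% horizontal overlap
    std::vector<Size> sizes;
    sizes.push_back(img1.size());
    sizes.push_back(img2.size());
    blender.prepare(corners, sizes);

    // feed() expects CV_16SC3, exactly as composePanorama() converts above
    Mat img1s, img2s;
    img1.convertTo(img1s, CV_16S);
    img2.convertTo(img2s, CV_16S);
    blender.feed(img1s, mask1, corners[0]);
    blender.feed(img2s, mask2, corners[1]);

    Mat result, result_mask;
    blender.blend(result, result_mask);
    result.convertTo(result, CV_8U); // back to displayable 8-bit
    imwrite("blended.jpg", result);
    return 0;
}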