Video Module: Video Analysis

Posted by 會思考的魚 on 2017-05-14

The opencv_video module: functions and classes for motion estimation, feature tracking, and foreground extraction. (This is the video analysis component; it covers motion estimation, background subtraction, object tracking, and related video-processing topics.)

In this module you can find algorithms that can be applied to your video streams, such as motion extraction, feature tracking, and foreground extraction.
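
As a quick orientation, here is a minimal sketch (not part of the original tutorial; it assumes an OpenCV 3.x-style installation and uses the default camera as the source) that touches two of these entry points: a MOG2 background subtractor for foreground extraction and pyramidal Lucas-Kanade optical flow for feature tracking.

//video_module_sketch.cpp -- orientation sketch (assumes OpenCV 3.x-style headers)
#include <opencv2/video.hpp>      // BackgroundSubtractorMOG2, calcOpticalFlowPyrLK
#include <opencv2/videoio.hpp>    // VideoCapture
#include <opencv2/imgproc.hpp>    // cvtColor, goodFeaturesToTrack
#include <vector>

int main()
{
  cv::VideoCapture cap(0);                              // default camera
  cv::Ptr<cv::BackgroundSubtractorMOG2> bg = cv::createBackgroundSubtractorMOG2();

  cv::Mat frame, gray, prevGray, fgMask;
  std::vector<cv::Point2f> prevPts, nextPts;
  std::vector<unsigned char> status;
  std::vector<float> err;

  while (cap.read(frame)) {
    bg->apply(frame, fgMask);                           // foreground extraction
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
    if (!prevPts.empty())                               // feature tracking (sparse optical flow)
      cv::calcOpticalFlowPyrLK(prevGray, gray, prevPts, nextPts, status, err);
    cv::goodFeaturesToTrack(gray, prevPts, 100, 0.01, 10); // refresh the points to track
    gray.copyTo(prevGray);
  }
  return 0;
}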

Title: How to Use Background Subtraction Methods

Compatibility: > OpenCV 2.4.6

Author: Domenico Daniele Bloisi

We will learn how to extract foreground masks from both videos and image sequences, and how to display them.

Background subtraction (BS) is a common and widely used technique for generating a foreground mask (namely, a binary image containing the pixels belonging to moving objects in the scene) by using static cameras.

As the name suggests, BS calculates the foreground mask by performing a subtraction between the current frame and a background model, which contains the static part of the scene or, more in general, everything that can be considered background given the characteristics of the observed scene.
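
To make the idea concrete, the following sketch (illustrative only; it is not the method used by the tutorial code below, and the file name and threshold value are placeholders) takes the first frame as a fixed background model and computes a foreground mask as the thresholded absolute difference between each frame and that model.

//naive_bs_sketch.cpp -- foreground mask = threshold(|current frame - fixed background|)
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

int main()
{
  cv::VideoCapture cap("video.avi");                         // placeholder input file
  cv::Mat frame, gray, background, diff, fgMask;

  cap.read(frame);
  cv::cvtColor(frame, background, cv::COLOR_BGR2GRAY);       // fixed background model

  while (cap.read(frame)) {
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
    cv::absdiff(gray, background, diff);                     // |current - background|
    cv::threshold(diff, fgMask, 30, 255, cv::THRESH_BINARY); // binary foreground mask
    cv::imshow("naive FG mask", fgMask);
    if (cv::waitKey(30) == 27) break;                        // ESC to quit
  }
  return 0;
}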

Background modeling consists of two main steps:

  1. Background initialization;
  2. Background update.

In the first step, an initial model of the background is computed, while in the second step that model is updated in order to adapt to possible changes in the scene.
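
A very small example of these two steps (a hedged sketch that assumes a simple running-average model rather than the MOG/MOG2 models used later; the file name and the 0.01 learning rate are illustrative) could look like this:

//running_average_sketch.cpp -- background initialization followed by background update
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
  cv::VideoCapture cap("video.avi");               // placeholder input file
  cv::Mat frame, gray, bgModel;

  // step 1: background initialization from the first frame
  cap.read(frame);
  cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
  gray.convertTo(bgModel, CV_32F);

  // step 2: background update, so the model adapts to slow changes in the scene
  while (cap.read(frame)) {
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
    cv::accumulateWeighted(gray, bgModel, 0.01);   // bgModel = (1-a)*bgModel + a*gray
  }
  return 0;
}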

In this tutorial we will learn how to perform BS by using OpenCV. As input, we will use data from the publicly available dataset Background Models Challenge (BMC).

Goals:

In this tutorial you will learn how to:

  1. Read data from videos by using VideoCapture, or from image sequences by using imread;
  2. Create and update the background model by using the BackgroundSubtractor class;
  3. Get and show the foreground masks by using imshow;
  4. Save the output by using imwrite in order to quantitatively evaluate the results (see the sketch right after this list).
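
The tutorial program below displays the masks but does not call imwrite itself; a hypothetical way to save each mask for quantitative evaluation is sketched here (the file-name pattern and placeholders are illustrative), and the same two lines could be dropped into the program right after the apply() calls.

//imwrite_sketch.cpp -- saving a foreground mask to disk (hypothetical addition)
#include <opencv2/highgui/highgui.hpp>   // imread/imwrite (OpenCV 2.4-style header)
#include <sstream>

int main()
{
  cv::Mat fgMask = cv::imread("mask.png", 0);       // placeholder mask, loaded as grayscale
  int frameNumber = 1;                              // placeholder frame counter

  std::ostringstream name;
  name << "fgMaskMOG2_" << frameNumber << ".png";   // e.g. fgMaskMOG2_1.png
  if (!fgMask.empty())
    cv::imwrite(name.str(), fgMask);                // save the mask for later evaluation
  return 0;
}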

Source code:

Below you can find the source code. We will let the user choose whether to process a video file or a sequence of images.

Two different methods are used to generate two foreground masks:

  1. MOG
  2. MOG2

The results, as well as the input data, are shown on the screen.

//opencv-bg_sub.cpp
//opencv
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>       // rectangle, putText
#include <opencv2/video/background_segm.hpp>
//C
#include <stdio.h>
#include <string.h>                          // strcmp
//C++
#include <iostream>
#include <sstream>

using namespace cv;
using namespace std;

//global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard; //input from the keyboard

//function declarations
void help();
void processVideo(char* videoFilename);
void processImages(char* firstFrameFilename);

void help()
{
  cout
  << "--------------------------------------------------------------------------"  << endl
  << "This program shows how to use background subtraction methods provided by "   << endl
  << " OpenCV. You can process both videos (-vid) and images (-img)."              << endl
                                                                                   << endl
  << "Usage:"                                                                      << endl
  << "./bs {-vid <video filename>|-img <image filename>}"                          << endl
  << "for example: ./bs -vid video.avi"                                            << endl
  << "or: ./bs -img /data/images/1.png"                                            << endl
  << "--------------------------------------------------------------------------"  << endl
  << endl;
}

int main(int argc, char* argv[])
{
  //print help information
  help();

  //check for the input parameter correctness
  if(argc != 3) {
    cerr <<"Incorret input list" << endl;
    cerr <<"exiting..." << endl;
    return EXIT_FAILURE;
  }

  //create GUI windows
  namedWindow("Frame");
  namedWindow("FG Mask MOG");
  namedWindow("FG Mask MOG 2");

  //create Background Subtractor objects
  pMOG = createBackgroundSubtractorMOG(); //MOG approach
  pMOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach

  if(strcmp(argv[1], "-vid") == 0) {
    //input data coming from a video
    processVideo(argv[2]);
  }
  else if(strcmp(argv[1], "-img") == 0) {
    //input data coming from a sequence of images
    processImages(argv[2]);
  }
  else {
    //error in reading input parameters
    cerr <<"Please, check the input parameters." << endl;
    cerr <<"Exiting..." << endl;
    return EXIT_FAILURE;
  }
  //destroy GUI windows
  destroyAllWindows();
  return EXIT_SUCCESS;
}

void processVideo(char* videoFilename) {
  //create the capture object
  VideoCapture capture(videoFilename);
  if(!capture.isOpened()){
    //error in opening the video input
    cerr << "Unable to open video file: " << videoFilename << endl;
    exit(EXIT_FAILURE);
  }
  //read input data. ESC or 'q' for quitting
  while( (char)keyboard != 'q' && (char)keyboard != 27 ){
    //read the current frame
    if(!capture.read(frame)) {
      cerr << "Unable to read next frame." << endl;
      cerr << "Exiting..." << endl;
      exit(EXIT_FAILURE);
    }
    //update the background model
    pMOG->apply(frame, fgMaskMOG);
    pMOG2->apply(frame, fgMaskMOG2);
    //get the frame number and write it on the current frame
    stringstream ss;
    rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
              cv::Scalar(255,255,255), -1);
    ss << capture.get(CAP_PROP_POS_FRAMES);
    string frameNumberString = ss.str();
    putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
            FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
    //show the current frame and the fg masks
    imshow("Frame", frame);
    imshow("FG Mask MOG", fgMaskMOG);
    imshow("FG Mask MOG 2", fgMaskMOG2);
    //get the input from the keyboard
    keyboard = waitKey( 30 );
  }
  //delete capture object
  capture.release();
}

void processImages(char* firstFrameFilename) {
  //read the first file of the sequence
  frame = imread(firstFrameFilename);
  if(!frame.data){
    //error in opening the first image
    cerr << "Unable to open first image frame: " << firstFrameFilename << endl;
    exit(EXIT_FAILURE);
  }
  //current image filename
  string fn(firstFrameFilename);
  //read input data. ESC or 'q' for quitting
  while( (char)keyboard != 'q' && (char)keyboard != 27 ){
    //update the background model
    pMOG->apply(frame, fgMaskMOG);
    pMOG2->apply(frame, fgMaskMOG2);
    //get the frame number and write it on the current frame
    size_t index = fn.find_last_of("/");
    if(index == string::npos) {
      index = fn.find_last_of("\\");
    }
    size_t index2 = fn.find_last_of(".");
    string prefix = fn.substr(0,index+1);
    string suffix = fn.substr(index2);
    string frameNumberString = fn.substr(index+1, index2-index-1);
    istringstream iss(frameNumberString);
    int frameNumber = 0;
    iss >> frameNumber;
    rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
              cv::Scalar(255,255,255), -1);
    putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
            FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
    //show the current frame and the fg masks
    imshow("Frame", frame);
    imshow("FG Mask MOG", fgMaskMOG);
    imshow("FG Mask MOG 2", fgMaskMOG2);
    //get the input from the keyboard
    keyboard = waitKey( 30 );
    //search for the next image in the sequence
    ostringstream oss;
    oss << (frameNumber + 1);
    string nextFrameNumberString = oss.str();
    string nextFrameFilename = prefix + nextFrameNumberString + suffix;
    //read the next frame
    frame = imread(nextFrameFilename);
    if(!frame.data){
      //error in opening the next image in the sequence
      cerr << "Unable to open image frame: " << nextFrameFilename << endl;
      exit(EXIT_FAILURE);
    }
    //update the path of the current frame
    fn.assign(nextFrameFilename);
  }
}

An example that uses drawContours to clean up the background segmentation result: segment_objects.cpp

#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/video/background_segm.hpp"
#include <stdio.h>
#include <string>
using namespace std;
using namespace cv;
static void help()
{
    printf("\n"
            "This program demonstrated a simple method of connected components clean up of background subtraction\n"
            "When the program starts, it begins learning the background.\n"
            "You can toggle background learning on and off by hitting the space bar.\n"
            "Call\n"
            "./segment_objects [video file, else it reads camera 0]\n\n");
}
static void refineSegments(const Mat& img, Mat& mask, Mat& dst)
{
    int niters = 3;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    Mat temp;
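    // dilate -> erode -> dilate: a morphological closing followed by an opening,
    // which fills small holes in the raw mask and removes small noise blobs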
    dilate(mask, temp, Mat(), Point(-1,-1), niters);
    erode(temp, temp, Mat(), Point(-1,-1), niters*2);
    dilate(temp, temp, Mat(), Point(-1,-1), niters);
    findContours( temp, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE );
    dst = Mat::zeros(img.size(), CV_8UC3);
    if( contours.size() == 0 )
        return;
    // iterate through all the top-level contours,
    // draw each connected component with its own random color
    int idx = 0, largestComp = 0;
    double maxArea = 0;
    for( ; idx >= 0; idx = hierarchy[idx][0] )
    {
        const vector<Point>& c = contours[idx];
        double area = fabs(contourArea(Mat(c)));
        if( area > maxArea )
        {
            maxArea = area;
            largestComp = idx;
        }
    }
    Scalar color( 0, 0, 255 );
    drawContours( dst, contours, largestComp, color, FILLED, LINE_8, hierarchy );
}
int main(int argc, char** argv)
{
    VideoCapture cap;
    bool update_bg_model = true;
    CommandLineParser parser(argc, argv, "{help h||}{@input||}");
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    string input = parser.get<std::string>("@input");
    if (input.empty())
        cap.open(0);
    else
        cap.open(input);
    if( !cap.isOpened() )
    {
        printf("\nCan not open camera or video file\n");
        return -1;
    }
    Mat tmp_frame, bgmask, out_frame;
    cap >> tmp_frame;
    if(tmp_frame.empty())
    {
        printf("can not read data from the video source\n");
        return -1;
    }
    namedWindow("video", 1);
    namedWindow("segmented", 1);
    Ptr<BackgroundSubtractorMOG2> bgsubtractor=createBackgroundSubtractorMOG2();
    bgsubtractor->setVarThreshold(10);
    for(;;)
    {
        cap >> tmp_frame;
        if( tmp_frame.empty() )
            break;
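        // third argument of apply() is the learning rate: a negative value lets the
        // algorithm choose it automatically, 0 keeps the background model frozen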
        bgsubtractor->apply(tmp_frame, bgmask, update_bg_model ? -1 : 0);
        refineSegments(tmp_frame, bgmask, out_frame);
        imshow("video", tmp_frame);
        imshow("segmented", out_frame);
        char keycode = (char)waitKey(30);
        if( keycode == 27 )
            break;
        if( keycode == ' ' )
        {
            update_bg_model = !update_bg_model;
            printf("Learn background is in state = %d\n",update_bg_model);
        }
    }
    return 0;
}




