Computer Vision | Monocular Visual Odometry in MATLAB Based on SURF Features (Code)

Posted by 衝動的MJ on 2018-12-13

Author's GitHub: https://github.com/MichaelBeechan

Author's CSDN: https://blog.csdn.net/u011344545

=================================================================

SURF feature extraction reference: https://blog.csdn.net/u011344545/article/details/84978195

%% Name:Michael Beechan
%% School:Chongqing University of Technology
%% Time:2018.12.12
%% Function:Monocular Visual Odometry

 

%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Estimating the pose of the second view relative to the first view
%% Bootstrapping the camera trajectory estimate using global bundle adjustment
%% Estimating the remaining camera trajectory using windowed bundle adjustment
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% [1] "Towards a Simulation Driven Stereo Vision System". Proceedings of ICPR, pp.1038-1042, 2012.
%% [2] "Realistic CG Stereo Image Dataset with Ground Truth Disparity Maps", Proceedings of ICPR workshop TrakMark2012, pp.40-42, 2012.
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Read Input Image Sequence and Ground Truth
images = imageDatastore(fullfile(toolboxdir('vision'), 'visiondata', 'NewTsukuba'));
%% Create the camera parameters object using camera intrinsics from the NewTsukuba dataset.
%% Note: cameraParameters expects the transpose of the conventional intrinsic
%% matrix (MATLAB uses the row-vector, premultiply convention), hence the ' below.
K = [615 0 320; 0 615 240; 0 0 1]';
cameraParams = cameraParameters('IntrinsicMatrix', K);

 

%% Load ground truth camera poses
load(fullfile(toolboxdir('vision'), 'visiondata', 'visualOdometryGroundTruth.mat'));

 

%% Create a View Set Containing the First View of the Sequence
%% Create an empty viewSet object to manage the data associated with each view
vSet = viewSet;
%% Read and display the first image
Irgb = readimage(images, 1);
player = vision.VideoPlayer('Position', [20, 400, 650, 510]);
step(player, Irgb);

 

%% Convert to gray scale and undistort
prevI = undistortImage(rgb2gray(Irgb), cameraParams);

 

%% Detect features
prevPoints = detectSURFFeatures(prevI, 'MetricThreshold', 500);

 

%% Select a subset of features, uniformly distributed throughout the image
numPoints = 150;
prevPoints = selectUniform(prevPoints, numPoints, size(prevI));

 

%% Extract features. Using 'Upright' features improves matching quality
%% if the camera motion involves little or no in-plane rotation
prevFeatures = extractFeatures(prevI, prevPoints, 'Upright', true);

 

%% Add the first view. Place the camera associated with the first view at the origin,
%% oriented along the Z-axis
viewId = 1;
vSet = addView(vSet, viewId, 'Points', prevPoints, 'Orientation', eye(3), 'Location', [0 0 0]);

 

%% Plot Initial Camera Pose
%% Setup axis
figure
axis([-220, 50, -140, 20, -50, 300]);

 

%% Set Y-axis to be vertical pointing down
view(gca, 3);
set(gca, 'CameraUpVector', [0, -1, 0]);
camorbit(gca, -120, 0, 'data', [0, 1, 0]);

grid on
xlabel('X (cm)');
ylabel('Y (cm)');
zlabel('Z (cm)');
hold on

%% Plot estimated camera pose
cameraSize = 7;
camEstimated = plotCamera('Size', cameraSize, ...
    'Location', vSet.Views.Location{1}, ...
    'Orientation', vSet.Views.Orientation{1}, ...
    'Color', 'g', 'Opacity', 0);

 

%% Plot actual camera pose
camActual = plotCamera('Size', cameraSize, ...
    'Location', groundTruthPoses.Location{1}, ...
    'Orientation', groundTruthPoses.Orientation{1}, ...
    'Color', 'b', 'Opacity', 0);

 

%% Initialize camera trajectories
trajectoryEstimated = plot3(0, 0, 0, 'g-');
trajectoryActual    = plot3(0, 0, 0, 'b-');

legend('Estimated Trajectory', 'Actual Trajectory');
title('Camera Trajectory');

%% Estimate the Pose of the Second View: using helperDetectAndMatchFeatures and helperEstimateRelativePose
%% Read and display the image
viewId = 2;
Irgb = readimage(images, viewId);
step(player, Irgb);

 

%% Convert to gray scale and undistort
I = undistortImage(rgb2gray(Irgb), cameraParams);

 

%% Match features between the previous and the current image
[currPoints, currFeatures, indexPairs] = helperDetectAndMatchFeatures(...
    prevFeatures, I);

 

%% Estimate the pose of the current view relative to the previous view
[orient, loc, inlierIdx] = helperEstimateRelativePose(...
    prevPoints(indexPairs(:,1)), currPoints(indexPairs(:,2)), cameraParams);

 

%% Exclude epipolar outliers
indexPairs = indexPairs(inlierIdx, :);

 

%% Add the current view to the view set
vSet = addView(vSet, viewId, 'Points', currPoints, 'Orientation', orient, ...
    'Location', loc);

 

%% Store the point matches between the previous and the current views
vSet = addConnection(vSet, viewId-1, viewId, 'Matches', indexPairs);

 

%% Compute scale factor from the ground truth using helperNormalizeViewSet
vSet = helperNormalizeViewSet(vSet, groundTruthPoses);

 

%% Update camera trajectory plot using helperUpdateCameraPlots and helperUpdateCameraTrajectories
helperUpdateCameraPlots(viewId, camEstimated, camActual, poses(vSet), ...
    groundTruthPoses);
helperUpdateCameraTrajectories(viewId, trajectoryEstimated, trajectoryActual,...
    poses(vSet), groundTruthPoses);

prevI = I;
prevFeatures = currFeatures;
prevPoints   = currPoints;

%% Bootstrap Estimating Camera Trajectory Using Global Bundle Adjustment
%% Use helperFindEpipolarInliers to find the matches that satisfy the epipolar constraint,
%% and helperFind3Dto2DCorrespondences to triangulate 3-D points from the previous two views
%% and find the corresponding 2-D points in the current view (sketches of both helpers appear after the loops below)
for viewId = 3:15
    % Read and display the next image
    Irgb = readimage(images, viewId);
    step(player, Irgb);
   
    % Convert to gray scale and undistort.
    I = undistortImage(rgb2gray(Irgb), cameraParams);
   
    % Match points between the previous and the current image.
    [currPoints, currFeatures, indexPairs] = helperDetectAndMatchFeatures(...
        prevFeatures, I);
     
    % Eliminate outliers from feature matches.
    inlierIdx = helperFindEpipolarInliers(prevPoints(indexPairs(:,1)),...
        currPoints(indexPairs(:, 2)), cameraParams);
    indexPairs = indexPairs(inlierIdx, :);
   
    % Triangulate points from the previous two views, and find the
    % corresponding points in the current view.
    [worldPoints, imagePoints] = helperFind3Dto2DCorrespondences(vSet,...
        cameraParams, indexPairs, currPoints);
   
    % Since RANSAC involves a stochastic process, it may sometimes fail to
    % reach the desired confidence level before exceeding the maximum
    % number of trials. Disable the warning when that happens, since the
    % outcomes are still valid.
    warningstate = warning('off','vision:ransac:maxTrialsReached');
   
    % Estimate the world camera pose for the current view.
    [orient, loc] = estimateWorldCameraPose(imagePoints, worldPoints, ...
        cameraParams, 'Confidence', 99.99, 'MaxReprojectionError', 0.8);
   
    % Restore the original warning state
    warning(warningstate)
   
    % Add the current view to the view set.
    vSet = addView(vSet, viewId, 'Points', currPoints, 'Orientation', orient, ...
        'Location', loc);
   
    % Store the point matches between the previous and the current views.
    vSet = addConnection(vSet, viewId-1, viewId, 'Matches', indexPairs);   
   
    tracks = findTracks(vSet); % Find point tracks spanning multiple views.
       
    camPoses = poses(vSet);    % Get camera poses for all views.
   
    % Triangulate initial locations for the 3-D world points.
    xyzPoints = triangulateMultiview(tracks, camPoses, cameraParams);
   
    % Refine camera poses using bundle adjustment.
    [~, camPoses] = bundleAdjustment(xyzPoints, tracks, camPoses, ...
        cameraParams, 'PointsUndistorted', true, 'AbsoluteTolerance', 1e-9,...
        'RelativeTolerance', 1e-9, 'MaxIterations', 300);
       
    vSet = updateView(vSet, camPoses); % Update view set.
   
    % Bundle adjustment can move the entire set of cameras. Normalize the
    % view set to place the first camera at the origin looking along the
    % Z-axis, and adjust the scale to match that of the ground truth.
    vSet = helperNormalizeViewSet(vSet, groundTruthPoses);
   
    % Update camera trajectory plot.
    helperUpdateCameraPlots(viewId, camEstimated, camActual, poses(vSet), ...
        groundTruthPoses);
    helperUpdateCameraTrajectories(viewId, trajectoryEstimated, ...
        trajectoryActual, poses(vSet), groundTruthPoses);
   
    prevI = I;
    prevFeatures = currFeatures;
    prevPoints   = currPoints; 
end

%% Estimate Remaining Camera Trajectory Using Windowed Bundle Adjustment
for viewId = 16:numel(images.Files)
    % Read and display the next image
    Irgb = readimage(images, viewId);
    step(player, Irgb);
   
    % Convert to gray scale and undistort.
    I = undistortImage(rgb2gray(Irgb), cameraParams);

 

    % Match points between the previous and the current image.
    [currPoints, currFeatures, indexPairs] = helperDetectAndMatchFeatures(...
        prevFeatures, I);   
         
    % Triangulate points from the previous two views, and find the
    % corresponding points in the current view.
    [worldPoints, imagePoints] = helperFind3Dto2DCorrespondences(vSet, ...
        cameraParams, indexPairs, currPoints);

 

    % Since RANSAC involves a stochastic process, it may sometimes fail to
    % reach the desired confidence level before exceeding the maximum
    % number of trials. Disable the warning when that happens, since the
    % outcomes are still valid.
    warningstate = warning('off','vision:ransac:maxTrialsReached');
   
    % Estimate the world camera pose for the current view.
    [orient, loc] = estimateWorldCameraPose(imagePoints, worldPoints, ...
        cameraParams, 'MaxNumTrials', 5000, 'Confidence', 99.99, ...
        'MaxReprojectionError', 0.8);
   
    % Restore the original warning state
    warning(warningstate)
   
    % Add the current view and connection to the view set.
    vSet = addView(vSet, viewId, 'Points', currPoints, 'Orientation', orient, ...
        'Location', loc);
    vSet = addConnection(vSet, viewId-1, viewId, 'Matches', indexPairs);
       
    % Refine estimated camera poses using windowed bundle adjustment. Run
    % the optimization every 7th view.
    if mod(viewId, 7) == 0       
        % Find point tracks in the last 15 views and triangulate.
        windowSize = 15;
        startFrame = max(1, viewId - windowSize);
        tracks = findTracks(vSet, startFrame:viewId);
        camPoses = poses(vSet, startFrame:viewId);
        [xyzPoints, reprojErrors] = triangulateMultiview(tracks, camPoses, ...
            cameraParams);
                               
        % Hold the first two poses fixed, to keep the same scale.
        fixedIds = [startFrame, startFrame+1];
       
        % Exclude points and tracks with high reprojection errors.
        idx = reprojErrors < 2;
       
        [~, camPoses] = bundleAdjustment(xyzPoints(idx, :), tracks(idx), ...
            camPoses, cameraParams, 'FixedViewIDs', fixedIds, ...
            'PointsUndistorted', true, 'AbsoluteTolerance', 1e-9,...
            'RelativeTolerance', 1e-9, 'MaxIterations', 300);
       
        vSet = updateView(vSet, camPoses); % Update view set.
    end
   
    % Update camera trajectory plot.
    helperUpdateCameraPlots(viewId, camEstimated, camActual, poses(vSet), ...
        groundTruthPoses);   
    helperUpdateCameraTrajectories(viewId, trajectoryEstimated, ...
        trajectoryActual, poses(vSet), groundTruthPoses);   
   
    prevI = I;
    prevFeatures = currFeatures;
    prevPoints   = currPoints; 
end

hold off

 
