Kinect Fusion Basics-WPF C# Sample Code Walkthrough
[Original source: http://feichizhongwu.blog.51cto.com/8606684/1361339]
Fellow readers: this is my first technical blog post, so please forgive any shortcomings in the editing or the technical content.
I have recently been studying the Kinect Fusion sample code from the official Microsoft SDK and now broadly understand the program flow, so I am posting this short write-up for discussion and study. Since the project is long, I will paste the code directly, annotated with my own comments, in the hope that it is useful to others working in this area.
///<summary>
/// This is an event-driven C# WPF application. The key handlers are WindowLoaded,
/// SensorDepthFrameReady, and FpsTimerTick, supported by the methods
/// ProcessDepthData and ResetReconstruction. The flow: when the window loads,
/// WindowLoaded initializes the member variables, sets up the FPS timer, calls
/// ResetReconstruction once, and starts the Kinect capturing data. Each new depth
/// frame raises SensorDepthFrameReady, whose ProcessDepthData call processes the
/// depth data and renders the depth image. Once the number of consecutive tracking
/// errors exceeds the preset limit, ResetReconstruction is called again. The
/// application runs until WindowClosing shuts it down.
/// </summary>
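///
/// In outline, the event wiring that WindowLoaded establishes (both lines appear
/// verbatim in the code further below) is:
///
///   this.sensor.DepthFrameReady += this.SensorDepthFrameReady; // per-frame pipeline
///   this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick); // FPS readout every 10 s
///
/// SensorDepthFrameReady copies the raw pixels and dispatches ProcessDepthData
/// via the window's Dispatcher, so every depth frame flows through one pipeline.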
namespace Microsoft.Samples.Kinect.KinectFusionBasics
{
using System;
using System.Diagnostics;
using System.IO;
using System.Windows;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Threading;
using Microsoft.Kinect;
using Microsoft.Kinect.Toolkit.Fusion;
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window, IDisposable
{
/// <summary>
/// Max tracking error count, we will reset the reconstruction if tracking errors
/// reach this number
/// </summary>
/// Maximum number of consecutive tracking errors
private const int MaxTrackingErrors = 100;
/// <summary>
/// If set true, will automatically reset the reconstruction when MaxTrackingErrors have occurred
/// </summary>
/// If true, the reconstruction is reset automatically once MaxTrackingErrors
/// consecutive errors have occurred (checked in ProcessDepthData)
private const bool AutoResetReconstructionWhenLost = false;
/// <summary>
/// The resolution of the depth image to be processed.
/// </summary>
/// DepthImageFormat is an enum: Undefined, Resolution640x480Fps30, Resolution320x240Fps30, Resolution80x60Fps30
private const DepthImageFormat DepthImageResolution = DepthImageFormat.Resolution640x480Fps30;
/// <summary>
/// The seconds interval to calculate FPS
/// </summary>
private const int FpsInterval = 10;
///<summary>
/// Adjusting the following constants changes the size of the reconstruction cube,
/// and with it the extent of the scene that can be reconstructed.
/// </summary>
/// <summary>
/// The reconstruction volume voxel density in voxels per meter (vpm);
/// vpm is the number of voxels per meter, i.e. a density.
/// 1000mm / 256vpm = ~3.9mm/voxel
/// </summary>
private const int VoxelsPerMeter = 256;
/// <summary>
/// The reconstruction volume voxel resolution in the X axis
/// At a setting of 256vpm the volume is 512 / 256 = 2m wide
/// </summary>
/// Total voxel count along the X axis
private const int VoxelResolutionX = 512;
/// <summary>
/// The reconstruction volume voxel resolution in the Y axis
/// At a setting of 256vpm the volume is 384 / 256 = 1.5m high
/// </summary>
/// Total voxel count along the Y axis
private const int VoxelResolutionY = 384;
/// <summary>
/// The reconstruction volume voxel resolution in the Z axis
/// At a setting of 256vpm the volume is 512 / 256 = 2m deep
/// </summary>
private const int VoxelResolutionZ = 512;
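// Worked example of the extents implied by the three resolution constants above:
//   X: 512 voxels / 256 vpm = 2.0 m wide
//   Y: 384 voxels / 256 vpm = 1.5 m high
//   Z: 512 voxels / 256 vpm = 2.0 m deep
// Each voxel is 1 m / 256 ≈ 3.9 mm on a side, so the cube covers a
// 2.0 x 1.5 x 2.0 m working space at roughly 3.9 mm resolution.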
/// <summary>
/// The reconstruction volume processor type. This parameter sets whether AMP or CPU processing
/// is used. Note that CPU processing will likely be too slow for real-time processing.
/// </summary>
/// Reconstruction processor type: CPU, or AMP (real-time GPU reconstruction)
private const ReconstructionProcessor ProcessorType = ReconstructionProcessor.Amp;
/// <summary>
/// The zero-based device index to choose for reconstruction processing if the
/// ReconstructionProcessor AMP options are selected.
/// Here we automatically choose a device to use for processing by passing -1,
/// </summary>
private const int DeviceToUse = -1;
/// <summary>
/// Parameter to translate the reconstruction based on the minimum depth setting. When set to
/// false, the reconstruction volume +Z axis starts at the camera lens and extends into the scene.
/// Setting this true in the constructor will move the volume forward along +Z away from the
/// camera by the minimum depth threshold to enable capture of very small reconstruction volumes
/// by setting a non-identity world-volume transformation in the ResetReconstruction call.
/// Small volumes should be shifted, as the Kinect hardware has a minimum sensing limit of ~0.35m,
/// inside which no valid depth is returned, hence it is difficult to initialize and track robustly
/// when the majority of a small volume is inside this distance.
/// </summary>
private bool translateResetPoseByMinDepthThreshold = true;
/// <summary>
/// Minimum depth distance threshold in meters. Depth pixels below this value will be
/// returned as invalid (0). Min depth must be positive or 0.
/// </summary>
/// DefaultMinimumDepth = 0.35f; min depth must be positive or 0
private float minDepthClip = FusionDepthProcessor.DefaultMinimumDepth;
/// <summary>
/// Maximum depth distance threshold in meters. Depth pixels above this value will be
/// returned as invalid (0). Max depth must be greater than 0.
/// </summary>
/// DefaultMaximumDepth=8.0f
private float maxDepthClip = FusionDepthProcessor.DefaultMaximumDepth;
/// <summary>
/// Active Kinect sensor
/// </summary>
private KinectSensor sensor;
/// <summary>
/// Bitmap that will hold color information
/// </summary>
/// Assigned in WindowLoaded, where it is also bound to the Image control
private WriteableBitmap colorBitmap;
/// <summary>
/// Intermediate storage for the depth data converted to color
/// </summary>
/// Buffer holding the depth data after conversion to color values
private int[] colorPixels;
/// <summary>
/// Intermediate storage for the depth float data converted from depth image frame
/// </summary>
/// Floating-point depth data converted from the depth image frame
private FusionFloatImageFrame depthFloatBuffer;
/// <summary>
/// Intermediate storage for the point cloud data converted from depth float image frame
/// </summary>
/// Point cloud data converted from the depth float frame
private FusionPointCloudImageFrame pointCloudBuffer;
/// <summary>
/// Raycast shaded surface image
/// </summary>
private FusionColorImageFrame shadedSurfaceColorFrame;
/// <summary>
/// The transformation between the world and camera view coordinate system
/// </summary>
/// World-to-camera transformation matrix
private Matrix4 worldToCameraTransform;
/// <summary>
/// The default transformation between the world and volume coordinate system
/// </summary>
private Matrix4 defaultWorldToVolumeTransform;
/// <summary>
/// The Kinect Fusion volume
/// </summary>
private Reconstruction volume;
/// <summary>
/// The timer to calculate FPS
/// </summary>
/// Timer from the System.Windows.Threading namespace;
/// frame rate = processedFrameCount / FpsInterval
private DispatcherTimer fpsTimer;
/// <summary>
/// The count of the frames processed in the FPS interval
/// </summary>
/// Number of frames processed within the FPS interval (FpsInterval = 10)
private int processedFrameCount;
/// <summary>
/// The tracking error count
/// </summary>
/// Number of consecutive tracking errors
private int trackingErrorCount;
/// <summary>
/// The sensor depth frame data length
/// </summary>
private int frameDataLength;
/// <summary>
/// Whether a depth frame is currently being processed
/// </summary>
private bool processingFrame;
/// <summary>
/// Track whether Dispose has been called
/// </summary>
private bool disposed;
/// <summary>
/// Initializes a new instance of the MainWindow class.
/// </summary>
public MainWindow()
{
this.InitializeComponent();
}
/// <summary>
/// Finalizes an instance of the MainWindow class.
/// This destructor will run only if the Dispose method does not get called.
/// </summary>
~MainWindow()
{
this.Dispose(false);
}
/// <summary>
/// Get the image size of fusion images and bitmap.
/// </summary>
/// Returns the size of the depth image. DepthImageResolution is the constant defined above; ImageSize is a Size struct and can be used directly.
public static Size ImageSize
{
get
{// GetImageSize is a private helper, defined below.
// With DepthImageResolution = DepthImageFormat.Resolution640x480Fps30, this returns Size(640, 480).
return GetImageSize(DepthImageResolution);
}
}
/// <summary>
/// Dispose the allocated frame buffers and reconstruction.
/// </summary>
/// Resource release; works together with Dispose(bool disposing)
public void Dispose()
{
this.Dispose(true);
// This object will be cleaned up by the Dispose method.
GC.SuppressFinalize(this);
}
/// <summary>
/// Frees all memory associated with the FusionImageFrame.
/// </summary>
/// <param name="disposing">Whether the function was called from Dispose.</param>
/// Resource release helper
protected virtual void Dispose(bool disposing)
{
if (!this.disposed)
{
if (null != this.depthFloatBuffer)
{// depthFloatBuffer is the intermediate depth float buffer
this.depthFloatBuffer.Dispose();
}
if (null != this.pointCloudBuffer)
{
this.pointCloudBuffer.Dispose();
}
if (null != this.shadedSurfaceColorFrame)
{
this.shadedSurfaceColorFrame.Dispose();
}
if (null != this.volume)
{
this.volume.Dispose();
}
this.disposed = true;
}
}
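// The two methods above follow the standard IDisposable pattern: Dispose()
// performs cleanup and suppresses finalization, while the finalizer is the
// fallback when Dispose is never called. A minimal sketch of the same pattern
// on a standalone class (illustrative only, not part of the sample):
//
//   class FrameBufferHolder : IDisposable
//   {
//       private bool disposed;
//       public void Dispose() { this.Dispose(true); GC.SuppressFinalize(this); }
//       ~FrameBufferHolder() { this.Dispose(false); }
//       protected virtual void Dispose(bool disposing)
//       {
//           if (this.disposed) { return; }
//           // release held resources here (managed ones only when disposing == true)
//           this.disposed = true;
//       }
//   }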
/// <summary>
/// Get the depth image size from the input depth image format.
/// </summary>
/// <param name="imageFormat">The depth image format.</param>
/// <returns>The width and height of the input depth image format.</returns>
/// Called from ImageSize.
/// imageFormat is the format of the input depth image.
private static Size GetImageSize(DepthImageFormat imageFormat)
{
switch (imageFormat)
{
case DepthImageFormat.Resolution320x240Fps30:
return new Size(320, 240);
case DepthImageFormat.Resolution640x480Fps30:
return new Size(640, 480);
case DepthImageFormat.Resolution80x60Fps30:
return new Size(80, 60);
}
throw new ArgumentOutOfRangeException("imageFormat");
}
/// <summary>
/// Execute startup tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
/// WPF window-loaded handler.
/// Initializes member variables such as sensor and fpsTimer.
private void WindowLoaded(object sender, RoutedEventArgs e)
{
// Look through all sensors and start the first connected one.
// This requires that a Kinect is connected at the time of app startup.
// To make your app robust against plug/unplug,
// it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
// Purpose: iterate over all Kinect sensors and start the first connected one.
// This requires at least one Kinect to be plugged in when the app starts.
// For robustness against plug/unplug, the KinectSensorChooser from
// Microsoft.Kinect.Toolkit is recommended (see the sketch below).
// KinectSensor.KinectSensors returns the collection of attached Kinect devices;
// the loop below finds the first usable device.
foreach (var potentialSensor in KinectSensor.KinectSensors)
{
if (potentialSensor.Status == KinectStatus.Connected)
{// sensor is a member field holding the chosen Kinect
this.sensor = potentialSensor;
break;
}
}
// If no usable Kinect sensor is found, abort window initialization
if (null == this.sensor)
{// statusBarText is the status bar control; show the "NoKinectReady" message
this.statusBarText.Text = Properties.Resources.NoKinectReady;
return;
}
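// A hedged sketch of the KinectSensorChooser alternative mentioned above
// (API from Microsoft.Kinect.Toolkit; the handler body is a hypothetical outline,
// not code from this sample):
//
//   var chooser = new KinectSensorChooser();
//   chooser.KinectChanged += (s, args) =>
//   {
//       if (args.OldSensor != null) { args.OldSensor.Stop(); }
//       if (args.NewSensor != null)
//       {
//           args.NewSensor.DepthStream.Enable(DepthImageResolution);
//           args.NewSensor.Start();
//       }
//   };
//   chooser.Start();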
// Turn on the depth stream to receive depth frames
// Enable() is overloaded; the parameterless DepthImageStream.Enable() uses the
// default format DepthImageFormat.Resolution640x480Fps30.
this.sensor.DepthStream.Enable(DepthImageResolution);
// frameDataLength is the total pixel count of a depth frame
this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;
// Allocate space to put the color pixels we'll create
this.colorPixels = new int[this.frameDataLength];
// This is the bitmap we'll display on-screen
// Constructor signature: WriteableBitmap(int pixelWidth, int pixelHeight,
//double dpiX, double dpiY, PixelFormat pixelFormat, BitmapPalette palette);
this.colorBitmap = new WriteableBitmap(
(int)ImageSize.Width,
(int)ImageSize.Height,
96.0,// horizontal DPI (not an origin coordinate)
96.0,// vertical DPI
PixelFormats.Bgr32,// pixel format
null);// no bitmap palette
// Set the image we display to point to the bitmap where we'll put the image data
// Image is the name of the Image control in the XAML
this.Image.Source = this.colorBitmap;
// Add an event handler to be called whenever there is new depth frame data
// SensorDepthFrameReady is the handler for new depth-frame data and the top of
// the processing pipeline: it is invoked whenever the next depth frame is ready.
this.sensor.DepthFrameReady += this.SensorDepthFrameReady;
// VoxelsPerMeter = 256; X: 2m wide, Y: 1.5m high, Z: 2m deep.
// volParam is the parameter object holding the reconstruction volume settings.
var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
// Set the world-view transform to identity, so the world origin is the initial camera location.
this.worldToCameraTransform = Matrix4.Identity;
try
{
// This creates a volume cube with the Kinect at center of near plane, and volume directly
// in front of Kinect.
// Create the reconstruction cube.
// ProcessorType = ReconstructionProcessor.Amp (GPU reconstruction);
// worldToCameraTransform = Matrix4.Identity; DeviceToUse = -1 (auto-select device).
this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();
//this.translateResetPoseByMinDepthThreshold=true
// Setting this true in the constructor will move the volume forward along +Z away from the
// camera by the minimum depth threshold to enable capture of very small reconstruction volumes
// by setting a non-identity world-volume transformation in the ResetReconstruction call.
if (this.translateResetPoseByMinDepthThreshold)
{
// ResetReconstruction() is the main reset routine
this.ResetReconstruction();
}
}
catch (InvalidOperationException ex)
{// statusBarText is the status bar control
this.statusBarText.Text = ex.Message;
return;
}
catch (DllNotFoundException)
{
this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
return;
}
// Allocate the Fusion image frame buffers
// Depth frames generated from the depth input
this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);
// Point cloud frames generated from the depth float input
this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);
// Create images to raycast the Reconstruction Volume
this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);
// Start the sensor!
try
{
this.sensor.Start();
}
catch (IOException ex)
{
// Device is in use
this.sensor = null;
this.statusBarText.Text = ex.Message;
return;
}
catch (InvalidOperationException ex)
{
// Device is not valid, not supported or hardware feature unavailable
this.sensor = null;
this.statusBarText.Text = ex.Message;
return;
}
// Set Near Mode by default
try
{
this.sensor.DepthStream.Range = DepthRange.Near;
checkBoxNearMode.IsChecked = true;
}
catch
{
// device not near mode capable
}
// Initialize and start the FPS timer
this.fpsTimer = new DispatcherTimer();
// FpsTimerTick is the timer callback.
// FpsInterval = 10, so the timer fires every 10 seconds and runs FpsTimerTick.
this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);// hours, minutes, seconds
this.fpsTimer.Start();
// Reset the reconstruction
this.ResetReconstruction();
}
/// <summary>
/// Execute shutdown tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void WindowClosing(object sender, System.ComponentModel.CancelEventArgs e)
{
if (null != this.fpsTimer)
{
this.fpsTimer.Stop();
}
if (null != this.sensor)
{
this.sensor.Stop();
}
}
/// <summary>
/// Update the FPS reading in the status text bar
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
/// Computes the frame rate every 10 seconds and shows it in the status bar
private void FpsTimerTick(object sender, EventArgs e)
{
// Update the FPS reading
// FpsInterval = 10 is a constant
this.statusBarText.Text = string.Format(
System.Globalization.CultureInfo.InvariantCulture,
Properties.Resources.Fps,
(double)this.processedFrameCount / FpsInterval);
// Reset the frame count. processedFrameCount is incremented in ProcessDepthData
// each time a frame is processed successfully.
this.processedFrameCount = 0;
}
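// Example: if 283 frames were processed during the 10-second interval, the
// status bar shows 283 / 10 = 28.3 fps.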
/// <summary>
/// Event handler for Kinect sensor's DepthFrameReady event
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
/// Invoked whenever a new depth frame is ready; subscribed in WindowLoaded
private void SensorDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame != null && !this.processingFrame)
{
// frameDataLength is the pixel count of a frame, assigned in WindowLoaded
var depthPixels = new DepthImagePixel[this.frameDataLength];
// Copy the pixel data from the frame into a temporary array
depthFrame.CopyDepthImagePixelDataTo(depthPixels);
this.Dispatcher.BeginInvoke(
DispatcherPriority.Background,
(Action<DepthImagePixel[]>)((d) => { this.ProcessDepthData(d); }),
depthPixels);
// Mark that a frame is being processed; set back to false when processing completes.
this.processingFrame = true;
}
}
}
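// The processingFrame flag implements simple frame dropping: while one frame is
// being fused, newly arriving frames are ignored rather than queued, so the
// pipeline never falls behind the 30 fps depth stream. Schematically (a
// restatement with hypothetical names, not code from the sample):
//
//   if (frameArrived && !this.processingFrame)
//   {
//       this.processingFrame = true; // claim the pipeline before the async hop
//       this.Dispatcher.BeginInvoke(DispatcherPriority.Background,
//           (Action)(() => { /* fuse the frame */ this.processingFrame = false; }));
//   }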
/// <summary>
/// Process the depth input
/// </summary>
/// <param name="depthPixels">The depth data array to be processed</param>
/// Processes the input depth data.
/// Called from SensorDepthFrameReady, which supplies the depth data array
/// (of type DepthImagePixel) as the parameter.
private void ProcessDepthData(DepthImagePixel[] depthPixels)
{
// Debug.Assert prints the message and shows a dialog when the condition is false.
// The following three members are all assigned in WindowLoaded.
Debug.Assert(null != this.volume, "volume should be initialized");
Debug.Assert(null != this.shadedSurfaceColorFrame, "shaded surface should be initialized");
Debug.Assert(null != this.colorBitmap, "color bitmap should be initialized");
// depthFloatBuffer was allocated in WindowLoaded but holds no data yet.
// Convert the depth image frame into a depth float frame.
try
{
// Convert the depth image frame to depth float image frame
// Converts Kinect depth frames in unsigned short format to depth frames in float format
// representing distance from the camera in meters (parallel to the optical center axis).
FusionDepthProcessor.DepthToDepthFloatFrame(
depthPixels,// input data array
(int)ImageSize.Width,
(int)ImageSize.Height,
this.depthFloatBuffer,// output buffer (depth occupies the upper 13 bits of each raw value)
FusionDepthProcessor.DefaultMinimumDepth,//0.35f
FusionDepthProcessor.DefaultMaximumDepth,//8.0f
false);
// ProcessFrame processes each frame (on the GPU) and combines two steps:
// 1. AlignDepthFloatToReconstruction: aligns the depth float data
//    (depthFloatBuffer) to the reconstruction volume.
// 2. IntegrateFrame: fuses the aligned frame into the volume.
// After this call completes, if a visible output image of the reconstruction
// is required, the user can call CalculatePointCloud and then ShadePointCloud.
// The maximum image resolution supported in this function is 640x480.
// ProcessFrame will first calculate the camera pose and then integrate
// if tracking is successful
// Returns true on success; returns false when the input depth cannot be aligned
// and no useful transform can be computed.
bool trackingSucceeded = this.volume.ProcessFrame(
this.depthFloatBuffer,// input depth float data
FusionDepthProcessor.DefaultAlignIterationCount,// maximum alignment iteration count
FusionDepthProcessor.DefaultIntegrationWeight,// maximum integration weight
this.volume.GetCurrentWorldToCameraTransform());// most recent camera transform; updated below after tracking
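// A hedged sketch of the two steps that ProcessFrame (above) combines, using
// method names from the Fusion API; the exact overloads may vary between SDK
// versions:
//
//   float alignmentEnergy;
//   bool aligned = this.volume.AlignDepthFloatToReconstruction(
//       this.depthFloatBuffer,
//       FusionDepthProcessor.DefaultAlignIterationCount,
//       null,                 // optional delta-from-reference image
//       out alignmentEnergy,  // residual error of the alignment
//       this.volume.GetCurrentWorldToCameraTransform());
//   if (aligned)
//   {
//       this.volume.IntegrateFrame(
//           this.depthFloatBuffer,
//           FusionDepthProcessor.DefaultIntegrationWeight,
//           this.volume.GetCurrentWorldToCameraTransform());
//   }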
// If camera tracking failed, no data integration or raycast for reference
// point cloud will have taken place, and the internal camera pose
// will be unchanged.
if (!trackingSucceeded)
{
// Count consecutive tracking failures.
this.trackingErrorCount++;
// Show tracking error on status bar
this.statusBarText.Text = Properties.Resources.CameraTrackingFailed;
}
else// tracking succeeded
{
Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();
// Set the camera pose and reset tracking errors
this.worldToCameraTransform = calculatedCameraPose;// update the world-to-camera transform
// and reset the error counter after this successful tracking pass
this.trackingErrorCount = 0;
}
// Once the maximum number of consecutive tracking errors is reached, reset the reconstruction automatically
if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
{
// Auto Reset due to bad tracking
this.statusBarText.Text = Properties.Resources.ResetVolume;
// Automatically Clear Volume and reset tracking if tracking fails
this.ResetReconstruction();
}
// To display the reconstruction in the Image control, the point cloud must be
// calculated and then shaded.
// Calculate the point cloud: returns the 3D points and normals of the
// zero-crossing dense surface, stored in pointCloudBuffer.
this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);
// Shade point cloud and render
FusionDepthProcessor.ShadePointCloud(
this.pointCloudBuffer,// input
this.worldToCameraTransform,
this.shadedSurfaceColorFrame,// output
null);
// colorPixels was allocated in WindowLoaded but not yet filled
this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);
// Write the pixel data into our bitmap
// colorBitmap was created in WindowLoaded; write the new pixels into it
this.colorBitmap.WritePixels(
new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
this.colorPixels,//int[]
this.colorBitmap.PixelWidth * sizeof(int),
0);
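// Stride here is PixelWidth * sizeof(int) = 640 * 4 = 2560 bytes per row,
// matching the 4-bytes-per-pixel Bgr32 format chosen in WindowLoaded.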
// The input frame was processed successfully, increase the processed frame count
++this.processedFrameCount;
}
catch (InvalidOperationException ex)
{
this.statusBarText.Text = ex.Message;
}
finally
{
// This frame is done; processingFrame was set to true in SensorDepthFrameReady
this.processingFrame = false;
}
}
/// <summary>
/// Reset the reconstruction to initial value
/// </summary>
/// 1. Called from WindowLoaded for the initial reset.
/// 2. Called from ProcessDepthData to reset the reconstruction automatically
///    once trackingErrorCount reaches the maximum.
private void ResetReconstruction()
{
// Reset tracking error counter
this.trackingErrorCount = 0;
// Set the world-view transform to identity, so the world origin is the initial camera location.
this.worldToCameraTransform = Matrix4.Identity;
if (null != this.volume)
{
// Translate the reconstruction volume location away from the world origin by an amount equal
// to the minimum depth threshold. This ensures that some depth signal falls inside the volume.
// If set false, the default world origin is set to the center of the front face of the
// volume, which has the effect of locating the volume directly in front of the initial camera
// position with the +Z axis into the volume along the initial camera direction of view.
// If translateResetPoseByMinDepthThreshold is true, the reconstruction cube is
// shifted forward along +Z away from the camera by the minimum depth threshold
// (a non-identity world-volume transform), enabling capture of very small volumes.
if (this.translateResetPoseByMinDepthThreshold)
{
Matrix4 worldToVolumeTransform = this.defaultWorldToVolumeTransform;
// Translate the volume in the Z axis by the minDepthThreshold distance
float minDist = (this.minDepthClip < this.maxDepthClip) ? this.minDepthClip : this.maxDepthClip;
worldToVolumeTransform.M43 -= minDist * VoxelsPerMeter;
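// Worked example: with the default minDepthClip of 0.35 m and
// VoxelsPerMeter = 256, the volume is shifted 0.35 * 256 = 89.6 voxel units
// along +Z, since the world-to-volume translation (M43) is expressed in voxels.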
this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);
}
else
{
this.volume.ResetReconstruction(this.worldToCameraTransform);
}
}
if (null != this.fpsTimer)
{
// Reset the processed frame count and reset the FPS timer
this.fpsTimer.Stop();
this.processedFrameCount = 0;
this.fpsTimer.Start();
}
}
/// <summary>
/// Handles the user clicking on the reset reconstruction button
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void ButtonResetReconstructionClick(object sender, RoutedEventArgs e)
{
if (null == this.sensor)
{
this.statusBarText.Text = Properties.Resources.ConnectDeviceFirst;
return;
}
// reset the reconstruction and update the status text
this.ResetReconstruction();
this.statusBarText.Text = Properties.Resources.ResetReconstruction;
}
/// <summary>
/// Handles the checking or un-checking of the near mode combo box
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void CheckBoxNearModeChanged(object sender, RoutedEventArgs e)
{
if (this.sensor != null)
{
// will not function on non-Kinect for Windows devices
try
{
if (this.checkBoxNearMode.IsChecked.GetValueOrDefault())
{
this.sensor.DepthStream.Range = DepthRange.Near;
}
else
{
this.sensor.DepthStream.Range = DepthRange.Default;
}
}
catch (InvalidOperationException)
{
}
}
}
}
}