Kinect Fusion Basics-WPF C# Sample Code Walkthrough

Published by 查志強 on 2016-09-22

[Original: http://feichizhongwu.blog.51cto.com/8606684/1361339]

Fellow readers, this is my first technical blog post; please bear with any editorial or technical shortcomings.

   I have recently been working through the Kinect Fusion sample code in the official Microsoft SDK to understand the overall program flow, and I am posting this short write-up for discussion and study. Since the project code is long, I will copy the code directly and add my own explanatory comments, in the hope that it is useful to others working in this area.


///<summary>
/// This program is a C# WPF application and is event-driven. The main handlers are
/// WindowLoaded, ProcessDepthData, ResetReconstruction, SensorDepthFrameReady, and
/// FpsTimerTick. The flow: when the window loads, WindowLoaded initializes the
/// member fields, sets up the FPS timer, calls ResetReconstruction once, and starts
/// the Kinect capturing data. Each new frame raises SensorDepthFrameReady, from
/// which ProcessDepthData processes the depth data and renders the depth image.
/// Once the number of consecutive tracking failures exceeds the preset limit,
/// ResetReconstruction is called again. The application shuts down in WindowClosing.
/// </summary>
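
In outline, the pipeline described above wires up roughly like this (my own sketch, not code taken verbatim from the sample):

// WindowLoaded:          sensor.DepthStream.Enable(...); sensor.DepthFrameReady += SensorDepthFrameReady;
//                        create the volume and frame buffers; fpsTimer.Tick += FpsTimerTick; ResetReconstruction()
// SensorDepthFrameReady: copy the raw depth pixels; Dispatcher.BeginInvoke(ProcessDepthData, pixels)
// ProcessDepthData:      DepthToDepthFloatFrame -> volume.ProcessFrame (align + integrate)
//                        -> CalculatePointCloud -> ShadePointCloud -> colorBitmap.WritePixels
// FpsTimerTick:          show processedFrameCount / FpsInterval in the status bar, then zero the count
// WindowClosing:         fpsTimer.Stop(); sensor.Stop()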



namespace Microsoft.Samples.Kinect.KinectFusionBasics

{

   using System;

   using System.Diagnostics;

   using System.IO;

   using System.Windows;

   using System.Windows.Media;

   using System.Windows.Media.Imaging;

   using System.Windows.Threading;

   using Microsoft.Kinect;

   using Microsoft.Kinect.Toolkit.Fusion;


   /// <summary>

   /// Interaction logic for MainWindow.xaml

   /// </summary>

   public partial class MainWindow : Window, IDisposable

   {

       /// <summary>

       /// Max tracking error count, we will reset the reconstruction if tracking errors

       /// reach this number

       /// </summary>

        /// Maximum tracking error count

       private const int MaxTrackingErrors = 100;


       /// <summary>

       /// If set true, will automatically reset the reconstruction when MaxTrackingErrors have occurred

       /// </summary>

        /// If true, the reconstruction is automatically reset once MaxTrackingErrors
        /// consecutive tracking failures occur; checked in ProcessDepthData

       private const bool AutoResetReconstructionWhenLost = false;


       /// <summary>

       /// The resolution of the depth image to be processed.

       /// </summary>

        /// DepthImageFormat is an enum: Undefined, Resolution640x480Fps30, Resolution320x240Fps30, Resolution80x60Fps30

       private const DepthImageFormat DepthImageResolution = DepthImageFormat.Resolution640x480Fps30;


       /// <summary>

       /// The seconds interval to calculate FPS

       /// </summary>

       private const int FpsInterval = 10;


        ///<summary>
        /// Adjusting the following constants changes the size of the volume cube,
        /// and therefore the extent of the reconstruction
        /// </summary>

       /// <summary>

       /// The reconstruction volume voxel density in voxels per meter (vpm)

        /// vpm is a density: the number of voxels per meter

       /// 1000mm / 256vpm = ~3.9mm/voxel

       /// </summary>

        /// Each voxel is a small cubic volume element

       private const int VoxelsPerMeter = 256;


       /// <summary>

       /// The reconstruction volume voxel resolution in the X axis

       /// At a setting of 256vpm the volume is 512 / 256 = 2m wide

       /// </summary>

        /// Voxel count along the X axis

       private const int VoxelResolutionX = 512;


       /// <summary>

       /// The reconstruction volume voxel resolution in the Y axis

       /// At a setting of 256vpm the volume is 384 / 256 = 1.5m high

       /// </summary>

        /// Voxel count along the Y axis

       private const int VoxelResolutionY = 384;


       /// <summary>

       /// The reconstruction volume voxel resolution in the Z axis

       /// At a setting of 256vpm the volume is 512 / 256 = 2m deep

       /// </summary>

       private const int VoxelResolutionZ = 512;
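
        // A small sanity-check helper (my addition, not in the original sample):
        // converts a voxel resolution along one axis into meters using the constants
        // above. 512/256 = 2.0m (X), 384/256 = 1.5m (Y), 512/256 = 2.0m (Z), and
        // each voxel is 1000mm / 256vpm ≈ 3.9mm across.
        private static float VoxelAxisExtentMeters(int voxelResolution)
        {
            return (float)voxelResolution / VoxelsPerMeter;
        }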


       /// <summary>

       /// The reconstruction volume processor type. This parameter sets whether AMP or CPU processing

       /// is used. Note that CPU processing will likely be too slow for real-time processing.

       /// </summary>

        /// Reconstruction processor type: CPU, or AMP (GPU, suitable for real-time)

       private const ReconstructionProcessor ProcessorType = ReconstructionProcessor.Amp;


       /// <summary>

       /// The zero-based device index to choose for reconstruction processing if the

       /// ReconstructionProcessor AMP options are selected.

       /// Here we automatically choose a device to use for processing by passing -1,

       /// </summary>

       private const int DeviceToUse = -1;


       /// <summary>

       /// Parameter to translate the reconstruction based on the minimum depth setting. When set to

       /// false, the reconstruction volume +Z axis starts at the camera lens and extends into the scene.

       /// Setting this true in the constructor will move the volume forward along +Z away from the

       /// camera by the minimum depth threshold to enable capture of very small reconstruction volumes

       /// by setting a non-identity world-volume transformation in the ResetReconstruction call.

       /// Small volumes should be shifted, as the Kinect hardware has a minimum sensing limit of ~0.35m,

       /// inside which no valid depth is returned, hence it is difficult to initialize and track robustly  

       /// when the majority of a small volume is inside this distance.

       /// </summary>

       private bool translateResetPoseByMinDepthThreshold = true;


       /// <summary>

       /// Minimum depth distance threshold in meters. Depth pixels below this value will be

       /// returned as invalid (0). Min depth must be positive or 0.

       /// </summary>

        /// FusionDepthProcessor.DefaultMinimumDepth = 0.35f; min depth must be positive or 0

       private float minDepthClip = FusionDepthProcessor.DefaultMinimumDepth;


       /// <summary>

       /// Maximum depth distance threshold in meters. Depth pixels above this value will be

       /// returned as invalid (0). Max depth must be greater than 0.

       /// </summary>

       /// DefaultMaximumDepth=8.0f

       private float maxDepthClip = FusionDepthProcessor.DefaultMaximumDepth;


       /// <summary>

       /// Active Kinect sensor

       /// </summary>

       private KinectSensor sensor;


       /// <summary>

       /// Bitmap that will hold color information

       /// </summary>

        /// Assigned in WindowLoaded, where it is also bound to the Image control

       private WriteableBitmap colorBitmap;


       /// <summary>

       /// Intermediate storage for the depth data converted to color

       /// </summary>

        /// Buffer holding the depth data converted to color values

       private int[] colorPixels;


       /// <summary>

       /// Intermediate storage for the depth float data converted from depth image frame

       /// </summary>

        /// Float depth data converted from the depth image frame

       private FusionFloatImageFrame depthFloatBuffer;


       /// <summary>

       /// Intermediate storage for the point cloud data converted from depth float image frame

       /// </summary>

        /// Point cloud data converted from the depth float frame

       private FusionPointCloudImageFrame pointCloudBuffer;


       /// <summary>

       /// Raycast shaded surface image

       /// </summary>

       private FusionColorImageFrame shadedSurfaceColorFrame;


       /// <summary>

       /// The transformation between the world and camera view coordinate system

       /// </summary>

        /// World-to-camera transform matrix

       private Matrix4 worldToCameraTransform;


       /// <summary>

       /// The default transformation between the world and volume coordinate system

       /// </summary>

       private Matrix4 defaultWorldToVolumeTransform;


       /// <summary>

       /// The Kinect Fusion volume

       /// </summary>

       private Reconstruction volume;


       /// <summary>

       /// The timer to calculate FPS

       /// </summary>

        /// DispatcherTimer from the System.Windows.Threading namespace
        /// FPS = processedFrameCount / FpsInterval

       private DispatcherTimer fpsTimer;


       /// <summary>

       /// The count of the frames processed in the FPS interval

       /// </summary>

        /// Number of frames processed within the FPS interval (FpsInterval = 10 s)

       private int processedFrameCount;


       /// <summary>

       /// The tracking error count

       /// </summary>

        /// Number of consecutive tracking failures

       private int trackingErrorCount;


       /// <summary>

       /// The sensor depth frame data length

       /// </summary>

       private int frameDataLength;


       /// <summary>

        /// Whether a depth frame is currently being processed

       /// </summary>

       private bool processingFrame;


       /// <summary>

       /// Track whether Dispose has been called

       /// </summary>

       private bool disposed;


       /// <summary>

       /// Initializes a new instance of the MainWindow class.

       /// </summary>

       public MainWindow()

       {

           this.InitializeComponent();

       }


       /// <summary>

       /// Finalizes an instance of the MainWindow class.

       /// This destructor will run only if the Dispose method does not get called.

       /// </summary>

       ~MainWindow()

       {

           this.Dispose(false);

       }


       /// <summary>

       /// Get the image size of fusion images and bitmap.

       /// </summary>

        /// Returns the size of the depth image; DepthImageResolution is the constant defined above, and this static property can be used directly

       public static Size ImageSize

       {

           get

            {
                // GetImageSize is a private helper, defined below. With
                // DepthImageResolution = Resolution640x480Fps30 it returns Size(640, 480)

               return GetImageSize(DepthImageResolution);

           }

       }


       /// <summary>

       /// Dispose the allocated frame buffers and reconstruction.

       /// </summary>

        /// Releases resources; works together with Dispose(bool disposing)

       public void Dispose()

       {

           this.Dispose(true);


           // This object will be cleaned up by the Dispose method.

           GC.SuppressFinalize(this);

       }


       /// <summary>

       /// Frees all memory associated with the FusionImageFrame.

       /// </summary>

       /// <param name="disposing">Whether the function was called from Dispose.</param>

        /// Releases resources

       protected virtual void Dispose(bool disposing)

       {

           if (!this.disposed)

           {

               if (null != this.depthFloatBuffer)

                { // depthFloatBuffer is the depth float data buffer

                   this.depthFloatBuffer.Dispose();

               }


               if (null != this.pointCloudBuffer)

               {

                   this.pointCloudBuffer.Dispose();

               }


               if (null != this.shadedSurfaceColorFrame)

               {

                   this.shadedSurfaceColorFrame.Dispose();

               }


               if (null != this.volume)

               {

                   this.volume.Dispose();

               }


               this.disposed = true;

           }

       }


       /// <summary>

       /// Get the depth image size from the input depth image format.

       /// </summary>

       /// <param name="imageFormat">The depth image format.</param>

        /// <returns>The width and height of the input depth image format.</returns>

        /// Called from the ImageSize property; imageFormat is the format of the input depth image

       private static Size GetImageSize(DepthImageFormat imageFormat)

       {

           switch (imageFormat)

           {

               case DepthImageFormat.Resolution320x240Fps30:

                   return new Size(320, 240);


               case DepthImageFormat.Resolution640x480Fps30:

                   return new Size(640, 480);


               case DepthImageFormat.Resolution80x60Fps30:

                   return new Size(80, 60);

           }


           throw new ArgumentOutOfRangeException("imageFormat");

       }


       /// <summary>

       /// Execute startup tasks

       /// </summary>

       /// <param name="sender">object sending the event</param>

       /// <param name="e">event arguments</param>

        /// WPF window Loaded handler
        /// Initializes the sensor, fpsTimer, and related fields

       private void WindowLoaded(object sender, RoutedEventArgs e)

       {

           // Look through all sensors and start the first connected one.

           // This requires that a Kinect is connected at the time of app startup.

           // To make your app robust against plug/unplug,

           // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit


            // KinectSensor.KinectSensors returns the collection of Kinect devices;
            // this loop picks the first sensor whose status is Connected

           foreach (var potentialSensor in KinectSensor.KinectSensors)

           {

               if (potentialSensor.Status == KinectStatus.Connected)

                { // this.sensor is the member field holding the chosen Kinect

                   this.sensor = potentialSensor;

                   break;

               }

           }


            // If no usable Kinect sensor was found, skip the rest of window initialization

           if (null == this.sensor)

            { // statusBarText is the status bar control; display the "NoKinectReady" message

               this.statusBarText.Text = Properties.Resources.NoKinectReady;

               return;

           }


           // Turn on the depth stream to receive depth frames

            // Enable() is overloaded; the parameterless DepthImageStream.Enable()
            // defaults to DepthImageFormat.Resolution640x480Fps30

           this.sensor.DepthStream.Enable(DepthImageResolution);

            // frameDataLength is the total number of pixels in a depth frame

           this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;


           // Allocate space to put the color pixels we'll create

           this.colorPixels = new int[this.frameDataLength];


           // This is the bitmap we'll display on-screen


            // WriteableBitmap(int pixelWidth, int pixelHeight,
            //     double dpiX, double dpiY, PixelFormat pixelFormat, BitmapPalette palette)
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,               // horizontal DPI
                96.0,               // vertical DPI
                PixelFormats.Bgr32, // pixel format
                null);              // palette (not needed for Bgr32)


           // Set the image we display to point to the bitmap where we'll put the image data

           //Image為圖片控制元件名

           this.Image.Source = this.colorBitmap;


           // Add an event handler to be called whenever there is new depth frame data

            // SensorDepthFrameReady handles new depth frame data and is the entry
            // point of the processing pipeline: it is invoked whenever the next
            // depth frame is ready

           this.sensor.DepthFrameReady += this.SensorDepthFrameReady;


            // With VoxelsPerMeter = 256 the volume is 2m wide (X), 1.5m high (Y), 2m deep (Z)
            // volParam holds the reconstruction volume parameters

           var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);


           // Set the world-view transform to identity, so the world origin is the initial camera location.

           this.worldToCameraTransform = Matrix4.Identity;


           try

           {


               // This creates a volume cube with the Kinect at center of near plane, and volume directly

               // in front of Kinect.

                // Create the reconstruction volume: ProcessorType is
                // ReconstructionProcessor.Amp (GPU), worldToCameraTransform is
                // Matrix4.Identity, and DeviceToUse = -1 auto-selects the device

               this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);


               this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();


                // translateResetPoseByMinDepthThreshold = true: the volume is moved
                // forward along +Z away from the camera by the minimum depth
                // threshold, via a non-identity world-volume transform set in the
                // ResetReconstruction call, to enable capturing very small volumes

               if (this.translateResetPoseByMinDepthThreshold)

               {

                    // ResetReconstruction() performs the actual reset

                   this.ResetReconstruction();

               }

           }

           catch (InvalidOperationException ex)

            { // statusBarText is the status bar control

               this.statusBarText.Text = ex.Message;

               return;

           }

           catch (DllNotFoundException)

           {

                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;

               return;

           }


            // Allocate the frame buffers

           // Depth frames generated from the depth input

           this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);


           // Point cloud frames generated from the depth float input

           this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);


           // Create images to raycast the Reconstruction Volume

           this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);


           // Start the sensor!

           try

           {

               this.sensor.Start();

           }

           catch (IOException ex)

           {

               // Device is in use

               this.sensor = null;

               this.statusBarText.Text = ex.Message;


               return;

           }

           catch (InvalidOperationException ex)

           {

               // Device is not valid, not supported or hardware feature unavailable

               this.sensor = null;

               this.statusBarText.Text = ex.Message;


               return;

           }


           // Set Near Mode by default

           try

           {

               this.sensor.DepthStream.Range = DepthRange.Near;

               checkBoxNearMode.IsChecked = true;

           }

           catch

           {

               // device not near mode capable

           }
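
            // Note (my addition; ranges per the Kinect SDK documentation): near mode
            // is available on Kinect for Windows hardware only and shifts the
            // reliable sensing range to roughly 0.4-3.0m, versus roughly 0.8-4.0m in
            // default mode. Sensors without near mode throw here, hence the empty catch.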


           // Initialize and start the FPS timer

           this.fpsTimer = new DispatcherTimer();

            // FpsTimerTick is the timer callback; with FpsInterval = 10 the timer
            // fires every 10 seconds
            this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval); // hours, minutes, seconds


           this.fpsTimer.Start();


           // Reset the reconstruction

           this.ResetReconstruction();

       }


       /// <summary>

       /// Execute shutdown tasks

       /// </summary>

       /// <param name="sender">object sending the event</param>

       /// <param name="e">event arguments</param>

       private void WindowClosing(object sender, System.ComponentModel.CancelEventArgs e)

       {

           if (null != this.fpsTimer)

           {

               this.fpsTimer.Stop();

           }


           if (null != this.sensor)

           {

               this.sensor.Stop();

           }

       }


       /// <summary>

       /// Update the FPS reading in the status text bar

       /// </summary>

       /// <param name="sender">object sending the event</param>

       /// <param name="e">event arguments</param>

        /// Updates the FPS reading in the status bar once every 10 seconds; e.g. 300
        /// frames processed in one interval gives 30 fps

       private void FpsTimerTick(object sender, EventArgs e)

       {

           // Update the FPS reading

            // FpsInterval = 10 is a constant

           this.statusBarText.Text = string.Format(

               System.Globalization.CultureInfo.InvariantCulture,

               Properties.Resources.Fps,

               (double)this.processedFrameCount / FpsInterval);


            // Reset the frame count; processedFrameCount is incremented in
            // ProcessDepthData each time a frame is processed successfully

           this.processedFrameCount = 0;

       }


       /// <summary>

       /// Event handler for Kinect sensor's DepthFrameReady event

       /// </summary>

       /// <param name="sender">object sending the event</param>

       /// <param name="e">event arguments</param>

        /// Called when a new depth frame is ready; subscribed in WindowLoaded

       private void SensorDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)

       {

           using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())

           {

               if (depthFrame != null && !this.processingFrame)

               {

                    // frameDataLength is the number of pixels per frame, assigned in WindowLoaded

                   var depthPixels = new DepthImagePixel[this.frameDataLength];


                   // Copy the pixel data from the image to a temporary array


                   depthFrame.CopyDepthImagePixelDataTo(depthPixels);


                   this.Dispatcher.BeginInvoke(

                       DispatcherPriority.Background,

                       (Action<DepthImagePixel[]>)((d) => { this.ProcessDepthData(d); }),

                       depthPixels);
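
                    // (My note:) BeginInvoke queues ProcessDepthData on the UI
                    // thread's dispatcher at Background priority, so the processing
                    // runs asynchronously without blocking this event handler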


                   // Mark that one frame will be processed

                    // Mark that a frame is being processed; set back to false when done

                   this.processingFrame = true;

               }

           }

       }


       /// <summary>

       /// Process the depth input

       /// </summary>

       /// <param name="depthPixels">The depth data array to be processed</param>

        /// Processes the input depth data. SensorDepthFrameReady calls this method
        /// and supplies the depth data array (DepthImagePixel[]) as the parameter.


       private void ProcessDepthData(DepthImagePixel[] depthPixels)

       {

            // Debug.Assert reports a message (and shows a dialog in debug builds) if
            // the condition is false; all three fields checked here were assigned in WindowLoaded

           Debug.Assert(null != this.volume, "volume should be initialized");

           Debug.Assert(null != this.shadedSurfaceColorFrame, "shaded surface should be initialized");

           Debug.Assert(null != this.colorBitmap, "color bitmap should be initialized");


            // depthFloatBuffer was allocated in WindowLoaded but holds no data yet;
            // the depth image frame is converted into a depth float frame below

           try

           {

                // Convert the depth image frame to a depth float image frame:
                // Kinect depth in unsigned short format becomes float distance from
                // the camera in meters (parallel to the optical center axis)

                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,                               // input depth array
                    (int)ImageSize.Width,
                    (int)ImageSize.Height,
                    this.depthFloatBuffer,                     // output float depth buffer
                    FusionDepthProcessor.DefaultMinimumDepth,  // 0.35f
                    FusionDepthProcessor.DefaultMaximumDepth,  // 8.0f
                    false);                                    // do not mirror the depth image
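
                // For example (my note, using the clip values above): a raw reading
                // of 1500mm becomes 1.5f in the float frame, while readings outside
                // [0.35, 8.0] meters are returned as 0 (invalid)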


                // ProcessFrame processes each frame (on the GPU here) and combines
                // two steps: 1. AlignDepthFloatToReconstruction: align the float
                // depth data in depthFloatBuffer to the reconstruction volume;
                // 2. IntegrateFrame: fuse the frame into the volume.
                // After this call completes, if a visible output image of the
                // reconstruction is required, the user can call CalculatePointCloud
                // and then ShadePointCloud. The maximum image resolution supported
                // in this function is 640x480.
                // ProcessFrame first calculates the camera pose and then integrates
                // if tracking is successful. Returns true on success; returns false
                // if there is a problem aligning the input depth and a useful
                // transform cannot be calculated.

                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,                            // input float depth data
                    FusionDepthProcessor.DefaultAlignIterationCount,  // maximum align iteration count
                    FusionDepthProcessor.DefaultIntegrationWeight,    // maximum integration weight
                    this.volume.GetCurrentWorldToCameraTransform());  // latest camera pose, updated below on success
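
                // A roughly equivalent explicit form (my sketch based on the Fusion
                // toolkit API; parameter choices simplified) separates the two steps:
                //
                //   float alignmentEnergy;
                //   bool aligned = this.volume.AlignDepthFloatToReconstruction(
                //       this.depthFloatBuffer,
                //       FusionDepthProcessor.DefaultAlignIterationCount,
                //       null,                  // optional delta-from-reference image
                //       out alignmentEnergy,
                //       this.volume.GetCurrentWorldToCameraTransform());
                //   if (aligned)
                //   {
                //       this.volume.IntegrateFrame(
                //           this.depthFloatBuffer,
                //           FusionDepthProcessor.DefaultIntegrationWeight,
                //           this.volume.GetCurrentWorldToCameraTransform());
                //   }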


               // If camera tracking failed, no data integration or raycast for reference

               // point cloud will have taken place, and the internal camera pose

               // will be unchanged.



               if (!trackingSucceeded)

               {

                    // Increment the count of consecutive tracking failures

                   this.trackingErrorCount++;


                   // Show tracking error on status bar

                   this.statusBarText.Text = Properties.Resources.CameraTrackingFailed;

               }

                else // tracking succeeded

               {

                   Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();


                   // Set the camera pose and reset tracking errors

                    this.worldToCameraTransform = calculatedCameraPose; // update the world-to-camera transform
                    // Reset the consecutive error count after each successful track

                   this.trackingErrorCount = 0;

               }


                // If AutoResetReconstructionWhenLost is enabled, reset the
                // reconstruction automatically once MaxTrackingErrors consecutive
                // failures occur

               if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)

               {

                   // Auto Reset due to bad tracking

                   this.statusBarText.Text = Properties.Resources.ResetVolume;


                   // Automatically Clear Volume and reset tracking if tracking fails

                   this.ResetReconstruction();

               }


                // To display the reconstruction in the Image control, the point
                // cloud must be calculated and then shaded.

                // Calculate the point cloud: raycasts the volume, returning the 3D
                // points and normals of the zero-crossing dense surface into
                // pointCloudBuffer

               this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);


               // Shade point cloud and render

                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,        // input point cloud
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame, // output shaded surface frame
                    null);


                // colorPixels was allocated in WindowLoaded; copy the shaded surface pixels into it

               this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);


               // Write the pixel data into our bitmap

                // colorBitmap was created in WindowLoaded and is bound to the Image control

               this.colorBitmap.WritePixels(

                   new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),

                    this.colorPixels, // int[] pixel data

                   this.colorBitmap.PixelWidth * sizeof(int),

                   0);
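
                // Stride note (my arithmetic): Bgr32 uses 4 bytes per pixel, so for
                // a 640-pixel-wide bitmap the stride is 640 * sizeof(int) = 2560
                // bytes per row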


               // The input frame was processed successfully, increase the processed frame count

               ++this.processedFrameCount;

           }

           catch (InvalidOperationException ex)

           {

               this.statusBarText.Text = ex.Message;

           }

           finally

           {

                // The frame is done; processingFrame was set to true in
                // SensorDepthFrameReady and is cleared here so the next frame can be processed

               this.processingFrame = false;

           }

       }


       /// <summary>

       /// Reset the reconstruction to initial value

       /// </summary>

        /// 1. Called once from WindowLoaded for initialization.
        /// 2. Called from ProcessDepthData to reset the reconstruction automatically
        ///    once trackingErrorCount reaches the maximum.

       private void ResetReconstruction()

       {

           // Reset tracking error counter

           this.trackingErrorCount = 0;


           // Set the world-view transform to identity, so the world origin is the initial camera location.

           this.worldToCameraTransform = Matrix4.Identity;


           if (null != this.volume)

           {

               // Translate the reconstruction volume location away from the world origin by an amount equal

               // to the minimum depth threshold. This ensures that some depth signal falls inside the volume.

               // If set false, the default world origin is set to the center of the front face of the

               // volume, which has the effect of locating the volume directly in front of the initial camera

               // position with the +Z axis into the volume along the initial camera direction of view.



                // If translateResetPoseByMinDepthThreshold is true, the volume is
                // shifted forward along +Z away from the camera by the minimum depth
                // threshold (a non-identity world-volume transform), which makes it
                // possible to capture very small reconstruction volumes

               if (this.translateResetPoseByMinDepthThreshold)

               {

                   Matrix4 worldToVolumeTransform = this.defaultWorldToVolumeTransform;


                   // Translate the volume in the Z axis by the minDepthThreshold distance

                   float minDist = (this.minDepthClip < this.maxDepthClip) ? this.minDepthClip : this.maxDepthClip;

                   worldToVolumeTransform.M43 -= minDist * VoxelsPerMeter;
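
                    // (My note:) the world-to-volume transform is expressed in voxel
                    // units, hence the multiplication by VoxelsPerMeter: e.g.
                    // 0.35m * 256vpm ≈ 90 voxels along Z (M43 holds the Z translation)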


                   this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);

               }

               else

               {

                   this.volume.ResetReconstruction(this.worldToCameraTransform);

               }

           }


           if (null != this.fpsTimer)

           {

               // Reset the processed frame count and reset the FPS timer

               this.fpsTimer.Stop();


               this.processedFrameCount = 0;

               this.fpsTimer.Start();

           }

       }


       /// <summary>

       /// Handles the user clicking on the reset reconstruction button

       /// </summary>

       /// <param name="sender">object sending the event</param>

       /// <param name="e">event arguments</param>

       private void ButtonResetReconstructionClick(object sender, RoutedEventArgs e)

       {

           if (null == this.sensor)

           {

               this.statusBarText.Text = Properties.Resources.ConnectDeviceFirst;

               return;

           }


           // reset the reconstruction and update the status text

           this.ResetReconstruction();

           this.statusBarText.Text = Properties.Resources.ResetReconstruction;

       }


       /// <summary>

       /// Handles the checking or un-checking of the near mode combo box

       /// </summary>

       /// <param name="sender">object sending the event</param>

       /// <param name="e">event arguments</param>

       private void CheckBoxNearModeChanged(object sender, RoutedEventArgs e)

       {

           if (this.sensor != null)

           {

               // will not function on non-Kinect for Windows devices

               try

               {

                   if (this.checkBoxNearMode.IsChecked.GetValueOrDefault())

                   {

                       this.sensor.DepthStream.Range = DepthRange.Near;

                   }

                   else

                   {

                       this.sensor.DepthStream.Range = DepthRange.Default;

                   }

               }

               catch (InvalidOperationException)

               {

               }

           }

       }

   }

}

