Kinect SDK 1.5 Face Tracking — a super-simplified version, with display done via OpenCV

Source: Internet
Author: User
Tags microsoft website

[2012-10-10] The multithreading in this article is not very robust. Prefer the kernel-event approach from the Microsoft sample, which waits on the stream events with WaitForMultipleObjects.

In the past two days, I have been reading the new SDK 1.5 code. To solve face recognition problems with SDK 1.5, I had to read its face tracking sample code, and in the end I managed to piece together how it fits. As for how to use it and further details, you should read the Microsoft documentation yourself; click the link to open the Microsoft website.

Below is the code

VS2010 + opencv2.3.1 + Kinect SDK1.5

Driver or something. You can install it on your own. If you have a basic knowledge, learn the basics first. The Code is based on the SDK1.5 I updated earlier. If you do not understand it, read the previous article ~

// Win32_KinectFaceTracking.cpp: defines the entry point of the console application. // # Include "stdafx. h "// ------------------------------------------------------ # define _ WINDOWS # include <FaceTrackLib. h> HRESULT coloring (IFTImage * pColorImg, IFTModel * pModel, incluconst * pCameraConfig, FLOAT const * pSUCoef, FLOAT zoomFactor, POINT viewOffset, IFTResult * pAAMRlt, UINT32 color ); // ------------------------------------------------------ # include <vector> # include <deque> # Include <iomanip> # include <stdexcept> # include <string> # include <iostream> # include "opencv2 \ opencv. hpp "using namespace std; using namespace cv; # include <windows. h> # include <mmsystem. h> # include <assert. h> # include <strsafe. h> # include "NuiApi. h "# define COLOR_WIDTH640 # define color_hight#define DEPTH_WIDTH320 # define DEPTH_HIGHT240 # define SKELETON_WIDTH 640 # define SKELETON_HIGHT 480 # define CHANNEL3BYTE Buf [DEPTH_WIDTH * DEPTH_HIGHT * CHANNEL]; int drawColor (HANDLE h); int drawDepth (HANDLE h); int drawSkeleton (); // --- face tracking bytes BYTE * colorBuffer, * depthBuffer; IFTImage * pColorFrame; IFTImage * pDepthFrame; FT_VECTOR3D m_hint3D [2]; // HANDLE h1; HANDLE h3; HANDLE h5; HANDLE h2; HANDLE h4; D Word winapi VideoFunc (LPVOID pParam) {// cout <"video start! "<Endl; while (TRUE) {if (WaitForSingleObject (h1, INFINITE) = WAIT_OBJECT_0) {drawColor (h2);} // Sleep (10 ); // cout <"video" <endl ;}} dword winapi DepthFunc (LPVOID pParam) {// cout <"depth start! "<Endl; while (TRUE) {if (WaitForSingleObject (h3, INFINITE) = WAIT_OBJECT_0) {drawDepth (h4);} // Sleep (10 ); // cout <"depth" <endl ;}} dword winapi SkeletonFunc (LPVOID pParam) {// HANDLE h = (HANDLE) pParam; // cout <"skeleton start! "<Endl; while (TRUE) {if (WaitForSingleObject (h3, INFINITE) = WAIT_OBJECT_0) drawSkeleton (); // Sleep (10 ); // cout <"skeleton" <endl ;}} dword winapi TrackFace (LPVOID pParam) {cout <"track face start! 
"<Endl; while (TRUE) {// do somethingSleep (16); cout <" track face "<endl ;}// random int drawColor (HANDLE h) {const NUI_IMAGE_FRAME * pImageFrame = NULL; HRESULT hr = NuiImageStreamGetNextFrame (h, 0, & pImageFrame); if (FAILED (hr )) {cout <"Get Color Image Frame Failed" <endl; return-1;} INuiFrameTexture * pTexture = pImageFrame-> PFrameTexture; NUI_LOCKED_RECT LockedRect; pTexture-> LockRect (0, & LockedRect, NULL, 0); if (LockedRect. Pitch! = 0) {BYTE * pBuffer = (BYTE *) LockedRect. pBits; colorBuffer = pBuffer; memcpy (pColorFrame-> GetBuffer (), PBYTE (LockedRect. pBits), min (pColorFrame-> GetBufferSize (), UINT (pTexture-> BufferLen (); Mat temp (COLOR_HIGHT, COLOR_WIDTH, CV_8UC4, pBuffer ); imshow ("B", temp); waitKey (1);} NuiImageStreamReleaseFrame (h, pImageFrame); return 0;} int drawDepth (HANDLE h) {const NUI_IMAGE_FRAME * pImageFrame = NULL; HRESULT hr = NuiImageStreamGetNextFrame (h, 0, & pImageFrame); if (FAILED (hr) {cout <"Get Depth Image Frame Failed" <endl; return-1 ;} INuiFrameTexture * pTexture = pImageFrame-> pFrameTexture; NUI_LOCKED_RECT LockedRect; pTexture-> LockRect (0, & LockedRect, NULL, 0); if (LockedRect. pitch! = 0) {USHORT * pBuff = (USHORT *) LockedRect. pBits; // depthBuffer = pBuff; memcpy (pDepthFrame-> GetBuffer (), PBYTE (LockedRect. 
pBits), min (pDepthFrame-> GetBufferSize (), UINT (pTexture-> BufferLen (); for (int I = 0; I <DEPTH_WIDTH * DEPTH_HIGHT; I ++) {BYTE index = pBuff [I] & 0x07; USHORT realDepth = (pBuff [I] & 0xFFF8)> 3; BYTE scale = 255-(BYTE) (256 * realDepth/0x0fff); buf [CHANNEL * I] = buf [CHANNEL * I + 1] = buf [CHANNEL * I + 2] = 0; switch (index) {case 0: buf [CHANNEL * I] = scale/2; buf [CHANNEL * I + 1] = scale/2; buf [CHANNEL * I + 2] = scale/2; break; case 1: buf [CHANNEL * I] = scale; break; case 2: buf [CHANNEL * I + 1] = scale; break; case 3: buf [CHANNEL * I + 2] = scale; break; case 4: buf [CHANNEL * I] = scale; buf [CHANNEL * I + 1] = scale; break; case 5: buf [CHANNEL * I] = scale; buf [CHANNEL * I + 2] = scale; break; case 6: buf [CHANNEL * I + 1] = scale; buf [CHANNEL * I + 2] = scale; break; case 7: buf [CHANNEL * I] = 255-scale/2; buf [CHANNEL * I + 1] = 255-scale/2; buf [CHANNEL * I + 2] = 255-scale/2; break;} Mat B (DEPTH_HIGHT, DEPTH_WIDTH, CV_8UC3, buf); imshow ("depth", B ); waitKey (1);} NuiImageStreamReleaseFrame (h, pImageFrame); return 0;} int drawSkeleton () {NUI_SKELETON_FRAME SkeletonFrame; cv: Point pt [20]; mat skeletonMat = Mat (SKELETON_HIGHT, SKELETON_WIDTH, CV_8UC3, Scalar (0, 0); HRESULT hr = NuiSkeletonGetNextFrame (0, & SkeletonFrame); if (FAILED (hr) {cout <"Get Skeleton Image Frame Failed" <endl; return-1;} bool bFoundSkeleton = false; for (int I = 0; I <NUI_SKELETON_COUNT; I ++) {if (SkeletonFrame. skeletonData [I]. eTrackingState = NUI_SKELETON_TRACKED) {bFoundSkeleton = true ;}// Has skeletons! If (bFoundSkeleton) {NuiTransformSmooth (& SkeletonFrame, NULL); for (int I = 0; I <NUI_SKELETON_COUNT; I ++) {if (SkeletonFrame. skeletonData [I]. eTrackingState = NUI_SKELETON_TRACKED) {for (int j = 0; j <NUI_SKELETON_POSITION_COUNT; j ++) {float fx, fy; trim (SkeletonFrame. skeletonData [I]. skeletonPositions [j], & fx, & fy); pt [j]. x = (int) (fx * SKELETON_WIDTH)/320; pt [j]. 
y = (Int) (fy * SKELETON_HIGHT)/240; circle (skeletonMat, pt [j], 5, CV_RGB (, 0);} cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_HEAD], pt [kernel], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [kernel], pt [NUI_SKELETON_POSITION_SPINE], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_SPINE], pt [NUI_SKELETON_POSITION_HIP_CENTER], CV_RGB (0, 0,255); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_HAND_RIGHT], pt [kernel], CV_RGB (, 0); cv: line (skeletonMat, mat, pt [partition], pt [partition], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_ELBOW_RIGHT], pt [partition], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_S HOULDER_RIGHT], pt [kernel], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_SHOULDER_CENTER], pt [kernel], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [partition], pt [NUI_SKELETON_POSITION_ELBOW_LEFT], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_ELBOW_LEFT], pt [NUI_SKELETON_POSITION_WRIST_LE FT], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [partition], pt [NUI_SKELETON_POSITION_HAND_LEFT], CV_RGB (0,255, 0); cv :: line (skeletonMat, pt [NUI_SKELETON_POSITION_HIP_CENTER], pt [partition], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [partition], pt [partition], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION _ KNEE_RIGHT], pt [kernel], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [kernel], pt [NUI_SKELETON_POSITION_FOOT_RIGHT], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_HIP_CENTER], pt [centers], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [NUI_SKELETON_POSITION_HIP_LEFT], pt [NUI_SKELETON_POSITION_KNEE_LEFT], CV_RGB (0,255, 0); c V: line (skeletonMat, pt [partition], pt [NUI_SKELETON_POSITION_ANKLE_LEFT], CV_RGB (0,255, 0); cv: line (skeletonMat, pt [partition], pt [NUI_SKELETON_POSITION_FOOT_LEFT], CV_RGB (0,255, 0); m_hint3D [0]. x = SkeletonFrame. skeletonData [I]. 
skeletonPositions [NUI_SKELETON_POSITION_SHOULDER_CENTER]. x; m_hint3D [0]. y = SkeletonFrame. skeletonData [I]. skeletonPositions [NUI_SKELETON_P OSITION_SHOULDER_CENTER]. y; m_hint3D [0]. z = SkeletonFrame. skeletonData [I]. skeletonPositions [NUI_SKELETON_POSITION_SHOULDER_CENTER]. z; m_hint3D [1]. x = SkeletonFrame. skeletonData [I]. skeletonPositions [NUI_SKELETON_POSITION_HEAD]. x; m_hint3D [1]. y = SkeletonFrame. skeletonData [I]. skeletonPositions [NUI_SKELETON_POSITION_HEAD]. y; m_hint3D [1]. z = SkeletonFrame. skeletonData [I]. skeletonPositions [NUI_SKELETON_POSITION_HE AD]. z ;}} imshow ("skeleton", skeletonMat); waitKey (1); return 0 ;}int main (int argc, char * argv []) {// initialize NUIHRESULT hr = NuiInitialize (NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_SKELETON); if (hr! = S_ OK) {cout <"NuiInitialize failed" <endl; return hr;} // enable the color map information channel h1 = CreateEvent (NULL, TRUE, FALSE, NULL); h2 = NULL; hr = NuiImageStreamOpen (NUI_IMAGE_TYPE_COLOR, nui_image_resolution_640xlarge, 0, 2, h1, & h2); if (FAILED (hr )) {cout <"cocould not open image stream video" <endl; return hr;} h3 = CreateEvent (NULL, TRUE, FALSE, NULL); h4 = NULL; hr = NuiImageStreamOpen (NUI_IMAGE_TYPE_DEPTH_AND _ PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240, 0, 2, h3, & h4); if (FAILED (hr) {cout <"cocould not open depth stream video" <endl; return hr;} h5 = CreateEvent (NULL, TRUE, FALSE, NULL); hr = NuiSkeletonTrackingEnable (h5, 0); if (FAILED (hr )) {cout <"cocould not open skeleton stream video" <endl; return hr;} HANDLE hThread1, hThread2, hThread3; hThread1 = CreateThread (NULL, 0, VideoFunc, h2, 0, NULL); hThread2 = C ReateThread (NULL, 0, DepthFunc, h4, 0, NULL); hThread3 = CreateThread (NULL, 0, SkeletonFunc, NULL, 0, NULL ); m_hint3D [0] = FT_VECTOR3D (0, 0, 0); m_hint3D [1] = FT_VECTOR3D (0, 0, 0); pColorFrame = FTCreateImage (); pDepthFrame = FTCreateImage (); IFTFaceTracker * pFT = FTCreateFaceTracker (); if (! 
PFT) {return-1; // Handle errors} FT_CAMERA_CONFIG myCameraConfig = {640,480, rows}; // width, height, focal lengthFT_CAMERA_CONFIG depthConfig; depthConfig. focalLength = NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS; depthConfig. width = 320; depthConfig. height = 240; // it seems that you must fill it out here, and you must fill it in correctly !! Hr = pFT-> Initialize (& myCameraConfig, & depthConfig, NULL, NULL); if (FAILED (hr) {return-2; // Handle errors} // Create IFTResult to hold a face tracking resultIFTResult * pFTResult = NULL; hr = pFT-> CreateFTResult (& pFTResult); if (FAILED (hr )) {return-11;} // prepare Image and SensorData for 640x480 RGB imagesif (! PColorFrame) {return-12; // Handle errors} // Attach assumes that the camera code provided by the application // is filling the buffer cameraFrameBuffer // pColorFrame-> Attach (640,480, colorBuffer, latency, 640*3); hr = pColorFrame-> Allocate (640,480, FTIMAGEFORMAT_UINT8_B8G8R8X8); if (FAILED (hr) {return hr ;} hr = pDepthFrame-> Allocate (320,240, FTIMAGEFORMAT_UINT16_D13P3); if (FAILED (Hr) {return hr;} FT_SENSOR_DATA sensorData; sensorData. pVideoFrame = pColorFrame; sensorData. pDepthFrame = pDepthFrame; sensorData. zoomFactor = 1.0f; POINT point; point. x = 0; point. y = 0; sensorData. viewOffset = point; bool isTracked = false; int iFaceTrackTimeCount = 0; // Track a facewhile (true) {// Call your camera method to process IO and fill the camera buffer // cameraObj. processIO (cameraFrameBuffer); // re Place with your methodif (! IsTracked) {hr = pFT-> StartTracking (& sensorData, NULL, m_hint3D, pFTResult); if (SUCCEEDED (hr) & SUCCEEDED (pFTResult-> GetStatus ())) {isTracked = true;} else {// Handle errorsisTracked = false;} else {// Continue tracking. it uses a previusly known face position, // so it is an inexpensive call. 
hr = pFT-> ContinueTracking (& sensorData, m_hint3D, pFTResult); if (FAILED (hr) | FAILED (pFTResult-> GetStatus ())){/ /Handle errorsisTracked = false ;}} if (isTracked) {printf ("tracked !!!!!!!!!!!!!!! \ N "); IFTModel * ftModel; HRESULT hr = pFT-> GetFaceModel (& ftModel); FLOAT * pSU = NULL; UINT numSU; BOOL suConverged; pFT-> GetShapeUnits (NULL, & pSU, & numSU, & suConverged); POINT viewOffset = {0, 0}; hr = VisualizeFaceModel (pColorFrame, ftModel, & myCameraConfig, pSU, 1.0, viewOffset, pFTResult, 0x00FFFF00); if (FAILED (hr) printf ("display FAILED !! \ N "); Mat tempMat (COLOR_HIGHT, COLOR_WIDTH, CV_8UC4, pColorFrame-> GetBuffer (); imshow (" faceTracking ", tempMat); waitKey (1 );} // printf ("% d \ n", pFTResult-> GetStatus (); // Do something with pFTResult. sleep (16); iFaceTrackTimeCount ++; if (iFaceTrackTimeCount> 16*1000) break; // Terminate on some criteria .} // Clean up. pFTResult-> Release (); pColorFrame-> Release (); pFT-> Release (); CloseHandle (hThread1); CloseHandle (hThrea D2); CloseHandle (hThread3); Sleep (60000); NuiShutdown (); return 0;} HRESULT VisualizeFaceModel (IFTImage * pColorImg, IFTModel * pModel, FT_CAMERA_CONFIG const * pCameraConfig, FLOAT const * pSUCoef, FLOAT zoomFactor, POINT viewOffset, IFTResult * pAAMRlt, UINT32 color) {if (! PColorImg |! PModel |! PCameraConfig |! PSUCoef |! 
PAAMRlt) {return E_POINTER;} HRESULT hr = S_ OK; UINT vertexCount = pModel-> GetVertexCount (); Limit * pPts2D = reinterpret_cast <FT_VECTOR2D *> (_ malloca (sizeof (percent) * vertexCount); if (pPts2D) {FLOAT * pAUs; UINT auCount; hr = pAAMRlt-> GetAUCoefficients (& pAUs, & auCount); if (SUCCEEDED (hr )) {FLOAT scale, rotationXYZ [3], translationXYZ [3]; hr = pAAMRlt-> Get3DPose (& scale, rotationXYZ, translationXYZ); I F (SUCCEEDED (hr) {hr = pModel-> GetProjectedShape (pCameraConfig, zoomFactor, viewOffset, pSUCoef, pModel-> GetSUCount (), pAUs, auCount, scale, rotationXYZ, translationXYZ, pPts2D, vertexCount); if (SUCCEEDED (hr) {POINT * p3DMdl = reinterpret_cast <POINT *> (_ malloca (sizeof (POINT) * vertexCount); if (p3DMdl) {for (UINT I = 0; I <vertexCount; ++ I) {p3DMdl [I]. x = LONG (pPts2D [I]. x + 0.5f); p3DMdl [I]. y = LONG (p Pts2D [I]. y + 0.5f);} FT_TRIANGLE * pTriangles; UINT triangleCount; hr = pModel-> GetTriangles (& pTriangles, & triangleCount); if (SUCCEEDED (hr )) {struct EdgeHashTable {UINT32 * pEdges; UINT edgesAlloc; void Insert (int a, int B) {UINT32 v = (min (a, B) <16) | max (a, B, b); UINT32 index = (v + (v <8) * 49157, I; for (I = 0; I <edgesAlloc-1 & pEdges [(index + I) & (edgesAlloc-1)] & v! = PEdges [(index + I) & (edgesAlloc-1)]; ++ I) {} pEdges [(index + I) & (edgesAlloc-1)] = v ;}} eht; eht. edgesAlloc = 1 <UINT (log (2.f * (1 + vertexCount + triangleCount)/log (2.f)); eht. pEdges = reinterpret_cast <UINT32 *> (_ malloca (sizeof (UINT32) * eht. edgesAlloc); if (eht. pEdges) {ZeroMemory (eht. pEdges, sizeof (UINT32) * eht. edgesAlloc); for (UINT I = 0; I <triangleCount; ++ I) {eht. insert (pTriangles [I ]. I, pTriangles [I]. j); eht. insert (pTriangles [I]. j, pTriangles [I]. k); eht. insert (pTriangles [I]. k, pTriangles [I]. i) ;}for (UINT I = 0; I <eht. edgesAlloc; ++ I) {if (eht. pEdges [I]! = 0) {pColorImg-> DrawLine (p3DMdl [eht. pEdges [I]> 16], p3DMdl [eht. pEdges [I] & 0 xFFFF], color, 1) ;}}_ freea (eht. 
pEdges);} // Render the face rect in magentaRECT rectFace; hr = pAAMRlt-> GetFaceRect (& rectFace); if (SUCCEEDED (hr) {POINT leftTop = {rectFace. left, rectFace. top}; POINT rightTop = {rectFace. right-1, rectFace. top}; POINT leftBottom = {rectFace. left, rectFace. bottom-1}; POINT rightBottom = {rectFace. right-1, rectFace. bottom-1}; UINT32 nColor = 0xff00ff; SUCCEEDED (hr = pColorImg-> DrawLine (leftTop, rightTop, nColor, 1 )) & SUCCEEDED (hr = pColorImg-> DrawLine (rightTop, rightBottom, nColor, 1) & Pull (hr = pColorImg-> DrawLine (rightBottom, leftBottom, nColor, 1 )) & SUCCEEDED (hr = pColorImg-> DrawLine (leftBottom, leftTop, nColor, 1) ;}}_ freea (p3DMdl);} else {hr = E_OUTOFMEMORY ;}}}} _ freea (pPts2D);} else {hr = E_OUTOFMEMORY;} return hr ;}

Last Image

Download the VS2010 project source code (no download points required) ~

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.