I recently helped someone with a project that mainly uses the camera for face recognition.
GitHub address: https://github.com/qugang/AVCaptureVideoTemplate
To use the iOS camera you need the AVFoundation framework; I will not introduce the contents of that framework here.
You need to use the AVCaptureSession class to start the camera.
Then, to receive each frame of data the camera transmits, you need to implement the AVCaptureVideoDataOutputSampleBufferDelegate protocol.
First, add the camera-device setup code in viewDidLoad: locate the front camera device, then turn the camera on.
// Camera setup inside viewDidLoad: pick the front-facing camera and start the session.
// NOTE(review): reconstructed from a scraper-mangled paste; identifier casing restored
// to match the Swift 1.x AVFoundation API this article targets.
captureSession.sessionPreset = AVCaptureSessionPresetLow
let devices = AVCaptureDevice.devices()
for device in devices {
    if device.hasMediaType(AVMediaTypeVideo) {
        if device.position == AVCaptureDevicePosition.Front {
            captureDevice = device as? AVCaptureDevice
            if captureDevice != nil {
                println("Capture device found")
                beginSession()
            }
        }
    }
}
beginSession turns on the camera:
/// Wires the camera input and a BGRA video-data output into the capture
/// session, attaches a preview layer to the view, and starts the session.
/// Frames are delivered to `captureOutput` on a private serial queue.
func beginSession() {
    var err: NSError? = nil
    captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
    let output = AVCaptureVideoDataOutput()
    // Serial queue so sample buffers arrive in order, off the main thread.
    let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
    output.setSampleBufferDelegate(self, queue: cameraQueue)
    output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
    captureSession.addOutput(output)
    if err != nil {
        println("error: \(err?.localizedDescription)")
    }
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.videoGravity = "AVLayerVideoGravityResizeAspect"
    previewLayer?.frame = self.view.bounds
    self.view.layer.addSublayer(previewLayer)
    captureSession.startRunning()
}
After the session is started, implement the captureOutput method:
/// Sample-buffer delegate callback: once `isStart` is true, converts the frame
/// to a UIImage, runs CIDetector face detection on it, and on the first hit
/// dismisses the controller and delivers the cropped face via `callBack`.
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    if self.isStart {
        let resultImage = sampleBufferToImage(sampleBuffer)
        let context = CIContext(options: [kCIContextUseSoftwareRenderer: true])
        let detector = CIDetector(ofType: CIDetectorTypeFace, context: context,
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
        let ciImage = CIImage(image: resultImage)
        // Orientation 6 — presumably matches the .Right-rotated image produced
        // by sampleBufferToImage; TODO confirm against the capture orientation.
        let results: NSArray = detector.featuresInImage(ciImage, options: ["CIDetectorImageOrientation": 6])
        for r in results {
            let face = r as! CIFaceFeature
            // Crop the detected face region out of the frame.
            let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds),
                                    scale: 1.0, orientation: .Right)
            NSLog("Face found at (%f,%f) of dimensions %fx%f",
                  face.bounds.origin.x, face.bounds.origin.y,
                  pickUIImager.frame.origin.x, pickUIImager.frame.origin.y)
            dispatch_async(dispatch_get_main_queue()) {
                // Re-check on the main queue so only the first frame wins.
                if self.isStart {
                    self.dismissViewControllerAnimated(true, completion: nil)
                    self.didReceiveMemoryWarning() // stops the capture session
                    self.callBack!(face: faceImage!)
                }
                self.isStart = false
            }
        }
    }
}
CIDetector is used to detect a face in every frame. CIDetector can also detect winks and smiles; see the official API documentation if you want the details.
The code above is the key part: it sets a 2-second delay, and face detection starts after those 2 seconds.
All code:
//
//  ViewController.swift
//  AVSessionTest
//
//  Created by Qugang on 15/7/8.
//  Copyright (c) 2015 Qugang. All rights reserved.
//
// NOTE(review): reconstructed from a scraper-mangled paste. Identifier casing
// was restored to the Swift 1.x API this article targets; several numeric
// layout constants were destroyed in the original text ("$", "- -") and are
// marked with TODOs below — restore them from the linked repository.

import UIKit
import AVFoundation

/// Captures video from the front camera, animates a "scanning" overlay, runs
/// face detection on each frame (starting 2 s after load), and hands the
/// first detected face image back through `callBack`.
class AVCaptureVideoPicController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    /// Invoked on the main queue with the cropped face image.
    var callBack: ((face: UIImage) -> ())?
    let captureSession = AVCaptureSession()
    var captureDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    /// Frame overlay shown over the camera preview.
    var pickUIImager = UIImageView(image: UIImage(named: "pick_bg"))
    /// Scan line animated up and down inside the overlay.
    var line = UIImageView(image: UIImage(named: "line"))
    var timer: NSTimer!
    /// Direction flag for the scan-line animation (true = moving down).
    var upOrDown = true
    /// Face detection runs only after this flips to true (2 s after load).
    var isStart = false

    override func viewDidLoad() {
        super.viewDidLoad()
        captureSession.sessionPreset = AVCaptureSessionPresetLow
        // Locate the front-facing video camera and start the session.
        let devices = AVCaptureDevice.devices()
        for device in devices {
            if device.hasMediaType(AVMediaTypeVideo) {
                if device.position == AVCaptureDevicePosition.Front {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        println("Capture device found")
                        beginSession()
                    }
                }
            }
        }
        // TODO(review): original offsets/sizes were lost in the mangled paste;
        // 100/200 are placeholders — restore the real values from the repo.
        pickUIImager.frame = CGRect(x: self.view.bounds.width / 2 - 100,
                                    y: self.view.bounds.height / 2 - 100,
                                    width: 200, height: 200)
        line.frame = CGRect(x: self.view.bounds.width / 2 - 100,
                            y: self.view.bounds.height / 2 - 100,
                            width: 200, height: 2)
        self.view.addSubview(pickUIImager)
        self.view.addSubview(line)
        // Drive the scan-line animation every 10 ms; enable detection after 2 s.
        timer = NSTimer.scheduledTimerWithTimeInterval(0.01, target: self,
            selector: "animationSate", userInfo: nil, repeats: true)
        NSTimer.scheduledTimerWithTimeInterval(2, target: self,
            selector: "isStartTrue", userInfo: nil, repeats: false)
    }

    func isStartTrue() {
        self.isStart = true
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        captureSession.stopRunning()
    }

    /// Bounces the scan line between the top and bottom of the pick frame.
    func animationSate() {
        if upOrDown {
            // TODO(review): the "+ 200" bound was lost in the mangled paste.
            if line.frame.origin.y >= pickUIImager.frame.origin.y + 200 {
                upOrDown = false
            } else {
                line.frame.origin.y += 2
            }
        } else {
            if line.frame.origin.y <= pickUIImager.frame.origin.y {
                upOrDown = true
            } else {
                line.frame.origin.y -= 2
            }
        }
    }

    /// Wires the camera input and a BGRA video-data output into the session,
    /// attaches the preview layer, and starts capturing.
    func beginSession() {
        var err: NSError? = nil
        captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
        let output = AVCaptureVideoDataOutput()
        // Serial queue: sample buffers arrive in order, off the main thread.
        let cameraQueue = dispatch_queue_create("cameraQueue", DISPATCH_QUEUE_SERIAL)
        output.setSampleBufferDelegate(self, queue: cameraQueue)
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
        captureSession.addOutput(output)
        if err != nil {
            println("error: \(err?.localizedDescription)")
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = "AVLayerVideoGravityResizeAspect"
        previewLayer?.frame = self.view.bounds
        self.view.layer.addSublayer(previewLayer)
        captureSession.startRunning()
    }

    /// Delegate callback: once `isStart` is true, detects faces in the frame
    /// and delivers the first cropped face via `callBack` on the main queue.
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        if self.isStart {
            let resultImage = sampleBufferToImage(sampleBuffer)
            let context = CIContext(options: [kCIContextUseSoftwareRenderer: true])
            let detector = CIDetector(ofType: CIDetectorTypeFace, context: context,
                                      options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
            let ciImage = CIImage(image: resultImage)
            // Orientation 6 — presumably matches the .Right-rotated image from
            // sampleBufferToImage; TODO confirm against capture orientation.
            let results: NSArray = detector.featuresInImage(ciImage, options: ["CIDetectorImageOrientation": 6])
            for r in results {
                let face = r as! CIFaceFeature
                let faceImage = UIImage(CGImage: context.createCGImage(ciImage, fromRect: face.bounds),
                                        scale: 1.0, orientation: .Right)
                NSLog("Face found at (%f,%f) of dimensions %fx%f",
                      face.bounds.origin.x, face.bounds.origin.y,
                      pickUIImager.frame.origin.x, pickUIImager.frame.origin.y)
                dispatch_async(dispatch_get_main_queue()) {
                    // Re-check on the main queue so only the first frame wins.
                    if self.isStart {
                        self.dismissViewControllerAnimated(true, completion: nil)
                        self.didReceiveMemoryWarning() // stops the capture session
                        self.callBack!(face: faceImage!)
                    }
                    self.isStart = false
                }
            }
        }
    }

    /// Converts a BGRA sample buffer into a right-rotated UIImage.
    private func sampleBufferToImage(sampleBuffer: CMSampleBuffer!) -> UIImage {
        let imageBuffer: CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)
        CVPixelBufferLockBaseAddress(imageBuffer, 0)
        let baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)
        let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
        let bitsPerComponent = 8
        var bitmapInfo = CGBitmapInfo((CGBitmapInfo.ByteOrder32Little.rawValue | CGImageAlphaInfo.PremultipliedFirst.rawValue) as UInt32)
        let newContext = CGBitmapContextCreate(baseAddress, width, height, bitsPerComponent,
                                               bytesPerRow, colorSpace, bitmapInfo) as CGContextRef
        let imageRef: CGImageRef = CGBitmapContextCreateImage(newContext)
        // FIX: the original never unlocked the pixel buffer after locking it.
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0)
        let resultImage = UIImage(CGImage: imageRef, scale: 1.0, orientation: UIImageOrientation.Right)!
        return resultImage
    }

    /// Redraws `imageObj` at `sizeChange` and returns the scaled image.
    func imageResize(imageObj: UIImage, sizeChange: CGSize) -> UIImage {
        let hasAlpha = false
        let scale: CGFloat = 0.0 // 0.0 = use the device screen scale
        UIGraphicsBeginImageContextWithOptions(sizeChange, !hasAlpha, scale)
        imageObj.drawInRect(CGRect(origin: CGPointZero, size: sizeChange))
        let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
        // FIX: the original leaked the image context (no End call).
        UIGraphicsEndImageContext()
        return scaledImage
    }
}
iOS: reading each frame from the camera and performing face recognition (Swift)