The complete implementation is as follows:
#import "UIImage+OpenCV.h"
#import "MyViewController.h"
#import <opencv2/opencv.hpp>
// Aperture value to use for the Canny edge detection
const int kCannyAperture = 7;

@interface MyViewController ()
- (void)processFrame;
@end

@implementation MyViewController

@synthesize imageView = _imageView;
@synthesize imageView1 = _imageView1;

- (void)viewDidLoad
{
    [super viewDidLoad];
    // [self TakeColorFromImageHSV];

    // Load the source image and run skin detection on it.
    UIImage *mImage = [UIImage imageNamed:@"counterfeit label 007.jpg"];
    IplImage *srcIpl = [self convertToIplImage:mImage];
    IplImage *dscIpl = cvCreateImage(cvGetSize(srcIpl), srcIpl->depth, 1);
    [self SkinDetect:srcIpl withParam:dscIpl];
    IplImage *dscIplNew = cvCreateImage(cvGetSize(srcIpl), IPL_DEPTH_8U, 3);
    cvCvtColor(dscIpl, dscIplNew, CV_GRAY2BGR);
    self.imageView.image = mImage;

    // Load the template image and process it the same way.
    UIImage *mImage1 = [UIImage imageNamed:@"temple005.jpg"];
    self.imageView1.image = mImage1;
    IplImage *srcIpl1 = [self convertToIplImage:mImage1];
    IplImage *dscIpl1 = cvCreateImage(cvGetSize(srcIpl1), srcIpl1->depth, 1);
    [self SkinDetect:srcIpl1 withParam:dscIpl1];
    IplImage *dscIplNew1 = cvCreateImage(cvGetSize(srcIpl1), IPL_DEPTH_8U, 3);
    cvCvtColor(dscIpl1, dscIplNew1, CV_GRAY2BGR);

    // Template matching: slide the template over the source image.
    IplImage *src = srcIpl;
    IplImage *srcResult = srcIpl; // used to display
    IplImage *templat = srcIpl1;
    IplImage *result;
    int srcW, srcH, templatW, templatH, resultH, resultW;
    srcW = src->width;
    srcH = src->height;
    templatW = templat->width;
    templatH = templat->height;
    // The result plane is (W - w + 1) x (H - h + 1), one 32-bit float channel.
    resultW = srcW - templatW + 1;
    resultH = srcH - templatH + 1;
    result = cvCreateImage(cvSize(resultW, resultH), IPL_DEPTH_32F, 1);

    cvMatchTemplate(src, templat, result, CV_TM_SQDIFF);

    // With CV_TM_SQDIFF the minimum of the result marks the best match.
    double minValue, maxValue;
    CvPoint minLoc, maxLoc;
    cvMinMaxLoc(result, &minValue, &maxValue, &minLoc, &maxLoc);
    cvRectangle(srcResult, minLoc,
                cvPoint(minLoc.x + templatW, minLoc.y + templatH),
                cvScalar(0, 0, 255), 1);
    self.imageView1.image = [self convertToUIImage:srcResult];
}

// Convert the UIImage type to the IplImage type
- (IplImage *)convertToIplImage:(UIImage *)image
{
    CGImageRef imageRef = image.CGImage;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Draw the UIImage into an RGBA IplImage buffer.
    IplImage *iplImage = cvCreateImage(cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4);
    CGContextRef contextRef = CGBitmapContextCreate(iplImage->imageData,
                                                    iplImage->width, iplImage->height,
                                                    iplImage->depth, iplImage->widthStep,
                                                    colorSpace,
                                                    kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    // Drop the alpha channel and convert to OpenCV's BGR order.
    IplImage *ret = cvCreateImage(cvGetSize(iplImage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplImage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplImage);
    return ret;
}

// Convert the IplImage type to the UIImage type
- (UIImage *)convertToUIImage:(IplImage *)image
{
    // OpenCV stores pixels as BGR; convert in place to RGB for CoreGraphics.
    cvCvtColor(image, image, CV_BGR2RGB);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    NSData *data = [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(image->width, image->height,
                                        image->depth, image->depth * image->nChannels,
                                        image->widthStep, colorSpace,
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *ret = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return ret;
}

- (void)viewDidUnload
{
    [super viewDidUnload];
    self.imageView = nil;
    self.imageView1 = nil;
    delete _videoCapture;
    _videoCapture = nil;
}

@end
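With CV_TM_SQDIFF the smallest value in the result image marks the best match, which is why the code above draws the rectangle at minLoc rather than maxLoc. For reuse outside viewDidLoad, the matching step can be factored into a standalone method. The sketch below is illustrative only: the method name findTemplate:in: is invented here, and it switches to CV_TM_CCOEFF_NORMED, where the maximum marks the best match.

// Hypothetical helper (not in the original code): returns the best-match
// rectangle for `templat` inside `src`, both 8-bit BGR IplImages as produced
// by convertToIplImage: above.
- (CGRect)findTemplate:(IplImage *)templat in:(IplImage *)src
{
    // The result plane is (W - w + 1) x (H - h + 1), one 32-bit float channel.
    int resultW = src->width - templat->width + 1;
    int resultH = src->height - templat->height + 1;
    IplImage *result = cvCreateImage(cvSize(resultW, resultH), IPL_DEPTH_32F, 1);

    // CV_TM_CCOEFF_NORMED scores in [-1, 1]; the maximum is the best match.
    cvMatchTemplate(src, templat, result, CV_TM_CCOEFF_NORMED);

    double minValue, maxValue;
    CvPoint minLoc, maxLoc;
    cvMinMaxLoc(result, &minValue, &maxValue, &minLoc, &maxLoc, NULL);
    cvReleaseImage(&result);

    return CGRectMake(maxLoc.x, maxLoc.y, templat->width, templat->height);
}

Because the normalized score is bounded, maxValue can also be thresholded (for example, accepting a match only when it exceeds 0.9) to reject source images that do not contain the template at all.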