When decoding video with FFmpeg, you generally end up with an AVFrame, which is then converted to RGB or YUV. The conversion relationship between them is often misunderstood, but it is actually very simple:
AVFrame to RGB:
AVFrame ---> RGB
data[0] ---- the RGB pixel data
linesize[0] ---- width * pixel_size for RGB
AVFrame to YUV:
AVFrame ---> YUV
data[0] ---- Y plane
data[1] ---- U plane
data[2] ---- V plane
linesize[0] ---- stride (length) of a Y row
linesize[1] and linesize[2] (linesize[0] / 2) ---- stride (length) of a U or V row
Code:
/// Converts the decoded AVFrame held in the pFrame ivar (assumed YUV420P)
/// into a UIImage via libswscale.
/// @param width  Frame width in pixels (normally pCodecCtx->width).
/// @param height Frame height in pixels (normally pCodecCtx->height).
/// @return A UIImage backed by packed RGB24 pixels, or nil on failure.
- (UIImage *)convertFrameToRGB:(float)width height:(float)height {
    /* float width  = pCodecCtx->width;
       float height = pCodecCtx->height; */
    if (pFrame->data[0]) {
        // Convert YUV420P -> RGB24. RGB24 (3 bytes/pixel) matches the
        // 24 bits-per-pixel CGImage created below; the original allocated
        // RGBA (4 bytes/pixel) while still declaring 24 bpp to CGImageCreate,
        // which produces a skewed/garbled image.
        struct SwsContext *scxt = sws_getContext(width, height, PIX_FMT_YUV420P,
                                                 width, height, PIX_FMT_RGB24,
                                                 SWS_POINT, NULL, NULL, NULL);
        if (scxt == NULL) {
            return nil;
        }
        avpicture_alloc(&picture, PIX_FMT_RGB24, width, height);
        sws_scale(scxt, (const uint8_t **)pFrame->data, pFrame->linesize,
                  0, height, picture.data, picture.linesize);
        sws_freeContext(scxt);  // the original leaked the context

        size_t bytesPerRow = picture.linesize[0];
        // Copy the pixel bytes so the CGImage remains valid after the
        // AVPicture buffer is released. The original used "no copy" with
        // kCFAllocatorNull and never freed the picture — a memory leak per
        // frame and a potential dangling pointer.
        CFDataRef data = CFDataCreate(kCFAllocatorDefault,
                                      picture.data[0],
                                      bytesPerRow * height);
        avpicture_free(&picture);

        CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;  // no alpha, RGB order
        CGImageRef cgImage = CGImageCreate(width, height,
                                           8,           // bits per component
                                           24,          // bits per pixel (RGB24)
                                           bytesPerRow,
                                           colorSpace, bitmapInfo, provider,
                                           NULL, NO, kCGRenderingIntentDefault);
        CGColorSpaceRelease(colorSpace);
        UIImage *image = [UIImage imageWithCGImage:cgImage];
        CGImageRelease(cgImage);
        CGDataProviderRelease(provider);
        CFRelease(data);
        return image;
    }
    return nil;
}
/// Copies one plane of frame data into a tightly packed NSData buffer,
/// stripping any per-row padding FFmpeg added (linesize may exceed width).
/// @param data     Pointer to the first byte of the plane (e.g. pFrame->data[0]).
/// @param linesize The plane's row stride in bytes (e.g. pFrame->linesize[0]).
/// @param width    Number of meaningful bytes per row.
/// @param height   Number of rows in the plane.
/// @return The packed plane bytes (width * height), or empty data on bad input.
- (NSData *)frameDataToYUVData:(UInt8 *)data size:(int)linesize width:(int)width height:(int)height {
    // Guard: the original dereferenced a possibly-NULL plane, and a negative
    // height promoted to NSUInteger in the loop condition would iterate
    // (effectively) forever.
    if (data == NULL || width <= 0 || height <= 0) {
        return [NSData data];
    }
    if (linesize < width) {
        width = linesize;  // never read past the stride
    }
    NSMutableData *tempData = [NSMutableData dataWithLength:width * height];
    Byte *dstData = tempData.mutableBytes;
    for (NSUInteger i = 0; i < (NSUInteger)height; ++i) {
        memcpy(dstData, data, width);
        dstData += width;   // destination is packed
        data += linesize;   // source advances by stride
    }
    return tempData;
}
/// Extracts the Y, U and V planes of the pFrame ivar into packed NSData buffers.
/// For 4:2:0 content the chroma planes are half the luma width and height.
/// NOTE(review): the original discarded all three buffers, so this method has
/// no visible effect — wire the results to properties or a callback as needed.
- (void)convertFrameToYUV:(float)width height:(float)height {
    NSData *luma = [self frameDataToYUVData:pFrame->data[0]
                                       size:pFrame->linesize[0]
                                      width:width
                                     height:height];
    // U and V planes: half width, half height for YUV420P.
    NSData *chroma1 = [self frameDataToYUVData:pFrame->data[1]
                                          size:pFrame->linesize[1]
                                         width:width / 2
                                        height:height / 2];
    NSData *chroma2 = [self frameDataToYUVData:pFrame->data[2]
                                          size:pFrame->linesize[2]
                                         width:width / 2
                                        height:height / 2];
    (void)luma; (void)chroma1; (void)chroma2;  // silence unused-variable warnings
}