Motion JPEG (MJPEG, Motion Joint Photographic Experts Group, FourCC: MJPG) is a video compression format in which each frame is encoded as an independent JPEG image. VideoToolbox is usually shown decoding inter-frame compressed formats such as H.264; here I describe how to use VideoToolbox to decode MJPEG-encoded video instead, using FFmpeg to read an MJPEG-encoded network video stream.
1. First, create a VTDecompressionSessionRef
Create the video decoding session from the stream's format information.
// Lazily create the decompression session the first time we have format info.
if (videoFormatDescr == NULL) {
    // Frame dimensions come from FFmpeg's codec context.
    int videoWidth = pCodecCtx->width;
    int videoHeight = pCodecCtx->height;
    // Build the format description (width, height, codec). MJPEG frames are
    // plain JPEG images, so the matching codec type is kCMVideoCodecType_JPEG.
    CMVideoFormatDescriptionCreate(kCFAllocatorDefault, kCMVideoCodecType_JPEG,
                                   videoWidth, videoHeight, NULL, &videoFormatDescr);
    // Register the decode-complete callback; refCon is passed back verbatim,
    // so bridge self across without transferring ownership.
    VTDecompressionOutputCallbackRecord callback;
    callback.decompressionOutputCallback = didDecompress;
    callback.decompressionOutputRefCon = (__bridge void *)self;
    // Request decoded frames as 32-bit BGRA pixel buffers.
    NSDictionary *destinationImageBufferAttributes = @{
        (id)kCVPixelBufferOpenGLESCompatibilityKey : @NO,
        (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)
    };
    // Create the session itself.
    status = VTDecompressionSessionCreate(kCFAllocatorDefault, videoFormatDescr, NULL,
                                          (__bridge CFDictionaryRef)destinationImageBufferAttributes,
                                          &callback, &session);
    if (status != noErr) {
        NSLog(@"Init decoder session failed status=%d", (int)status);
    }
}
2. Start decoding
Create a CMBlockBuffer; here we fill it directly with the data already held in the AVPacket.
// Wrap the AVPacket payload in a CMBlockBuffer without copying:
// kCFAllocatorNull as blockAllocator means we do not own packet.data.
CMBlockBufferRef videoBlock = NULL;
status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, packet.data, packet.size,
                                            kCFAllocatorNull, NULL, 0, packet.size, 0,
                                            &videoBlock);
if (status != noErr) {
    NSLog(@"CMBlockBufferRef failed status=%d", (int)status);
}
// Wrap the block buffer in a CMSampleBuffer carrying the format description
// (one sample whose size is the whole packet).
CMSampleBufferRef sampleBuffer = NULL;
const size_t sampleSizeArray[] = {packet.size};
status = CMSampleBufferCreate(kCFAllocatorDefault, videoBlock, true, NULL, NULL,
                              videoFormatDescr, 1, 0, NULL, 1, sampleSizeArray,
                              &sampleBuffer);
if (status != noErr) {
    NSLog(@"CMSampleBufferRef failed status=%d", (int)status);
}
// Submit the frame; decoding runs asynchronously and the result is delivered
// to the didDecompress callback registered at session creation.
VTDecodeFrameFlags flags = kVTDecodeFrame_EnableAsynchronousDecompression;
VTDecodeInfoFlags flagOut;
status = VTDecompressionSessionDecodeFrame(session, sampleBuffer, flags,
                                           &sampleBuffer, &flagOut);
if (status != noErr) {
    NSLog(@"Decode failed status=%d", (int)status);
}
// Release the CF objects we created above.
CFRelease(videoBlock);
CFRelease(sampleBuffer);
3. The callback receives the decoded frame data
// Decompression output callback: invoked (possibly on a VideoToolbox internal
// queue) once per submitted frame with the decoded CVImageBuffer.
// decompressionOutputRefCon is the refCon set at session creation (self here);
// sourceFrameRefCon is the per-frame refCon passed to DecodeFrame.
void didDecompress(void *decompressionOutputRefCon, void *sourceFrameRefCon,
                   OSStatus status, VTDecodeInfoFlags infoFlags,
                   CVImageBufferRef imageBuffer, CMTime presentationTimeStamp,
                   CMTime presentationDuration)
{
    // Bail out on decode failure or a NULL image buffer (e.g. dropped frame).
    if (status != noErr || !imageBuffer) {
        NSLog(@"Error decompressing frame at time: %.3f error: %d infoFlags: %u",
              (float)presentationTimeStamp.value / presentationTimeStamp.timescale,
              (int)status, (unsigned int)infoFlags);
        return;
    }
    // imageBuffer now holds a decoded 32BGRA pixel buffer (per the session's
    // destination attributes) — consume it here (render, convert, etc.).
}