Since the company bought a device whose vendor did not provide a decoder, I had to implement the decoding myself as a last resort. After searching online, I found that the most convenient way to decode H.264 is the ffmpeg family of libraries; the original equipment also uses these libraries for its decoding.
Using H264 decoding is divided into several steps:
Note that you must add extern "C" when adding the header file, otherwise an error will occur.
// The FFmpeg headers are plain C.  When this file is compiled as C++ the
// declarations must be wrapped in extern "C" so the compiler uses C linkage
// (without it every avcodec_*/sws_* symbol fails to resolve at link time).
// NOTE(review): these are the old flat include paths; modern FFmpeg installs
// use <libavcodec/avcodec.h> etc. — confirm against the build's -I flags.
extern "C"
{
#include <avcodec.h>
#include <avformat.h>
#include <avutil.h>
#include <swscale.h>
};
Several global variables are declared here
// Decoder state shared by H264_Init / H264_2_RGB / H264_Release.  Kept as
// globals so the decoder context persists between calls — the context carries
// the reference frames needed to decode non-key (P/B) frames.
AVCodec *pCodec = NULL;              // H.264 decoder found in H264_Init()
AVCodecContext *pCodecCtx = NULL;    // decoder context (width/height/pix_fmt valid after a decode)
SwsContext *img_convert_ctx = NULL;  // cached YUV -> RGB conversion context
AVFrame *pFrame = NULL;              // decoded frame (YUV420P for H.264)
AVFrame *pFrameRGB = NULL;           // wrapper around the temporary RGB buffer
1. Initialization
/*
 * Initialise the FFmpeg H.264 decoder and the two frame structures.
 * Must be called once before H264_2_RGB(); pair with H264_Release().
 * Returns 0 on success, -1 on failure (no partial state is left allocated,
 * so H264_Release() after a failed init is also safe).
 */
int H264_Init(void)
{
    /* must be called before using avcodec lib */
    avcodec_init();
    /* register all the codecs */
    avcodec_register_all();

    /* find the h264 video decoder */
    pCodec = avcodec_find_decoder(CODEC_ID_H264);
    if (!pCodec) {
        fprintf(stderr, "codec not found\n");
        /* BUG FIX: the original only printed and fell through, then passed
         * the NULL codec to avcodec_open() and still returned 0. */
        return -1;
    }

    pCodecCtx = avcodec_alloc_context();
    if (pCodecCtx == NULL)
        return -1;

    /* open the codec */
    if (avcodec_open(pCodecCtx, pCodec) < 0) {
        fprintf(stderr, "could not open codec\n");
        /* BUG FIX: original returned 0 even when the codec failed to open. */
        av_free(pCodecCtx);
        pCodecCtx = NULL;
        return -1;
    }

    /* Allocate the frame that receives the decoded YUV picture. */
    pFrame = avcodec_alloc_frame();
    if (pFrame == NULL) {
        avcodec_close(pCodecCtx);
        av_free(pCodecCtx);
        pCodecCtx = NULL;
        return -1;
    }

    /* Allocate the AVFrame that will wrap the temporary RGB buffer. */
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL) {
        av_free(pFrame);
        pFrame = NULL;
        avcodec_close(pCodecCtx);
        av_free(pCodecCtx);
        pCodecCtx = NULL;
        return -1;
    }

    return 0;
}
In the earliest version, global variables were not used and initialization only called the init and register functions; the end result was that only key frames (I-frames) could be decoded, while non-key frames could not. Keeping the decoder context in globals preserves the reference-frame state between calls.
2. Decoding
When decoding, the avcodec_decode_video function performs the actual decode. The caller allocates outputbuf with a size of width * height * 3 bytes (3 bytes per pixel for 24-bit RGB); outsize returns the number of bytes written to outputbuf, which is likewise width * height * 3.
The code below vertically flips the YUV420P plane data in place. In the original version the decoded image came out upside down; of the methods I found online, this one proved the most practical, because real-time decoding matters: flipping the planes before conversion costs almost nothing. Flipping the RGB image after conversion would also produce an upright picture, but that is noticeably slower.
// Vertical flip of the YUV420P planes, done in place before sws_scale():
// point each plane's data pointer at its last row and negate the line stride
// so the scaler walks the picture bottom-up.  The chroma planes
// (data[1]/data[2]) are half height in YUV420P, hence height/2.
// NOTE(review): the stray double semicolons below are harmless empty
// statements but should be removed.
pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height-1);
pFrame->linesize[0] *= -1;
pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height/2 - 1);;
pFrame->linesize[1] *= -1;
pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height/2 - 1);;
pFrame->linesize[2] *= -1;
/*
 * Decode one H.264 frame from inputbuf (frame_size bytes) and convert it to
 * 24-bit RGB in outputbuf, flipped to upright orientation.
 * outputbuf must be caller-allocated with at least width * height * 3 bytes.
 * On success *outsize receives the number of bytes written (width*height*3,
 * or 0 when the decoder consumed data without emitting a picture).
 * Returns 0 on success, -1 on failure.  H264_Init() must have succeeded.
 */
int H264_2_RGB(unsigned char *inputbuf, int frame_size, unsigned char *outputbuf, unsigned int*outsize)
{
    int decode_size;        /* set non-zero by the decoder when a picture is ready */
    int numBytes;
    int av_result;
    uint8_t *buffer = NULL;

    printf("Video decoding\n");

    av_result = avcodec_decode_video(pCodecCtx, pFrame, &decode_size, inputbuf, frame_size);
    if (av_result < 0)
    {
        /* BUG FIX: printing a pointer with %x is undefined behaviour; use %p. */
        fprintf(stderr, "decode failed: inputbuf = %p , input_framesize = %d\n",
                (void *)inputbuf, frame_size);
        return -1;
    }

    /* The decoder may consume input without producing a picture (e.g. while
     * gathering reference frames).  BUG FIX: the original ran the plane flip
     * and sws_scale() unconditionally, touching frame planes that are not
     * valid when no picture was decoded. */
    if (!decode_size)
    {
        *outsize = 0;
        return 0;
    }

    /* Determine required buffer size and allocate the temporary RGB buffer.
     * BUG FIX: the original sized/filled with PIX_FMT_BGR24 but converted to
     * PIX_FMT_RGB24, swapping the red and blue channels — use RGB24 for both. */
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)malloc((size_t)numBytes);
    if (buffer == NULL)
        return -1;

    /* Assign appropriate parts of buffer to the image planes in pFrameRGB. */
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    /* Reuse the cached scaler context when the geometry/format is unchanged. */
    img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                                           pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                           pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,
                                           SWS_X, NULL, NULL, NULL);
    if (img_convert_ctx == NULL)
    {
        printf("can't init convert context!\n");
        free(buffer);   /* BUG FIX: buffer leaked on this error path */
        return -1;
    }

    /* Flip the YUV420P planes in place: point each plane at its last row and
     * negate the stride so sws_scale() reads bottom-up, producing an upright
     * RGB image in one pass (chroma planes are half height, hence height/2). */
    pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
    pFrame->linesize[0] *= -1;
    pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
    pFrame->linesize[1] *= -1;
    pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
    pFrame->linesize[2] *= -1;

    /* BUG FIX: the srcSliceH argument is the source slice HEIGHT; the
     * original passed (0 - width), a negative value of the wrong dimension. */
    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
              0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

    *outsize = pCodecCtx->width * pCodecCtx->height * 3;
    memcpy(outputbuf, pFrameRGB->data[0], *outsize);

    free(buffer);
    return 0;
}
3. Release resources
Recycling of resources.
void H264_Release(void)
{
avcodec_close(pCodecCtx);
av_free(pCodecCtx);
av_free(pFrame);
av_free(pFrameRGB);
}