FFMPEG+SDL2.0 Streaming Media Development 3: A Simple MP4 Video Player, Extracting the H264 Video Sequence from an MP4 File, Decoding and Displaying It


Introduction

In a previous article I extracted the audio and video streams from an MP4 file and decoded them. This article brings in SDL2.0 to display the decoded video sequence, turning the code into a simple video player.

I am using the latest versions of both FFMPEG and SDL2.0 here, so there is not much material about them online yet and many API entry points have changed, but the overall approach is still the same.

Analyzing a Few FFMPEG Functions

Before getting started, let's walk through the source of a few FFMPEG functions used in the code that might otherwise raise questions. I have added comments to the best of my ability; since there is hardly any documentation, some places may not be covered in full, but the general idea should still come across.

av_image_alloc (allocate an image buffer)

Our code calls this function directly; the functions listed after it are the ones it calls internally, and I have commented all of them. Pay particular attention to the pointers parameter: it is an array of pointers, but after initialization its entries are set to point into one contiguous block of memory, as the source shows.

int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
                   int w, int h, enum AVPixelFormat pix_fmt, int align)
{
    // get the pixel format descriptor
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int i, ret;
    uint8_t *buf;

    // no descriptor for this format: return an error
    if (!desc)
        return AVERROR(EINVAL);

    // validate the image width and height
    if ((ret = av_image_check_size(w, h, 0, NULL)) < 0)
        return ret;

    // fill in the line sizes
    if ((ret = av_image_fill_linesizes(linesizes, pix_fmt, align>7 ? FFALIGN(w, 8) : w)) < 0)
        return ret;

    // round each line size up to the requested alignment
    for (i = 0; i < 4; i++)
        linesizes[i] = FFALIGN(linesizes[i], align);

    // first pass with a NULL buffer: only computes the required buffer size
    if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, NULL, linesizes)) < 0)
        return ret;

    // allocate a buffer of the computed size (plus alignment slack)
    buf = av_malloc(ret + align);
    if (!buf)
        return AVERROR(ENOMEM);

    // second pass: make pointers[] point into the contiguous buffer
    if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, buf, linesizes)) < 0) {
        // on failure, release the buffer
        av_free(buf);
        return ret;
    }

    // AV_PIX_FMT_FLAG_PAL: the pixel format has a palette in data[1], values are indexes in this palette.
    // AV_PIX_FMT_FLAG_PSEUDOPAL: the pixel format is "pseudo-paletted". This means that FFmpeg treats it as
    // paletted internally, but the palette is generated by the decoder and is not stored in the file.
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
        // fill in the systematic palette
        avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);

    return ret;
}
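To see the caller's side of this, here is a minimal sketch of my own (not from the article), assuming a 1280x720 YUV420P image with an alignment of 1, the same parameters the player code below ends up passing. All three plane pointers land inside a single contiguous allocation, so freeing data[0] releases everything:

#include "libavutil/imgutils.h"

static int alloc_example(void)
{
    uint8_t *data[4];
    int linesize[4];

    // one call allocates a single contiguous buffer and points
    // data[0]/data[1]/data[2] at the Y, U and V planes inside it
    int size = av_image_alloc(data, linesize, 1280, 720, AV_PIX_FMT_YUV420P, 1);
    if (size < 0)
        return size;            // negative AVERROR code on failure

    // ... fill or copy image data here, e.g. with av_image_copy() ...

    av_freep(&data[0]);         // freeing data[0] releases the whole block
    return 0;
}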

avpriv_set_systematic_pal2 (set the systematic palette)

// fill in a systematic palette according to the pixel format

int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
{
    int i;

    for (i = 0; i < 256; i++) {
        int r, g, b;

        switch (pix_fmt) {
        case AV_PIX_FMT_RGB8:
            r = (i>>5    )*36;
            g = ((i>>2)&7)*36;
            b = (i&3     )*85;
            break;
        case AV_PIX_FMT_BGR8:
            b = (i>>6    )*85;
            g = ((i>>3)&7)*36;
            r = (i&7     )*36;
            break;
        case AV_PIX_FMT_RGB4_BYTE:
            r = (i>>3    )*255;
            g = ((i>>1)&3)*85;
            b = (i&1     )*255;
            break;
        case AV_PIX_FMT_BGR4_BYTE:
            b = (i>>3    )*255;
            g = ((i>>1)&3)*85;
            r = (i&1     )*255;
            break;
        case AV_PIX_FMT_GRAY8:
            r = b = g = i;
            break;
        default:
            return AVERROR(EINVAL);
        }
        pal[i] = b + (g << 8) + (r << 16) + (0xFFU << 24);
    }

    return 0;
}
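As a quick check of the packing, take AV_PIX_FMT_RGB8 and index i = 0xFF: r = (0xFF>>5)*36 = 252, g = ((0xFF>>2)&7)*36 = 252, b = (0xFF&3)*85 = 255, so pal[0xFF] = 255 + (252<<8) + (252<<16) + (0xFF<<24) = 0xFFFCFCFF. Each 32-bit entry is therefore 0xAARRGGBB with the alpha byte forced to 0xFF (fully opaque).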

av_image_fill_pointers (fills the unsigned char **data and linesize arrays passed in by av_image_alloc)

// Returns the size required for the image.
// When a buffer is supplied, it also chains data[0..3] into one contiguous memory block.
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
                           uint8_t *ptr, const int linesizes[4])
{
    int i, total_size, size[4] = { 0 }, has_plane[4] = { 0 };

    // get the pixel format descriptor
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);

    // clear the pointer array
    memset(data, 0, sizeof(data[0])*4);

    // no descriptor (or a hardware-accelerated format): return an error
    if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
        return AVERROR(EINVAL);

    // data[0] starts at ptr
    data[0] = ptr;

    // reject line sizes that would overflow an int when multiplied by the height
    if (linesizes[0] > (INT_MAX - 1024) / height)
        return AVERROR(EINVAL);

    // size of plane 0
    size[0] = linesizes[0] * height;

    // for paletted / pseudo-paletted formats the palette lives in data[1]
    // and has 256 entries of 32 bits each
    if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
        desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        size[0] = (size[0] + 3) & ~3;
        data[1] = ptr + size[0];
        return size[0] + 256 * 4;
    }

    /**
     * Parameters that describe how pixels are packed.
     * If the format has 2 or 4 components, then alpha is last.
     * If the format has 1 or 2 components, then luma is 0.
     * If the format has 3 or 4 components,
     * if the RGB flag is set then 0 is red, 1 is green and 2 is blue;
     * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.
     */
    for (i = 0; i < 4; i++)
        has_plane[desc->comp[i].plane] = 1;

    // add up the buffer size needed for the remaining planes
    total_size = size[0];
    for (i = 1; i < 4 && has_plane[i]; i++) {
        int h, s = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
        data[i] = data[i-1] + size[i-1];
        h = (height + (1 << s) - 1) >> s;
        if (linesizes[i] > INT_MAX / h)
            return AVERROR(EINVAL);
        size[i] = h * linesizes[i];
        if (total_size > INT_MAX - size[i])
            return AVERROR(EINVAL);
        total_size += size[i];
    }

    // return the total buffer size
    return total_size;
}
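To make the plane layout concrete, assume a 1280x720 AV_PIX_FMT_YUV420P image with linesizes {1280, 640, 640, 0} (align = 1, so no row padding). Then size[0] = 1280*720 = 921600, and for the two chroma planes log2_chroma_h = 1, so h = (720 + 1) >> 1 = 360 and size[1] = size[2] = 640*360 = 230400. The function sets data[1] = data[0] + 921600 and data[2] = data[1] + 230400, and returns the total 921600 + 230400 + 230400 = 1382400 bytes.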

av_image_fill_linesizes (fill in the line sizes)

// Fills the linesizes array; linesizes[i] is the width of one row of plane i, in bytes.
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
{
    int i, ret;
    // get the pixel format descriptor
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int max_step     [4];       /* max pixel step for each plane */
    int max_step_comp[4];       /* the component for each plane which has the max pixel step */

    // clear the linesize array
    memset(linesizes, 0, 4*sizeof(linesizes[0]));

    // no descriptor (or a hardware-accelerated format): return an error
    if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
        return AVERROR(EINVAL);

    // the code below computes the line size of every plane
    av_image_fill_max_pixsteps(max_step, max_step_comp, desc);
    for (i = 0; i < 4; i++) {
        if ((ret = image_get_linesize(width, i, max_step[i], max_step_comp[i], desc)) < 0)
            return ret;
        linesizes[i] = ret;
    }

    return 0;
}
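For example (again assuming no extra alignment), this yields {1280, 640, 640, 0} for AV_PIX_FMT_YUV420P at width 1280 (one byte per luma sample, half-width chroma planes), and {3840, 0, 0, 0} for the packed AV_PIX_FMT_RGB24 format (three bytes per pixel in a single plane).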

Example: extract the video stream from an MP4 file and play it, implementing a simple video player

#include "stdafx.h"/************************************************************************//* 利用分流器分流MP4檔案音視頻並進行解碼輸出  Programmer小衛-USher 2014/12/17/************************************************************************///開啟#define __STDC_FORMAT_MACROS#ifdef _CPPRTTI extern "C"{#endif#include "libavutil/imgutils.h"    //映像工具 #include "libavutil/samplefmt.h"  // 音頻樣本格式#include "libavutil/timestamp.h"  //時間戳記工具可以 被用於調試和日誌目的 #include "libavformat/avformat.h" //Main libavformat public API header  包含了libavf I/O和   Demuxing  和Muxing 庫   #include "SDL.h"#ifdef _CPPRTTI };#endif//音視頻編碼器上下文static AVCodecContext *pVideoContext,*pAudioContext;static FILE *fVideoFile,*fAudioFile;  //輸出檔案控制代碼static AVStream *pStreamVideo,*pStreamAudio; //媒體流  static unsigned char * videoDstData[4];  //視頻資料 static int videoLineSize[4]; // static int videoBufferSize; //視頻緩衝區大小 static AVFormatContext *pFormatCtx=NULL; //格式上下文static AVFrame*pFrame=NULL ; //static AVPacket pkt;  //解碼媒體包static int ret=0; //狀態static int gotFrame; //擷取到的視頻流//音視頻流的索引static int videoStreamIndex,audioStreamIndex;//解碼媒體包//SDL定義  SDL_Window * pWindow = NULL;SDL_Renderer *pRender = NULL;SDL_Texture *pTexture = NULL;SDL_Rect dstrect = {0,0,800,600};int frame = 0;int indexFrameVideo=0;static int decode_packet(int* gotFrame, int param2){int ret  = 0 ;//解碼資料大小int decodedSize=pkt.size ; //初始化擷取的資料幀為0*gotFrame=0;//如果是視頻流那麼 解包視頻流  if(pkt.stream_index==videoStreamIndex){  //解碼資料到視訊框架if((ret=avcodec_decode_video2(pVideoContext,pFrame,gotFrame,&pkt))<0){  //解碼視訊框架失敗return ret ;}indexFrameVideo++;//copy 解壓後的資料到我們分配的空間中if(*gotFrame){//拷貝資料av_image_copy(videoDstData,videoLineSize, (const uint8_t **)(pFrame->data), pFrame->linesize,pVideoContext->pix_fmt, pVideoContext->width, pVideoContext->height);//寫入資料到緩衝區//fwrite(videoDstData[0], 1, videoBufferSize, fVideoFile);    printf("輸出當前第%d幀,大小:%d\n",indexFrameVideo,videoBufferSize);     int n = SDL_BYTESPERPIXEL(pStreamVideo->codec->pix_fmt);//更新紋理SDL_UpdateTexture(pTexture, &dstrect, (const void*)videoDstData[0], videoLineSize[0]);//拷貝紋理到2D模組SDL_RenderCopy(pRender, pTexture,NULL, &dstrect);//延時 1000ms*1/25SDL_Delay(1000 * 1 / frame);//顯示Render渲染曾SDL_RenderPresent(pRender);}else{printf("第%d幀,丟失\n",indexFrameVideo);}}//音頻不管else if(pkt.stream_index==audioStreamIndex){  ///解碼音頻資訊// if ((ret = avcodec_decode_audio4(pAudioContext, pFrame, gotFrame, &pkt)) < 0)// return ret;// decodedSize = FFMIN(ret, pkt.size);// //算出當前幀的大小// size_t unpadded_linesize = pFrame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)pFrame->format); // ///寫入資料到音頻檔案// fwrite(pFrame->extended_data[0], 1, unpadded_linesize, fAudioFile);   } //取消所有引用  並且重設frame欄位av_frame_unref(pFrame);return decodedSize ;}int Demuxing(int argc, char** argv){if (argc < 4){printf("Parameter Error!\n");return 0;}//註冊所有混流器 過濾器av_register_all();//註冊所有編碼器avcodec_register_all();//媒體輸入源頭char*pInputFile = argv[1];//視頻輸出檔案char*pOutputVideoFile = argv[3];//音訊輸出檔案char*pOutputAudioFile = argv[2];//分配環境內容pFormatCtx = avformat_alloc_context();//開啟輸入源  並且讀取輸入源的頭部if (avformat_open_input(&pFormatCtx, pInputFile, NULL, NULL) < 0){printf("Open Input Error!\n");return 0;}//擷取流媒體資訊if (avformat_find_stream_info(pFormatCtx, NULL) < 0){printf("擷取流媒體資訊失敗!\n");return 0;}//列印媒體資訊av_dump_format(pFormatCtx, 0, pInputFile, 0);for (unsigned i = 0; i < pFormatCtx->nb_streams; i++){AVStream *pStream = pFormatCtx->streams[i];AVMediaType mediaType = pStream->codec->codec_type;//提取不同的轉碼器if (mediaType == AVMEDIA_TYPE_VIDEO){videoStreamIndex = i;pVideoContext = pStream->codec;pStreamVideo = 
pStream;fVideoFile = fopen(pOutputVideoFile, "wb"); frame = pVideoContext->framerate.num;if (!fVideoFile){printf("con't open file!\n");goto end;}//計算解碼後一幀映像的大小//int nsize = avpicture_get_size(PIX_FMT_YUV420P, 1280, 720);//分配計算初始化 映像緩衝區 調色盤資料int ret = av_image_alloc(videoDstData, videoLineSize, pVideoContext->width, pVideoContext->height, pVideoContext->pix_fmt, 1);if (ret < 0){printf("Alloc video buffer error!\n");goto end;}//avpicture_fill((AVPicture *)pFrame, videoDstData[0], PIX_FMT_YUV420P, 1280, 720);videoBufferSize = ret;}else if (mediaType == AVMEDIA_TYPE_AUDIO){audioStreamIndex = i;pAudioContext = pStream->codec;pStreamAudio = pStream;fAudioFile = fopen(pOutputAudioFile, "wb");if (!fAudioFile){printf("con't open file!\n");goto end;}//分配視訊框架pFrame = av_frame_alloc();if (pFrame == NULL){av_freep(&videoDstData[0]);printf("alloc audio frame error\n");goto end;}}AVCodec *dec;//根據編碼器id尋找編碼器dec = avcodec_find_decoder(pStream->codec->codec_id);if (dec == NULL){printf("尋找編碼器失敗!\n");goto end;}if (avcodec_open2(pStream->codec, dec, nullptr) != 0){printf("開啟編碼器失敗!\n");goto end;}}av_init_packet(&pkt);pkt.data = NULL;pkt.size = 0;//讀取媒體資料包  資料要大於等於0while (av_read_frame(pFormatCtx, &pkt) >= 0){AVPacket oriPkt = pkt;do{//返回每個包解碼的資料ret = decode_packet(&gotFrame, 0);if (ret < 0)break;//指標後移  空閑記憶體減少pkt.data += ret;pkt.size -= ret;//} while (pkt.size > 0);//釋放之前分配的空間  讀取完畢必須釋放包av_free_packet(&oriPkt);}end://關閉視頻編碼器avcodec_close(pVideoContext);//關閉音頻編碼器avcodec_close(pAudioContext);avformat_close_input(&pFormatCtx);fclose(fVideoFile);fclose(fAudioFile);//釋放編碼幀avcodec_free_frame(&pFrame);//釋放視頻資料區av_free(videoDstData[0]);  return 0;}int _tmain(int argc, char*argv[]){  SDL_Init(SDL_INIT_VIDEO);  //建立視窗pWindow = SDL_CreateWindow("YUV420P", 200, 100, 800, 600, 0);//啟用硬體加速 pRender=SDL_CreateRenderer(pWindow, -1, 0);  dstrect.x = 0;dstrect.y = 0;dstrect.w = 1280;dstrect.h = 720;//建立一個紋理  設定可以Lock  YUV420P 格式 1280*720 pTexture = SDL_CreateTexture(pRender, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, 1280, 720);Demuxing(argc, argv);//釋放SDL_RenderClear(pRender);SDL_DestroyTexture(pTexture);SDL_DestroyRenderer(pRender);SDL_DestroyWindow(pWindow);SDL_Quit();return  0;}
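Judging from the argument handling in Demuxing (argv[1] is the input file, argv[2] the audio output file, argv[3] the video output file, and argc must be at least 4), the player would be launched roughly like this; the executable and file names are only placeholders:

player.exe input.mp4 audio_out.pcm video_out.yuv

Note that with the two fwrite calls commented out, the output files are created but stay empty; the decoded frames are only rendered into the SDL window.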

The running program

(Screenshot: the SDL window displaying the decoded video frames.)




