Android audio and video in depth 10 ffmpeg to video plus effects (with source download)

Source: Internet
Author: User

Project address — please give it a star:
https://github.com/979451341/Audio-and-video-learning-materials/tree/master/FFmpeg(AVfilter过滤）

1.AVfilter struct Members

This special effect depends on AVFilter, so first let's look at the members of the AVFilter struct.

/**
 * Filter definition. This defines the pads a filter contains, as well as all
 * the callback functions used to interact with the filter.
 */
typedef struct AVFilter {

/**
 * Filter name. Must be non-NULL and unique among filters.
 */
const char *name;

/**
 * A description of the filter. May be NULL.
 *
 * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
 */
const char *description;

/**
 * List of inputs, terminated by a zeroed element.
 *
 * NULL if there are no (static) inputs. Instances of filters with
 * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in
 * this list.
 */
const AVFilterPad *inputs;

/**
 * List of outputs, terminated by a zeroed element.
 *
 * NULL if there are no (static) outputs. Instances of filters with
 * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in
 * this list.
 */
const AVFilterPad *outputs;

/**
 * A class for the private data, used to declare the filter's private AVOptions.
 * This field is NULL for filters that do not declare any options.
 *
 * If this field is non-NULL, the first member of the filter private data
 * must be a pointer to AVClass, which will be set by libavfilter's generic
 * code to this class.
 */
const AVClass *priv_class;

/**
 * A combination of AVFILTER_FLAG_* values.
 */
int flags;

2.AVfilter Use steps

Now let's go straight to the code to walk through the steps for using AVFilter.

Since the filter is applied while playing a video, we first need to decode it; the following is the standard preparation code.

// Path of the video file on the SD card; modify as needed, or pass it in via JNI
char *file_name = "/storage/emulated/0/pauseRecordDemo/video/2018-02-03-09-25-34.mp4";
//char *file_name = "/storage/emulated/0/video.avi";
av_register_all();

Register all AVFilters.

avfilter_register_all(); // added by ws for AVfilter

AVFormatContext *pFormatCtx = avformat_alloc_context();
// Open video file
if (avformat_open_input(&pFormatCtx, file_name, NULL, NULL) != 0) {
    LOGD("Couldn't open file:%s\n", file_name);
    return -1; // Couldn't open file
}
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    LOGD("Couldn't find stream information.");
    return -1;
}
// Find the first video stream
int videoStream = -1, i;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
        && videoStream < 0) {
        videoStream = i;
    }
}
if (videoStream == -1) {
    LOGD("Didn't find a video stream.");
    return -1; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
AVCodecContext *pCodecCtx = pFormatCtx->streams[videoStream]->codec;

Start the filter preparation

AVFilter *buffersrc  = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("buffersink"); // newer ffmpeg versions must use "buffersink"
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs  = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;

Allocate memory for the filter graph.

filter_graph = avfilter_graph_alloc();

Create a filter and add it to the filter graph.

ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                   args, NULL, filter_graph);
if (ret < 0) {
    LOGD("Cannot create buffer source\n");
    return ret;
}

buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;

Create a filter and add it to the filter graph.

ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                   NULL, buffersink_params, filter_graph);
av_free(buffersink_params);
if (ret < 0) {
    LOGD("Cannot create buffer sink\n");
    return ret;
}

Assign the input and output descriptions to the AVFilter.

outputs->name       = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx    = 0;
outputs->next       = NULL;
inputs->name        = av_strdup("out");
inputs->filter_ctx  = buffersink_ctx;
inputs->pad_idx     = 0;
inputs->next        = NULL;

Configure and apply the filter effect: fixing the U and V chroma components to 128 makes the picture display in black and white.

const char *FILTERS_DESCR = "lutyuv='u=128:v=128'";

Add the graph described by the string to the filter graph.

if ((ret = avfilter_graph_parse_ptr(filter_graph, FILTERS_DESCR,
                                    &inputs, &outputs, NULL)) < 0) {
    LOGD("Cannot avfilter_graph_parse_ptr\n");
    return ret;
}

Check the validity of the filter graph's configuration.

if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
    LOGD("Cannot avfilter_graph_config\n");
    return ret;
}

The next step is rendering

// Find the decoder for the video stream
AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
    LOGD("Codec not found.");
    return -1; // Codec not found
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    LOGD("Could not open codec.");
    return -1; // Could not open codec
}
// Get the native window
ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
// Get the video width and height
int videoWidth = pCodecCtx->width;
int videoHeight = pCodecCtx->height;
// Set the buffer size of the native window; it can be stretched automatically
ANativeWindow_setBuffersGeometry(nativeWindow, videoWidth, videoHeight, WINDOW_FORMAT_RGBA_8888);
ANativeWindow_Buffer windowBuffer;
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    LOGD("Could not open codec.");
    return -1; // Could not open codec
}
// Allocate the video frame
AVFrame *pFrame = av_frame_alloc();
// Frame used for rendering
AVFrame *pFrameRGBA = av_frame_alloc();
if (pFrameRGBA == NULL || pFrame == NULL) {
    LOGD("Could not allocate video frame.");
    return -1;
}
// Determine the required buffer size and allocate the buffer;
// the buffer data is used for rendering, in RGBA format
int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, pCodecCtx->width,
                                        pCodecCtx->height, 1);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
av_image_fill_arrays(pFrameRGBA->data, pFrameRGBA->linesize, buffer, AV_PIX_FMT_RGBA,
                     pCodecCtx->width, pCodecCtx->height, 1);
// Because the decoded frame format is not RGBA, a format conversion is needed before rendering
struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                            pCodecCtx->pix_fmt,
                                            pCodecCtx->width, pCodecCtx->height,
                                            AV_PIX_FMT_RGBA, SWS_BILINEAR,
                                            NULL, NULL, NULL);

Decode and release the resources; during decoding, feed the decoded data through the filter graph before displaying it.

while (av_read_frame(pFormatCtx, &packet) >= 0) {
    // Is this a packet from the video stream?
    if (packet.stream_index == videoStream) {
        // Decode the video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        // A single decode call does not necessarily produce a complete frame
        if (frameFinished) {
            // added by ws for AVfilter start
            pFrame->pts = av_frame_get_best_effort_timestamp(pFrame);
            // Push an AVFrame into the filter graph.
            if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) < 0) {
                LOGD("Could not av_buffersrc_add_frame");
                break;
            }

Pull a filtered AVFrame back out of the filter graph.

            ret = av_buffersink_get_frame(buffersink_ctx, pFrame);
            if (ret < 0) {
                LOGD("Could not av_buffersink_get_frame");
                break;
            }
            // added by ws for AVfilter end

            // Lock the native window buffer
            ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
            // Format conversion
            sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data, pFrame->linesize,
                      0, pCodecCtx->height, pFrameRGBA->data, pFrameRGBA->linesize);
            // Get the strides
            uint8_t *dst = (uint8_t *) windowBuffer.bits;
            int dstStride = windowBuffer.stride * 4;
            uint8_t *src = pFrameRGBA->data[0];
            int srcStride = pFrameRGBA->linesize[0];
            // Because the window's stride differs from the frame's stride,
            // the pixels must be copied row by row
            int h;
            for (h = 0; h < videoHeight; h++) {
                memcpy(dst + h * dstStride, src + h * srcStride, srcStride);
            }
            ANativeWindow_unlockAndPost(nativeWindow);
        }
    }
    av_packet_unref(&packet);
}
av_free(buffer);
av_free(pFrameRGBA);
// Free the YUV frame
av_free(pFrame);
avfilter_graph_free(&filter_graph); // added by ws for AVfilter
// Close the codecs
avcodec_close(pCodecCtx);

To really master this, you need to read avfilter.c at least twice, because we have not yet looked at the internal members of the following four structs:

typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
typedef struct AVFilterFormats AVFilterFormats;

Android audio and video in depth 10 ffmpeg to video plus effects (with source download)

Related Article

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.