FFmpeg includes a library called libavfilter that provides a wide range of audio and video filters. I had never used this library before; after recently reading its documentation I found it very powerful — the many ready-made filters make video processing very convenient. This example overlays a watermark on a video and has been ported to VC2010, so that developers can learn how to use the library.
This example implements a watermark overlay: it adds a PNG image with a transparent background as a watermark onto a video file.
The complete source code is listed below:
/** The Simplest Example of AVFilter Based on FFmpeg (adding watermarks) * Simplest FFmpeg AVfilter Example (Watermark) ** leixiao Li Lei Xiaohua * leixiaohua1020@126.com * China Media University/Digital TV Technology * Communication University of China/Digital TV Technology * http://blog.csdn.net/leixiaohua1020 ** this program uses FFmpeg AVfilter to achieve video watermark Superposition Function. * You can add a PNG image as a watermark to a video. * Is the simplest FFmpeg AVFilter tutorial. * Suitable for beginners of FFmpeg. ** This software uses FFmpeg's AVFilter to add watermark in a video file. * It can add a PNG format picture as watermark to a video file. * It's the simplest example based on FFmpeg's AVFilter. * Suitable for beginner of FFmpeg **/# include "stdafx. h "# define ENABLE_SDL 1 # define ENABLE_YUVFILE 1 extern" C "{# include" libavcodec/avcodec. h "# include" libavformat/avformat. h "# include" libavfilter/avfilte Rgraph. h "# include" libavfilter/avcodec. h "# include" libavfilter/buffersink. h "# include" libavfilter/buffersrc. h "# include" libavutil/avutil. h "# include" libswscale/swscale. h "// SDL # include" sdl/SDL. 
h "# include" sdl/SDL_thread.h "}; const char * filter_descr =" movie1_my_logo.png [wm]; [in] [wm] overlay = 5: 5 [out] "; static AVFormatContext * pFormatCtx; static AVCodecContext * pCodecCtx; AVFilterContext * buffersink_ctx; AVFilterContext * buffersrc_ctx; AVFilterGraph * filter_graph; static int video_stream_index =-1; static int64_t last_pts = Beijing; static int transform (const char * filename) {int ret; AVCodec * dec; if (ret = avformat_open_input (& pFormatCtx, filename, NULL, NULL) <0) {av_log (NULL, AV_LOG_ERROR, "Cannot open input file \ n "); return ret;} if (ret = avformat_find_stream_info (pFormatCtx, NULL) <0) {av_log (NULL, AV_LOG_ERROR, "Cannot find stream information \ n"); return ret ;} /* select the video stream */ret = av_find_best_stream (pFormatCtx, AVMEDIA_TYPE_VIDEO,-1,-1, & dec, 0); if (ret <0) {av_log (NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file \ n"); return ret;} video_stream_index = ret; pCodecCtx = pFormatCtx-> streams [video_stream_index]-> codec; /* init The video decoder */if (ret = avcodec_open2 (pCodecCtx, dec, NULL) <0) {av_log (NULL, AV_LOG_ERROR, "Cannot open video decoder \ n "); return ret;} return 0;} static int init_filters (const char * filters_descr) {char args [512]; int ret; AVFilter * buffersrc = avfilter_get_by_name ("buffer "); AVFilter * buffersink = avfilter_get_by_name ("ffbuffersink"); AVFilterInOut * outputs = avfilter_inout_alloc (); AVFilterInOut * inputs = bytes (); enum PixelFormat pix_fmts [] = {bytes, PIX_FMT_NONE}; AVBufferSinkParams * buffersink_params; filter_graph = bytes ();/* buffer video source: the decoded frames from the decoder will be inserted here. 
*/_ snprintf (args, sizeof (args), "video_size = % dx % d: pix_fmt = % d: time_base = % d/% d: pixel_aspect = % d/% d ", pCodecCtx-> width, pCodecCtx-> Height, pCodecCtx-> pix_fmt, pCodecCtx-> time_base.num, pCodecCtx-> time_base.den, pCodecCtx-> convert, pCodecCtx-> convert); ret = convert (& buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph); if (ret <0) {av_log (NULL, AV_LOG_ERROR, "Cannot create buffer source \ n"); return ret ;} /* buffer video sink: to terminate the filter chain. */buffer Sink_params = inflow (); buffersink_params-> pixel_fmts = pix_fmts; ret = inflow (& buffersink_ctx, buffersink, "out", NULL, buffersink_params, filter_graph); av_free (inflow ); if (ret <0) {av_log (NULL, AV_LOG_ERROR, "Cannot create buffer sink \ n"); return ret;}/* Endpoints for the filter graph. */outputs-> name = av_strdup ("in"); outputs-> filter _ Ctx = buffersrc_ctx; outputs-> pad_idx = 0; outputs-> next = NULL; inputs-> name = av_strdup ("out"); inputs-> filter_ctx = buffersink_ctx; inputs-> pad_idx = 0; inputs-> next = NULL; if (ret = avfilter_graph_parse (filter_graph, filters_descr, & inputs, & outputs, NULL) <0) return ret; if (ret = avfilter_graph_config (filter_graph, NULL) <0) return ret; return 0;} int _ tmain (int argc, _ TCHAR * argv []) {Int ret; AVPacket packet; AVFrame frame; int got_frame; if (argc! = 2) {fprintf (stderr, "Usage: % s file \ n", argv [0]); return-1;} avcodec_register_all (); av_register_all (); avfilter_register_all (); if (ret = open_input_file (argv [1]) <0) goto end; if (ret = init_filters (filter_descr) <0) goto end; # if ENABLE_YUVFILEFILE * fp_yuv = fopen ("test. yuv "," wb + "); # endif # if ENABLE_SDLSDL_Surface * screen; SDL_Overlay * bmp; SDL_Rect rect; if (SDL_Init (SDL_INIT_VIDEO | SDL_ I NIT_AUDIO | SDL_INIT_TIMER) {printf ("cocould not initialize SDL-% s \ n", SDL_GetError (); return-1;} screen = SDL_SetVideoMode (pCodecCtx-> width, pCodecCtx-> height, 0, 0); if (! 
Screen) {printf ("SDL: cocould not set video mode-exiting \ n"); return-1;} bmp = SDL_CreateYUVOverlay (pCodecCtx-> width, pCodecCtx-> height, SDL_YV12_OVERLAY, screen); # endif/* read all packets */while (1) {AVFilterBufferRef * picref; if (ret = av_read_frame (pFormatCtx, & packet) <0) break; if (packet. stream_index = video_stream_index) {avcodec_get_frame_defaults (& frame); got_frame = 0; ret = avc Odec_decode_video2 (pCodecCtx, & frame, & got_frame, & packet); if (ret <0) {av_log (NULL, AV_LOG_ERROR, "Error decoding video \ n"); break ;} if (got_frame) {frame. pts = aggregate (& frame);/* push the decoded frame into the filtergraph */if (av_buffersrc_add_frame (buffersrc_ctx, & frame) <0) {av_log (NULL, AV_LOG_ERROR, "Error while feeding the filtergraph \ n"); break;}/* p Ull filtered pictures from the filtergraph */while (1) {ret = av_buffersink_get_buffer_ref (buffersink_ctx, & picref, 0); if (ret = AVERROR (EAGAIN) | ret = AVERROR_EOF) break; if (ret <0) goto end; if (picref) {# if ENABLE_YUVFILEint y_size = picref-> video-> w * picref-> video-> h; fwrite (picref-> data [0], 1, y_size, fp_yuv ); fwrite (picref-> data [1], 1, y_size/4, fp_yuv); fwrite (picref-> data [2], 1, y_size/4, fp_yuv ); # Endif # if ENABLE_SDLSDL_LockYUVOverlay (bmp); bmp-> pixels [0] = picref-> data [0]; bmp-> pixels [2] = picref-> data [1]; bmp-> pixels [1] = picref-> data [2]; bmp-> pitches [0] = picref-> linesize [0]; bmp-> pitches [2] = picref-> linesize [1]; bmp-> pitches [1] = picref-> linesize [2]; SDL_UnlockYUVOverlay (bmp); rect. x = 0; rect. y = 0; rect. w = picref-> video-> w; rect. h = picref-> video-> h; SDL_DisplayYUVOverlay (bmp, & rect); // Delay 40 m SSDL_Delay (40); # endif transform (& picref) ;}}} av_free_packet (& packet) ;}# if transform (fp_yuv); # endifend: avfilter_graph_free (& filter_graph ); if (pCodecCtx) avcodec_close (pCodecCtx); avformat_close_input (& pFormatCtx); if (ret <0 & ret! 
= AVERROR_EOF) {char buf [1024]; av_strerror (ret, buf, sizeof (buf); fprintf (stderr, "Error occurred: % s \ n", buf ); return-1;} return 0 ;}
Below is the result of running the program.
The overlaid watermark is a PNG image (here, my_logo.png).
The input video is an ordinary FLV file (here, cuc_ieschool.flv).
When the program runs, the watermarked result is displayed through SDL. In addition, the decoded, watermarked data can also be written out to a file.
Note: SDL display and YUV file output are each controlled by a macro at the very beginning of the program:
#define ENABLE_SDL 1
#define ENABLE_YUVFILE 1
The output YUV file.
SourceForge project homepage:
https://sourceforge.net/projects/simplestffmpegvideofilter/
CSDN project:
http://download.csdn.net/detail/leixiaohua1020/7465861