1. Go to the FFmpeg build site (http://ffmpeg.zeranoe.com/builds/) and download two packages: the *dev* package, which contains the header files and .lib import libraries we need, and the *shared* package, which contains the DLLs. Be careful to match the architecture (32-bit vs. 64-bit): I downloaded the 64-bit libraries but had created a 32-bit project, so the linker kept reporting "unresolved external symbol _av_register_all". (On Linux I had always compiled the libraries myself, so I had never hit this pitfall before.)
This Stack Overflow answer finally solved it: https://stackoverflow.com/questions/20672777/linker-error-using-ffmpeg-with-visual-studio-2013-express
2. Copy the contents of the extracted dev package into the project directory, and copy the DLLs from the extracted shared package into the build output directory (e.g. Release).
g:\source\ffmpegdemo\ffmpegdemo\ffmpeg
├─inc
│  ├─libavcodec
│  ├─libavdevice
│  ├─libavfilter
│  ├─libavformat
│  ├─libavutil
│  ├─libpostproc
│  ├─libswresample
│  └─libswscale
└─libs
   avcodec.lib  avdevice.lib  avfilter.lib  avformat.lib
   avutil.lib   postproc.lib  swresample.lib  swscale.lib
3. Right-click the project, open "Properties" → "C/C++" → "Additional Include Directories", and add the path of the header directory we copied in (ffmpeg\inc).
4. Link lib files in the source code
#pragma comment(lib, "ffmpeg\\libs\\avutil.lib")
#pragma comment(lib, "ffmpeg\\libs\\avformat.lib")
#pragma comment(lib, "ffmpeg\\libs\\avcodec.lib")
#pragma comment(lib, "ffmpeg\\libs\\swscale.lib")
The source code is as follows:
//main.cpp#include <stdio.h>#include<stdlib.h>#pragmaComment (lib, "Ffmpeg\\libs\\avutil.lib")#pragmaComment (lib, "Ffmpeg\\libs\\avformat.lib")#pragmaComment (lib, "Ffmpeg\\libs\\avcodec.lib")#pragmaComment (lib, "Ffmpeg\\libs\\swscale.lib")extern"C" {//Coding#include"libavcodec/avcodec.h"//Encapsulation Format Processing#include"libavformat/avformat.h"//pixel processing#include"libswscale/swscale.h"};intMainintargcChar*argv[]) { //get Input Output file nameConstChar*input ="Test.mp4"; ConstChar*output ="TEST.YUV"; //1. Register All ComponentsAv_register_all (); //encapsulates the format context, dominates the global structure, and holds information about the format of the video file encapsulationAvformatcontext *pformatctx =Avformat_alloc_context (); //2. Open the input video file if(Avformat_open_input (&pformatctx, input, NULL, NULL)! =0) {printf ("%s","Unable to open input video file"); Return-1; } //3. Get video File information if(Avformat_find_stream_info (Pformatctx, NULL) <0) {printf ("%s","Unable to get video file information"); Return-1; } //gets the index position of the video stream//traverse all types of streams (audio stream, video stream, subtitle stream), find video stream intV_stream_idx =-1; inti =0; //Number of streams for(; I < pformatctx->nb_streams; i++) { //type of Flow if(Pformatctx->streams[i]->codec->codec_type = =Avmedia_type_video) {V_stream_idx=i; Break } } if(V_stream_idx = =-1) {printf ("%s","video stream not found \ n"); Return-1; } //only by knowing the encoding of the video can we find the decoder according to the encoding method .//get the codec context in the video streamAvcodeccontext *pcodecctx = pformatctx->streams[v_stream_idx]->codec; //4. Find the corresponding decoding based on the encoding ID in the context of the codecAvcodec *pcodec = Avcodec_find_decoder (pcodecctx->codec_id); if(Pcodec = =NULL) {printf ("%s","the decoder could not be found \ n"); Return-1; } //5. 
Turn on the decoder if(Avcodec_open2 (Pcodecctx, Pcodec, NULL) <0) {printf ("%s","decoder cannot open \ n"); Return-1; } //Output Video Informationprintf"file format for video:%s", pformatctx->iformat->name); printf ("Video Duration:%d", (pformatctx->duration)/1000000); printf ("width of video:%d,%d", Pcodecctx->width, pcodecctx->height); printf ("name of decoder:%s", pcodec->name); //ready to read//Avpacket for storing compressed data in one frame (H264)//buffer, open spaceAvpacket *packet = (avpacket*) Av_malloc (sizeof (avpacket)); //avframe for storing decoded pixel data (YUV)//Memory allocationAvframe *pframe =Av_frame_alloc (); //YUV420Avframe *PFRAMEYUV =Av_frame_alloc (); //only the Avframe pixel format and screen size are specified to really allocate memory//Buffer Allocation Memoryuint8_t *out_buffer = (uint8_t *) Av_malloc (avpicture_get_size (av_pix_fmt_yuv420p, Pcodecctx->width, pCodecCtx- >height)); //Initializing buffersAvpicture_fill (Avpicture *) PFRAMEYUV, Out_buffer, av_pix_fmt_yuv420p, Pcodecctx->width, pCodecCtx->height); //parameters for transcoding (scaling), width height before turning, width height after turn, format, etc.struct Swscontext *sws_ctx = Sws_getcontext (Pcodecctx->width, Pcodecctx->height, pcodecctx->pix_fmt, Pcodecctx->width, pcodecctx->height, av_pix_fmt_yuv420p, sws_bicubic, NULL, NULL, NULL); intGot_picture, ret; FILE*FP_YUV = fopen (output,"wb+"); intFrame_count =0; //6. A frame-by-frame reading of compressed data while(Av_read_frame (pformatctx, packet) >=0) { //as long as the video compresses the data (judging by the index position of the stream) if(Packet->stream_index = =v_stream_idx) { //7. 
Decode a frame of video compression data to get video pixel dataret = Avcodec_decode_video2 (Pcodecctx, Pframe, &got_picture, packet); if(Ret <0) {printf ("%s","decoding Error"); Return-1; } //0 Description decoding complete, not 0 decoding if(got_picture) {//avframe to pixel format YUV420, wide height//2 6 input, output data//3 7 Input, output the size of a row of data avframe conversion is a line-by-line conversion//4 Input data The first column where you want to transcode starts at 0//5 The height of the input screenSws_scale (Sws_ctx, Pframe->data, Pframe->linesize,0, pcodecctx->height, Pframeyuv->data, pframeyuv->linesize); //output to YUV file//avframe Pixel frame Write file//image pixel data decoded by data (audio sampled data)//Y Brightness UV chroma (compressed) People are more sensitive to brightness//the number of U V is the y intY_size = Pcodecctx->width * pcodecctx->height; Fwrite (PFRAMEYUV->data[0],1, Y_size, FP_YUV); Fwrite (PFRAMEYUV->data[1],1, Y_size/4, FP_YUV); Fwrite (PFRAMEYUV->data[2],1, Y_size/4, FP_YUV); Frame_count++; printf ("decode frame%d \ n", Frame_count); } } //Freeing Resourcesav_free_packet (packet); } fclose (FP_YUV); Av_frame_free (&pframe); Avcodec_close (PCODECCTX); Avformat_free_context (PFORMATCTX); Return0;}
Windows ffmpeg Development Environment Configuration