The complete program below compiles under VC2005. When run, it plays the video, but no playback delay has been implemented yet, so the frames are displayed as fast as they can be decoded. Proper audio/video synchronization will be implemented in Tutorial 5; for now, the program can simply sleep briefly after displaying each frame to make playback watchable.
// FfmpegExe. cpp: Main project file. </P> <p> # include "stdafx. h "<br/> # include" libavformat/avformat. h "<br/> # include" libswscale/swscale. h "<br/> // # include <windows. h> </p> <p> # include <stdlib. h> <br/> # include <stdio. h> <br/> # include <string. h> <br/> # include <math. h> <br/> # include <SDL/SDL. h> </p> <p> # ifdef main <br/> # undef main <br/> # endif </p> <p> # define SDL_AUDIO_BUFFER_SIZE 1024 <br/> static int sws_flags = SWS_BICUBIC; </p> <p> int main (in T argc, char * argv []) <br/>{< br/> AVFormatContext * pFormatCtx; <br/> int I, videoStream (-1 ); <br/> AVCodecContext * pCodecCtx; <br/> AVCodec * pCodec; <br/> AVFrame * pFrame; <br/> AVPacket packet; <br/> int frameFinished; <br/> float aspect_ratio; <br/> AVCodecContext * aCodecCtx; <br/> SDL_Overlay * bmp; <br/> SDL_Surface * screen; <br/> SDL_Rect rect; <br/> SDL_Event event; <br/> if (argc <2) <br/>{< br/> fprintf (Stderr, "Usage: test/n"); <br/> exit (1); <br/>}</p> <p> av_register_all (); <br/> pFormatCtx = av_alloc_format_context (); <br/> if (! PFormatCtx) {<br/> fprintf (stderr, "Memory error/n"); <br/> exit (1 ); <br/>}< br/> if (av_open_input_file (& pFormatCtx, argv [1], NULL, 0, NULL )! 
= 0) <br/> return-1; // Couldn't open file <br/> if (av_find_stream_info (pFormatCtx) <0) <br/> return-1; // Couldn't find stream information <br/> // Dump information about file onto standard error <br/> dump_format (pFormatCtx, 0, argv [1], 0 ); </p> <p> // Find the first video stream <br/> for (I = 0; I <pFormatCtx-> nb_streams; I ++) <br/>{< br/> if (pFormatCtx-> streams [I]-> codec-> codec_type = CODEC_TYPE_VIDEO & vid EoStream <0) <br/>{< br/> videoStream = I; <br/>}< br/> if (videoStream =-1) <br/> return-1; // Didn't find a video stream </p> <p> // Get a pointer to the codec context for the video stream </p> <p> pCodecCtx = pFormatCtx-> streams [videoStream]-> codec; <br/> pCodec = avcodec_find_decoder (pCodecCtx-> codec_id); <br/> if (pCodec = NULL) <br/>{< br/> fprintf (stderr, "Unsupported codec! /N "); <br/> return-1; // Codec not found <br/>}< br/> // Open codec <br/> if (avcodec_open (pCodecCtx, pCodec) <0) <br/> return-1; // cocould not open codec </p> <p> // Allocate video frame <br/> pFrame = avcodec_alloc_frame (); </p> <p> uint8_t * buffer; <br/> int numBytes; <br/> // Determine required buffer size and allocate buffer <br/> numBytes = avpicture_get_size (PIX_FMT_RGB24, pCodecCtx-> width, <br/> pCodecCtx-> hei Ght); <br/> buffer = (uint8_t *) av_malloc (numBytes * sizeof (uint8_t )); </p> <p> if (SDL_Init (SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER) <br/>{< br/> fprintf (stderr, "cocould not initialize SDL-% s/n", SDL_GetError (); <br/> exit (1 ); <br/>}</p> <p> # ifndef _ DARWIN __< br/> screen = SDL_SetVideoMode (pCodecCtx-> width, pCodecCtx-> height, 0, 0); <br/> # else <br/> screen = SDL_SetVideoMode (pCodecCtx-> width, pCod EcCtx-> height, 24, 0); <br/> # endif <br/> if (! 
Screen) <br/>{< br/> fprintf (stderr, "SDL: cocould not set video mode-exiting/n"); <br/> exit (1 ); <br/>}</p> <p> bmp = SDL_CreateYUVOverlay (pCodecCtx-> width, pCodecCtx-> height, <br/> SDL_YV12_OVERLAY, screen ); </p> <p> static struct SwsContext * img_convert_ctx; <br/> if (img_convert_ctx = NULL) <br/>{< br/> img_convert_ctx = sws_getContext (pCodecCtx-> width, pCodecCtx-> height, <br/> pCodecCtx-> pix_fmt, <br/> PCodecCtx-> width, pCodecCtx-> height, <br/> PIX_FMT_YUV420P, <br/> sws_flags, NULL ); <br/> if (img_convert_ctx = NULL) <br/> {<br/> fprintf (stderr, "Cannot initialize the conversion context/n "); <br/> exit (1); <br/>}< br/> I = 0; <br/> while (av_read_frame (pFormatCtx, & packet)> = 0) <br/>{< br/> // Is this a packet from the video stream? <Br/> if (packet. stream_index = videoStream) <br/>{< br/> // Decode video frame <br/> avcodec_decode_video (pCodecCtx, pFrame, & frameFinished, <br/> packet. data, packet. size); <br/> // Did we get a video frame? <Br/> if (frameFinished) <br/> {<br/> // Convert the image from its native format to RGB <br/>/* sws_scale (img_convert_ctx, pFrame-> data, pFrame-> linesize, <br/> 0, pCodecCtx-> height, pFrameRGB-> data, pFrameRGB-> linesize ); */<br/> // Save the frame to disk <br/>/* if (++ I <= 5) <br/> SaveFrame (pFrameRGB, pCodecCtx-> width, pCodecCtx-> height, I); */<br/> SDL_LockYUVOverlay (bmp); <br/> AVPicture pict; <br/> pict. data [0] = bmp-> pixels [0]; <br/> pict. data [1] = bmp-> pixels [2]; <br/> pict. data [2] = bmp-> pixels [1]; </p> <p> pict. linesize [0] = bmp-> pitches [0]; <br/> pict. linesize [1] = bmp-> pitches [2]; <br/> pict. linesize [2] = bmp-> pitches [1]; </p> <p> // Convert the image into YUV format that SDL uses <br/>/* img_convert (& pict, PIX_FMT_YUV420P, <br/> (AVPicture *) pFrame, pCodecCtx-> pix_fmt, <br/> pCodecCtx-> width, pCodecCtx-> height); */<br/> sws_scale (img_convert_ctx, pFrame-> data, pFrame-> linesize, <br/> 0, pCodecCtx-> height, pict. data, pict. 
linesize); <br/> SDL_UnlockYUVOverlay (bmp); <br/> rect. x = 0; <br/> rect. y = 0; <br/> rect. w = pCodecCtx-> width; <br/> rect. h = pCodecCtx-> height; <br/> SDL_DisplayYUVOverlay (bmp, & rect); <br/> // Sleep (60 ); <br/>}</p> <p> // Free the packet that was allocated by av_read_frame <br/> av_free_packet (& packet ); </p> <p> SDL_PollEvent (& event); <br/> switch (event. type) <br/>{< br/> case SDL_QUIT: <br/> SDL_Quit (); <br/> exit (0); <br/> break; <br/> default: break; <br/>}< br/>}; <br/> // Free the RGB image <br/> av_free (buffer ); <br/> // av_free (pFrameRGB); <br/> // Free the YUV frame <br/> av_free (pFrame ); <br/> // Close the codec <br/> avcodec_close (pCodecCtx); <br/> // Close the video file <br/> av_close_input_file (pFormatCtx ); <br/> return 0; <br/>}< br/>