Project address (give it a star if it helps):
https://github.com/979451341/AudioVideoStudyCodeTwo/tree/master/FFmpeg%E7%BB%99%E8%A7%86%E9%A2%91%E6%8D%A2%E5%A3%B0%E9%9F%B3
A video file can contain three kinds of streams: a video stream, an audio stream, and a subtitle stream. Here we take the video stream out of video A and the audio stream out of music B, and mux them together into a new video.
Now let's walk through how the C code works.
Register the components, then open the MP4 and MP3 files and obtain their stream information.
av_register_all();//Inputif ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {//打开输入的视频文件 LOGE( "Could not open input file."); goto end;}if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {//获取视频文件信息 LOGE( "Failed to retrieve input stream information"); goto end;}if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {//打开输入的音频文件 LOGE( "Could not open input file."); goto end;}if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {//获取音频文件信息 LOGE( "Failed to retrieve input stream information"); goto end;}
Create an output file
//Outputavformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);//初始化输出码流的AVFormatContext。if (!ofmt_ctx) { LOGE( "Could not create output context\n"); ret = AVERROR_UNKNOWN; return -1;}ofmt = ofmt_ctx->oformat;
Take the video stream from the MP4 and the audio stream from the MP3:
Get an output from the input Avstream out_streamfor (i = 0; i < ifmt_ctx_v->nb_streams; i++) {//create output Avstream according To input Avstream if (ifmt_ctx_v->streams[i]->codec->codec_type==avmedia_type_video) {AVStream *in_stre AM = ifmt_ctx_v->streams[i]; Avstream *out_stream = Avformat_new_stream (Ofmt_ctx, IN_STREAM->CODEC->CODEC);//create circulation channel AVStream videoindex_v=i; if (!out_stream) {LOGE ("Failed Allocating output stream\n"); ret = Averror_unknown; Break } videoindex_out=out_stream->index; Copy the settings of Avcodeccontext if (Avcodec_copy_context (Out_stream->codec, In_stream->codec) < 0) { LOGE ("Failed to copy the context from input to output stream codec context\n"); Break } out_stream->codec->codec_tag = 0; if (Ofmt_ctx->oformat->flags & Avfmt_globalheader) out_stream->codec->flags |= CODEc_flag_global_header; Break }}for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {//create output avstream according to input avstream if (ifmt_ Ctx_a->streams[i]->codec->codec_type==avmedia_type_audio) {Avstream *in_stream = ifmt_ctx_a->streams[i ]; Avstream *out_stream = Avformat_new_stream (Ofmt_ctx, In_stream->codec->codec); Audioindex_a=i; if (!out_stream) {LOGE ("Failed Allocating output stream\n"); ret = Averror_unknown; Goto end; } audioindex_out=out_stream->index; Copy the settings of Avcodeccontext if (Avcodec_copy_context (Out_stream->codec, In_stream->codec) < 0) { LOGE ("Failed to copy the context from input to output stream codec context\n"); Goto end; } out_stream->codec->codec_tag = 0; if (Ofmt_ctx->oformat->flags & Avfmt_globalheader) out_stream->codec->flags |= CODec_flag_global_header; Break }}
Print the output file information, then open the output file and write its header:
LOGE("==========Output Information==========\n");av_dump_format(ofmt_ctx, 0, out_filename, 1);LOGE("======================================\n");//Open output fileif (!(ofmt->flags & AVFMT_NOFILE)) { if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {//打开输出文件。 LOGE( "Could not open output file ‘%s‘", out_filename); return -1; }}//Write file headerif (avformat_write_header(ofmt_ctx, NULL) < 0) { LOGE( "Error occurred when opening output file\n"); return -1;}
Next, packets are read and written on the fly. Both the video stream and the audio stream are read, and the progress of the two streams needs to remain consistent.
This is decided by comparing the timestamps of the two streams; the packet that is behind is read next and written into the output file.
//Get an AVPacket . av_compare_ts是比较时间戳用的。通过该函数可以决定该写入视频还是音频。 if(av_compare_ts(cur_pts_v,ifmt_ctx_v->streams[videoindex_v]->time_base,cur_pts_a,ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)
First, read a video packet and record its timestamp in cur_pts_v:
if (Av_read_frame (Ifmt_ctx, &PKT) >= 0) {do{In_stream = ifmt_ctx->streams[pkt . Stream_index]; Out_stream = ofmt_ctx->streams[stream_index]; if (pkt.stream_index==videoindex_v) {//fix:no PTS (Example:raw H.) H. "Bare Stream no pts, so you must write PTS manually Simple Write pts if (pkt.pts==av_nopts_value) {//write pts Avrational time_base1=in_stream->time_base; Duration between 2 frames (US) int64_t calc_duration= (double) av_time_base/av_q2d (in_stream->r_ Frame_rate); Parameters pkt.pts= (Double) (frame_index*calc_duration)/(double) (av_q2d (TIME_BASE1) *av_time_base); pkt.dts=pkt.pts; pkt.duration= (Double) calc_duration/(double) (av_q2d (TIME_BASE1) *av_time_base); Frame_index + +; } cur_pts_v=pkt.pts; Break }}while (Av_read_frame (Ifmt_ctx, &PKT) >= 0); }
Next, read an audio packet and record its timestamp in cur_pts_a:
if (Av_read_frame (Ifmt_ctx, &PKT) >= 0) {do{In_stream = ifmt_ctx->streams[pkt . Stream_index]; Out_stream = ofmt_ctx->streams[stream_index]; if (pkt.stream_index==audioindex_a) {//fix:no pts//simple Write PTS if (pkt.pts==av_nopts_value) {//write pts avrational time_base1=in_stream- >time_base; Duration between 2 frames (US) int64_t calc_duration= (double) av_time_base/av_q2d (in_stream->r_ Frame_rate); Parameters pkt.pts= (Double) (frame_index*calc_duration)/(double) (av_q2d (TIME_BASE1) *av_time_base); pkt.dts=pkt.pts; pkt.duration= (Double) calc_duration/(double) (av_q2d (TIME_BASE1) *av_time_base); frame_index++; } Cur_pts_a=pkt.pts; Break }}while (Av_read_frame (Ifmt_ctx, &PKT) >= 0); }else{break; }
Then we write the packet into the output file and release the pkt:
//Convert PTS/DTS pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; pkt.stream_index=stream_index; LOGE("Write 1 Packet. size:%5d\tpts:%lld\n",pkt.size,pkt.pts); //Write AVPacket 音频或视频裸流 if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) { LOGE( "Error muxing packet\n"); break; }
Av_free_packet (&PKT);
By looping continuously — reading a video packet, reading an audio packet, and writing into the output file — these three steps repeat until the new video is synthesized.
Finally, finish the output video and release the resources:
//Write file trailerav_write_trailer(ofmt_ctx);
#if USE_H264BSF
Av_bitstream_filter_close (H264BSFC);
#endif
#if USE_AACBSF
Av_bitstream_filter_close (AACBSFC);
#endif
end:avformat_close_input(&ifmt_ctx_v);avformat_close_input(&ifmt_ctx_a);/* close output */if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx->pb);avformat_free_context(ofmt_ctx);if (ret < 0 && ret != AVERROR_EOF) { LOGE( "Error occurred.\n"); return -1;}
The next article will also be about audio.
Android Audio Video In-depth 12 ffmpeg video replacement sound (with source download)