Project address (a star is welcome):
https://github.com/979451341/AudioVideoStudyCodeTwo/tree/master/FFmpeg%E6%92%AD%E6%94%BE%E9%9F%B3%E4%B9%90%EF%BC%88%E4%BF%9D%E7%A8%8B%E5%BA%8F%E4%B8%8D%E6%AD%BB%EF%BC%89
This time FFmpeg decodes the audio and AudioTrack plays it, so this post is really about how Java and C cooperate.
We are going to call Java methods from C++. Here is the Java code, built around AudioTrack, that the C++ side will call:
private AudioTrack audioTrack;

// This method is called from C: create the AudioTrack for the given sample rate and channel count
public void createTrack(int sampleRateInHz, int nb_channals) {
    int channaleConfig; // channel configuration
    if (nb_channals == 1) {
        channaleConfig = AudioFormat.CHANNEL_OUT_MONO;
    } else if (nb_channals == 2) {
        channaleConfig = AudioFormat.CHANNEL_OUT_STEREO;
    } else {
        channaleConfig = AudioFormat.CHANNEL_OUT_MONO;
    }
    int buffersize = AudioTrack.getMinBufferSize(sampleRateInHz,
            channaleConfig, AudioFormat.ENCODING_PCM_16BIT);
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channaleConfig,
            AudioFormat.ENCODING_PCM_16BIT, buffersize, AudioTrack.MODE_STREAM);
    audioTrack.play();
}

// Called from C with the decoded audio data
public void playTrack(byte[] buffer, int lenth) {
    if (audioTrack != null && audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
        audioTrack.write(buffer, 0, lenth);
    }
}
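For context, the C++ code that follows lives inside a JNI entry point that a Java native method maps to. A minimal sketch of what that entry point can look like; the package, class, and method names here are made up and may differ from the repo's actual ones:

extern "C"
JNIEXPORT void JNICALL
Java_com_example_musicplay_MusicPlay_playSound(JNIEnv *env, jobject instance, jstring input_) {
    // convert the Java string (file path) to a C string
    const char *input = env->GetStringUTFChars(input_, 0);

    // ... all of the FFmpeg code shown below runs here ...

    env->ReleaseStringUTFChars(input_, input);
}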
Now let's look at the C++ code. First register the components, open the input, and find the audio stream:
av_register_all();
AVFormatContext *pFormatCtx = avformat_alloc_context();
// open the input file
if (avformat_open_input(&pFormatCtx, input, NULL, NULL) != 0) {
    LOGE("%s", "failed to open the input file");
    return;
}
// read the stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    LOGE("%s", "failed to get stream information");
    return;
}
int audio_stream_idx = -1;
for (int i = 0; i < pFormatCtx->nb_streams; ++i) {
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        LOGE("found the audio stream, index %d", i);
        audio_stream_idx = i;
        break;
    }
}
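As an aside, FFmpeg can do this stream scan for you. A sketch using av_find_best_stream, which returns the stream index or a negative error code:

// equivalent stream lookup in one call
int audio_stream_idx = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if (audio_stream_idx < 0) {
    LOGE("%s", "no audio stream found");
    return;
}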
Next, get and open the decoder:
// get the codec context
AVCodecContext *pCodecCtx = pFormatCtx->streams[audio_stream_idx]->codec;
// find the decoder
AVCodec *pCodex = avcodec_find_decoder(pCodecCtx->codec_id);
// open the decoder
if (avcodec_open2(pCodecCtx, pCodex, NULL) < 0) {
    LOGE("%s", "failed to open the decoder");
    return;
}
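Note that reading the context from stream->codec is deprecated in FFmpeg 3.1 and later. If you build against a newer FFmpeg, a sketch of the codecpar route (not the article's code) looks like this:

// newer FFmpeg: build the codec context from the stream's parameters
AVCodecParameters *par = pFormatCtx->streams[audio_stream_idx]->codecpar;
AVCodec *pCodex = avcodec_find_decoder(par->codec_id);
AVCodecContext *pCodecCtx = avcodec_alloc_context3(pCodex);
avcodec_parameters_to_context(pCodecCtx, par);
if (avcodec_open2(pCodecCtx, pCodex, NULL) < 0) {
    LOGE("%s", "failed to open the decoder");
    return;
}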
Allocate the buffers that hold the data before and after decoding:
// AVPacket holds the compressed data before decoding
AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));
// AVFrame holds the decoded data
AVFrame *frame = av_frame_alloc();
Now set a series of properties for the decoded sound, such as mono or stereo output, sample size, and sample rate. This is also the place where you could add effects to the sound.
// allocate a SwrContext for resampling (for details see
// http://blog.csdn.net/jammg/article/details/52688506)
SwrContext *swrContext = swr_alloc();
// output buffer
uint8_t *out_buffer = (uint8_t *) av_malloc(44100 * 2);
// output channel layout: stereo
uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
// output sample format: 16-bit
enum AVSampleFormat out_formart = AV_SAMPLE_FMT_S16;
// the output sample rate must match the input
int out_sample_rate = pCodecCtx->sample_rate;
// swr_alloc_set_opts converts the source PCM sample format into the one we want
swr_alloc_set_opts(swrContext, out_ch_layout, out_formart, out_sample_rate,
                   pCodecCtx->channel_layout, pCodecCtx->sample_fmt,
                   pCodecCtx->sample_rate, 0, NULL);
swr_init(swrContext);
// get the number of output channels (2 for stereo)
int out_channer_nb = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
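The 44100 * 2 bytes allocated for out_buffer is a hard-coded size that happens to be large enough for typical audio frames. A more careful sketch sizes the buffer from the decoder instead; the variable names and the 4096-sample fallback here are my assumptions:

// size the output buffer from the codec instead of hard-coding it
// (must run after swr_init; frame_size can be 0 for some codecs, hence the fallback)
int in_samples = pCodecCtx->frame_size > 0 ? pCodecCtx->frame_size : 4096;
int max_out_samples = swr_get_out_samples(swrContext, in_samples);
int max_out_bytes = av_samples_get_buffer_size(NULL, out_channer_nb,
                                               max_out_samples, AV_SAMPLE_FMT_S16, 1);
uint8_t *out_buffer = (uint8_t *) av_malloc(max_out_bytes);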
With the resampler ready, we call the Java methods from C++ by reflection through JNI:
// reflection: get the class of the Java player object
jclass david_player = env->GetObjectClass(instance);
// reflection: look up the createTrack method
jmethodID createAudio = env->GetMethodID(david_player, "createTrack", "(II)V");
// reflection: call createTrack(44100, out_channer_nb)
env->CallVoidMethod(instance, createAudio, 44100, out_channer_nb);
// look up the playTrack method for use inside the decode loop
jmethodID audio_write = env->GetMethodID(david_player, "playTrack", "([BI)V");
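One JNI caveat worth a sketch: a JNIEnv* is only valid on the thread it was handed to. The code above runs inside the JNI call itself, so env is safe to use; but if decoding ever moves to a native thread, cache the JavaVM and attach the thread first. All names below are mine, and attaching once per call is shown only for brevity (a real player attaches once per thread):

#include <jni.h>
#include <stdint.h>

static JavaVM *g_vm = NULL;

JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void *reserved) {
    g_vm = vm;  // cache the VM so native threads can obtain their own JNIEnv
    return JNI_VERSION_1_6;
}

// called on a native (non-Java) thread; player_ref must be a global reference
static void call_play_track(jobject player_ref, jmethodID audio_write,
                            const uint8_t *pcm, int size) {
    JNIEnv *env = NULL;
    if (g_vm->AttachCurrentThread(&env, NULL) == JNI_OK) {
        jbyteArray arr = env->NewByteArray(size);
        env->SetByteArrayRegion(arr, 0, size, (const jbyte *) pcm);
        env->CallVoidMethod(player_ref, audio_write, arr, size);
        env->DeleteLocalRef(arr);
        g_vm->DetachCurrentThread();
    }
}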
While decoding, hand the PCM data to AudioTrack:
int got_frame = 0;
while (av_read_frame(pFormatCtx, packet) >= 0) {
    if (packet->stream_index == audio_stream_idx) {
        // decode the compressed (e.g. MP3) frame into a PCM frame
        avcodec_decode_audio4(pCodecCtx, frame, &got_frame, packet);
        if (got_frame) {
            LOGE("decoding");
            // resample the frame into out_buffer
            swr_convert(swrContext, &out_buffer, 44100 * 2,
                        (const uint8_t **) frame->data, frame->nb_samples);
            // byte size of the converted samples
            int size = av_samples_get_buffer_size(NULL, out_channer_nb, frame->nb_samples,
                                                  AV_SAMPLE_FMT_S16, 1);
            // copy the PCM into a Java byte[] and pass it to playTrack
            jbyteArray audio_sample_array = env->NewByteArray(size);
            env->SetByteArrayRegion(audio_sample_array, 0, size, (const jbyte *) out_buffer);
            env->CallVoidMethod(instance, audio_write, audio_sample_array, size);
            env->DeleteLocalRef(audio_sample_array);
        }
    }
    av_packet_unref(packet);  // release the packet's data each iteration
}
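avcodec_decode_audio4 is deprecated from FFmpeg 3.1 on; for reference, a sketch of the same loop with the send/receive API that replaced it:

while (av_read_frame(pFormatCtx, packet) >= 0) {
    if (packet->stream_index == audio_stream_idx) {
        if (avcodec_send_packet(pCodecCtx, packet) == 0) {
            // one packet can yield zero or more frames
            while (avcodec_receive_frame(pCodecCtx, frame) == 0) {
                // resample with swr_convert and hand off to playTrack as above
            }
        }
    }
    av_packet_unref(packet);
}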
Finally, free the resources:
av_frame_free(&frame);
swr_free(&swrContext);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
env->ReleaseStringUTFChars(input_, input);
FFmpeg is only a tool for processing audio and video; it cannot play them by itself. That is why we need SurfaceView to display video and AudioTrack to play sound. OpenSL ES can also play sound, which a later post will cover.
Next time: how to change the sound of a video, which means doing both the audio decoding and the re-encoding.