之前項目裡面需要解碼h264視頻,使用的是ffmpeg,解碼出來的是yuv,最好的顯示方法是通過opengl es 2.0來實現視頻的顯示。如果不會opengl es 2.0的話,那麼就必須先將yuv轉成rgb,然後再繪製圖像。而yuv2rgb我只知道兩種方法:1.通過ffmpeg裡面的sws_scale來實現,不過這種方法比較慢;2.這是我之前使用的方法,在 http://wss.co.uk/pinknoise/yuv2rgb/ 官網上有yuv2rgb的最佳化代碼,裡面有c實現和彙編實現兩種,使用彙編實現時yuv2rgb大約消耗10ms。
下面的rgb格式是PIX_FMT_RGB565
前提:拿到解碼以後的AVFrame *frame,frame裡面存放解碼後的yuv資料。下面是需要用到的變數:
[cpp]
/*
 * Decoder / conversion state.
 * Fixes vs. original: removed the duplicated declaration lines and
 * corrected the "VFrame" typo (the FFmpeg type is AVFrame).
 */
AVCodecContext *codec_ctx;           /* decoder context: width/height/pix_fmt */
uint8_t *fill_buffer;                /* destination buffer for RGB565 pixels */
struct SwsContext *img_convert_ctx;  /* libswscale conversion context */
AVFrame *frame_rgb;                  /* AVFrame wrapping fill_buffer as RGB565 */
AVFrame *frame;                      /* decoded YUV frame from the codec */
方法一:
[cpp]
/*
 * Method 1: convert the decoded YUV frame to RGB565 with libswscale.
 * Fixes vs. original: removed the duplicated copy of this snippet, added
 * NULL checks on av_malloc/sws_getContext, dropped redundant sizeof(uint8_t).
 *
 * NOTE(review): in production code, allocate fill_buffer and img_convert_ctx
 * ONCE (or use sws_getCachedContext) and reuse them across frames —
 * re-creating them per frame as shown here leaks memory.
 */
if (frame) {
    /* Bytes needed for one RGB565 picture at the decoder's resolution. */
    int numBytes = avpicture_get_size(PIX_FMT_RGB565,
                                      codec_ctx->width, codec_ctx->height);
    fill_buffer = (uint8_t *)av_malloc(numBytes);
    if (fill_buffer) {
        /* Point frame_rgb's data/linesize at fill_buffer. */
        avpicture_fill((AVPicture *)frame_rgb, fill_buffer, PIX_FMT_RGB565,
                       codec_ctx->width, codec_ctx->height);
        img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
                                         codec_ctx->pix_fmt,
                                         codec_ctx->width, codec_ctx->height,
                                         PIX_FMT_RGB565, SWS_BICUBIC,
                                         NULL, NULL, NULL);
        if (img_convert_ctx) {
            /* Convert the full picture height in one call. */
            sws_scale(img_convert_ctx, frame->data, frame->linesize,
                      0, codec_ctx->height,
                      frame_rgb->data, frame_rgb->linesize);
        }
    }
}
1、到官網
下載源碼,最新的版本yuv2rgb003
2、解壓檔案,能夠看到裡面有如下的檔案,包含了不同的格式互相轉換,由於我這邊是需要yuv420轉換為rgb565,所以就使用yuv420rgb565.s這個檔案:
[cpp]
root@zhangjie:/home/yuv2rgb# ls
COPYING yuv2rgbX.s yuv422rgb565c.c yuv444rgb565.s
out.yuv yuv420rgb565c.c yuv422rgb565.s yuv444rgb8888c.c
README yuv420rgb565.s yuv422rgb8888c.c yuv444rgb8888.s
test.c yuv420rgb8888c.c yuv422rgb8888.s yuv444rgb888c.c
yuv2rgb16tab.c yuv420rgb8888.s yuv422rgb888c.c yuv444rgb888.s
yuv2rgb555.s yuv420rgb888c.c yuv422rgb888.s
yuv2rgb.h yuv420rgb888.s yuv444rgb565c.c
3、首先將yuv420rgb565.s,yuv2rgb.h,yuv2rgb16tab.c這三個檔案拷貝到你們項目裡面去(注意是yuv420rgb565.s,因為我們要做的是yuv420轉rgb565)
4、在解碼用的.c檔案中,添加 #include "yuv2rgb.h" 和 #include "yuv2rgb16tab.c",然後在檔案裡添加如下聲明:
[cpp]
/* Coefficient lookup table shipped with the yuv2rgb package (yuv2rgb16tab.c).
   Fixes vs. original: removed the duplicated declarations, translated the
   parameter comments to English. */
extern const uint32_t yuv2rgb565_table[];

/* ARM-assembly YUV420 planar -> RGB565 converter (yuv420rgb565.s). */
extern void yuv420_2_rgb565(uint8_t *dst_ptr,        /* output RGB565 buffer */
                            const uint8_t *y_ptr,    /* Y plane: frame->data[0] */
                            const uint8_t *u_ptr,    /* U plane: frame->data[1] */
                            const uint8_t *v_ptr,    /* V plane: frame->data[2] */
                            int32_t width,           /* picture width in pixels */
                            int32_t height,          /* picture height in pixels */
                            int32_t y_span,          /* Y stride: frame->linesize[0] */
                            int32_t uv_span,         /* U/V stride: frame->linesize[1] */
                            int32_t dst_span,        /* dst stride in bytes: width * 2 */
                            const uint32_t *tables,  /* pass yuv2rgb565_table above */
                            int32_t dither);         /* dither pattern selector: 0..3 */
5、使用方法:
[cpp]
/*
 * Method 2 usage: one-time buffer setup, then per-frame conversion.
 * Fixes vs. original: removed the duplicated copy of this snippet and the
 * redundant "* sizeof(uint8_t)"; added comments.
 */
if (first == 0) {
    /* Allocate the RGB565 destination buffer once and wrap it in frame_rgb. */
    int numBytes = avpicture_get_size(PIX_FMT_RGB565,
                                      codec_ctx->width, codec_ctx->height);
    uint8_t *fill_buffer = (uint8_t *)av_malloc(numBytes);
    avpicture_fill(&frame_rgb, fill_buffer, PIX_FMT_RGB565,
                   codec_ctx->width, codec_ctx->height);
    first = 1;
}
/* Convert the decoded YUV420 planes into frame_rgb.
   dst stride is width * 2 bytes (RGB565 = 2 bytes/pixel); dither pattern 3. */
yuv420_2_rgb565(frame_rgb.data[0],
                frame->data[0], frame->data[1], frame->data[2],
                codec_ctx->width, codec_ctx->height,
                frame->linesize[0], frame->linesize[1],
                codec_ctx->width << 1, yuv2rgb565_table, 3);
6、如果從java傳進數組指標,然後從c傳出方法(jbyteArray output):
[cpp]
/*
 * Copy the converted RGB565 frame into the Java byte[] 'output'.
 * Fixes vs. original: removed the duplicated copy of this snippet, and pass
 * NULL (not JNI_FALSE) as the jboolean *isCopy argument — JNI_FALSE only
 * "worked" because it expands to 0.
 */
uint8_t *buffer = (uint8_t *)(*env)->GetByteArrayElements(env, output, NULL);
/* RGB565 uses 2 bytes per pixel. */
memcpy(buffer, frame_rgb.data[0], codec_ctx->width * codec_ctx->height * 2);
/* Mode 0: copy the data back into the Java array and release the buffer. */
(*env)->ReleaseByteArrayElements(env, output, (jbyte *)buffer, 0);
7、如上步驟就可以將yuv420資料轉換為rgb565資料,並且從c傳遞給java層。