Read the raw YUV data from the notebook camera and encode it to H.264 with libav (FFmpeg)

Source: Internet
Author: User

First, the program logic is mainly divided into two parts:

1. Use video4linux2 to read the camera's raw data in V4L2_PIX_FMT_YUYV format.

2. Convert the V4L2_PIX_FMT_YUYV data into planar YUV in AV_PIX_FMT_YUV422P format and store it in an AVFrame structure; send the AVFrame to the encoder, collect the encoded H.264 data stream, and write it to a file (a sketch of this packed-to-planar conversion follows right after this list).
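In essence, the conversion is the loop below. This is a minimal standalone sketch, not code from the project: it assumes an even frame width and tightly packed planes, while the full listing in the third section writes through the AVFrame's data and linesize fields instead.

/* Unpack packed YUYV (Y0 Cb Y1 Cr per pixel pair) into planar 4:2:2 (YUV422P). */
void yuyv_to_yuv422p(const unsigned char *yuyv, unsigned char *Y,
                     unsigned char *U, unsigned char *V, int width, int height)
{
	int x, y;
	for (y = 0; y < height; y++) {
		for (x = 0; x < width / 2; x++) {            /* one iteration per pixel pair */
			const unsigned char *p = yuyv + (y * width + 2 * x) * 2;
			Y[y * width + 2 * x]     = p[0];         /* Y0 */
			U[y * (width / 2) + x]   = p[1];         /* Cb, shared by the pair */
			Y[y * width + 2 * x + 1] = p[2];         /* Y1 */
			V[y * (width / 2) + x]   = p[3];         /* Cr, shared by the pair */
		}
	}
}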

Second, the main libraries used in the code:

1. The uvccapture-0.5 library (I modified its code directly) to get V4L2_PIX_FMT_YUYV data from the camera.

2. The libav library for the H.264 encoding.

Third, the main code:

/* main.cpp - Created on: 2014-3-2 - Author: xy */
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <linux/videodev2.h>
// ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
#ifdef __cplusplus
extern "C" {
#endif
#define __STDC_CONSTANT_MACROS
#ifdef _STDINT_H
#undef _STDINT_H
#endif
#include <stdint.h>
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#ifdef __cplusplus
}
#endif
// input device
#include "v4l2uvc.h"

int main()
{
	// v4l2 variables
	char *videodevice = "/dev/video0";
	int width = 640;   // 320
	int height = 480;  // 240
	int brightness = 0, contrast = 0, saturation = 0, gain = 0;
	int quality = 95;
	int format = V4L2_PIX_FMT_YUYV;
	struct vdIn *videoIn;
	int grabmethod = 1;

	// video encoder init
	avcodec_register_all();
	AVCodec *codec;
	AVCodecContext *c = NULL;
	int i, ret, x, y, got_output;
	FILE *f;
	AVFrame *frame;
	AVPacket pkt;
	uint8_t endcode[] = { 0, 0, 1, 0xb7 };
	char filename[] = "test.264";

	printf("Encode video file %s\n", filename);
	/* find the H.264 video encoder */
	codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec) {
		fprintf(stderr, "codec not found\n");
		exit(1);
	}
	c = avcodec_alloc_context3(codec);
	if (!c) {
		fprintf(stderr, "Could not allocate video codec context\n");
		exit(1);
	}

	/* put sample parameters */
	c->bit_rate = 400000;
	/* resolution must be a multiple of two */
	c->width = width;    // 352
	c->height = height;  // 288
	/* frames per second */
	c->time_base = (AVRational){1, 10 /*25*/};
	c->gop_size = 10;     /* emit one intra frame every ten frames */
	c->max_b_frames = 0;  // 1
	c->pix_fmt = AV_PIX_FMT_YUV422P;  // matches the 4:2:2 data from V4L2  // AV_PIX_FMT_YUV420P
	av_opt_set(c->priv_data, "preset", "slow", 0);
	av_opt_set(c->priv_data, "tune", "zerolatency", 0);  // keeps libav from buffering video frames
	/* Two settings determine whether libav buffers encoded video frames, i.e. the real-time behaviour:
	 * 1. av_opt_set(c->priv_data, "tune", "zerolatency", 0) -- the more important one.
	 * 2. c->max_b_frames: set to 0 so there are no B frames and encoding is very fast. */

	/* open it */
	if (avcodec_open2(c, codec, NULL) < 0) {
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}
	f = fopen(filename, "wb");
	if (!f) {
		fprintf(stderr, "could not open %s\n", filename);
		exit(1);
	}
	frame = av_frame_alloc();
	if (!frame) {
		fprintf(stderr, "Could not allocate video frame\n");
		exit(1);
	}
	frame->format = c->pix_fmt;
	frame->width = c->width;
	frame->height = c->height;
	/* the image can be allocated by any means and av_image_alloc() is
	 * just the most convenient way if av_malloc() is to be used */
	ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
	if (ret < 0) {
		fprintf(stderr, "could not allocate raw picture buffer\n");
		exit(1);
	}

	// v4l2 init
	videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
	if (init_videoIn(videoIn, (char *) videodevice, width, height, format, grabmethod) < 0)
		exit(1);
	printf("w:%d,h:%d\n", c->width, c->height);
	time_t timep;
	timep = time(NULL);
	printf("%s\n", asctime(gmtime(&timep)));

	for (i = 0; i < 100; i++) {  /* 100 frames is an assumed count; the loop bound is not legible in the source */
		// usleep(200000);
		// grab one frame of V4L2_PIX_FMT_YUYV data from the camera
		if (uvcGrab(videoIn) < 0) {
			fprintf(stderr, "Error grabbing\n");
			close_v4l2(videoIn);
			free(videoIn);
			exit(1);
		}
		unsigned char *yuyv = videoIn->framebuffer;

		// copy the data into the structure libav expects
		av_init_packet(&pkt);
		pkt.data = NULL;  // packet data will be allocated by the encoder
		pkt.size = 0;
		printf("debug!!!");
		fflush(stdout);
#if 0
		/* prepare a dummy image */
		/* Y */
		for (y = 0; y < c->height; y++) {
			for (x = 0; x < c->width; x++) {
				frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
			}
		}
		/* Cb and Cr */
		for (y = 0; y < c->height / 2; y++) {
			for (x = 0; x < c->width / 2; x++) {
				frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
				frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
			}
		}
#else
		/* prepare image: unpack packed YUYV into planar Y, Cb, Cr */
		/* Y */
		for (y = 0; y < c->height; y++) {
			for (x = 0; x < c->width; x++) {
				frame->data[0][y * frame->linesize[0] + x] = yuyv[y * frame->linesize[0] * 2 + 2 * x];
			}
		}
		/* Cb and Cr */
		for (y = 0; y < c->height; y++) {
			for (x = 0; x < c->width / 2; x++) {
				//frame->data[0][y * frame->linesize[0] + 2 * x]     = yuyv[y * frame->linesize[0] * 4 + 4 * x];
				//frame->data[0][y * frame->linesize[0] + 2 * x + 1] = yuyv[y * frame->linesize[0] * 4 + 4 * x + 2];
				frame->data[1][y * frame->linesize[1] + x] = yuyv[y * frame->linesize[1] * 4 + 4 * x + 1];
				frame->data[2][y * frame->linesize[2] + x] = yuyv[y * frame->linesize[2] * 4 + 4 * x + 3];
			}
		}
#endif
		frame->pts = i;
		/* encode the image */
		ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
		if (ret < 0) {
			fprintf(stderr, "Error encoding frame\n");
			exit(1);
		}
		if (got_output) {
			printf("Write frame %3d (size=%5d)\n", i, pkt.size);
			fwrite(pkt.data, 1, pkt.size, f);
			av_free_packet(&pkt);
		}
	}  // encoding

	/* get the delayed frames */
	for (got_output = 1; got_output; i++) {
		fflush(stdout);
		ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
		if (ret < 0) {
			fprintf(stderr, "Error encoding frame\n");
			exit(1);
		}
		if (got_output) {
			printf("Write frame %3d (size=%5d)\n", i, pkt.size);
			fwrite(pkt.data, 1, pkt.size, f);
			av_free_packet(&pkt);
		}
	}
	timep = time(NULL);
	printf("%s\n", asctime(gmtime(&timep)));
	/* add sequence end code to have a real MPEG file */
	fwrite(endcode, 1, sizeof(endcode), f);
	fclose(f);
	avcodec_close(c);
	av_free(c);
	av_freep(&frame->data[0]);
	av_frame_free(&frame);
	printf("111\n");
	free(videoIn);
}
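The output test.264 is a raw H.264 elementary stream. As a quick sanity check (this is not part of the original article), it can be played back directly with the ffplay tool from the same FFmpeg/libav build:

ffplay test.264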

Readers who want to build and run the code can refer to this article for the libav libraries used at compile time:

http://blog.csdn.net/xyyangkun/article/details/20456725

The change to make in the Makefile:

libavdir=/home/xy/mywork/av/libav-2014-03-02

Just point it at the location of your libav library.
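For reference, a hypothetical fragment of how such a variable is typically wired into the compile and link flags; the real Makefile ships with the project linked below, and the library list here is only an assumption (for example, whether -lx264 is needed depends on how libav was built):

libavdir = /home/xy/mywork/av/libav-2014-03-02

CFLAGS  += -I$(libavdir)
LDFLAGS += -L$(libavdir)/libavcodec -L$(libavdir)/libavformat \
           -L$(libavdir)/libavutil -L$(libavdir)/libavdevice
LDLIBS  += -lavdevice -lavformat -lavcodec -lavutil -lx264 -lpthread -lm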

The source of the code used to read the camera data:

http://staticwave.ca/source/uvccapture/uvccapture-0.5.tar.bz2

The full project, including the libraries used, can be downloaded here:

http://download.csdn.net/detail/xyyangkun/6990791


The project has been uploaded to GitHub:

https://github.com/xyyangkun/read_encoder_sender

Commit: 2e60986a438e731aa53ca7d54bc492c521e7b5bc
