1. Window Management
// Create and position a new window
cvNamedWindow("win1", CV_WINDOW_AUTOSIZE);
cvMoveWindow("win1", 100, 100); // offset from the upper-left corner of the screen
// Load the image
IplImage *img = 0;
img = cvLoadImage(filename, CV_LOAD_IMAGE_COLOR);
if (!img)
    printf("Could not load image file: %s\n", filename);
// Display the image
cvShowImage("win1", img);
// Close the window
cvDestroyWindow("win1");
// Resize the window
cvResizeWindow("win1", 100, 100); // new width/height in pixels
2. Handling Mouse Events
// Define the mouse handler
// x, y are pixel coordinates relative to the upper-left corner
void mouseHandler(int event, int x, int y, int flags, void *param)
{
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        if (flags & CV_EVENT_FLAG_CTRLKEY)
            printf("Left button down with CTRL pressed\n");
        break;
    case CV_EVENT_LBUTTONUP:
        printf("Left button up\n");
        break;
    }
}
// Register the event handler
int mouseParam = 5;
cvSetMouseCallback("win1", mouseHandler, &mouseParam); // the third parameter may be NULL
Handling Keyboard Events
// Poll for keyboard input at intervals
int key;
key = cvWaitKey(10); // wait 10 ms for input
// Block the program until a key is pressed
int key;
key = cvWaitKey(0); // wait indefinitely for input
// Keyboard input loop
while (1) {
    key = cvWaitKey(10);
    if (key == 27) break; // ESC exits the loop
    switch (key) {
    case 'h':
        ...
        break;
    case 'i':
        ...
        break;
    }
}
Handling Trackbar (Slider) Events
// Define a trackbar handler
void trackbarHandler(int pos)
{
    printf("Trackbar position: %d\n", pos);
}
// Register the event handler
int trackbarVal = 25;
int maxVal = 100;
cvCreateTrackbar("bar1", "win1", &trackbarVal, maxVal, trackbarHandler);
// Obtain the current trackbar position
int pos = cvGetTrackbarPos("bar1", "win1");
// Set the trackbar position
cvSetTrackbarPos("bar1", "win1", 25);
3. Image Processing
Image memory allocation and release
// Allocate memory for a new image
// size: cvSize(width, height); depth: pixel depth; channels: number of channels per pixel — may be 1, 2, 3 or 4.
IplImage *cvCreateImage(CvSize size, int depth, int channels);
// Allocate a 1-channel byte image
IplImage *img1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
// Allocate a 3-channel float image
IplImage *img2 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 3);
// Release the image
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
cvReleaseImage(&img);
// Copy the image
IplImage *img1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
IplImage *img2;
img2 = cvCloneImage(img1); // note: an image obtained through cvCloneImage
// must be released with cvReleaseImage, otherwise memory leaks.
// Set/get the region of interest (ROI)
void cvSetImageROI(IplImage *image, CvRect rect);
void cvResetImageROI(IplImage *image);
CvRect cvGetImageROI(const IplImage *image);
// Set/get the channel of interest (COI)
void cvSetImageCOI(IplImage *image, int coi); // 0 = all channels
int cvGetImageCOI(const IplImage *image);
Image read/write
// Read an image from a file
// By default OpenCV converts the image to a three-channel color image.
IplImage *img = 0;
img = cvLoadImage(filename);
if (!img)
    printf("Could not load image file: %s\n", filename);
img = cvLoadImage(filename, flag);
// flag: > 0  force conversion to a three-channel color image
//       = 0  force conversion to a single-channel grayscale image
//       < 0  keep the number of channels as stored in the file
// Save an image
if (!cvSaveImage(outFileName, img))
    printf("Could not save: %s\n", outFileName);
Accessing image pixels
Indirect access (simple but slow)
// For single-channel byte images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
CvScalar s;
s = cvGet2D(img, i, j); // get the (j, i) pixel value; note that the coordinate order in cvGet2D/cvSet2D is the opposite of most other OpenCV functions: i is the y axis (height), j is the x axis (width)
printf("intensity = %f\n", s.val[0]);
s.val[0] = 111;
cvSet2D(img, i, j, s); // set the (j, i) pixel value
// For multi-channel byte/float images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 3);
CvScalar s;
s = cvGet2D(img, i, j); // get the (j, i) pixel value
printf("B = %f, G = %f, R = %f\n", s.val[0], s.val[1], s.val[2]);
s.val[0] = 111;
s.val[1] = 111;
s.val[2] = 111;
cvSet2D(img, i, j, s); // set the (j, i) pixel value
Direct access (fast)
// For single-channel byte images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
((uchar *)(img->imageData + i * img->widthStep))[j] = 111;
// For multi-channel byte images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
((uchar *)(img->imageData + i * img->widthStep))[j * img->nChannels + 0] = 111; // B
((uchar *)(img->imageData + i * img->widthStep))[j * img->nChannels + 1] = 112; // G
((uchar *)(img->imageData + i * img->widthStep))[j * img->nChannels + 2] = 113; // R
// For multi-channel float images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 3);
((float *)(img->imageData + i * img->widthStep))[j * img->nChannels + 0] = 111; // B
((float *)(img->imageData + i * img->widthStep))[j * img->nChannels + 1] = 112; // G
((float *)(img->imageData + i * img->widthStep))[j * img->nChannels + 2] = 113; // R
Direct pointer-based access
// For single-channel byte images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
int height = img->height;
int width = img->width;
int step = img->widthStep;
uchar *data = (uchar *)img->imageData;
data[i * step + j] = 111;
// For multi-channel byte images
IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
int height = img->height;
int width = img->width;
int step = img->widthStep;
int channels = img->nChannels;
uchar *data = (uchar *)img->imageData;
data[i * step + j * channels + k] = 111;
Image conversion
// Grayscale/color conversion of byte images:
cvConvertImage(src, dst, flags = 0);
//   src   = float/byte grayscale/color image
//   dst   = byte grayscale/color image
//   flags = CV_CVTIMG_FLIP    (flip the image vertically)
//           CV_CVTIMG_SWAP_RB (swap the R and B channels)
// Color image -> grayscale image:
// Using the OpenCV conversion:
cvCvtColor(cimg, gimg, CV_BGR2GRAY); // cimg -> gimg
// Using a direct conversion:
for (i = 0; i < cimg->height; i++) for (j = 0; j < cimg->width; j++)
    gimgA[i][j] = (uchar)(cimgA[i][j].b * 0.114 +
                          cimgA[i][j].g * 0.587 +
                          cimgA[i][j].r * 0.299);
// Conversion between different color spaces:
cvCvtColor(src, dst, code); // src -> dst
//   code = CV_<X>2<Y>
//   <X>/<Y> = RGB, BGR, GRAY, HSV, YCrCb, XYZ, Lab, Luv, HLS
Drawing commands
// Draw a rectangle:
// Draw a rectangle between the vertices (100,100) and (200,200), with red edges of width 1
cvRectangle(img, cvPoint(100, 100), cvPoint(200, 200), cvScalar(255, 0, 0), 1);
// Draw a circle:
// Center at (100,100), radius 20, green circumference of width 1
cvCircle(img, cvPoint(100, 100), 20, cvScalar(0, 255, 0), 1);
// Draw a line segment:
// A green line segment of width 1 between (100,100) and (200,200)
cvLine(img, cvPoint(100, 100), cvPoint(200, 200), cvScalar(0, 255, 0), 1);
// Draw a set of polylines:
CvPoint curve1[] = {10,10, 10,100, 100,100, 100,10};
CvPoint curve2[] = {30,30, 30,130, 130,130, 130,30, 150,10};
CvPoint *curveArr[2] = {curve1, curve2};
int nCurvePts[2] = {4, 5};
int nCurves = 2;
int isCurveClosed = 1;
int lineWidth = 1;
cvPolyLine(img, curveArr, nCurvePts, nCurves, isCurveClosed, cvScalar(0, 255, 255), lineWidth);
void cvPolyLine(CvArr *img, CvPoint **pts, int *npts, int contours, int is_closed,
                CvScalar color, int thickness = 1, int line_type = 8, int shift = 0);
//   img       the image
//   pts       array of pointers to the polyline vertex arrays
//   npts      array of vertex counts per polyline; also the size of the pts array
//   contours  the number of polylines
//   is_closed whether the polylines are closed; if closed, the function connects the start and end points
//   color     the line color
//   thickness the line width
//   line_type the line type; see cvLine
//   shift     number of fractional bits in the vertex coordinates
// Draw a set of filled polygons:
cvFillPoly(img, curveArr, nCurvePts, nCurves, cvScalar(0, 255, 255));
// cvFillPoly fills areas bounded by polygonal contours. The function can fill
// complex areas, e.g. areas with holes and self-intersecting areas.
void cvFillPoly(CvArr *img, CvPoint **pts, int *npts, int contours, CvScalar color, int line_type = 8, int shift = 0);
//   img       the image
//   pts       array of pointers to the polygon vertex arrays
//   npts      array of vertex counts per polygon
//   contours  the number of contours bounding the filled area
//   color     the polygon color
//   line_type the polygon edge line type
//   shift     number of fractional bits in the vertex coordinates
// Text annotation:
CvFont font;
double hScale = 1.0;
double vScale = 1.0;
int lineWidth = 1;
cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX | CV_FONT_ITALIC, hScale, vScale, 0, lineWidth);
cvPutText(img, "My comment", cvPoint(200, 400), &font, cvScalar(255, 255, 0));
4. Video Processing
// Initialize capture from a camera:
CvCapture *capture = cvCaptureFromCAM(0); // capture from video device #0
// Initialize capture from a video file:
CvCapture *capture = cvCaptureFromAVI("infile.avi");
// Capture a frame:
IplImage *img = 0;
if (!cvGrabFrame(capture)) // capture a frame
{
    printf("Could not grab a frame\n\7");
    exit(0);
}
img = cvRetrieveFrame(capture); // retrieve the captured frame
// Release the capture source:
cvReleaseCapture(&capture);
// Obtain capture device properties:
cvQueryFrame(capture); // this call is required before reading capture properties
int frameH = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
int frameW = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
int fps = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
int numFrames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
// Obtain frame position information:
float posMsec = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_MSEC);
int posFrames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);
float posRatio = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_AVI_RATIO);
// Set the position of the first frame to capture from a video file:
// start capturing from a relative position of 0.9 of the video file
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_AVI_RATIO, (double)0.9);
// Initialize the video writer:
CvVideoWriter *writer = 0;
int isColor = 1;
int fps = 25; // or 30
int frameW = 640; // 744 for FireWire cameras
int frameH = 480; // 480 for FireWire cameras
writer = cvCreateVideoWriter("out.avi", CV_FOURCC('P', 'I', 'M', '1'),
                             fps, cvSize(frameW, frameH), isColor);
// Write frames to the video file:
IplImage *img = 0;
int nFrames = 50;
for (i = 0; i < nFrames; i++) {
    cvGrabFrame(capture);           // capture a frame
    img = cvRetrieveFrame(capture); // retrieve the captured frame
    // img = cvQueryFrame(capture);
    cvWriteFrame(writer, img);      // add the frame to the file
}
// Release the video writer:
cvReleaseVideoWriter(&writer);