Gaussian mixture model for target tracking — OpenCV (legacy C API) implementation

Source: Internet
Author: User

#include <stdio.h>
#include <cv.h>
#include <cxcore.h>
#include <cvaux.h> /* required: declares the background statistics model API */

Void main ()
{
// Parameter initialization Definition
Iplimage * pframe = NULL;
Iplimage * pfrimg = NULL;
Iplimage * pbkimg = NULL;
Cvcapture * pcapture = NULL;
Iplimage * origin_rgb = NULL; // defines the storage of RGB space
Iplimage * origin_ycc = NULL; // define the storage to be converted to ycrcb Space
Iplimage * Lumi = NULL; // defines the storage space of the brightness component.

// Define the window
Cvnamedwindow ("Lumi", 1); // defines the name of the display window and displays the original video.
Cvmovewindow ("Lumi",); // defines the position of the displayed window
Cvnamedwindow ("background", 1); // displays videos in the converted format.
Cvmovewindow ("background", 360,0 );
Cvnamedwindow ("foreground", 1); // displays the video extracted from the brightness.
Cvmovewindow ("foreground", 690,0 );
 
// Read a video file as initialization
Pcapture = cvcapturefromfile ("video. Long. XviD. Avi ");
Pframe = cvqueryframe (pcapture );
Int I;
For (I = 0; I <2; I ++)
{
Pframe = cvqueryframe (pcapture );
}
// Convert RGB to brightness
Origin_rgb = cvcreateimage (cvsize (pframe-> width, pframe-> height), ipl_depth_8u, 3 );
Origin_ycc = cvcreateimage (cvsize (pframe-> width, pframe-> height), ipl_depth_8u, 3 );
Lumi = cvcreateimage (cvsize (pframe-> width, pframe-> height), ipl_depth_8u, 1 );
Pfrimg = cvcreateimage (cvsize (pframe-> width, pframe-> height), ipl_depth_8u, 1 );
Pbkimg = cvcreateimage (cvsize (pframe-> width, pframe-> height), ipl_depth_8u, 1 );
 
// Origin_rgb = cvcloneimage (pframe); // or cvcopy (pframe, origin_rgb, null );
Cvcopy (pframe, origin_rgb, null); // copy a video
Cvcvtcolor (origin_rgb, origin_ycc, cv_bgr2ycrcb); // converts the video format.
Cvsplit (origin_ycc, Lumi, null); // obtain the brightness component
 
// Set initial parameters for Gaussian Model
Cvgaussbgstatmodelparams * Params = new cvgaussbgstatmodelparams;
Params-> win_size = 50;
Params-> n_gauss = 3;
Params-> bg_threshold = 0.7;
Params-& gt; std_threshold = 3.5;
Params-> minarea = 15;
Params-> weight_init = 0.333;
Params-> variance_init = 30;
 
Cvbgstatmodel * bgmodel = cvcreategaussianbgmodel (Lumi, Params );
 
Int key =-1;
While (key! = 'Q ')
{
// Obtain the next video frame
Pframe = cvqueryframe (pcapture );
Cvcopy (pframe, origin_rgb, null); // copy a video
Cvcvtcolor (origin_rgb, origin_ycc, cv_bgr2ycrcb); // converts the video format.
Cvsplit (origin_ycc, Lumi, null); // obtain the brightness component
If (! Pframe)
Break;

// Update Gaussian Model
Cvupdatebgstatmodel (Lumi, bgmodel );
Pfrimg = bgmodel-> foreground; // foreground Image
Pbkimg = bgmodel-> background; // background image

// Reverse the image
Pbkimg-> origin = 1;
Pfrimg-> origin = 1;
Lumi-> origin = 1;

// Display the result
Cvshowimage ("Lumi", Lumi );
Cvshowimage ("background", pbkimg );
Cvshowimage ("foreground", pfrimg );
Key = cvwaitkey (10 );
}
// Cvwaitkey (0); // The callback function of the window. required. Otherwise, the display of the window is abnormal.
// Release window memory resources
Cvdestroywindow ("Lumi ");
Cvdestroywindow ("background ");
Cvdestroywindow ("foreground ");

// Release memory resources occupied by images
Cvreleaseimage (& Lumi );
Cvreleaseimage (& pbkimg );
Cvreleaseimage (& pfrimg );
Cvreleasecapture (& pcapture );
Cvreleasebgstatmodel (& bgmodel );
}

Create gaussian background model

Cvcreategaussianbgmodel (iplimage * first_frame, cvgaussbgstatmodelparams * parameters)
{
// Cvgaussbgmodel is defined in cvaux. h and is a struct.
Cvgaussbgmodel * bg_model = 0;

Cv_funcname ("cvcreategaussianbgmodel"); // defines the cvfuncname macro variable in cxerror. h.
// Cvfuncname is defined as the same as the function name cvcreategaussianbgmodel

_ Begin __; // start processing (it must be followed by the cv_funcname)

Double var_init;
Cvgaussbgstatmodelparams Params; // defines the initialization variable. The cvgaussbgstatmodelparams struct defined in cvaux. h
Int I, j, k, n, m, p;

// Init Parameters
If (parameters = NULL)
{
Params. win_size = cv_bgfg_mog_window_size; // cv_bgfg_mog_window_size = 200, and the relationship with the learning rate is 1/200 = 0.005
Params. bg_threshold = cv_bgfg_mog_background_threshold; // cv_bgfg_mog_background_threshold = 0.7 (determines whether it is a background threshold)
Params. std_threshold = cv_bgfg_mog_std_threshold; // cv_bgfg_mog_std_threshold = 2.5 (standard threshold)
Params. weight_init = cv_bgfg_mog_weight_init; // cv_bgfg_mog_weight_init = 0.05 (weight)
Params. variance_init = cv_bgfg_mog_sigma_init * cv_bgfg_mog_sigma_init; // cv_bgfg_mog_sigma_init = 30 (variance)
Params. minarea = cv_bgfg_mog_minarea; // cv_bgfg_mog_minarea = 15.f( do you know this ?)
Params. n_gauss = cv_bgfg_mog_ngaussians; // cv_bgfg_mog_ngaussians = 5 (number of Gaussian models)
}
Else
{
Params = * parameters;
}
// Cv_is_image is defined in cxtypes. H. It is estimated that the image frame is read.
// Cv_stsbadarg =-5, which indicates that the function is faulty or the input parameter is faulty.
If (! Cv_is_image (first_frame ))
Cv_error (cv_stsbadarg, "invalid or null first_frame parameter ");

// Cv_call is defined in cxerror. H. It is used to confirm whether the call is correct.
Cv_call (bg_model = (cvgaussbgmodel *) cvalloc (sizeof (* bg_model )));
Memset (bg_model, 0, sizeof (* bg_model ));
Bg_model-> type = cv_bg_model_mog; // The cv_bg_stat_model_fields () function defined in cvgaussbgmodel contains type, release, update, foreground, background, etc.
Bg_model-> release = (cvreleasebgstatmodel) icvreleasegaussianbgmodel;
Bg_model-> Update = (cvupdatebgstatmodel) icvupdategaussianbgmodel;

Bg_model-> Params = Params;

// Allocate a bucket
Cv_call (bg_model-> g_point = (cvgaussbgpoint *) cvalloc (sizeof (cvgaussbgpoint )*
(First_frame-> width * first_frame-> height) + 256); // This is the number of participating points and the space required to store these points.
Cv_call (bg_model-> background = cvcreateimage (cvsize (first_frame-> width,
First_frame-> height), ipl_depth_8u, first_frame-> nchannels); // allocate storage space to the background
Cv_call (bg_model-> foreground = cvcreateimage (cvsize (first_frame-> width,
First_frame-> height), ipl_depth_8u, 1); // allocate storage space to the foreground

Cv_call (bg_model-> storage = cvcreatemstorage (); // allocate storage space

// Initialization

Var_init = 2 * Params. std_threshold * Params. std_threshold;
Cv_call (bg_model-> g_point [0]. g_values =
(Cvgaussbgvalues *) cvalloc (sizeof (cvgaussbgvalues) * Params. n_gauss *
(First_frame-> width * first_frame-> height + 128); // allocate sufficient storage space to g_value.

// Program description
// G_values [0], g_values [1], g_values [2] Stores three Gaussian mixture model variables
// G_values []. Weight (weight) g_values []. Mean [] (mean) g_values []. Variance [] (variance)
// The specific arrangement is that each pixel has three models,
// Then three models of each pixel point
// The weight of model 0 is 1, the variance is the square of the standard deviation of 2, and the mean value is the value of the current pixel.
// The weight of model 1 is 0, the variance is the square of the standard deviation of 2, and the mean is 0
// The weight of model 2 is 0, the variance is the square of the standard deviation of 2, and the mean is 0
// G_point indicates the number of pixel points involved in gaussian background modeling.
For (I = 0, P = 0, n = 0; I <first_frame-> height; I ++)
{
For (j = 0; j <first_frame-> width; j ++, N ++)
{
Bg_model-> g_point [N]. g_values =
Bg_model-> g_point [0]. g_values + N * Params. n_gauss;
Bg_model-> g_point [N]. g_values [0]. Weight = 1; // the first value seen has weight one
Bg_model-> g_point [N]. g_values [0]. match_sum = 1;
For (m = 0; m <first_frame-> nchannels; m ++)
{
Bg_model-> g_point [N]. g_values [0]. Variance [m] = var_init;
Bg_model-> g_point [N]. g_values [0]. Mean [m] = (unsigned char) first_frame-> imagedata [p + M];
}
For (k = 1; k <Params. n_gauss; k ++)
{
Bg_model-> g_point [N]. g_values [K]. Weight = 0;
Bg_model-> g_point [N]. g_values [K]. match_sum = 0;
For (m = 0; m <first_frame-> nchannels; m ++ ){
Bg_model-> g_point [N]. g_values [K]. Variance [m] = var_init;
Bg_model-> g_point [N]. g_values [K]. Mean [m] = 0;
}
}
P + = first_frame-> nchannels;
}
}

Bg_model-> countframes = 0;

_ End __;

If (cvgeterrstatus () <0)
{
Cvbgstatmodel * base_ptr = (cvbgstatmodel *) bg_model;

If (bg_model & bg_model-> release)
Bg_model-> release (& base_ptr );
Else
Cvfree (& bg_model );
Bg_model = 0;
}

Return (cvbgstatmodel *) bg_model;
}

In summary, the function only assigns the fields of CvGaussBGModel: the common head added by CV_BG_STAT_MODEL_FIELDS() (type, release, update, foreground, background, ...), plus params, g_point, and countFrames. To customize it, you can copy the function and change the parameter initialization at the top.

 

 

Gaussian background model

General motion detection methods

Currently, moving-object detection is divided into two cases: fixed cameras and moving cameras. The best-known approach for detecting moving objects with a moving camera is the optical flow method: by solving partial differential equations for the optical flow field of the image sequence, the camera's motion can be estimated. The optical flow method can also be used with a fixed camera, but it is computationally expensive and often hard to run in real time, so here a Gaussian background model is used instead. When the camera is fixed, the background changes slowly, and most changes are caused by illumination and wind. By modeling the background, a given image can be separated into foreground and background; since the foreground generally corresponds to moving objects, this achieves moving-object detection.

Single distribution gaussian background model

The single-distribution gaussian background model assumes that for a background image, the brightness distribution of specific pixels satisfies the Gaussian distribution, that is, the brightness of the background image B and (x, y) points is satisfied:

$I_B(x, y) \sim N(u, d)$

In this way, each pixel attribute of our background model includes two parameters: Average U and variance D.

For a given image $G$, the point $(x, y)$ is classified as background if $\exp\!\left(-\frac{(I_G(x,y) - u(x,y))^2}{2 d^2}\right) > T$, and as foreground otherwise.

At the same time, as time changes, the background image will also change slowly. At this time, we need to constantly update the parameters of each pixel point.

$u_{t+1}(x, y) = a \cdot u_t(x, y) + (1 - a) \cdot I(x, y)$

Here, a is called an update parameter, indicating the speed at which the background changes. Generally, we do not update D (we found that the effect does not change much in the experiment ).

Http://blog.csdn.net/chenhongc/article/details/5755922 C realize http://caimingdong2008.blog.163.com/blog/static/50452429200811711059396/http://blog.csdn.net/chenhongc/article/details/5755878http://blog.csdn.net/jinshengtao/article/details/26278725 principle http://www.360doc.com/content/10/0922/23/10610_55649753.shtml tracking Summary

Gaussian mixture model for target tracking-CV implementation

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.