现在的位置: 首页 > 综合 > 正文

OpenCV 人脸检测自学(7)

2013年08月09日 ⁄ 综合 ⁄ 共 5395字 ⁄ 字号 评论关闭

目前需要提炼下ml部分的接口。目的是以后方便选择用哪种分类器。还是一头雾水啊。。。学到哪先记录到哪。

一。以CvSVM为例。下面是CvSVM类的定义:

// Support Vector Machine classifier (legacy OpenCV 2.x C++ API).
// Declaration copied from ml/ml.hpp; CvSVM derives from CvStatModel like the
// other ML-module classifiers, so train/predict follow the common pattern.
class CV_EXPORTS_W CvSVM : public CvStatModel
{
public:
    // SVM type
    enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 };
    // SVM kernel type
    enum { LINEAR=0, POLY=1, RBF=2, SIGMOID=3 };
    // SVM params type
    enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 };
    CV_WRAP CvSVM();
    virtual ~CvSVM();


    // Convenience constructor: trains immediately on the given data.
    CvSVM( const CvMat* trainData, const CvMat* responses,
           const CvMat* varIdx=0, const CvMat* sampleIdx=0,
           CvSVMParams params=CvSVMParams() );


    virtual bool train( const CvMat* trainData, const CvMat* responses,
                        const CvMat* varIdx=0, const CvMat* sampleIdx=0,// optional feature/sample masks; rarely used in practice, defaults select everything
                        CvSVMParams params=CvSVMParams() );


    virtual float predict( const CvMat* sample, bool returnDFVal=false ) const;// single 1xN sample; returnDFVal=true returns the decision-function value instead of the class label (per OpenCV docs)
    virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const;// batch prediction: one row per sample, labels written to results


    CV_WRAP virtual int get_support_vector_count() const;
    virtual const float* get_support_vector(int i) const;
    virtual CvSVMParams get_params() const { return params; };
    CV_WRAP virtual void clear();


    static CvParamGrid get_default_grid( int param_id );


    virtual void write( CvFileStorage* storage, const char* name ) const;
    virtual void read( CvFileStorage* storage, CvFileNode* node );
    CV_WRAP int get_var_count() const { return var_idx ? var_idx->cols : var_all; }


protected:


    virtual bool set_params( const CvSVMParams& params );
    virtual bool train1( int sample_count, int var_count, const float** samples,
                    const void* responses, double Cp, double Cn,
                    CvMemStorage* _storage, double* alpha, double& rho );
    virtual bool do_train( int svm_type, int sample_count, int var_count, const float** samples,
                    const CvMat* responses, CvMemStorage* _storage, double* alpha );
    virtual void create_kernel();
    virtual void create_solver();


    virtual float predict( const float* row_sample, int row_len, bool returnDFVal=false ) const;


    virtual void write_params( CvFileStorage* fs ) const;
    virtual void read_params( CvFileStorage* fs, CvFileNode* node );


    CvSVMParams params;
    CvMat* class_labels;
    int var_all;
    float** sv;            // the support vectors themselves
    int sv_total;
    CvMat* var_idx;
    CvMat* class_weights;
    CvSVMDecisionFunc* decision_func;
    CvMemStorage* storage;


    CvSVMSolver* solver;
    CvSVMKernel* kernel;
};

SVM的接口基本上跟大部分分类器的差不多。train函数参数里一个是train_data,一个是response,最后一个是SVM对应的参数结构体。predict的参数就是一个1 x N的样本特征向量。下面是OpenCV提供的一个调用例子。

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>


using namespace cv;


// Minimal two-class SVM demo (from the OpenCV tutorial): trains a linear SVM
// on four 2-D points, paints the decision regions, and marks the training
// points and support vectors on a 512x512 image.
int main()
{
	// Data for visual representation
	int width = 512, height = 512;
	Mat image = Mat::zeros(height, width, CV_8UC3);

	// Set up training data: one +1 sample and three -1 samples.
	// labelsMat plays the role of the `responses` argument of CvSVM::train.
	float labels[4] = {1.0, -1.0, -1.0, -1.0};
	Mat labelsMat(4, 1, CV_32FC1, labels);

	// One row per sample; plays the role of the `trainData` argument.
	float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
	Mat trainingDataMat(4, 2, CV_32FC1, trainingData);

	// Set up SVM's parameters: C-support vector classification with a linear
	// kernel, stopping after 100 iterations or when eps drops below 1e-6.
	CvSVMParams params;
	params.svm_type    = CvSVM::C_SVC;
	params.kernel_type = CvSVM::LINEAR;
	params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

	// Train the SVM (varIdx and sampleIdx left empty = use everything).
	CvSVM SVM;
	SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

	// Show the decision regions given by the SVM: classify every pixel and
	// paint it green (+1) or blue (-1). NOTE: the feature vector is (i, j)
	// and the pixel is written at at<Vec3b>(j, i) = row j, col i — i.e. the
	// sample coordinates are interpreted as (x, y), matching the Point(x, y)
	// coordinates used for the training circles below.
	Vec3b green(0,255,0), blue (255,0,0);
	for (int i = 0; i < image.rows; ++i)
	{
		for (int j = 0; j < image.cols; ++j)
		{
			Mat sampleMat = (Mat_<float>(1,2) << i,j);
			float response = SVM.predict(sampleMat);

			if (response == 1)
				image.at<Vec3b>(j, i)  = green;
			else if (response == -1)
				image.at<Vec3b>(j, i)  = blue;
		}
	}

	// Show the training data: the +1 sample in black, the -1 samples in white.
	int thickness = -1; // negative thickness = filled circle
	int lineType = 8;
	circle( image, Point(501,  10), 5, Scalar(  0,   0,   0), thickness, lineType);
	circle( image, Point(255,  10), 5, Scalar(255, 255, 255), thickness, lineType);
	circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType);
	circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness, lineType);

	// Show support vectors as gray rings around the corresponding samples.
	thickness = 2;
	lineType  = 8;
	int c     = SVM.get_support_vector_count();

	for (int i = 0; i < c; ++i)
	{
		const float* v = SVM.get_support_vector(i);
		circle( image,  Point( (int) v[0], (int) v[1]),   6,  Scalar(128, 128, 128), thickness, lineType);
	}

	imwrite("result.png", image);        // save the image

	imshow("SVM Simple Example", image); // show it to the user
	waitKey(0);

	return 0;
}

                           
二。以 CascadeClassifier 为例,说明它与 ml.hpp 之间的关系。这个关系有点复杂,不像 SVM 那么标准。
在traincascade\boost.cpp中

// Entry point for training one strong classifier (one stage of the cascade).
bool CvCascadeBoost::train( const CvFeatureEvaluator* _featureEvaluator,// carries the integral images (sum, tilted), feature positions, etc.
                           int _numSamples,
                           int _precalcValBufSize, int _precalcIdxBufSize,
                           const CvCascadeBoostParams& _params )

这个是训练一个强分类器的接口,里面调用训练一个弱分类器的接口是:ml\ml.hpp

// Weak-classifier training interface from ml/ml.hpp, invoked by
// CvCascadeBoost::train; note it takes a prepared CvDTreeTrainData rather
// than the usual (train_data, responses) matrix pair.
bool
CvBoostTree::train( CvDTreeTrainData* _train_data,
                    const CvMat* _subsample_idx, CvBoost* _ensemble )

可是从ml.hpp文件中可以看到大部分从cvStatModel里面继承来的分类器的训练函数的结构应该是:

virtual bool train( const CvMat* train_data, [int tflag,] ..., const CvMat* responses, ...,
    [const CvMat* var_idx,] ..., [const CvMat* sample_idx,] ...
    [const CvMat* var_type,] ..., [const CvMat* missing_mask,] <misc_training_alg_params> ... )=0;

用括号括起来的是可选的参数,但是train_data的意思是一行是一个样本的所有特征(好像这么一行都叫特征向量。。。),行数是样本的数目。responses是响应值的矩阵,应该是一个n x 1的矩阵。
而在我们的例子里这两个参数都跑到CvDTreeTrainData* _train_data这里面去了。

1. 

// Allocates the integral-image storage (and the responses) for all
// numPos + numNeg samples, based on the feature type and window size.
featureEvaluator->init( (CvFeatureParams*)featureParams, numPos + numNeg, cascadeParams.winSize );

在CvCascadeBoost初始化_featureEvaluator的时候就已经根据选择的特征类型,正样本的大小把所有样本的积分图空间申请了。还有就是在这个初始化的时候也把对应的responses申请了。
2. 

// Computes the integral images and responses for the current training set.
bool CvCascadeClassifier::updateTrainingSet( double& acceptanceRatio)//featureEvaluator->setImage( img, isPositive ? 1 : 0, i );

在这把积分图和response都计算出来。
3. TrainCascade\boost.cpp

// Computes the feature values for all samples. The featureEvaluator state
// from steps 1 and 2 is now all inside `data`, which can therefore be passed
// straight to CvBoostTree::train.
data = new CvCascadeBoostTrainData( _featureEvaluator, _numSamples,
                                        _precalcValBufSize, _precalcIdxBufSize, _params );

在这里计算所有样本的特征值。这样上面第 1、2 步中 featureEvaluator 的信息也都在 data 中了,所以 data 直接送到 CvBoostTree::train 的接口中去了:

	CvCascadeBoostTree* tree = new CvCascadeBoostTree;
        if( !tree->train( data, subsample_mask, this ) )// trains one weak classifier (a boosted tree)
        {
            delete tree;
            break;
        }
        cvSeqPush( weak, &tree );// appends the weak classifier to the strong classifier's sequence

所以我们要封装train的时候需要把从父类vfr_machine_learning_package的接口的_train_data和response给处理下成CvDTreeTrainData的data,然后才能调用if( !tree->train( data, subsample_mask, this ) )

                           

抱歉!评论已关闭.