从背景中提取物体
假设一幅20*20的图像,通过算法标记出每一个像素点是属于前景还是背景,结构如下图
0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1
0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1
0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1
0 0 ……(之后的像素全部都是 0)
用0表示背景,用1表示前景
显然图中有两个前景物体,我现在想算出这两个前景物体的坐标和重心,
请各位高手指点,用什么样的算法计算?
[解决办法]
先做一个连通区域标记,相互连通的部分用同一个标号表示,这样你就能区分不同的前景物体了。
如果是matlab,自带了bwlabel这个函数。如果你用C,在网上搜一下bwlabel这个函数,有别人实现好的版本。
[解决办法]
CvSeq* contour = 0; CvMemStorage* storage; IplImage* src; IplImage* dst; CvMoments moments; CvMat *region; CvPoint pt1,pt2; double m00 = 0, m10, m01, mu20, mu11, mu02, inv_m00; // double a, b, c; int xc, yc; int height,width,step,channels; uchar *data; int i,j,k; // 第一条命令行参数确定了图像的文件名。 if( (src=cvLoadImage("echo.bmp", 0))!= 0) //if( (src=cvLoadImage("fbb.jpg", 0))!= 0) //if( argc == 2 && (src=cvLoadImage(argv[1], 0))!= 0) { height = src->height; width = src->width; step = src->widthStep; channels = src->nChannels; data = (uchar *)src->imageData; // 反转图像 for(i=0;i<height;i++) for(j=0;j<width;j++) for(k=0;k<channels;k++) { data[i*step+j*channels+k]=255-data[i*step+j*channels+k]; } dst = cvCreateImage( cvGetSize(src), 8, 3 ); storage = cvCreateMemStorage(0); cvThreshold( src, src, 100, 255, CV_THRESH_BINARY );//100 is the thredhold cvNot( src, src ); //cvFindContours( src, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0) ); cvFindContours( src, storage, &contour, sizeof(CvContour),CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); cvZero( dst ); // static int dd=0; for( ; contour != 0; contour = contour->h_next ) { //CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 ); CvScalar color = CV_RGB( 255, 0,0 ); /* 用1替代 CV_FILLED 所指示的轮廓外形 */ cvDrawContours( dst, contour, color, color, -1, CV_FILLED, 8,cvPoint(0,0) );//you can change 1 to CV_FILLED contour = cvApproxPoly( contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 3, 1 ); //CvRect* r = (CvRect*)cvGetSeqElem( contour,1); region=(CvMat*)contour; cvMoments( region, &moments,0 ); //cvMoments( &contour, &moments,0 ); // cvDrawContours( cnt_img, _contours, CV_RGB(255,0,0), CV_RGB(0,255,0), _levels, 3, CV_AA, cvPoint(0,0) ); CV_FILLED //////////////////////////////////////////////// ///////////////////////////////////////////////// m00 = moments.m00; m10 = moments.m10; m01 = moments.m01; mu11 = moments.mu11; mu20 = moments.mu20; mu02 = moments.mu02; //if( fabs(m00) < 
DBL_EPSILON )break; inv_m00 = 1. / m00; xc = cvRound( m10 * inv_m00 ); yc = cvRound( m01 * inv_m00 ); CvBox2D box = cvMinAreaRect2(contour,NULL); ///////////////// pt1.x=xc-1;pt1.y=yc; pt2.x=xc+1;pt2.y=yc; cvLine( dst, pt1, pt2, CV_RGB(0,255,0), 2, CV_AA, 0 ); pt1.x=xc;pt1.y=yc-1; pt2.x=xc;pt2.y=yc+1; cvLine( dst, pt1, pt2, CV_RGB(0,255,0), 2, CV_AA, 0 ); CString str; int dd=0; CString s1,s2,s3,s4,s5; s1.Format("%d",xc); s2.Format("%d",yc); s3.Format("%f",m00); s4.Format("%f",box.size.height); s5.Format("%f",box.size.width); // str.Format(_T("INSERT INTO radar(cx,cy) VALUES('%d','%d')"),xc,yc); CvPoint2D32f pt[4]; cvBoxPoints(box,pt); for(int i = 0;i<4;++i) { cvLine(dst,cvPointFrom32f(pt[i]),cvPointFrom32f(pt[((i+1)%4)?(i+1):0]),CV_RGB(255,255,255)); } } cvNamedWindow( "Components", 1 ); cvShowImage( "Components", dst ); cvWaitKey(0); }