OpenCV+OpenGL 双目立体视觉三维重建

0
1
0
1. 云栖社区>
2. 博客>
3. 正文

OpenCV+OpenGL 双目立体视觉三维重建

season雅宁 2016-08-08 00:02:00 浏览1296

1.视差计算

1.1基于视差信息的三维重建

``````// Choose corresponding feature-point pairs in a stereo image pair for
// 3D reconstruction.
//   imgL, imgR : left / right images
//   ptsL, ptsR : output corresponding points (ptsL[i] matches ptsR[i])
void GetPair( Mat &imgL, Mat &imgR, vector<Point2f> &ptsL, vector<Point2f> &ptsR )
{
    Mat descriptorsL, descriptorsR;
    double tt = (double)getTickCount();

    // Detect keypoints in both images (factory mode).
    Ptr<FeatureDetector> detector = FeatureDetector::create( DETECTOR_TYPE );
    vector<KeyPoint> keypointsL, keypointsR;
    detector->detect( imgL, keypointsL );
    detector->detect( imgR, keypointsR );

    // Compute descriptors for the detected keypoints.
    Ptr<DescriptorExtractor> de = DescriptorExtractor::create( DESCRIPTOR_TYPE );
    //SurfDescriptorExtractor de(4,2,true);
    de->compute( imgL, keypointsL, descriptorsL );
    de->compute( imgR, keypointsR, descriptorsR );

    tt = ((double)getTickCount() - tt)/getTickFrequency(); // 620*555 pic, about 2s for SURF, 120s for SIFT

    // k-NN matching with k = 2 so Lowe's ratio test can be applied below.
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( MATCHER_TYPE );
    vector<vector<DMatch> > matches;
    matcher->knnMatch( descriptorsL, descriptorsR, matches, 2 ); // L:query, R:train

    vector<DMatch> passedMatches; // save for drawing
    vector<Point2f> ptsRtemp, ptsLtemp;
    for( size_t i = 0; i < matches.size(); i++ )
    {
        if (matches[i].size() < 2) continue; // ROBUSTNESS: knnMatch may return < 2 candidates
        // Ratio test: keep the best match only if it is clearly better than
        // the second best one.
        const DMatch &m1 = matches[i][0];
        const DMatch &m2 = matches[i][1];
        if (m1.distance < MAXM_FILTER_TH * m2.distance)
        {
            ptsRtemp.push_back(keypointsR[m1.trainIdx].pt);
            ptsLtemp.push_back(keypointsL[i].pt);
            passedMatches.push_back(m1);
        }
    }

    // Estimate the left->right homography with RANSAC and project the left
    // points through it, so outliers can be rejected by reprojection error.
    Mat HLR = findHomography( Mat(ptsLtemp), Mat(ptsRtemp), CV_RANSAC, 3 );
    cout<<"Homography:"<<endl<<HLR<<endl;
    Mat ptsLt;
    perspectiveTransform(Mat(ptsLtemp), ptsLt, HLR);

    int cnt = 0;
    for( size_t i1 = 0; i1 < ptsLtemp.size(); i1++ )
    {
        Point2f prjPtR = ptsLt.at<Point2f>((int)i1,0);
        // Inlier test; restriction on y is stricter for a rectified pair.
        // BUGFIX: use std::abs -- plain abs() picks the int overload and
        // truncates sub-pixel differences.
        if( std::abs(ptsRtemp[i1].x - prjPtR.x) < HOMO_FILTER_TH &&
            std::abs(ptsRtemp[i1].y - prjPtR.y) < 2)
        {
            // Reject points too close (L1 distance) to an accepted one.
            vector<Point2f>::iterator iter = ptsL.begin();
            for (; iter != ptsL.end(); iter++)
            {
                Point2f diff = *iter - ptsLtemp[i1];
                float dist = std::abs(diff.x) + std::abs(diff.y);
                if (dist < NEAR_FILTER_TH) break;
            }
            if (iter != ptsL.end()) continue;

            ptsL.push_back(ptsLtemp[i1]);
            ptsR.push_back(ptsRtemp[i1]);
            cnt++;
            // NOTE(review): cnt % 1 == 0 is always true; raise the modulus to
            // actually thin out the drawn matches.
            if (cnt % 1 == 0) matchesMask[i1] = 1; // don't want to draw too many matches
        }
    }

    // Visualize the surviving matches.
    // BUGFIX: the original listing truncated this call mid-argument-list.
    // Assumes matchesMask is a vector<char> sized to passedMatches -- confirm.
    Mat outImg;
    drawMatches(imgL, keypointsL, imgR, keypointsR, passedMatches, outImg,
                Scalar::all(-1), Scalar::all(-1), matchesMask);
    char title[50];
    // BUGFIX: matches.size() is size_t; cast for the %d conversion.
    sprintf_s(title, 50, "%.3f s, %d matches, %d passed", tt, (int)matches.size(), cnt);
    imshow(title, outImg);
    waitKey();
}``````

p.s. 源代码中的基于特征点的视差计算有点问题，还在调试中，希望有经验的大牛共同解决一下。

1.2基于块匹配的视差计算

``````// Roughly smooth the glitches on the disparity map (in place).
//   disp                : disparity map, modified in place, size preserved
//   numberOfDisparities : columns [0, numberOfDisparities) are skipped by the
//                         hole filling (no valid match exists there)
void FixDisparity( Mat_<float> & disp, int numberOfDisparities )
{
    Mat_<float> disp1;
    float lastPixel = 10;
    float minDisparity = 23; // algorithm parameter that can be modified
    // Hole filling: replace implausibly small disparities with the last
    // plausible value seen while scanning (carries across rows by design of
    // the original "rough" smoothing).
    for (int i = 0; i < disp.rows; i++)
    {
        for (int j = numberOfDisparities; j < disp.cols; j++)
        {
            if (disp(i,j) <= minDisparity) disp(i,j) = lastPixel;
            else lastPixel = disp(i,j);
        }
    }
    // Morphological open + close to remove speckles; replicate the border
    // first so the structuring element behaves sanely at the image edges.
    int an = 4;    // algorithm parameter that can be modified
    copyMakeBorder(disp, disp1, an,an,an,an, BORDER_REPLICATE);
    Mat element = getStructuringElement(MORPH_ELLIPSE, Size(an*2+1, an*2+1));
    morphologyEx(disp1, disp1, CV_MOP_OPEN, element);
    morphologyEx(disp1, disp1, CV_MOP_CLOSE, element);
    // BUGFIX: crop exactly the border that was added. The original used
    // Range(an, disp.rows-an) / Range(an, disp.cols-an), which shrank the
    // map by 2*an in each dimension on every call.
    disp = disp1(Range(an, disp.rows+an), Range(an, disp.cols+an)).clone();
}``````

2.计算世界坐标

`````` // Calculate 3D coordinates from corresponding stereo points.
// For rectified stereos: pointLeft.y == pointRight.y.
// The origin for both images is the top-left corner of the left image;
// the x-axis points right and the y-axis points downward on the image.
// The origin of the 3D world frame is the optical center of the left camera;
// object -> optical center -> image, the z value decreases.
//   ptsL, ptsR      : corresponding image points (ptsL[i] matches ptsR[i])
//   pts3D           : output 3D points; pts3D[i] is triangulated from
//                     ptsL[i]/ptsR[i] -- later stages index by this, so the
//                     one-to-one correspondence must be preserved
//   focalLenInPixel : focal length in pixels
//   baselineInMM    : stereo baseline in millimetres
//   img             : left image, used only for the visualization overlay
//   center3D,size3D : output bounding-box center and extents of the cloud

void StereoTo3D( vector<Point2f> ptsL, vector<Point2f> ptsR, vector<Point3f> &pts3D,
    float focalLenInPixel, float baselineInMM, Mat img,
    Point3f &center3D, Vec3f &size3D )
{
    vector<Point2f>::iterator iterL = ptsL.begin(),
        iterR = ptsR.begin();

    float xl, xr, ylr;
    float imgH = float(img.rows), imgW = float(img.cols);
    Point3f pt3D;
    float minX = 1e9, maxX = -1e9;
    float minY = 1e9, maxY = -1e9;
    float minZ = 1e9, maxZ = -1e9;

    Mat imgShow = img.clone();
    char str[100];
    // BUGFIX: explicit cast -- size() is size_t, avoid implicit narrowing.
    int ptCnt = (int)ptsL.size(), showPtNum = 30, cnt = 0;
    int showIntv = max(ptCnt/showPtNum, 1); // label only ~showPtNum points
    for ( ; iterL != ptsL.end(); iterL++, iterR++)
    {
        xl = iterL->x;
        xr = iterR->x; // need not add baseline
        ylr = (iterL->y + iterR->y)/2;

        // NOTE(review): xl == xr divides by zero and xl < xr flips the sign
        // of z (wrong correspondence). Pairs cannot simply be skipped here
        // without breaking the index correspondence with ptsL that the
        // triangulation/texturing stages rely on -- filter upstream instead.
        pt3D.z = -focalLenInPixel * baselineInMM / (xl-xr); // xl should be larger than xr for the left camera
        pt3D.y = -(-ylr + imgH/2) * pt3D.z / focalLenInPixel;
        pt3D.x = (imgW/2 - xl) * pt3D.z / focalLenInPixel;

        // Track the axis-aligned bounding box of the cloud.
        minX = min(minX, pt3D.x); maxX = max(maxX, pt3D.x);
        minY = min(minY, pt3D.y); maxY = max(maxY, pt3D.y);
        minZ = min(minZ, pt3D.z); maxZ = max(maxZ, pt3D.z);
        pts3D.push_back(pt3D);

        // Annotate a subsample of points on the visualization image.
        if ((cnt++)%showIntv == 0)
        {
            Scalar color = CV_RGB(rand()&64,rand()&64,rand()&64);
            sprintf_s(str, 100, "%.0f,%.0f,%.0f", pt3D.x, pt3D.y, pt3D.z);
            // Explicit float->int casts (the original relied on implicit
            // narrowing in the Point constructor).
            putText(imgShow, str, Point((int)(xl-13),(int)(ylr-3)), FONT_HERSHEY_SIMPLEX, .3, color);
            circle(imgShow, *iterL, 2, color, 3);
        }
    }

    imshow("back project", imgShow);
    waitKey();

    center3D.x = (minX+maxX)/2;
    center3D.y = (minY+maxY)/2;
    center3D.z = (minZ+maxZ)/2;
    size3D[0] = maxX-minX;
    size3D[1] = maxY-minY;
    size3D[2] = maxZ-minZ;
}``````

3.三角剖分

3.1 三角剖分简介

OpenCV使用Delaunay算法将平面分割成小的三角形区域（该三角形确保包括所有的分割点）开始不断迭代完成。在这种情况下，对偶划分就是输入的二维点集的Voronoi图表。这种划分可以用于对一个平面进行三维分段变换、形态变换、平面点的快速 定位以及建立特定的图结构（如NNG,RNG）。

3.2 Bowyer-Watson算法

1）建立初始三角网格：针对给定的点集V,找到一个包含该点集的矩形R,我们称R为辅助窗口。连接R的任意一条对角线，形成两个三角形，作为初始Delaunay三角网格。

2）逐点插入：假设目前已经有一个Delaunay三角网格T,现在在它里面再插入一个点P,需要找到该点P所在的三角形。从P所在的三角形开始，搜索该三角形的邻近三角形，并进行空外接圆检测。找到外接圆包含点P的所有的三角形并删除这些三角形，形成一个包含P的多边形空腔，我们称之为Delaunay空腔。然后连接P与Delaunay腔的每一个顶点，形成新的Delaunay三角网格。

3）删除辅助窗口R:重复步骤2）,当点集V中所有点都已经插入到三角形网格中后，将顶点包含辅助窗口R的三角形全部删除。

3.3 三角剖分代码分析

``````// STORAGE AND STRUCTURE FOR DELAUNAY SUBDIVISION
//
CvRect rect = { 0, 0, 600, 600 };  //Our outer bounding box
CvMemStorage* storage;    //Storage for the Delaunay subdivision
storage = cvCreateMemStorage(0);    //Initialize the storage
CvSubdiv2D* subdiv; //The subdivision itself
subdiv = init_delaunay( storage, rect);   //See this function below; it returns a CvSubdiv2D pointer
``````
init_delaunay函数如下。注意它并不是OpenCV自带的函数，而是一个封装了若干OpenCV调用的便利函数。
``````//INITIALIZATION CONVENIENCE FUNCTION FOR DELAUNAY SUBDIVISION
// Allocates a planar subdivision in `storage` and initializes it for
// Delaunay triangulation over `rect`.
CvSubdiv2D* init_delaunay(CvMemStorage* storage,CvRect rect) {
    CvSubdiv2D* subdiv;
    // BUGFIX: the original called cvInitSubdivDelaunay2D on an uninitialized
    // pointer; the subdivision must be allocated with cvCreateSubdiv2D first.
    subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
                               sizeof(CvSubdiv2DPoint),
                               sizeof(CvQuadEdge2D),
                               storage );
    cvInitSubdivDelaunay2D( subdiv, rect ); // rect sets the bounds
    return subdiv; // pointer to the allocated subdivision
}  ``````

``````CvPoint2D32f fp; //This is our point holder (container)
for( i = 0; i < as_many_points_as_you_want; i++ ) {
// However you want to set points. If your point set is not 32-bit floating
// point, convert it to CvPoint2D32f by one of the two methods listed below.
//
fp = your_32f_point_list[i];
cvSubdivDelaunay2DInsert( subdiv, fp );
}  ``````

1）通过宏cvPoint2D32f(double x,double y)
2）通过cxtype.h下的cvPointTo32f(CvPoint point)函数将整形点方便的转换为32位浮点型。

``````cvCalcSubdivVoronoi2D( subdiv ); // Fill out the Voronoi data in subdiv
cvClearSubdivVoronoi2D( subdiv ); // Clear the Voronoi data from subdiv  ``````

CvSubdiv2D结构如下：

``````/* CvSubdiv2D extends the graph fields (vertices + edges) with the
   subdivision-specific fields below. */
#define CV_SUBDIV2D_FIELDS() \
CV_GRAPH_FIELDS() \
int is_geometry_valid; \
CvSubdiv2DEdge recent_edge; \
CvPoint2D32f topleft; /* bounding corners -- presumably the rect passed at init; confirm */ \
CvPoint2D32f bottomright;
typedef struct CvSubdiv2D
{
CV_SUBDIV2D_FIELDS()
}
CvSubdiv2D;  ``````
``````/* A graph is a set of vertices plus a set of edges. */
#define CV_GRAPH_FIELDS()               \
CV_SET_FIELDS() /* set of vertices */   \
CvSet *edges;  /* set of edges    */  ``````
``````/* A CvSet is a CvSeq augmented with a list of recycled (free) nodes. */
#define CV_SET_FIELDS()                                            \
CV_SEQUENCE_FIELDS()             /*inherits from [#CvSeq CvSeq] */ \
struct CvSetElem* free_elems;   /*list of free nodes           */  ``````

``````// Delaunay-triangulate the 2D point set `pts`.
//   pts : input points; each inserted vertex stores its index in `pts` as id
//   img : used only to size the bounding box and to draw the triangulation
//   tri : output triangles, each a triple of indices into `pts`
//         (acceptance of a triangle is decided by isGoodTri)
void TriSubDiv( vector<Point2f> &pts, Mat &img, vector<Vec3i> &tri )
{
    CvSubdiv2D* subdiv;                            // the subdivision itself
    CvMemStorage* storage = cvCreateMemStorage(0); // storage for the Delaunay subdivision
    Rect rc = Rect(0,0, img.cols, img.rows);       // our outer bounding box

    // BUGFIX: cvCreateSubdiv2D takes the quad-edge struct size as its 4th
    // argument; the original listing omitted it.
    subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
                               sizeof(CvSubdiv2DPoint),
                               sizeof(CvQuadEdge2D),
                               storage );

    cvInitSubdivDelaunay2D( subdiv, rc ); // rect sets the bounds

    // Insert the points, remembering each point's index so triangles can be
    // reported as index triples.
    for (size_t i = 0; i < pts.size(); i++)
    {
        CvSubdiv2DPoint *pt = cvSubdivDelaunay2DInsert( subdiv, pts[i] );
        pt->id = (int)i;
    }

    int total = subdiv->edges->total;
    int elem_size = subdiv->edges->elem_size;

    // BUGFIX: the original loop body referenced an undeclared `edge`; the
    // edges live in a CvSeq, so they must be walked with a sequence reader.
    CvSeqReader reader;
    cvStartReadSeq( (CvSeq*)(subdiv->edges), &reader, 0 );

    Point buf[3];
    const Point *pBuf = buf;
    Vec3i verticesIdx;
    Mat imgShow = img.clone();

    srand( (unsigned)time( NULL ) );
    for( int i = 0; i < total; i++ )
    {
        CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

        if( CV_IS_SET_ELEM( edge )) // skip free slots in the edge set
        {
            CvSubdiv2DEdge t = (CvSubdiv2DEdge)edge;
            int iPointNum = 3;
            Scalar color = CV_RGB(rand()&255,rand()&255,rand()&255);

            // Collect the 3 vertices of the face on the left of this edge.
            int j;
            for( j = 0; j < iPointNum; j++ )
            {
                CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
                if( !pt ) break;
                buf[j] = pt->pt;
                verticesIdx[j] = pt->id;
                t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
            }
            if (j == iPointNum) // only complete faces; mirrors the original's `continue`
            {
                if (isGoodTri(verticesIdx, tri))
                {
                    polylines( imgShow, &pBuf, &iPointNum,
                               1, true, color,
                               1, CV_AA, 0);
                }

                // The reversed edge (+2 within the quad-edge) walks the face
                // on the opposite side of the same edge.
                t = (CvSubdiv2DEdge)edge + 2;
                for( j = 0; j < iPointNum; j++ )
                {
                    CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
                    if( !pt ) break;
                    buf[j] = pt->pt;
                    verticesIdx[j] = pt->id;
                    t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
                }
                if (j == iPointNum && isGoodTri(verticesIdx, tri))
                {
                    polylines( imgShow, &pBuf, &iPointNum,
                               1, true, color,
                               1, CV_AA, 0);
                }
            }
        }
        CV_NEXT_SEQ_ELEM( elem_size, reader ); // advance even on free/incomplete slots
    }

    //RemoveDuplicate(tri);
    char title[100];
    // BUGFIX: tri.size() is size_t; cast for the %d conversion.
    sprintf_s(title, 100, "Delaunay: %d Triangles", (int)tri.size());
    imshow(title, imgShow);
    waitKey();
}``````

4.三维重构

①深度图像中，物体的边界必需与图像中物体的边界对齐；
②在场景图中，深度图像要尽可能均勻和平滑，即对图像进行平滑处理。

``````// Build an OpenGL display list that renders the textured triangle mesh.
//   img       : texture source image (BGR); no longer modified in place
//   tri       : triangles as index triples; -1 marks an unused vertex slot
//   pts2DTex  : per-vertex texture coordinates in pixels, indexed by tri
//   pts3D     : per-vertex 3D coordinates, indexed by tri
//   center3D  : bounding-box center used to center the model
//   size3D    : bounding-box extents used to normalize the model scale
// Returns the display-list id, or 0 on failure.
GLuint Create3DTexture( Mat &img, vector<Vec3i> &tri,
    vector<Point2f> pts2DTex, vector<Point3f> &pts3D,
    Point3f center3D, Vec3f size3D )
{
    GLuint tex = glGenLists(1);
    int error = glGetError();
    if (error != GL_NO_ERROR)
        cout << "An OpenGL error has occured: " << gluErrorString(error) << endl;
    if (tex == 0) return 0;

    // BUGFIX: convert into a local copy instead of overwriting the caller's
    // image -- the original cvtColor(img, img, ...) silently turned the
    // caller's BGR image into RGB as a hidden side effect.
    Mat rgbImg, texImg;
    cvtColor(img, rgbImg, CV_BGR2RGB); // OpenGL textures are RGB
    resize(rgbImg, texImg, Size(512,512)); // seems no need to do this

    glNewList(tex, GL_COMPILE);

    vector<Vec3i>::iterator iterTri = tri.begin();
    //vector<Point3f>::iterator iterPts3D = pts3D.begin();
    Point2f pt2D[3];
    Point3f pt3D[3];

    glDisable(GL_BLEND);
    glEnable(GL_TEXTURE_2D);
    for ( ; iterTri != tri.end(); iterTri++)
    {
        Vec3i &vertices = *iterTri;
        int ptIdx = -1; // ROBUSTNESS: initialized (original read it uninitialized if the loop body changed)
        for (int i = 0; i < 3; i++)
        {
            ptIdx = vertices[i];
            if (ptIdx == -1) break; // incomplete triangle -> skip below
            // Texture coordinates are normalized by the ORIGINAL image size,
            // matching how pts2DTex was produced.
            pt2D[i].x = pts2DTex[ptIdx].x / img.cols;
            pt2D[i].y = pts2DTex[ptIdx].y / img.rows;
            // Center the model and scale it uniformly by the larger of the
            // x/y extents so it fits a roughly unit-sized view volume.
            pt3D[i] = (pts3D[ptIdx] - center3D) * (1.f / max(size3D[0],size3D[1]));
            //pt3D[i].z -= offset;
        }

        if (ptIdx != -1) // all three vertices were valid
        {
            MapTexTri(texImg, pt2D, pt3D);
        }
    }
    glDisable(GL_TEXTURE_2D);

    glEndList();
    return tex;
}``````

Cloth图像是重构效果比较好的一组：

参考文献：

http://blog.csdn.net/newthinker_wei/article/details/45598769
http://www.learnopencv.com/delaunay-triangulation-and-voronoi-diagram-using-opencv-c-python/

season雅宁
+ 关注