Stereo Camera Distance Measurement: Code Walkthrough
The stereo ranging workflow has four steps: camera calibration, stereo rectification, stereo matching, and depth computation. For details see: https://blog.****.net/qq_38236355/article/details/88933839
Camera calibration is usually done with MATLAB's ready-made calibration toolbox; the procedure is described here: https://blog.****.net/qq_38236355/article/details/89280633
Assuming calibration is already done, we now use the calibration results to compute stereo depth following the pipeline above. If the stereo rig is assembled from two arbitrary monocular cameras, the stereo rectification step described above is required; with an off-the-shelf stereo camera it can be skipped, because the vendor has already rectified the cameras.
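Before looking at the code, it helps to recall the relation the last step rests on: for a rectified pair with focal length f (in pixels) and baseline B, a pixel with disparity d = xl - xr lies at depth Z = f * B / d. Below is a minimal sketch of that relation only; the function name and the numbers in the comment are made up for illustration and are not part of the programs that follow.
// Depth from disparity for a rectified stereo pair: Z = f * B / d.
// f is the focal length in pixels, B is the baseline, d is the disparity in pixels;
// the result is in the same unit as B (e.g. mm).
float depthFromDisparity(float focalPx, float baselineMm, float disparityPx)
{
    if (disparityPx <= 0.0f) return 0.0f; // no valid match at this pixel
    return focalPx * baselineMm / disparityPx;
}
// e.g. f = 165 px, B = 117 mm, d = 20 px  ->  Z = 165 * 117 / 20 ≈ 965 mm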
1. Stereo rectification + BM stereo matching to generate the disparity map + depth map generation
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const int imagewidth = 752;
const int imageheight = 480; // camera resolution: 752 x 480
Size imageSize = Size(imagewidth, imageheight);
Mat grayImageL;
Mat grayImageR;
Mat rectifyImageL, rectifyImageR;
Mat disp, disp8; // disparity map (raw CV_16S and 8-bit version for display)
Rect vaildROIL;
Rect vaildROIR; // after rectification the images are cropped; these ROIs are the valid (cropped) regions
Mat mapLx, mapLy, mapRx, mapRy; // remap lookup tables
Mat Rl, Rr, Pl, Pr, Q; // rectification rotations R, projection matrices P, reprojection matrix Q
Mat xyz;
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);
// Camera parameters calibrated beforehand with MATLAB
// Left camera intrinsic matrix from MATLAB
Mat cameraMatrixL = (Mat_<float>(3, 3) << 165.9419, -0.2471, 372.8349, 0, 164.8281, 325.8182, 0, 0, 1);
// Left camera distortion coefficients from MATLAB
Mat distCoeffL = (Mat_<double>(5, 1) << -0.0218, -0.0014, -0.0104, -0.0025, -0.000024286);
// Right camera intrinsic matrix from MATLAB
Mat cameraMatrixR = (Mat_<double>(3, 3) << 168.2781, 0.1610, 413.2010, 0, 167.7710, 304.7487, 0, 0, 1);
// Right camera distortion coefficients from MATLAB
Mat distCoffR = (Mat_<double>(5, 1) << -0.0332, 0.0033, -0.0090, -0.0029, -0.00038324);
// Translation vector T from MATLAB
Mat T = (Mat_<double>(3, 1) << -117.2789, -0.8970, 0.9281);
// Rotation matrix R
Mat R = (Mat_<double>(3, 3) << 1.0000, -0.0040, -0.000052, 0.0040, 1.0000, -0.0041, 0.0000683, 0.0041, 1.0000);
// Stereo matching with the BM (block matching) algorithm
void stereo_match(int, void*)
{
bm->setBlockSize(2 * blockSize + 5); // SAD window size
bm->setROI1(vaildROIL);
bm->setROI2(vaildROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); // minimum disparity, default 0
bm->setNumDisparities(numDisparities * 16 + 16); // disparity search range: maximum minus minimum disparity
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio); // helps reject false matches
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
bm->compute(rectifyImageL, rectifyImageR, disp); // compute the disparity map
disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16) * 16.)); // the computed disparity is CV_16S; convert it to CV_8U for display
reprojectImageTo3D(disp, xyz, Q, true);
xyz = xyz * 16; // X/W, Y/W, Z/W returned by reprojectImageTo3D must be multiplied by 16 (i.e. W divided by 16) to obtain the correct 3D coordinates
imshow("disp image", disp8);
}
// Generate the depth map
void disp2Depth(int, void*)
{
Mat depthMap = Mat(disp.size(), CV_16UC1, 255);
int type = disp.type();
float fx = cameraMatrixL.at<float>(0, 0);
float fy = cameraMatrixL.at<float>(1, 1);
float cx = cameraMatrixL.at<float>(0, 2);
float cy = cameraMatrixL.at<float>(1, 2);
float baseline = 40; // baseline in mm (note: the calibrated T above gives |Tx| of roughly 117 mm; use the value that matches your rig)
if (type == CV_16S)
{
int height = disp.rows;
int width = disp.cols;
short* dispData = (short*)disp.data;
ushort* depthData = (ushort*)depthMap.data;
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
int id = i * width + j;
if (!dispData[id])
{
continue;
}
depthData[id] = ushort((float)fx * baseline / ((float)dispData[id] / 16.0f)); // BM outputs fixed-point disparity scaled by 16
}
}
}
else
{
cout << "please confirm dispImage's type" << endl;
}
Mat depthMap8;
depthMap.convertTo(depthMap8, CV_8U); // the computed depth map is CV_16U; values above 255 saturate when converted for display
imshow("depth image", depthMap8);
}
int main()
{
// Stereo rectification
stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, -1, imageSize, &vaildROIL, &vaildROIR);
initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
initUndistortRectifyMap(cameraMatrixR, distCoffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
// Load the images
grayImageL = imread("D:/opencv learning/diannao_left.png", 0);
grayImageR = imread("D:/opencv learning/diannao_right.png", 0);
if (grayImageR.empty() || grayImageL.empty())
{
printf("cannot load image...");
return -1;
}
imshow("imageL before rectify", grayImageL);
imshow("imageR before rectify", grayImageR);
// After remap, the left and right images are coplanar and row-aligned
remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
// Display the rectification results
imshow("imageL after rectify", rectifyImageL);
imshow("imageR after rectify", rectifyImageR);
// Show both images on one canvas
Mat canvas;
double sf;
int w, h;
sf = 600. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h, w * 2, CV_8UC1);
// Draw the left image onto the canvas
Mat canvaspart = canvas(Rect(w * 0, 0, w, h));
resize(rectifyImageL, canvaspart, canvaspart.size(), 0, 0, INTER_AREA);
Rect vroil(cvRound(vaildROIL.x*sf), cvRound(vaildROIL.y*sf), cvRound(vaildROIL.width*sf), cvRound(vaildROIL.height*sf));
cout << "Painted imageL" << endl;
// Draw the right image onto the canvas
canvaspart = canvas(Rect(w, 0, w, h));
resize(rectifyImageR, canvaspart, canvaspart.size(), 0, 0, INTER_LINEAR);
Rect vroiR(cvRound(vaildROIR.x*sf), cvRound(vaildROIR.y*sf), cvRound(vaildROIR.width*sf), cvRound(vaildROIR.height*sf));
cout << "Painted imageR" << endl;
// Draw horizontal lines to check row alignment
for (int i = 0; i < canvas.rows; i += 16)
{
line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(255), 1, 8); // canvas is single-channel, so draw white lines
}
imshow("rectified", canvas);
// Stereo matching
namedWindow("disp image", WINDOW_AUTOSIZE);
createTrackbar("blocksize:\n", "disp image", &blockSize, 8, stereo_match);
createTrackbar("UniquenessRatio:\n", "disp image", &uniquenessRatio, 50, stereo_match);
createTrackbar("NumDisparities:\n", "disp image", &numDisparities, 16, stereo_match);
stereo_match(0, 0);
disp2Depth(0, 0);
waitKey(0);
return 0;
}
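If you also want to read off the distance to a particular point, a mouse callback on the disparity window can look the clicked pixel up in the xyz matrix computed by stereo_match above. The following is only a sketch that reuses the globals from the listing; the callback name onMouse is my own, and it would be registered with setMouseCallback("disp image", onMouse, 0); after the window is created in main().
// Sketch: print the 3D position (in mm, matching the calibration units) of a clicked pixel.
static void onMouse(int event, int x, int y, int, void*)
{
    if (event == EVENT_LBUTTONDOWN && !xyz.empty())
    {
        Vec3f point = xyz.at<Vec3f>(y, x);      // X, Y, Z of the clicked pixel
        double dist = norm(point);              // Euclidean distance from the left camera
        cout << "xyz = " << point << ", distance = " << dist << " mm" << endl;
    }
}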
2. Off-the-shelf stereo camera, no rectification: BM stereo matching to generate the disparity map + depth map generation
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const int imagewidth = 752;
const int imageheight = 480; // camera resolution: 752 x 480
Size imageSize = Size(imagewidth, imageheight);
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9); // stereo matching with the Block Matching algorithm
Mat grayImageL, grayImageR, disp, disp8;
Rect vaildROIL = Rect(0, 0, 752, 480);
Rect vaildROIR = Rect(0, 0, 752, 480);
Mat cameraMatrixL = (Mat_<float>(3, 3) << 165.9419, -0.2471, 372.8349, 0, 164.8281, 325.8182, 0, 0, 1);
void stereo_match(int, void*); // stereo matching: generate the disparity map (xl - xr)
void disp2Depth(int, void*); // convert the disparity map to a depth map
int main()
{
grayImageL = imread("D:/opencv learning/liangdui_left.png", 0);
grayImageR = imread("D:/opencv learning/liangdui_right.png", 0);
if (grayImageL.empty() || grayImageR.empty())
{
printf("cannot load image...");
return -1;
}
imshow("input ImageL", grayImageL);
imshow("input ImageR", grayImageR);
// Show both images on one canvas
Mat canvas;
double sf;
int w, h;
sf = 600. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h, w * 2, CV_8UC1);
// Draw the left image onto the canvas
Mat canvaspart = canvas(Rect(w * 0, 0, w, h));
resize(grayImageL, canvaspart, canvaspart.size(), 0, 0, INTER_AREA);
Rect vroil(cvRound(vaildROIL.x*sf), cvRound(vaildROIL.y*sf), cvRound(vaildROIL.width*sf), cvRound(vaildROIL.height*sf));
cout << "Painted imageL" << endl;
// Draw the right image onto the canvas
canvaspart = canvas(Rect(w, 0, w, h));
resize(grayImageR, canvaspart, canvaspart.size(), 0, 0, INTER_LINEAR);
Rect vroiR(cvRound(vaildROIR.x*sf), cvRound(vaildROIR.y*sf), cvRound(vaildROIR.width*sf), cvRound(vaildROIR.height*sf));
cout << "Painted imageR" << endl;
// Draw horizontal lines to check row alignment
for (int i = 0; i < canvas.rows; i += 16)
{
line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(255), 1, 8); // canvas is single-channel, so draw white lines
}
imshow("rectified", canvas);
// Stereo matching
namedWindow("disparity", WINDOW_AUTOSIZE);
createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match); // trackbar for the SAD window size
createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match); // trackbar for the uniqueness ratio (percentage)
createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match); // trackbar for the number of disparities
stereo_match(0, 0);
disp2Depth(0, 0);
waitKey(0);
return 0;
}
void stereo_match(int, void*)
{
bm->setBlockSize(2 * blockSize + 5); // SAD window size; 5-21 works well
bm->setROI1(vaildROIL);
bm->setROI2(vaildROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); // minimum disparity
bm->setNumDisparities(numDisparities * 16 + 16);
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio); // uniquenessRatio mainly helps reject false matches
bm->setSpeckleRange(32);
bm->setSpeckleWindowSize(100);
bm->setDisp12MaxDiff(-1);
bm->compute(grayImageL, grayImageR, disp); // the input images must be grayscale; the output is the disparity map
disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.)); // the computed disparity is CV_16S; convert it to CV_8U for display
imshow("disparity", disp8);
}
void disp2Depth(int, void*)
{
Mat depthMap = Mat(disp.size(), CV_16UC1, 255);
int type = disp.type();
float fx = cameraMatrixL.at<float>(0, 0);
float fy = cameraMatrixL.at<float>(1, 1);
float cx = cameraMatrixL.at<float>(0, 2);
float cy = cameraMatrixL.at<float>(1, 2);
float baseline = 40; // baseline 40 mm
if (type == CV_16S)
{
int height = disp.rows;
int width = disp.cols;
short* dispData = (short*)disp.data;
ushort* depthData = (ushort*)depthMap.data;
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
int id = i * width + j;
if (!dispData[id])
{
continue;
}
depthData[id] = ushort((float)fx * baseline / ((float)dispData[id] / 16.0f)); // BM outputs fixed-point disparity scaled by 16
}
}
}
else
{
cout << "please confirm dispImage's type" << endl;
}
Mat depthMap8;
depthMap.convertTo(depthMap8, CV_8U); // the computed depth map is CV_16U; values above 255 saturate when converted for display
imshow("depth image", depthMap8);
}
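Because the program above never builds a Q matrix, there is no reprojectImageTo3D step; if you want the distance to a specific pixel you can apply Z = f * B / d directly to the BM output, remembering that StereoBM returns CV_16S disparities scaled by 16. A minimal sketch follows; the helper name depthAt is mine, and it assumes the globals disp and cameraMatrixL from the listing above together with the 40 mm baseline of this module.
// Sketch: metric depth (mm) of pixel (x, y) from the raw CV_16S disparity map.
float depthAt(int x, int y)
{
    short raw = disp.at<short>(y, x);           // fixed-point disparity, scaled by 16
    if (raw <= 0) return 0.0f;                  // no valid match at this pixel
    float d = raw / 16.0f;                      // disparity in pixels
    float fx = cameraMatrixL.at<float>(0, 0);   // focal length in pixels
    float baseline = 40.0f;                     // baseline of the stereo module, in mm
    return fx * baseline / d;                   // Z = f * B / d
}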
I took a pair of images of a computer; the results are shown below.
Original left and right images:
After rectification, projected onto the same plane:
Disparity map:
Depth map: