我正在做一个需要高质量姿态估计的项目。因此,我试图使用OpenCV的charuco板来进行姿态估计。在此之前,我一直在使用大小为2x2的Aruco板,但估计出的姿态精度不够好。我使用分辨率为640x480的realSense D415相机完成了charuco板的姿态估计,工作正常。然而,当我将分辨率改为1280x720时,画在板上的坐标系开始完全随机地跳动。
估计charuco板的代码如下:
// Detect a 3x3 ChArUco board in `image`, draw the detections and the board
// axes onto it, and report the estimated pose.
//
// Parameters:
//  - image       : BGR frame; detection overlays are drawn onto it in place.
//  - matrix/coef : camera intrinsics + distortion. These MUST match the
//                  streaming resolution; a mismatch makes the estimated pose
//                  jitter (the root cause of the 1280x720 problem described
//                  in the surrounding post).
//  - rotation    : out — board rotation vector (Rodrigues) when a valid pose
//                  is found. BUGFIX: the original computed rvec but never
//                  wrote it to this output, so callers always read a stale
//                  value.
//  - arucoFound  : out — true iff a valid board pose was estimated.
//  - centerPix_x / centerPix_y are not written here. NOTE(review): callers
//    read them after this returns — confirm they are set elsewhere (e.g. by
//    the commented-out detect_aruco_markers path).
void ReconstructionSystem::detect_charuco_markers(cv::Mat& image, cv::Matx33f& matrix, cv::Vec<float, 5>& coef, int& centerPix_x, int& centerPix_y, cv::Vec3d& rotation, bool& arucoFound)
{
    cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(3, 3, 0.04f, 0.02f, dictionary);
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    //params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;

    // Single failure default instead of three duplicated else-branches.
    arucoFound = false;

    // Grayscale taken directly from `image` BEFORE anything is drawn on it,
    // so the extra deep copy (`copyImage`) of the original was unnecessary.
    // NOTE(review): the caller converts the frame to BGR, so COLOR_BGR2GRAY
    // would be strictly correct; kept as-is because the channel weighting
    // difference is irrelevant for marker detection.
    cv::Mat gray;
    cv::cvtColor(image, gray, cv::COLOR_RGB2GRAY);

    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f>> markerCorners;
    cv::aruco::detectMarkers(gray, board->getDictionary(), markerCorners, markerIds, params);

    // Require at least 4 detected ArUco markers before interpolating corners.
    if (markerIds.size() > 3) {
        cv::aruco::drawDetectedMarkers(image, markerCorners, markerIds);
        std::vector<cv::Point2f> charucoCorners;
        std::vector<int> charucoIds;
        cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, gray, board, charucoCorners, charucoIds, matrix, coef);
        // Require at least 4 chessboard corners for a stable pose estimate.
        if (charucoIds.size() > 3) {
            const cv::Scalar cornerColor(255, 0, 0);
            cv::aruco::drawDetectedCornersCharuco(image, charucoCorners, charucoIds, cornerColor);
            cv::Vec3d rvec, tvec;
            bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, matrix, coef, rvec, tvec);
            if (valid) {
                cv::drawFrameAxes(image, matrix, coef, rvec, tvec, 0.1f);
                rotation = rvec; // BUGFIX: propagate the pose to the output parameter
                arucoFound = true;
            }
        }
    }
    // cv::Ptr and cv::Mat are reference-counted; the original's explicit
    // `= NULL` and `.release()` calls before destruction were redundant.
}
上面的函数在这个while循环中调用:
//Variables for transformation matrices
// Accumulators for per-frame board position (meters, camera frame) and
// rotation (Rodrigues vector components); averaged/used by later code.
int centerPix_x = 0, centerPix_y = 0;
cv::Vec3d rotationVec;
cv::Matx33f rotation;
bool arucoWasFound = false;
std::vector<float> final_x, final_y, final_z;
std::vector<float> rotation_x, rotation_y, rotation_z;
// Intrinsics loaded from the calibration YAML files on disk.
cv::Matx33f matrix = get_cameraMatrix(path);
cv::Vec<float, 5> coef = get_distCoeffs(path);
const auto window_name = "Validation image";
cv::namedWindow(window_name, cv::WINDOW_AUTOSIZE);
// TODO Also add here that if we have iterated through X frames and not found Aruco, exit with failure
// Loop until a key is pressed, the window is closed, or 60 successful
// detections are collected. NOTE(review): `counter` is declared/initialized
// outside this snippet — confirm it starts at 0.
while (cv::waitKey(1) < 0 && cv::getWindowProperty(window_name, cv::WND_PROP_AUTOSIZE) >= 0 && counter < 60) {
rs2::frame f = sensorPtr->color_data.wait_for_frame();
// Query frame size (width and height)
const int w = f.as<rs2::video_frame>().get_width();
const int h = f.as<rs2::video_frame>().get_height();
// Wrap the RealSense buffer in a cv::Mat WITHOUT copying; `image` is only
// valid while `f` is alive.
cv::Mat image(cv::Size(w, h), CV_8UC3, (void*)f.get_data(), cv::Mat::AUTO_STEP);
cv::cvtColor(image, image, cv::COLOR_RGB2BGR);
//detect_aruco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
detect_charuco_markers(image, matrix, coef, centerPix_x, centerPix_y, rotationVec, arucoWasFound);
if (arucoWasFound)
{
// Back-project the board center pixel into 3D using the aligned depth frame.
// NOTE(review): centerPix_x/centerPix_y are not updated by
// detect_charuco_markers in the code shown above — confirm they are set
// somewhere, otherwise get_distance() always samples pixel (0, 0).
rs2::depth_frame depth = sensorPtr->depth_data.wait_for_frame();
rs2_intrinsics intrinsic = rs2::video_stream_profile(depth.get_profile()).get_intrinsics();
float pixel_distance_in_meters = depth.get_distance(centerPix_x, centerPix_y);
float InputPixelAsFloat[2];
InputPixelAsFloat[0] = centerPix_x;
InputPixelAsFloat[1] = centerPix_y;
float finalDepthPoint[3];
rs2_deproject_pixel_to_point(finalDepthPoint, &intrinsic, InputPixelAsFloat, pixel_distance_in_meters);
// Postion //
final_x.push_back(finalDepthPoint[0]);
final_y.push_back(finalDepthPoint[1]);
final_z.push_back(finalDepthPoint[2]);
// Rotation //
rotation_x.push_back(rotationVec[0]);
rotation_y.push_back(rotationVec[1]);
rotation_z.push_back(rotationVec[2]);
counter++;
}
cv::imshow(window_name, image);
}
cv::destroyWindow(window_name);
此外,这里是使用1280x720分辨率检测的图像。
这是一张分辨率640x480的检测图像。
如果有人知道为什么会这样,请告诉我:D
发布于 2022-11-01 10:25:36
正如所指出的,问题是摄像机的标定是在错误的分辨率下进行的——在我的例子中标定用的是640x480,而实际流的分辨率是1280x720。下面是我用来计算标定矩阵和畸变系数的代码。出错的一行是 cv::Size frameSize(_imageWidth, _imageWidth);(宽度被用了两次),正确写法应为 cv::Size frameSize(_imageWidth, _imageHeight);。
void ReconstructionSystem::camera_calibration()
{
std::string folder_501("\\Users\\Mikke\\Desktop\\Calibration\\501_images\\*.png");
std::string folder_309("\\Users\\Mikke\\Desktop\\Calibration\\309_images\\*.png");
for (int x = 0; x < 2; x++)
{
std::vector<cv::String> filenames;
std::string currentCam;
if (x == 0) currentCam = folder_501;
if (x == 1) currentCam = folder_309;
cv::glob(currentCam, filenames, false);
for each (std::string var in filenames)
{
printf("file: %s\n", var.c_str());
}
cv::Size patterSize(9, 6);
std::vector<std::vector<cv::Point2f>> q(filenames.size());
std::vector<std::vector<cv::Point3f>> Q;
int checkerboard[2] = { 10, 7 }; //size of checkerboard
int square_size = 27; //2.7 cm == 27mm
std::vector<cv::Point3f> objp;
for (int i = 1; i < checkerboard[1]; i++) {
for (int j = 1; j < checkerboard[0]; j++) {
objp.push_back(cv::Point3f(j * square_size, i * square_size, 0));
}
}
std::vector<cv::Point2f> imgPoint;
std::size_t i = 0;
for (auto const& f : filenames) {
std::cout << std::string(f) << std::endl;
cv::Mat img = cv::imread(filenames[i]);
cv::Mat gray;
cv::cvtColor(img, gray, cv::COLOR_RGB2GRAY);
bool patternFound = cv::findChessboardCorners(gray, patterSize, q[i], cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE + cv::CALIB_CB_FAST_CHECK);
if (patternFound) {
cv::cornerSubPix(gray, q[i], cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.001));
Q.push_back(objp);
}
// Display
cv::drawChessboardCorners(img, patterSize, q[i], patternFound);
cv::imshow("chessboard detection", img);
cv::waitKey(0);
i++;
}
cv::Matx33f K(cv::Matx33f::eye());
cv::Vec<float, 5> k(0, 0, 0, 0, 0);
std::vector<cv::Mat> rvecs, tvecs;
std::vector<double> stdIntrinsics, stdExtrinsics, perViewErrors;
int flags = cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT;
cv::Size frameSize(_imageWidth, _imageWidth);
std::cout << "calibrating..." << std::endl;
float error = cv::calibrateCamera(Q, q, frameSize, K, k, rvecs, tvecs, flags);
std::cout << "reprojection error = " << error << "\nK = \n" << K << "\nk=\n" << k << std::endl;
if (x == 0) {
std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_Mat_new.yml";
std::string path_coe = ("\\Users\\Mikke\\Desktop\\Calibration\\104122061501\\calibration_coef_new.yml");
saveData_mat(path_mat, K);
saveData_coef(path_coe, k);
}
if (x == 1) {
std::string path_mat = "\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_Mat_new.yml";
std::string path_coe = ("\\Users\\Mikke\\Desktop\\Calibration\\102122061309\\calibration_coef_new.yml");
saveData_mat(path_mat, K);
saveData_coef(path_coe, k);
}
}
}
https://stackoverflow.com/questions/74263897
复制相似问题