Mapping color image pixels to depth map point cloud coordinates with a camera that has a separate color lens (such as the LSR L)

Background

In deep learning model training, when the resolution of the camera used to generate depth maps does not match the resolution of the color image used for texture mapping, it is difficult to find the depth map coordinates that correspond to a pixel in the color image's reference frame, which in turn prevents obtaining the point cloud coordinates for that pixel.

To solve this problem, we can read the camera's intrinsic and extrinsic parameters and apply a series of transformations and interpolation operations to calculate the corresponding point cloud coordinates.
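Concretely, for a pixel $(u, v)$ in the depth map with depth value $Z$ (in mm), the mapping implemented by the sample below follows the standard pinhole model, where $K_{depth}$ and $K_{texture}$ are the intrinsic matrices of the two cameras and $(R, t)$ is the depth-to-texture extrinsic transformation:

$$P_{depth} = Z \cdot K_{depth}^{-1} \begin{bmatrix} u \\ v \\ 1 \end{bmatrix}, \qquad P_{texture} = R \, P_{depth} + t, \qquad \begin{bmatrix} u' \\ v' \\ 1 \end{bmatrix} \sim K_{texture} \, P_{texture}$$

Since several depth pixels may project near the same texture pixel while others receive none, the projected depth values are averaged over a small neighborhood; this averaging serves as the interpolation step.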

Solution

Below is a sample in C++.

The sample saves the original depth map, the original 2D color texture image, the depth map transformed into the 2D texture camera's reference frame, and the corresponding colored point cloud, and it prints the point cloud coordinates for a chosen texture pixel.

#include <iostream>
#include "MechEyeApi.h"
#include "SampleUtil.h"
#include "OpenCVUtil.h"
#include <opencv2/opencv.hpp>
#include <fstream>
#include <vector>
#include <cmath>

// Define a data structure for point clouds, including 3D coordinates and color information
struct PointCloud {
    cv::Point3f point;
    cv::Vec3b color;
};

int main() {

    // Create a MechEyeDevice object and connect to the camera
    mmind::api::MechEyeDevice device;
    mmind::api::DeviceIntri deviceIntri;
    if (!findAndConnect(device))
        return -1;

    // Capture depth maps and color images
    mmind::api::DepthMap depth;
    mmind::api::ColorMap color;
    showError(device.captureColorMap(color));
    showError(device.captureDepthMap(depth));

    // Save the depth maps and color images to files
    const std::string depthFile = "depth.tiff";
    const std::string colorFile = "color.png";
    saveMap(depth, depthFile);
    saveMap(color, colorFile);

    // Load the depth maps and color images
    cv::Mat depthImage = cv::imread(depthFile, cv::IMREAD_ANYDEPTH);
    cv::Mat textureImage = cv::imread(colorFile);

    // Get and define camera matrices
    showError(device.getDeviceIntri(deviceIntri));
    //printCalibParams(deviceIntri);
    cv::Mat textureCameraMatrix = cv::Mat::zeros(3, 3, CV_64FC1);
    cv::Mat depthCameraMatrix = cv::Mat::zeros(3, 3, CV_64FC1);
    cv::Mat rotationMatrix = cv::Mat::zeros(3, 3, CV_64FC1);
    cv::Mat translationVector = cv::Mat::zeros(3, 1, CV_64FC1);

    // Extract camera matrices from camera intrinsic parameters
    textureCameraMatrix.at<double>(0, 0) = deviceIntri.textureCameraIntri.cameraMatrix[0];
    textureCameraMatrix.at<double>(1, 1) = deviceIntri.textureCameraIntri.cameraMatrix[1];
    textureCameraMatrix.at<double>(0, 2) = deviceIntri.textureCameraIntri.cameraMatrix[2];
    textureCameraMatrix.at<double>(1, 2) = deviceIntri.textureCameraIntri.cameraMatrix[3];
    textureCameraMatrix.at<double>(2, 2) = 1;

    depthCameraMatrix.at<double>(0, 0) = deviceIntri.depthCameraIntri.cameraMatrix[0];
    depthCameraMatrix.at<double>(1, 1) = deviceIntri.depthCameraIntri.cameraMatrix[1];
    depthCameraMatrix.at<double>(0, 2) = deviceIntri.depthCameraIntri.cameraMatrix[2];
    depthCameraMatrix.at<double>(1, 2) = deviceIntri.depthCameraIntri.cameraMatrix[3];
    depthCameraMatrix.at<double>(2, 2) = 1;

    // Extract rotation matrix and translation vector
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j) {
            rotationMatrix.at<double>(i, j) = deviceIntri.depthToTexture.rotation[i][j];
        }
        translationVector.at<double>(i, 0) = deviceIntri.depthToTexture.translation[i];
    }

    // Create a buffer to store point cloud mapped to the texture image
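    // Channel 0 accumulates the Z values (in the texture camera frame) of all depth points
    // that project near a given texture pixel; channel 1 counts those samples so that the
    // average can be computed afterwards.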
    cv::Mat mappedImageBuffer(textureImage.size(), CV_32FC2, cv::Scalar(0, 0, 0));

    // Iterate through each pixel in the depth map, calculate point cloud, and map it to the texture image
    for (int y = 0; y < depthImage.rows; ++y) {
        for (int x = 0; x < depthImage.cols; ++x) {
            // Get the depth value
            float depthValue = depthImage.at<float>(y, x);

            // Skip invalid depth values (NaN or non-positive)
            if (std::isnan(depthValue) || depthValue <= 0)
                continue;

            // Calculate the depth point in the depth camera reference frame
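            // Standard pinhole back-projection: X = Z * (u - cx) / fx, Y = Z * (v - cy) / fy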
            cv::Mat depthPoint = (cv::Mat_<double>(3, 1) <<
                depthValue * (x - depthCameraMatrix.at<double>(0, 2)) / depthCameraMatrix.at<double>(0, 0),
                depthValue * (y - depthCameraMatrix.at<double>(1, 2)) / depthCameraMatrix.at<double>(1, 1),
                depthValue);

            // Use rotation matrix and translation vector to map the depth point to the texture camera reference frame
            cv::Mat texturePoint = rotationMatrix * depthPoint + translationVector;

            // Project the point cloud onto the texture image using the texture camera matrix
            cv::Mat textureImagePoint = textureCameraMatrix * texturePoint;
            cv::Point2f textureImageCoords(
                textureImagePoint.at<double>(0, 0) / textureImagePoint.at<double>(2, 0),
                textureImagePoint.at<double>(1, 0) / textureImagePoint.at<double>(2, 0));

            // Check if the projected coordinates are within the range of the texture image
            if (textureImageCoords.x >= 0 && textureImageCoords.x < textureImage.cols &&
                textureImageCoords.y >= 0 && textureImageCoords.y < textureImage.rows) {

                // Average nearby pixels to reduce sampling error
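                // Each depth point contributes to the 3x3 neighborhood around its projection;
                // this also fills small holes caused by the resolution difference between the two cameras.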
                int xCenter = std::round(textureImageCoords.x);
                int yCenter = std::round(textureImageCoords.y);
                for (int i = -1; i < 2; i++) {
                    for (int j = -1; j < 2; j++) {
                        if (xCenter + j >= 0 && xCenter + j < textureImage.cols &&
                            yCenter + i >= 0 && yCenter + i < textureImage.rows) {
                            mappedImageBuffer.at<cv::Vec2f>(yCenter + i, xCenter + j)[0] += texturePoint.at<double>(2, 0);
                            mappedImageBuffer.at<cv::Vec2f>(yCenter + i, xCenter + j)[1] += 1;
                        }
                    }
                }
            }
        }
    }

    // Create the mapping result for the depth map
    cv::Mat mappedImage(textureImage.size(), CV_32FC1, cv::Scalar(0));

    // Convert the point cloud mapping result to the final depth map
    for (int row = 0; row < mappedImageBuffer.rows; ++row) {
        for (int col = 0; col < mappedImageBuffer.cols; ++col) {
            if (mappedImageBuffer.at<cv::Vec2f>(row, col)[1] != 0)
                mappedImage.at<float>(row, col) = mappedImageBuffer.at<cv::Vec2f>(row, col)[0] / mappedImageBuffer.at<cv::Vec2f>(row, col)[1];
        }
    }

    // Display and save the mapped depth map (normalize for display, since the raw values are in mm)
    cv::Mat mappedImageDisplay;
    cv::normalize(mappedImage, mappedImageDisplay, 0, 1, cv::NORM_MINMAX);
    cv::imshow("depthintexture", mappedImageDisplay);
    cv::imwrite("depthintexture.tiff", mappedImage);
    std::cout << "Save depth image : depthintexture.tiff" << std::endl;

    // Create a container to store point cloud data
    std::vector<PointCloud> pointCloudData;
    cv::Mat inverseRotationMatrix;
    bool invertible = cv::invert(rotationMatrix, inverseRotationMatrix);
    if (!invertible) {
        std::cerr << "The rotation matrix is not invertible.\n";
        return -1;
    }
    bool hasPrintedX = false;
    // Iterate through each pixel in the interpolated depth map
    for (int y = 0; y < mappedImage.rows; ++y) {
        for (int x = 0; x < mappedImage.cols; ++x) {
            // Get the depth value
            float depthValueInTexture = mappedImage.at<float>(y, x);

            if (depthValueInTexture <= 0)
                continue;

            // Get the color of the corresponding pixel in the color image
            cv::Vec3b color = textureImage.at<cv::Vec3b>(y, x);

            // Calculate the point's coordinates in the texture camera reference frame
            cv::Mat depthPointInTexture = (cv::Mat_<double>(3, 1) <<
                depthValueInTexture * (x - textureCameraMatrix.at<double>(0, 2)) / textureCameraMatrix.at<double>(0, 0),
                depthValueInTexture * (y - textureCameraMatrix.at<double>(1, 2)) / textureCameraMatrix.at<double>(1, 1),
                depthValueInTexture);
            // To convert from millimeters (mm) to meters (m), scale each component by 0.001:
            /*  0.001 * depthValueInTexture * (x - textureCameraMatrix.at<double>(0, 2)) / textureCameraMatrix.at<double>(0, 0),
                0.001 * depthValueInTexture * (y - textureCameraMatrix.at<double>(1, 2)) / textureCameraMatrix.at<double>(1, 1),
                0.001 * depthValueInTexture); */

            // Apply the inverse transformation to map the point back to the depth camera reference frame
            cv::Mat pointInPattern = inverseRotationMatrix * (depthPointInTexture - translationVector);

            // Create a new point cloud and save the point cloud information
            PointCloud pointCloud;
            pointCloud.point = cv::Point3f(pointInPattern.at<double>(0, 0),
                pointInPattern.at<double>(1, 0),
                pointInPattern.at<double>(2, 0));
            pointCloud.color = color;
            pointCloudData.push_back(pointCloud);

            // Find point cloud coordinates corresponding to a specific pixel (e.g., 1000, 1000)
            if (x == 1000 && y == 1000 && !hasPrintedX) {
                std::cout << pointCloud.point.x << std::endl;
                std::cout << pointCloud.point.y << std::endl;
                std::cout << pointCloud.point.z << std::endl;
                hasPrintedX = true; 
            }
        }
    }
 
    // Save the point cloud data as a PLY file
    std::ofstream outFile("PointCloud.ply");
    if (outFile.is_open()) {
        outFile << "ply\n";
        outFile << "format ascii 1.0\n";
        outFile << "element vertex " << pointCloudData.size() << "\n";
        outFile << "property float x\n";
        outFile << "property float y\n";
        outFile << "property float z\n";
        outFile << "property uchar red\n";
        outFile << "property uchar green\n";
        outFile << "property uchar blue\n";
        outFile << "end_header\n";

        for (const auto& point : pointCloudData) {
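            // OpenCV stores pixels in BGR order, while the PLY header declares RGB, hence the reversed channel indices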
            outFile << point.point.x << " " << point.point.y << " " << point.point.z << " ";
            outFile << static_cast<int>(point.color[2]) << " " << static_cast<int>(point.color[1]) << " " << static_cast<int>(point.color[0]) << "\n";
        }
        std::cout << "Save PointCloud : PointCloud.ply" << std::endl;
        outFile.close();
    }
    else {
        std::cerr << "Failed to save the point cloud to the file.\n";
    }

    cv::waitKey(0);
    return 0;
}
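
If you need the 3D coordinate of an arbitrary color pixel rather than the hard-coded (1000, 1000), the per-pixel lookup can be factored into a small helper. The following is a sketch under the same assumptions as the sample above (depth in mm, matrices built the same way); the function name is illustrative:

#include <opencv2/opencv.hpp>

// Returns false if the pixel has no valid depth in the mapped depth image.
// mappedImage: depth map in the texture camera frame (CV_32FC1, mm)
// textureCameraMatrix: 3x3 intrinsics of the texture camera (CV_64FC1)
// inverseRotationMatrix, translationVector: depth-to-texture extrinsics, as in the sample
bool pointFromTexturePixel(const cv::Mat& mappedImage,
                           const cv::Mat& textureCameraMatrix,
                           const cv::Mat& inverseRotationMatrix,
                           const cv::Mat& translationVector,
                           int x, int y, cv::Point3f& out)
{
    float z = mappedImage.at<float>(y, x);
    if (z <= 0)
        return false;

    // Back-project the pixel into the texture camera reference frame
    cv::Mat p = (cv::Mat_<double>(3, 1) <<
        z * (x - textureCameraMatrix.at<double>(0, 2)) / textureCameraMatrix.at<double>(0, 0),
        z * (y - textureCameraMatrix.at<double>(1, 2)) / textureCameraMatrix.at<double>(1, 1),
        z);

    // Transform the point back into the depth camera reference frame
    cv::Mat q = inverseRotationMatrix * (p - translationVector);
    out = cv::Point3f(q.at<double>(0, 0), q.at<double>(1, 0), q.at<double>(2, 0));
    return true;
}

// Usage, with the variables from the sample above in scope:
// cv::Point3f p;
// if (pointFromTexturePixel(mappedImage, textureCameraMatrix,
//                           inverseRotationMatrix, translationVector, 1000, 1000, p))
//     std::cout << p << std::endl;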