I am using a dataset of images in which each pixel is a 16-bit unsigned int storing the depth of that pixel in mm. I am trying to visualize this as a greyscale depth image by doing the following:
cv::Mat depthImage;
depthImage = cv::imread("coffee_mug_1_1_1_depthcrop.png", CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR); // read the file, keeping the 16-bit depth values
depthImage.convertTo(depthImage, CV_32F); // convert the image data to float type
cv::namedWindow("window");

// find the maximum depth value in the image
float max = 0;
for (int i = 0; i < depthImage.rows; i++) {
    for (int j = 0; j < depthImage.cols; j++) {
        if (depthImage.at<float>(i, j) > max) {
            max = depthImage.at<float>(i, j);
        }
    }
}
cout << max << endl;

// scale every pixel so that the maximum value maps to 255
float divisor = max / 255.0;
cout << divisor << endl;
for (int i = 0; i < depthImage.rows; i++) {
    for (int j = 0; j < depthImage.cols; j++) {
        cout << depthImage.at<float>(i, j) << ", ";
        depthImage.at<float>(i, j) /= divisor;
        cout << depthImage.at<float>(i, j) << endl;
    }
}

cv::imshow("window", depthImage);
cv::waitKey(0);
However, it only shows two colours. I believe this is because all of the values are close together, i.e. in the range of 150-175, apart from the small values, which show up black.
Is there a way to normalize this data such that it will show various grey levels to highlight these small depth differences?
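For reference, this is roughly what I had in mind, though I am not sure it is the right approach. It is an untested sketch: it assumes I can use cv::minMaxLoc with a mask to ignore the zero "no reading" pixels, and Mat::convertTo to stretch the observed depth range linearly onto 0-255 in an 8-bit image before displaying it:

#include <opencv2/opencv.hpp>

int main()
{
    // load the 16-bit depth image without converting it to 8 bits
    cv::Mat depth = cv::imread("coffee_mug_1_1_1_depthcrop.png",
                               CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);

    // ignore the zero pixels (no depth reading) when finding the value range
    cv::Mat mask = depth > 0;
    double minVal, maxVal;
    cv::minMaxLoc(depth, &minVal, &maxVal, 0, 0, mask);

    // linearly map [minVal, maxVal] onto [0, 255] and store as an 8-bit image;
    // the zero pixels saturate to 0 and stay black
    cv::Mat grey;
    depth.convertTo(grey, CV_8U, 255.0 / (maxVal - minVal),
                    -minVal * 255.0 / (maxVal - minVal));

    cv::imshow("window", grey);
    cv::waitKey(0);
    return 0;
}

Is a linear min-max stretch like this the usual way to do it, or is there a built-in such as cv::normalize with NORM_MINMAX that handles this better?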