Azure Kinect (Kinect DK) capture demo: open the device, grab synchronized color + depth frames, register the depth map to the color camera with the k4a transformation API, and display both streams with OpenCV.

#include <iostream>
#include <chrono>
#include <cmath>
#include <vector>
#include <memory>
#include <condition_variable>
#include <opencv2/opencv.hpp>
#include <string>
#include <fstream>
#include <sstream>
#include <algorithm>
#include "mot.h"                      // tracker header from the author's project (Mot below is unused in this snippet)
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <thread>
#include <dirent.h>
#include <k4a/k4a.hpp>
#include <k4a/k4atypes.h>

#define COLOR_NUM 18

using namespace std;
using namespace cv;

/*---- k4a-related declarations ----*/
// Note: the device is opened at static-initialization time, so the program
// fails before main() if no Azure Kinect is connected.
k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
k4a::capture capture;
k4a::image rgbImage;
k4a::image depthImage;
k4a::image transformed_depthImage;
cv::Mat cv_depth;        // registered depth, CV_16U, values in millimeters
cv::Mat cv_depth_8U;     // contrast-stretched copy used for display (still CV_16U, see below)
cv::Mat frame_depth;
Mat cv_rgbImage_with_alpha;
Mat cv_rgbImage_no_alpha;

// Leftovers from the larger tracking project; unused in this capture demo.
condition_variable con_v;
bool ready = false;
mutex mtx;
Mat frame_line;
Mot mot;
Rect last_rect;
vector<Point> line_vec;
Rect result;

Mat frame;

void get_img()
{
    const uint32_t device_count = k4a::device::get_installed_count();
    cout << "device_count : " << device_count << endl;

    // Configure 30 fps, BGRA color at 720p, NFOV unbinned depth, and only
    // deliver captures that contain both a color and a depth image.
    k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    config.camera_fps = K4A_FRAMES_PER_SECOND_30;
    config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
    config.color_resolution = K4A_COLOR_RESOLUTION_720P;
    config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
    //config.depth_mode = K4A_DEPTH_MODE_WFOV_2X2BINNED;
    config.synchronized_images_only = true;
    device.start_cameras(&config);

    // Get the camera calibration for the entire K4A device; it is used for all transformation functions.
    k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);
    k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);
    cout << "--------- 1 ----------" << endl;

    while (1)
    {
        //cap >> frame;
        // Block until a synchronized color + depth capture arrives.
        if (!device.get_capture(&capture))
            continue;

        rgbImage = capture.get_color_image();
        depthImage = capture.get_depth_image();

        // Wrap the BGRA color buffer (no copy), then drop the alpha channel.
        cv_rgbImage_with_alpha = cv::Mat(rgbImage.get_height_pixels(), rgbImage.get_width_pixels(),
                                         CV_8UC4, (void *) rgbImage.get_buffer(),
                                         static_cast<size_t>(rgbImage.get_stride_bytes()));
        cv::cvtColor(cv_rgbImage_with_alpha, cv_rgbImage_no_alpha, cv::COLOR_BGRA2BGR);

        // Register the depth image into the color camera's geometry, so depth
        // and color are pixel-aligned, then wrap it as a 16-bit Mat (no copy).
        transformed_depthImage = k4aTransformation.depth_image_to_color_camera(depthImage);
        cv_depth = cv::Mat(transformed_depthImage.get_height_pixels(), transformed_depthImage.get_width_pixels(),
                           CV_16U, (void *) transformed_depthImage.get_buffer(),
                           static_cast<size_t>(transformed_depthImage.get_stride_bytes()));

        // Stretch the depth values across the full 16-bit range for display.
        // Despite the name, cv_depth_8U stays CV_16U (normalize keeps the source
        // type unless a dtype is given); imshow divides 16-bit images by 256.
        normalize(cv_depth, cv_depth_8U, 0, 65535, NORM_MINMAX);
        cout << "rgbImage.size() : " << cv_rgbImage_no_alpha.size() << endl;
        cout << "cv_depth_8U.size() : " << cv_depth_8U.size() << endl;
        //cv_depth_8U.convertTo(cv_depth, CV_8U, 1);

        frame = cv_rgbImage_no_alpha.clone();
        frame_depth = cv_depth_8U.clone();
        imshow("rgb", frame);
        imshow("depth", frame_depth);
        waitKey(1);
    }
}

int main()
{
    get_img();
    return 0;
}
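
The normalize() call above only stretches the raw depth for on-screen contrast. If an actual 8-bit, color-mapped preview is wanted (which is what the commented-out convertTo line seems to be after), one minimal sketch is below. The 5000 mm clip range and the helper name depth_to_color_vis are assumptions chosen for a typical indoor scene, not part of the original code.

#include <opencv2/opencv.hpp>

// Turn a registered CV_16U depth map (millimeters) into an 8-bit,
// false-colored image for display. Assumes depths of interest are <= 5 m.
cv::Mat depth_to_color_vis(const cv::Mat &depth_mm)
{
    cv::Mat depth_8u, depth_vis;
    // Map 0..5000 mm linearly to 0..255; anything beyond 5 m saturates at 255.
    depth_mm.convertTo(depth_8u, CV_8U, 255.0 / 5000.0);
    // Apply a false-color map so near/far structure is easier to see.
    cv::applyColorMap(depth_8u, depth_vis, cv::COLORMAP_JET);
    // Pixels with no depth reading (value 0) take the colormap's lowest color.
    return depth_vis;
}

In the capture loop this could replace the imshow("depth", ...) line, e.g. imshow("depth", depth_to_color_vis(cv_depth)).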
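
Because depth_image_to_color_camera() reprojects the depth map into the color camera's geometry, the resulting CV_16U image is pixel-aligned with cv_rgbImage_no_alpha and its raw values are depths in millimeters (0 means no valid reading). A small sketch for sampling that value at a given color-image pixel follows; print_depth_at and the example coordinates are hypothetical, not from the original post.

#include <opencv2/opencv.hpp>
#include <cstdint>
#include <iostream>

// Print the metric depth at color-image pixel (x, y) from the registered depth map.
void print_depth_at(const cv::Mat &registered_depth_mm, int x, int y)
{
    if (x < 0 || y < 0 || x >= registered_depth_mm.cols || y >= registered_depth_mm.rows)
        return;                                           // out of bounds
    uint16_t d = registered_depth_mm.at<uint16_t>(y, x);  // row = y, col = x
    if (d == 0)
        std::cout << "no depth at (" << x << ", " << y << ")\n";
    else
        std::cout << "depth at (" << x << ", " << y << ") = " << d << " mm\n";
}

For example, print_depth_at(cv_depth, 640, 360) would report the depth at the center of a 1280x720 color frame.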
Tags: rgbImage, registration, Mat, get, Kinect, depthImage, Azure, include, cv
From: https://www.cnblogs.com/xiaochouk/p/17665511.html