libfreenect2同时获取2台kinect2相机拍摄的图片——成功的代码在最后

#include <iostream>
#include <stdio.h>
#include <iomanip>
#include <time.h>
#include <signal.h>
#include <opencv2/opencv.hpp>
#pragma warning( disable : 4996 )
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/frame_listener_impl.h>
#include <libfreenect2/registration.h>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/logger.h>

using namespace std;
using namespace cv;
using namespace libfreenect2;

// Depth-processing backend options for the libfreenect2 packet pipeline
// (selected through the depthProcessor variable in main).
enum
{
	Processor_cl,  // OpenCL depth processing
	Processor_gl,  // OpenGL depth processing
	Processor_cpu  // CPU depth processing
};

bool protonect_shutdown = false; // Whether the running application should shut down.
bool save_images = false;        // Flag to indicate when to save images.
bool frames_ready = false;       // Flag to indicate that frames from both cameras are ready.

// SIGINT (Ctrl+C) handler: request a clean shutdown of the capture loop.
// The handler only sets a flag that the main loop polls.
void sigint_handler(int s)
{
	protonect_shutdown = true;
}

// Returns the current local time formatted as "YYYYMMDD_HHMMSS",
// used to build unique, sortable image file names.
// Returns an empty string if the local time cannot be determined.
std::string getCurrentTimeString() {
	std::time_t now = std::time(nullptr);
	std::tm *ltm = std::localtime(&now); // NOTE: not thread-safe; fine for this single-threaded loop
	if (ltm == nullptr)
		return std::string(); // defensive: localtime may fail and return null
	char buffer[80];
	std::strftime(buffer, sizeof(buffer), "%Y%m%d_%H%M%S", ltm);
	return std::string(buffer);
}

// Opens two Kinect v2 devices, shows their color/IR/depth streams, and on
// 'q' saves one snapshot from BOTH cameras under a single shared timestamp.
// ESC or Ctrl+C exits. Returns 0 on success, -1 when fewer than two devices
// are present or a device fails to open.
int main()
{
	// Define variables
	std::cout << "Hello World!" << std::endl;
	libfreenect2::Freenect2 freenect2;
	std::vector<libfreenect2::Freenect2Device*> devices;
	std::vector<libfreenect2::PacketPipeline*> pipelines;
	std::vector<libfreenect2::SyncMultiFrameListener*> listeners;
	std::vector<libfreenect2::FrameMap> frames;

	// Search and initialize sensors
	int numDevices = freenect2.enumerateDevices();
	if (numDevices < 2)
	{
		std::cerr << "Error: Not enough Kinect devices connected!" << std::endl;
		return -1;
	}

	// Configure transfer format
	int depthProcessor = Processor_cl;
	for (int i = 0; i < 2; ++i)
	{
		string serial = freenect2.getDeviceSerialNumber(i);
		std::cout << "SERIAL " << i + 1 << ": " << serial << std::endl;

		libfreenect2::PacketPipeline* pipeline = nullptr;
		if (depthProcessor == Processor_cpu)
		{
			pipeline = new libfreenect2::CpuPacketPipeline();
		}
		else if (depthProcessor == Processor_gl) // if support gl
		{
#ifdef LIBFREENECT2_WITH_OPENGL_SUPPORT
			pipeline = new libfreenect2::OpenGLPacketPipeline();
#else
			std::cout << "OpenGL pipeline is not supported!" << std::endl;
#endif
		}
		else if (depthProcessor == Processor_cl) // if support cl
		{
			// NOTE(review): OpenCL was requested but the OpenGL pipeline is
			// created here (the OpenCL variant was commented out); kept as-is.
			pipeline = new libfreenect2::OpenGLPacketPipeline();
		}

		if (pipeline)
		{
			libfreenect2::Freenect2Device* dev = freenect2.openDevice(serial, pipeline);
			if (dev == nullptr)
			{
				std::cerr << "Failure opening device " << i + 1 << "!" << std::endl;
				return -1;
			}
			devices.push_back(dev);
			pipelines.push_back(pipeline);

			libfreenect2::SyncMultiFrameListener* listener = new libfreenect2::SyncMultiFrameListener(
				libfreenect2::Frame::Color |
				libfreenect2::Frame::Depth |
				libfreenect2::Frame::Ir);
			listeners.push_back(listener);
			frames.push_back(libfreenect2::FrameMap());

			dev->setColorFrameListener(listener);
			dev->setIrAndDepthFrameListener(listener);
			dev->start();
		}
	}

	signal(SIGINT, sigint_handler);
	protonect_shutdown = false;

	// Create windows
	cv::namedWindow("rgb1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("rgb2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth2", WND_PROP_ASPECT_RATIO);

	// Define save directory
	std::string save_directory = "C:\\A0fangfang\\shiyan\\9.2\\";

	// Loop to receive frames
	while (!protonect_shutdown)
	{
		// Deep copies of this pass's frames, one entry per camera, so both
		// cameras can be written together after all frames were captured.
		std::vector<cv::Mat> rgbmats, irmats, depthmats;

		for (size_t i = 0; i < devices.size(); ++i)
		{
			listeners[i]->waitForNewFrame(frames[i]);
			libfreenect2::Frame* rgb = frames[i][libfreenect2::Frame::Color];
			libfreenect2::Frame* ir = frames[i][libfreenect2::Frame::Ir];
			libfreenect2::Frame* depth = frames[i][libfreenect2::Frame::Depth];

			// Wrap the libfreenect2 buffers without copying for display.
			cv::Mat rgbmat((int)rgb->height, (int)rgb->width, CV_8UC4, rgb->data);
			cv::Mat irmat((int)ir->height, (int)ir->width, CV_32FC1, ir->data);
			cv::Mat depthmat((int)depth->height, (int)depth->width, CV_32FC1, depth->data);

			cv::imshow("rgb" + std::to_string(i + 1), rgbmat);
			cv::imshow("ir" + std::to_string(i + 1), irmat / 4500.0f);
			cv::imshow("depth" + std::to_string(i + 1), depthmat / 4500.0f);

			// Clone BEFORE releasing the frame map: the wrappers above point
			// into memory owned by libfreenect2 that is invalid after release.
			rgbmats.push_back(rgbmat.clone());
			irmats.push_back(irmat.clone());
			depthmats.push_back(depthmat.clone());

			// Check for keypress to trigger image saving
			int key = cv::waitKey(30);
			if (key == 'q')
			{
				save_images = true; // Save this pass's snapshot from all cameras
			}
			else if (key == 27) // Escape key
			{
				protonect_shutdown = true;
			}

			listeners[i]->release(frames[i]);
		}

		// Save the snapshot for ALL cameras from the same capture pass.
		// (Fixes the original bug where the save loop shadowed the camera
		// index and wrote the current camera's images under every filename.)
		if (save_images)
		{
			std::string timestamp = getCurrentTimeString(); // shared by both cameras
			for (size_t i = 0; i < rgbmats.size(); ++i)
			{
				std::string filename_rgb = save_directory + "rgb" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_ir = save_directory + "ir" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_depth = save_directory + "depth" + std::to_string(i + 1) + "_" + timestamp + ".png";

				cv::imwrite(filename_rgb, rgbmats[i]);
				cv::imwrite(filename_ir, irmats[i] / 4500.0f * 255); // Convert to 8-bit
				cv::imwrite(filename_depth, depthmats[i] / 4500.0f * 255); // Convert to 8-bit

				std::cout << "Images from camera " << i + 1 << " saved to " << save_directory << std::endl;
			}
			save_images = false; // Reset flag after saving
		}
	}

	// Close devices
	for (auto dev : devices)
	{
		dev->stop();
		dev->close();
	}

	// Clean up resources
	for (auto pipeline : pipelines)
	{
		delete pipeline;
	}
	for (auto listener : listeners)
	{
		delete listener;
	}
	getchar();
	std::cout << "Goodbye World!" << std::endl;

	return 0;
}

没有画面——这一版运行时窗口没有图像。

出图片了,但是无法保证2个同时保存

#include <iostream>
#include <stdio.h>
#include <iomanip>
#include <time.h>
#include <signal.h>
#include <opencv2/opencv.hpp>
#pragma warning( disable : 4996 )
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/frame_listener_impl.h>
#include <libfreenect2/registration.h>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/logger.h>

using namespace std;
using namespace cv;
using namespace libfreenect2;

// Depth-processing backend options for the libfreenect2 packet pipeline
// (selected through the depthProcessor variable in main).
enum
{
	Processor_cl,  // OpenCL depth processing
	Processor_gl,  // OpenGL depth processing
	Processor_cpu  // CPU depth processing
};

bool protonect_shutdown = false; // Whether the running application should shut down.
bool save_images = false;        // Flag to indicate when to save images.
bool frames_ready = false;       // Flag to indicate that frames from both cameras are ready.
bool need_to_save = false;       // Set together with save_images; stays true until a full pass over all cameras has saved.
// SIGINT (Ctrl+C) handler: request a clean shutdown of the capture loop.
// The handler only sets a flag that the main loop polls.
void sigint_handler(int s)
{
	protonect_shutdown = true;
}

// Build a "YYYYMMDD_HHMMSS" timestamp from the current local time,
// used to give saved images unique, sortable file names.
std::string getCurrentTimeString() {
	char stamp[80];
	time_t current = time(0);
	strftime(stamp, sizeof(stamp), "%Y%m%d_%H%M%S", localtime(&current));
	return std::string(stamp);
}

// Two-Kinect capture loop (second iteration of this program): each camera's
// images are saved inside its own loop iteration, gated by the
// save_images/need_to_save flags, which are reset only after a full pass.
int main()
{
	// Define variables
	std::cout << "Hello World!" << std::endl;
	libfreenect2::Freenect2 freenect2;
	std::vector<libfreenect2::Freenect2Device*> devices;
	std::vector<libfreenect2::PacketPipeline*> pipelines;
	std::vector<libfreenect2::SyncMultiFrameListener*> listeners;
	std::vector<libfreenect2::FrameMap> frames;

	// Search and initialize sensors
	int numDevices = freenect2.enumerateDevices();
	if (numDevices < 2)
	{
		std::cerr << "Error: Not enough Kinect devices connected!" << std::endl;
		return -1;
	}

	// Configure transfer format
	int depthProcessor = Processor_cl;
	for (int i = 0; i < 2; ++i)
	{
		string serial = freenect2.getDeviceSerialNumber(i);
		std::cout << "SERIAL " << i + 1 << ": " << serial << std::endl;

		libfreenect2::PacketPipeline* pipeline = nullptr;
		if (depthProcessor == Processor_cpu)
		{
			pipeline = new libfreenect2::CpuPacketPipeline();
		}
		else if (depthProcessor == Processor_gl) // if support gl
		{
#ifdef LIBFREENECT2_WITH_OPENGL_SUPPORT
			pipeline = new libfreenect2::OpenGLPacketPipeline();
#else
			std::cout << "OpenGL pipeline is not supported!" << std::endl;
#endif
		}
		else if (depthProcessor == Processor_cl) // if support cl
		{
			// NOTE(review): the OpenCL branch actually instantiates the OpenGL
			// pipeline; the real OpenCL code is commented out below.
			pipeline = new libfreenect2::OpenGLPacketPipeline();
			//#ifdef LIBFREENECT2_WITH_OPENCL_SUPPORT
			//            pipeline = new libfreenect2::OpenCLPacketPipeline();
			//#else
			//            std::cout << "OpenCL pipeline is not supported!" << std::endl;
			//#endif
		}

		if (pipeline)
		{
			libfreenect2::Freenect2Device* dev = freenect2.openDevice(serial, pipeline);
			if (dev == nullptr)
			{
				std::cerr << "Failure opening device " << i + 1 << "!" << std::endl;
				return -1;
			}
			devices.push_back(dev);
			pipelines.push_back(pipeline);

			// One synchronized listener per device receives color, depth, and IR.
			libfreenect2::SyncMultiFrameListener* listener = new libfreenect2::SyncMultiFrameListener(
				libfreenect2::Frame::Color |
				libfreenect2::Frame::Depth |
				libfreenect2::Frame::Ir);
			listeners.push_back(listener);
			frames.push_back(libfreenect2::FrameMap());

			dev->setColorFrameListener(listener);
			dev->setIrAndDepthFrameListener(listener);
			dev->start();
		}
	}

	signal(SIGINT, sigint_handler);
	protonect_shutdown = false;

	// Create windows
	cv::namedWindow("rgb1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("rgb2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth2", WND_PROP_ASPECT_RATIO);

	// Define save directory
	std::string save_directory = "C:\\A0fangfang\\shiyan\\9.2\\";

	// Loop to receive frames
	while (!protonect_shutdown)
	{
		for (int i = 0; i < devices.size(); ++i)
		{
			listeners[i]->waitForNewFrame(frames[i]);
			libfreenect2::Frame* rgb = frames[i][libfreenect2::Frame::Color];
			libfreenect2::Frame* ir = frames[i][libfreenect2::Frame::Ir];
			libfreenect2::Frame* depth = frames[i][libfreenect2::Frame::Depth];

			// Zero-copy wrappers around the libfreenect2 frame buffers; they
			// are only valid until release(frames[i]) below.
			cv::Mat rgbmat((int)rgb->height, (int)rgb->width, CV_8UC4, rgb->data);
			cv::Mat irmat((int)ir->height, (int)ir->width, CV_32FC1, ir->data);
			cv::Mat depthmat((int)depth->height, (int)depth->width, CV_32FC1, depth->data);

			cv::imshow("rgb" + std::to_string(i + 1), rgbmat);
			cv::imshow("ir" + std::to_string(i + 1), irmat / 4500.0f);
			cv::imshow("depth" + std::to_string(i + 1), depthmat / 4500.0f);

			// Check for keypress to trigger image saving
			int key = cv::waitKey(30);
			if (key == 'q')
			{
				save_images = true; // Set flag to save images on next iteration
				need_to_save = true; // Set flag to ensure all cameras' images are saved
			}
			else if (key == 27) // Escape key
			{
				protonect_shutdown = true;
			}

			// Save images if the flag is set
			// NOTE(review): 'q' is detected inside camera i's iteration, so only
			// cameras i..N-1 are saved during THIS pass; earlier cameras are
			// saved on the next pass, and each camera calls getCurrentTimeString
			// separately, so the two cameras' files can carry different
			// timestamps. This is the "cannot guarantee both save together"
			// problem this version still has.
			if (save_images && need_to_save)
			{
				std::string timestamp = getCurrentTimeString();
				std::string filename_rgb = save_directory + "rgb" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_ir = save_directory + "ir" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_depth = save_directory + "depth" + std::to_string(i + 1) + "_" + timestamp + ".png";

				cv::imwrite(filename_rgb, rgbmat);
				cv::imwrite(filename_ir, irmat / 4500.0f * 255); // Convert to 8-bit
				cv::imwrite(filename_depth, depthmat / 4500.0f * 255); // Convert to 8-bit

				std::cout << "Images from camera " << i + 1 << " saved to " << save_directory << std::endl;
			}

			listeners[i]->release(frames[i]);
		}

		// After processing all cameras, reset the save_images and need_to_save flags
		if (save_images)
		{
			save_images = false;
			need_to_save = false;
		}
	}

	// Close devices
	for (auto dev : devices)
	{
		dev->stop();
		dev->close();
	}

	// Clean up resources
	for (auto pipeline : pipelines)
	{
		delete pipeline;
	}
	for (auto listener : listeners)
	{
		delete listener;
	}
	getchar();
	std::cout << "Goodbye World!" << std::endl;

	return 0;
}

 修改

#include <iostream>
#include <stdio.h>
#include <iomanip>
#include <time.h>
#include <signal.h>
#include <opencv2/opencv.hpp>
#pragma warning( disable : 4996 )
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/frame_listener_impl.h>
#include <libfreenect2/registration.h>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/logger.h>

using namespace std;
using namespace cv;
using namespace libfreenect2;

// Depth-processing backend options for the libfreenect2 packet pipeline
// (selected through the depthProcessor variable in main).
enum
{
	Processor_cl,  // OpenCL depth processing
	Processor_gl,  // OpenGL depth processing
	Processor_cpu  // CPU depth processing
};

bool protonect_shutdown = false; // Whether the running application should shut down.
bool save_images = false;        // Flag to indicate when to save images.
bool frames_ready = false;       // Flag to indicate that frames from both cameras are ready.

// SIGINT (Ctrl+C) handler: request a clean shutdown of the capture loop.
// The handler only sets a flag that the main loop polls.
void sigint_handler(int s)
{
	protonect_shutdown = true;
}

// Current local time rendered as "YYYYMMDD_HHMMSS" (e.g. 20240902_153000),
// used for naming the saved snapshot files.
std::string getCurrentTimeString() {
	time_t seconds = time(0);
	tm *broken_down = localtime(&seconds);
	char text[80];
	strftime(text, sizeof(text), "%Y%m%d_%H%M%S", broken_down);
	return std::string(text);
}

// Two-Kinect capture loop (third iteration of this program): per-camera save
// gated by a single save_images flag that is reset after each full pass.
int main()
{
	// Define variables
	std::cout << "Hello World!" << std::endl;
	libfreenect2::Freenect2 freenect2;
	std::vector<libfreenect2::Freenect2Device*> devices;
	std::vector<libfreenect2::PacketPipeline*> pipelines;
	std::vector<libfreenect2::SyncMultiFrameListener*> listeners;
	std::vector<libfreenect2::FrameMap> frames;

	// Search and initialize sensors
	int numDevices = freenect2.enumerateDevices();
	if (numDevices < 2)
	{
		std::cerr << "Error: Not enough Kinect devices connected!" << std::endl;
		return -1;
	}

	// Configure transfer format
	int depthProcessor = Processor_cl;
	for (int i = 0; i < 2; ++i)
	{
		string serial = freenect2.getDeviceSerialNumber(i);
		std::cout << "SERIAL " << i + 1 << ": " << serial << std::endl;

		libfreenect2::PacketPipeline* pipeline = nullptr;
		if (depthProcessor == Processor_cpu)
		{
			pipeline = new libfreenect2::CpuPacketPipeline();
		}
		else if (depthProcessor == Processor_gl) // if support gl
		{
#ifdef LIBFREENECT2_WITH_OPENGL_SUPPORT
			pipeline = new libfreenect2::OpenGLPacketPipeline();
#else
std::cout << "OpenGL pipeline is not supported!" << std::endl;
#endif
		}
		else if (depthProcessor == Processor_cl) // if support cl
		{
			// NOTE(review): the OpenCL branch actually instantiates the OpenGL
			// pipeline; the real OpenCL code is commented out below.
			pipeline = new libfreenect2::OpenGLPacketPipeline();
			//#ifdef LIBFREENECT2_WITH_OPENCL_SUPPORT
			//            pipeline = new libfreenect2::OpenCLPacketPipeline();
			//#else
			//            std::cout << "OpenCL pipeline is not supported!" << std::endl;
			//#endif
		}

		if (pipeline)
		{
			libfreenect2::Freenect2Device* dev = freenect2.openDevice(serial, pipeline);
			if (dev == nullptr)
			{
				std::cerr << "Failure opening device " << i + 1 << "!" << std::endl;
				return -1;
			}
			devices.push_back(dev);
			pipelines.push_back(pipeline);

			// One synchronized listener per device receives color, depth, and IR.
			libfreenect2::SyncMultiFrameListener* listener = new libfreenect2::SyncMultiFrameListener(
				libfreenect2::Frame::Color |
				libfreenect2::Frame::Depth |
				libfreenect2::Frame::Ir);
			listeners.push_back(listener);
			frames.push_back(libfreenect2::FrameMap());

			dev->setColorFrameListener(listener);
			dev->setIrAndDepthFrameListener(listener);
			dev->start();
		}
	}

	signal(SIGINT, sigint_handler);
	protonect_shutdown = false;

	// Create windows
	cv::namedWindow("rgb1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("rgb2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth2", WND_PROP_ASPECT_RATIO);

	// Define save directory
	std::string save_directory = "C:\\A0fangfang\\shiyan\\9.2\\";

	// Loop to receive frames
	while (!protonect_shutdown)
	{
		for (int i = 0; i < devices.size(); ++i)
		{
			listeners[i]->waitForNewFrame(frames[i]);
			libfreenect2::Frame* rgb = frames[i][libfreenect2::Frame::Color];
			libfreenect2::Frame* ir = frames[i][libfreenect2::Frame::Ir];
			libfreenect2::Frame* depth = frames[i][libfreenect2::Frame::Depth];

			// Zero-copy wrappers around the libfreenect2 frame buffers; they
			// are only valid until release(frames[i]) below.
			cv::Mat rgbmat((int)rgb->height, (int)rgb->width, CV_8UC4, rgb->data);
			cv::Mat irmat((int)ir->height, (int)ir->width, CV_32FC1, ir->data);
			cv::Mat depthmat((int)depth->height, (int)depth->width, CV_32FC1, depth->data);

			cv::imshow("rgb" + std::to_string(i + 1), rgbmat);
			cv::imshow("ir" + std::to_string(i + 1), irmat / 4500.0f);
			cv::imshow("depth" + std::to_string(i + 1), depthmat / 4500.0f);

			// Check for keypress to trigger image saving
			int key = cv::waitKey(30);
			if (key == 'q')
			{
				save_images = true; // Set flag to save images on next iteration
			}
			else if (key == 27) // Escape key
			{
				protonect_shutdown = true;
			}

			// Save images if the flag is set
			// NOTE(review): when 'q' is pressed during camera i's iteration, only
			// cameras i..N-1 are saved in this pass, and each camera gets its own
			// getCurrentTimeString() call, so the two cameras' filenames may carry
			// different timestamps — the cameras are still not saved together.
			if (save_images)
			{
				std::string timestamp = getCurrentTimeString();
				std::string filename_rgb = save_directory + "rgb" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_ir = save_directory + "ir" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_depth = save_directory + "depth" + std::to_string(i + 1) + "_" + timestamp + ".png";

				cv::imwrite(filename_rgb, rgbmat);
				cv::imwrite(filename_ir, irmat / 4500.0f * 255); // Convert to 8-bit
				cv::imwrite(filename_depth, depthmat / 4500.0f * 255); // Convert to 8-bit

				std::cout << "Camera " << i + 1 << " saved successfully." << std::endl;
			}

			listeners[i]->release(frames[i]);
		}

		// After processing all cameras, reset the save_images flag
		if (save_images)
		{
			save_images = false;
		}
	}

	// Close devices
	for (auto dev : devices)
	{
		dev->stop();
		dev->close();
	}

	// Clean up resources
	for (auto pipeline : pipelines)
	{
		delete pipeline;
	}
	for (auto listener : listeners)
	{
		delete listener;
	}
	getchar();
	std::cout << "Goodbye World!" << std::endl;

	return 0;
}

成功的

#include <iostream>
#include <stdio.h>
#include <iomanip>
#include <time.h>
#include <signal.h>
#pragma warning( disable : 4996 )
#include <opencv2/opencv.hpp>
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/frame_listener_impl.h>
#include <libfreenect2/registration.h>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/logger.h>
#include <mutex>
#include <condition_variable>

using namespace std;
using namespace cv;
using namespace libfreenect2;

// Depth-processing backend options for the libfreenect2 packet pipeline
// (selected through the depthProcessor variable in main).
enum
{
	Processor_cl,  // OpenCL depth processing
	Processor_gl,  // OpenGL depth processing
	Processor_cpu  // CPU depth processing
};

bool protonect_shutdown = false; // Whether the running application should shut down.
bool save_images = false;        // Flag to indicate when to save images.
bool frames_ready = false;       // Flag to indicate that frames from both cameras are ready.

// frames_mutex guards frames_ready; frames_cv is notified after each camera's
// frame has been displayed in the capture pass.
std::mutex frames_mutex;
std::condition_variable frames_cv;

// SIGINT (Ctrl+C) handler: request a clean shutdown of the capture loop.
// The handler only sets a flag that the main loop polls.
void sigint_handler(int s)
{
	protonect_shutdown = true;
}

// Returns the current local time formatted as "YYYYMMDD_HHMMSS",
// used to build unique, sortable image file names.
// Returns an empty string if the local time cannot be determined.
std::string getCurrentTimeString() {
	std::time_t now = std::time(nullptr);
	std::tm *ltm = std::localtime(&now); // NOTE: not thread-safe; fine for this single-threaded loop
	if (ltm == nullptr)
		return std::string(); // defensive: localtime may fail and return null
	char buffer[80];
	std::strftime(buffer, sizeof(buffer), "%Y%m%d_%H%M%S", ltm);
	return std::string(buffer);
}

// Two-Kinect capture loop (final, working version): all cameras' frames are
// captured and displayed first, then saved together under ONE shared
// timestamp, and only released after saving — so both cameras' snapshots
// come from the same pass.
int main()
{
	// Define variables
	std::cout << "Hello World!" << std::endl;
	libfreenect2::Freenect2 freenect2;
	std::vector<libfreenect2::Freenect2Device*> devices;
	std::vector<libfreenect2::PacketPipeline*> pipelines;
	std::vector<libfreenect2::SyncMultiFrameListener*> listeners;
	std::vector<libfreenect2::FrameMap> frames;

	// Search and initialize sensors
	int numDevices = freenect2.enumerateDevices();
	if (numDevices < 2)
	{
		std::cerr << "Error: Not enough Kinect devices connected!" << std::endl;
		return -1;
	}

	// Configure transfer format
	int depthProcessor = Processor_cl;
	for (int i = 0; i < 2; ++i)
	{
		string serial = freenect2.getDeviceSerialNumber(i);
		std::cout << "SERIAL " << i + 1 << ": " << serial << std::endl;

		libfreenect2::PacketPipeline* pipeline = nullptr;
		if (depthProcessor == Processor_cpu)
		{
			pipeline = new libfreenect2::CpuPacketPipeline();
		}
		else if (depthProcessor == Processor_gl) // if support gl
		{
#ifdef LIBFREENECT2_WITH_OPENGL_SUPPORT
			pipeline = new libfreenect2::OpenGLPacketPipeline();
#else
			std::cout << "OpenGL pipeline is not supported!" << std::endl;
#endif
		}
		else if (depthProcessor == Processor_cl) // if support cl
		{
			// NOTE(review): the OpenCL branch actually instantiates the OpenGL
			// pipeline; the real OpenCL code is commented out below.
			pipeline = new libfreenect2::OpenGLPacketPipeline();
			//#ifdef LIBFREENECT2_WITH_OPENCL_SUPPORT
			//            pipeline = new libfreenect2::OpenCLPacketPipeline();
			//#else
			//            std::cout << "OpenCL pipeline is not supported!" << std::endl;
			//#endif
		}

		if (pipeline)
		{
			libfreenect2::Freenect2Device* dev = freenect2.openDevice(serial, pipeline);
			if (dev == nullptr)
			{
				std::cerr << "Failure opening device " << i + 1 << "!" << std::endl;
				return -1;
			}
			devices.push_back(dev);
			pipelines.push_back(pipeline);

			// One synchronized listener per device receives color, depth, and IR.
			libfreenect2::SyncMultiFrameListener* listener = new libfreenect2::SyncMultiFrameListener(
				libfreenect2::Frame::Color |
				libfreenect2::Frame::Depth |
				libfreenect2::Frame::Ir);
			listeners.push_back(listener);
			frames.push_back(libfreenect2::FrameMap());

			dev->setColorFrameListener(listener);
			dev->setIrAndDepthFrameListener(listener);
			dev->start();
		}
	}

	signal(SIGINT, sigint_handler);
	protonect_shutdown = false;

	// Create windows
	cv::namedWindow("rgb1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("rgb2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("ir2", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth1", WND_PROP_ASPECT_RATIO);
	cv::namedWindow("depth2", WND_PROP_ASPECT_RATIO);

	// Define save directory
	std::string save_directory = "C:\\A0fangfang\\shiyan\\9.2\\";

	// Loop to receive frames
	while (!protonect_shutdown)
	{
		// Capture + display pass: the frame maps are intentionally NOT
		// released here, so the frame data stays valid for the save step.
		for (int i = 0; i < devices.size(); ++i)
		{
			listeners[i]->waitForNewFrame(frames[i]);
			libfreenect2::Frame* rgb = frames[i][libfreenect2::Frame::Color];
			libfreenect2::Frame* ir = frames[i][libfreenect2::Frame::Ir];
			libfreenect2::Frame* depth = frames[i][libfreenect2::Frame::Depth];

			cv::Mat rgbmat((int)rgb->height, (int)rgb->width, CV_8UC4, rgb->data);
			cv::Mat irmat((int)ir->height, (int)ir->width, CV_32FC1, ir->data);
			cv::Mat depthmat((int)depth->height, (int)depth->width, CV_32FC1, depth->data);

			cv::imshow("rgb" + std::to_string(i + 1), rgbmat);
			cv::imshow("ir" + std::to_string(i + 1), irmat / 4500.0f);
			cv::imshow("depth" + std::to_string(i + 1), depthmat / 4500.0f);

			// Check for keypress to trigger image saving
			int key = cv::waitKey(30);
			if (key == 'q')
			{
				save_images = true; // Set flag to save images on next iteration
			}
			else if (key == 27) // Escape key
			{
				protonect_shutdown = true;
			}

			// Signal that a frame is ready
			{
				std::lock_guard<std::mutex> lock(frames_mutex);
				frames_ready = true;
			}
			frames_cv.notify_one();
		}

		// Wait until all frames are ready before proceeding
		// NOTE(review): everything in this loop runs on the one main thread, so
		// this wait never blocks — frames_ready was just set above and there is
		// no other thread to wait for. The mutex/condvar pair is effectively inert.
		{
			std::unique_lock<std::mutex> lock(frames_mutex);
			frames_cv.wait(lock, [] { return frames_ready; });
		}

		// Save images if the flag is set
		// One timestamp is taken for the whole pass, so both cameras' files
		// share the same name suffix; the frame pointers are still valid
		// because release() only happens after this block.
		if (save_images)
		{
			std::string timestamp = getCurrentTimeString();
			for (int i = 0; i < devices.size(); ++i)
			{
				libfreenect2::Frame* rgb = frames[i][libfreenect2::Frame::Color];
				libfreenect2::Frame* ir = frames[i][libfreenect2::Frame::Ir];
				libfreenect2::Frame* depth = frames[i][libfreenect2::Frame::Depth];

				std::string filename_rgb = save_directory + "rgb" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_ir = save_directory + "ir" + std::to_string(i + 1) + "_" + timestamp + ".png";
				std::string filename_depth = save_directory + "depth" + std::to_string(i + 1) + "_" + timestamp + ".png";

				cv::imwrite(filename_rgb, cv::Mat((int)rgb->height, (int)rgb->width, CV_8UC4, rgb->data));
				cv::imwrite(filename_ir, cv::Mat((int)ir->height, (int)ir->width, CV_32FC1, ir->data) / 4500.0f * 255);
				cv::imwrite(filename_depth, cv::Mat((int)depth->height, (int)depth->width, CV_32FC1, depth->data) / 4500.0f * 255);

				std::cout << "Camera " << i + 1 << " saved successfully." << std::endl;
			}
			save_images = false; // Reset the flag after saving
			frames_ready = false; // Reset the flag after saving
		}

		// Release frames
		for (int i = 0; i < devices.size(); ++i)
		{
			listeners[i]->release(frames[i]);
		}
	}

	// Close devices
	for (auto dev : devices)
	{
		dev->stop();
		dev->close();
	}

	// Clean up resources
	for (auto pipeline : pipelines)
	{
		delete pipeline;
	}
	for (auto listener : listeners)
	{
		delete listener;
	}
	getchar();
	std::cout << "Goodbye World!" << std::endl;

	return 0;
}

  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是基于Python使用OpenCV库对Kinect v2进行相机标定并获取内参与外参的代码: ```python import numpy as np import cv2 import os # 设置标定板大小 board_w = 9 board_h = 6 # 设置标定板尺寸 square_size = 30 # 获取标定板角点坐标 def get_board_corners(image): gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) ret, corners = cv2.findChessboardCorners(gray, (board_w, board_h), None) if ret: corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)) return ret, corners # 获取标定板坐标 def get_board_points(): points = np.zeros((board_h * board_w, 3), np.float32) points[:, :2] = np.mgrid[0:board_w, 0:board_h].T.reshape(-1, 2) points *= square_size return points # 读取所有标定板图片 images = [] for filename in os.listdir('calibration_images'): if filename.endswith('.png'): images.append(cv2.imread(os.path.join('calibration_images', filename))) # 获取标定板角点坐标和标定板坐标 board_corners = [] board_points = [] for image in images: ret, corners = get_board_corners(image) if ret: board_corners.append(corners) board_points.append(get_board_points()) # 标定相机 ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(board_points, board_corners, images[0].shape[:2][::-1], None, None) # 输出相机内参和外参 print('相机内参:') print(mtx) print('畸变系数:') print(dist) for i in range(len(images)): print('第{}张图片的外参:'.format(i+1)) print('旋转向量:') print(rvecs[i]) print('平移向量:') print(tvecs[i]) ``` 在运行代码前,需要先将标定板的图片放入名为`calibration_images`的文件夹中,然后修改代码中的标定板大小和尺寸以及标定板图片文件夹的路径。运行代码后会输出相机的内参和每张标定板图片的外参(旋转向量和平移向量)。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值