Using MPI_Scatter and MPI_Gather for collective communication on the contiguous inner data of vector elements
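Both demos below rely on one layout fact: the buffer handed to MPI_Scatter/MPI_Gather must be contiguous. Eigen::MatrixXd uses column-major storage by default, so a block of nPart columns is one contiguous run of nPart * rows doubles, and std::vector<double>::data() likewise points at a contiguous array. A minimal sketch of that layout assumption (the helper name check_column_major_layout is illustrative only, not part of the code below):

#include <Eigen/Dense>
#include <cassert>

// Illustrative check: with Eigen's default column-major storage, element (r, c)
// of a MatrixXd lives at data()[c * rows + r], so consecutive columns sit back
// to back in memory. This is what lets MPI_Scatter hand out nPart whole columns
// per rank as nPart * rows doubles.
void check_column_major_layout() {
	const int rows = 2, cols = 6;
	Eigen::MatrixXd M = Eigen::MatrixXd::Random(rows, cols);
	for (int c = 0; c < cols; ++c)
		for (int r = 0; r < rows; ++r)
			assert(&M(r, c) == M.data() + c * rows + r);
}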

#include <mpi.h>
#include <Eigen/Dense>
#include <vector>
#include <iostream>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <ctime>

void t15(int argumentcount, char* argumentvector[]) {

	int process_id, num_process;
	int namelen;
	char processor_name[MPI_MAX_PROCESSOR_NAME];
	MPI_Init(&argumentcount, &argumentvector);
	MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
	MPI_Comm_size(MPI_COMM_WORLD, &num_process);
	MPI_Get_processor_name(processor_name, &namelen);
	fprintf(stderr, "Hello world! Processor %d of %d on %s \n", process_id, num_process, processor_name);
	MPI_Barrier(MPI_COMM_WORLD);

	int mat_rows = 2, mat_cols = 10;
	// Pad the per-rank column count so that nPart * num_process >= mat_cols;
	// MPI_Scatter requires the same element count on every rank.
	int nPart = mat_cols / num_process + 1;
	int m = 2;   // number of matrices communicated
	std::vector<Eigen::MatrixXd> send_mats(m);
	// Only the root rank allocates and fills the matrices to be scattered.
	if (process_id == 0) {
		for (int i = 0; i < send_mats.size(); ++i) {
			send_mats[i] = Eigen::MatrixXd::Random(mat_rows, nPart * num_process);
		}
	}

	// Every rank receives a mat_rows x nPart block of each matrix.
	std::vector<Eigen::MatrixXd> recv_mats(m, Eigen::MatrixXd::Zero(mat_rows, nPart));

	// The root gathers one maximum per column, i.e. nPart values from every rank.
	std::vector<Eigen::MatrixXd> gather_max_elements(m, Eigen::MatrixXd::Zero(1, nPart * num_process));

	if (process_id == 0) {
		for (int i = 0; i < send_mats.size(); ++i) {
			std::cout << "send_mats[" << i << "]" << std::endl;
			std::cout << send_mats[i] << std::endl;
		}
	}

	// Eigen::MatrixXd is column-major, so each chunk of nPart * mat_rows doubles
	// scattered here is a contiguous block of nPart whole columns.
	for (int i = 0; i < send_mats.size(); ++i) {
		MPI_Scatter(send_mats[i].data(), nPart * mat_rows, MPI_DOUBLE, recv_mats[i].data(), nPart * mat_rows, MPI_DOUBLE, 0, MPI_COMM_WORLD);
	}

	// Each rank computes the maximum of every column of its local block.
	std::vector<Eigen::MatrixXd> max_elements(m, Eigen::MatrixXd::Zero(1, nPart));
	for (int i = 0; i < max_elements.size(); ++i) {
		std::cout << "process_id " << process_id << " recv_mats[" << i << "]" << recv_mats[i] << std::endl;
		for (int j = 0; j < recv_mats[i].cols(); ++j) {
			std::cout << "process_id " << process_id << " col " << j << " max element=" << recv_mats[i].col(j).maxCoeff() << std::endl;
			max_elements[i](0, j) = recv_mats[i].col(j).maxCoeff();
		}
	}

	// Gather each rank's nPart column maxima back onto the root.
	for (int i = 0; i < max_elements.size(); ++i) {
		MPI_Gather(max_elements[i].data(), max_elements[i].size(), MPI_DOUBLE, gather_max_elements[i].data(), max_elements[i].size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
	}

	MPI_Barrier(MPI_COMM_WORLD);

	if (process_id == 0) {
		std::cout  << std::endl;
		for (int i = 0; i < gather_max_elements.size(); ++i) {
			std::cout << "m=" << i << " max element=" << gather_max_elements[i] << std::endl;
		}

		// Recompute the column maxima directly on the full matrices to validate the gathered results.
		std::vector<Eigen::MatrixXd> validate_max_elements(m);
		for (int i = 0; i < validate_max_elements.size(); ++i) {
			validate_max_elements[i].resize(1, send_mats[i].cols());
			for (int c = 0; c < validate_max_elements[i].cols(); ++c) {
				validate_max_elements[i](0, c) = send_mats[i].col(c).maxCoeff();
			}
			std::cout << "m=" << i << " max element=" << validate_max_elements[i] << std::endl;
		}
	}

	MPI_Finalize();

}
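
t15 (and t16 below) pads the data so that every rank receives the same element count, which is what plain MPI_Scatter requires. When the columns do not divide evenly and padding is undesirable, MPI_Scatterv with per-rank counts and displacements is the usual alternative. A hedged sketch of that variant, assuming an MPI 3 implementation with const-correct bindings (the function name scatter_columns_uneven and its parameters are illustrative, not part of the code above):

#include <mpi.h>
#include <Eigen/Dense>
#include <vector>

// Sketch: scatter an uneven number of columns per rank with MPI_Scatterv
// instead of padding. Because Eigen::MatrixXd is column-major, counts and
// displacements are simply whole columns times mat_rows, measured in doubles.
Eigen::MatrixXd scatter_columns_uneven(const Eigen::MatrixXd& full, int mat_rows,
                                       int total_cols, int root, MPI_Comm comm) {
	int rank, size;
	MPI_Comm_rank(comm, &rank);
	MPI_Comm_size(comm, &size);

	std::vector<int> counts(size), displs(size);
	for (int r = 0, offset = 0; r < size; ++r) {
		int cols_r = total_cols / size + (r < total_cols % size ? 1 : 0);
		counts[r] = cols_r * mat_rows;   // doubles sent to rank r
		displs[r] = offset;              // offset (in doubles) into full.data()
		offset += counts[r];
	}

	Eigen::MatrixXd local(mat_rows, counts[rank] / mat_rows);
	MPI_Scatterv(rank == root ? full.data() : nullptr, counts.data(), displs.data(),
	             MPI_DOUBLE, local.data(), counts[rank], MPI_DOUBLE, root, comm);
	return local;
}
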
void t16(int argumentcount, char* argumentvector[]) {

	int process_id, num_process;
	int namelen;
	char processor_name[MPI_MAX_PROCESSOR_NAME];
	MPI_Init(&argumentcount, &argumentvector);
	MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
	MPI_Comm_size(MPI_COMM_WORLD, &num_process);
	MPI_Get_processor_name(processor_name, &namelen);
	fprintf(stderr, "Hello world! Processor %d of %d on %s \n", process_id, num_process, processor_name);
	srand(time(NULL) + process_id * 10); // Seed the random number generator: srand() sets the seed used by rand(); the same seed always produces the same sequence, so offsetting by process_id gives each rank different values.

	MPI_Barrier(MPI_COMM_WORLD);

	int N = 10;
	// Pad so that nPart * num_process >= N; MPI_Scatter needs the same count on every rank.
	int nPart = N / num_process + 1;
	int m = 2;   // number of vectors communicated
	std::vector<std::vector<double>> send_mats(m);

	// Only the root fills the send buffers, with random values in [0, 100].
	if (process_id == 0) {
		for (int i = 0; i < send_mats.size(); ++i) {
			send_mats[i].resize(nPart * num_process);
			for (int j = 0; j < send_mats[i].size(); ++j) {
				send_mats[i][j] = ((double)rand() / (double)RAND_MAX) * 100.0;
			}
		}
	}

	// Each rank receives nPart values per vector; the root gathers one maximum per rank.
	std::vector<std::vector<double>> recv_mats(m);
	std::vector<std::vector<double>> gather_max_elements(m);
	for (int i = 0; i < send_mats.size(); ++i) {
		recv_mats[i].resize(nPart);
		gather_max_elements[i].resize(num_process);
	}

	if (process_id == 0) {

		for (int i = 0; i < send_mats.size(); ++i) {
			std::cout << "send_mats[" << i << "]= ";
			for (int j = 0; j < send_mats[i].size(); ++j) {
				std::cout <<  send_mats[i][j] << ", ";
			}
			std::cout << std::endl;
		}
	}

	// std::vector<double>::data() is contiguous, so each rank receives one nPart-sized slice.
	for (int i = 0; i < send_mats.size(); ++i) {
		MPI_Scatter(send_mats[i].data(), nPart, MPI_DOUBLE, recv_mats[i].data(), nPart, MPI_DOUBLE, 0, MPI_COMM_WORLD);
	}

	// Each rank finds the maximum of its local slice.
	std::vector<double> maxs(m);
	for (int i = 0; i < maxs.size(); ++i) {
		std::cout << "process_id " << process_id << " recv_mats[" << i << "]= ";
		for (int j = 0; j < recv_mats[i].size(); ++j) {
			std::cout << recv_mats[i][j] << ", ";
		}
		std::cout << std::endl;
		auto max_pos = std::max_element(recv_mats[i].begin(), recv_mats[i].end());
		maxs[i] = *max_pos;
	}

	// Gather one maximum per rank onto the root.
	for (int i = 0; i < send_mats.size(); ++i) {
		MPI_Gather(&maxs[i], 1, MPI_DOUBLE, gather_max_elements[i].data(), 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
	}

	MPI_Barrier(MPI_COMM_WORLD);

	if (process_id == 0) {
		std::cout << std::endl;
		for (int i = 0; i < gather_max_elements.size(); ++i) {
			for (int j = 0; j < gather_max_elements[i].size(); ++j) {
				std::cout << "m=" << i << " rank " << j << " max element=" << gather_max_elements[i][j] << std::endl;
			}
		}
	}

	MPI_Finalize();

}
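
Each of the two functions calls MPI_Init and MPI_Finalize itself, so a driver should invoke only one of them per run. A minimal sketch of such a driver and launch commands (main, the file name, and the mpiexec flags below are assumptions, not from the original post):

// Hypothetical driver: call exactly one demo per run, since each one
// initializes and finalizes MPI on its own.
int main(int argc, char* argv[]) {
	t15(argc, argv);   // or: t16(argc, argv);
	return 0;
}

// Possible build and launch (paths and flags are assumptions):
//   mpicxx -I/path/to/eigen main.cpp -o scatter_gather_demo
//   mpiexec -n 5 ./scatter_gather_demo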

The run output shows each rank printing its scattered block and local maxima, with rank 0 printing the gathered maxima alongside the directly computed values for validation; the original output listing is omitted here.
