C++ MPI_Send / MPI_Recv with nested vectors (std::vector<std::vector<int>>) — scatter index combinations to workers and gather Eigen weight matrices back to rank 0

    // --- MPI setup, then build the hyper-parameter index grid on rank 0 and
    // broadcast it to every rank. ---
    int process_id, num_process;
	MPI_Init(&argumentcount, &argumentvector);
	MPI_Comm_rank(MPI_COMM_WORLD, &process_id);
	MPI_Comm_size(MPI_COMM_WORLD, &num_process);
    // inds[i] is one combination of candidate indices:
    // { algo_type, batch_size, learning_rate, rsq_weight, activator }.
    std::vector<std::vector<int>> inds;
	int inds_size = 0;            // number of combinations; valid on all ranks after the Bcast
	std::vector<int> inds_i_size; // per-combination length (kept generic, 5 here)
	if (process_id == 0) {
		// Root enumerates the full Cartesian product of the candidate value lists.
		for (int i1 = 0; i1 < algo_types.size(); ++i1) {
			for (int i3 = 0; i3 < batch_sizes.size(); ++i3) {
				for (int i4 = 0; i4 < learning_rates.size(); ++i4) {
					for (int i5 = 0; i5 < rsq_weights.size(); ++i5) {
						for (int i6 = 0; i6 < activators.size(); ++i6) {
							std::vector<int> ind{ i1, i3, i4, i5, i6 };
							inds.emplace_back(ind);
						}
					}
				}
			}
		}
		inds_size = inds.size();
		inds_i_size.resize(inds.size());
		for (int i = 0; i < inds.size(); ++i) {
			inds_i_size[i] = inds[i].size();
			for (int j = 0; j < inds[i].size(); ++j) {
				std::cout << "inds[" << i << "][" << j << "]=" << inds[i][j] << ", ";
			}
			std::cout << std::endl;
		}
	}
	// Idiomatic MPI: describe the payload as typed elements (MPI_INT + element count)
	// rather than raw bytes, so the library can type-check the transfer and convert
	// representations on heterogeneous systems.
	MPI_Bcast(&inds_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
	if (process_id > 0)
		inds_i_size.resize(inds_size);
	MPI_Bcast(inds_i_size.data(), static_cast<int>(inds_i_size.size()), MPI_INT, 0, MPI_COMM_WORLD);
	if (process_id > 0) {
		// Non-root ranks size their buffers before receiving the row contents.
		inds.resize(inds_size);
		for (int i = 0; i < inds.size(); ++i)
			inds[i].resize(inds_i_size[i]);
	}
	// A vector of vectors is not contiguous, so each inner vector is broadcast separately.
	for (int i = 0; i < inds.size(); ++i) {
		MPI_Bcast(inds[i].data(), static_cast<int>(inds[i].size()), MPI_INT, 0, MPI_COMM_WORLD);
	}

    // Deal the combinations round-robin: rank 0 sends combination i to worker
    // (i % (num_process - 1)) + 1, using i as the message tag. Each worker keeps
    // its received combinations in inds_id[rank - 1].
    std::vector<std::vector<std::vector<int>>> inds_id(num_process - 1); // except process 0
	for (int i = 0; i < inds.size(); ++i) {
		const int worker = i % (num_process - 1) + 1;            // destination rank for combination i
		const int byte_count = inds[i].size() * sizeof(int);     // payload size in bytes
		if (process_id == 0) {
			int ret = MPI_Send(inds[i].data(), byte_count, MPI_BYTE, worker, i, MPI_COMM_WORLD);
			std::cout << "MPI_Send ret=" << ret << ", process 0 send to process " << worker << " send: ";
			for (int k = 0; k < inds[i].size(); ++k)
				std::cout << inds[i][k] << "  ";
			std::cout << std::endl;
		} else if (process_id == worker) {
			// Every rank knows inds[i].size() from the earlier broadcast, so the
			// receive buffer can be sized exactly.
			std::vector<int> ind_recv(inds[i].size());
			int ret = MPI_Recv(ind_recv.data(), byte_count, MPI_BYTE, 0, i, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
			std::cout << "MPI_Recv ret=" << ret << ", process " << process_id << " received from process 0 : ";
			for (int val : ind_recv)
				std::cout << val << "  ";
			std::cout << std::endl;
			inds_id[process_id - 1].emplace_back(std::move(ind_recv));
		}
	}

    // all_ws[i] holds, on rank 0 only, the [epoch][param][layer] weight matrices
    // produced by the worker that owns combination i.
    std::vector<std::vector<std::vector<std::vector<Eigen::MatrixXf>>>> all_ws(inds.size());

    int epochs_num = 2;
	int params_num = 2;
	int layers_num = 2;
	for (int i = 0; i < inds.size(); ++i) {
		if (process_id != 0)
			continue; // only the root collects; workers fall through to the send phase below
		const int src = i % (num_process - 1) + 1; // worker that owns combination i
		// Pre-size the receive buffer: epochs_num x params_num x layers_num matrices,
		// each 2x2, built in one shot with the fill constructors.
		std::vector<std::vector<std::vector<Eigen::MatrixXf>>> recv_random_ws(
			epochs_num,
			std::vector<std::vector<Eigen::MatrixXf>>(
				params_num,
				std::vector<Eigen::MatrixXf>(layers_num, Eigen::MatrixXf(2, 2))));
		// Receive every matrix for this combination from its worker; the tag is the
		// global combination index i, matching the worker's MPI_Send tag.
		for (auto& per_epoch : recv_random_ws)
			for (auto& per_param : per_epoch)
				for (auto& mat : per_param)
					MPI_Recv(mat.data(),
					         mat.size() * sizeof(float),
					         MPI_BYTE, src, i,
					         MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		all_ws[i] = std::move(recv_random_ws);
	}

	if (process_id > 0) {

		// Each worker generates the weight matrices for the combinations it was dealt
		// and ships them to rank 0. Worker p owns global combination indices
		// id = j * (num_process - 1) + (p - 1), mirroring the round-robin deal above,
		// and uses id as the message tag so rank 0 can match each payload.
		for (int j = 0; j < inds_id[process_id - 1].size(); ++j) {
			int id = j * (num_process - 1) + process_id - 1;
			std::vector<std::vector<std::vector<Eigen::MatrixXf>>> random_ws; //[epoch][param][layer]
			for (int g1 = 0; g1 < epochs_num; ++g1) {
				std::vector<std::vector<Eigen::MatrixXf>> params_layers_random_matrix;
				for (int g2 = 0; g2 < params_num; ++g2) {
					std::vector<Eigen::MatrixXf> layers_random_matrix;
					for (int g3 = 0; g3 < layers_num; ++g3) {
						// Random 2x2 matrix offset by the sender's rank so the origin of
						// each payload is visible in rank 0's printed output.
						Eigen::MatrixXf random_matrix(Eigen::MatrixXf::Random(2, 2));
						Eigen::MatrixXf tmp_matrix(2, 2);
						for (int r = 0; r < 2; ++r) {
							for (int c = 0; c < 2; ++c) {
								// BUG FIX: the original used the undeclared name `myid`;
								// the rank variable in this program is `process_id`.
								tmp_matrix(r, c) = process_id;
							}
						}
						layers_random_matrix.emplace_back(random_matrix + tmp_matrix);

						std::cout << "proc id " << process_id << ", j=" << j <<  ", send all_models_epochs_all_params_ws[" << id << "][" << g1 << "][" << g2 << "][" << g3 << "] to proc 0=" << random_matrix + tmp_matrix << std::endl;
					}
					params_layers_random_matrix.emplace_back(layers_random_matrix);
				}
				random_ws.emplace_back(params_layers_random_matrix);
			}

			// Send every matrix to rank 0, tagged with the global combination index.
			for (int g1 = 0; g1 < epochs_num; ++g1) {
				for (int g2 = 0; g2 < params_num; ++g2) {
					for (int g3 = 0; g3 < layers_num; ++g3) {
						MPI_Send(random_ws[g1][g2][g3].data(),
						         random_ws[g1][g2][g3].size() * sizeof(float),
						         MPI_BYTE, 0, id, MPI_COMM_WORLD);
					}
				}
			}
		}
	}

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值