MPI Collective Communication
MPI collective (group) communication refers to the function interfaces in which all processes within a given communicator take part in a communication at the same time. A collective call has exactly the same form in every process, which makes the code easy to write and improves communication efficiency.
Approach:
Understand what each collective function does, then construct the arguments it requires; with that, the code can be written quickly and correctly.
This post gives examples for the commonly used functions listed below. In the tests, the number of processes is set to 4; note that output lines from different ranks may interleave, since the ordering of stdout across ranks is not deterministic.
// Broadcast a message from the root process to all other processes
int MPI_Bcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)// broadcast
// Gather data from every process in the group to the root process
int MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)// gather
// Gather data from the group into specified locations at the root (per-rank counts and displacements)
int MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm)// gather
// Send distinct blocks of data from one process to the other processes in the group
int MPI_Scatter(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)// scatter
// Send specified sections of a buffer from one process to the other processes in the group
int MPI_Scatterv(void* sendbuf, int* sendcounts, int* displs, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)// scatter
// Every process gathers data from all other processes, as if every process had executed an MPI_Gather call
int MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)// gather-to-all
// Every process gathers data into specified locations, as if every process had executed an MPI_Gatherv call
int MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype, MPI_Comm comm)// gather-to-all
// All processes exchange data with one another
int MPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)// all-to-all
// All processes exchange data with one another, with per-destination counts and displacements
int MPI_Alltoallv(void* sendbuf, int* sendcounts, int* sdispls, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* rdispls, MPI_Datatype recvtype, MPI_Comm comm)// all-to-all
// Wait until all processes have reached this routine before any of them continues
int MPI_Barrier(MPI_Comm comm)// synchronization
// Reduce the values of all processes to a single result on the root process
int MPI_Reduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)// reduction
// Reduce the results of all processes and deliver the final result to every process, as if every process had executed an MPI_Reduce call
int MPI_Allreduce(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)// reduction-to-all
// Reduce the values, then scatter the result across the group
int MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int* recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)// reduce and scatter
// Perform a prefix (scan) reduction over the given set of processes
int MPI_Scan(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)// scan
Example:
// mpitest.cpp
#include <iostream>
#include <cstdlib> // malloc/free used in the gather examples
#include <mpi.h>
using namespace std;
int main(int argc, char *argv[])
{
int myid, numprocs;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
double startTime = 0;
double endTime = 0;
startTime = MPI_Wtime();
// Broadcast
// MPI_Bcast
/*
int value = 1;
while(value > 0){
if(myid == 0){
cin >> value;
}
MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
cout << "Task: " << myid << " value: " << value << endl;
}
*/
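// Expected behavior with 4 processes: rank 0 reads a value, the broadcast delivers
// it to every rank, and each rank prints that same value; entering a value <= 0
// ends the loop on all ranks.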
// Gather
// MPI_Gather
/*
int sendArray[10];
int *recvArray = (int*)malloc(numprocs*10*sizeof(int));
for(int i = 0; i < 10; i++){
sendArray[i] = myid * 10 + i;
}
MPI_Gather(sendArray, 10, MPI_INT, recvArray, 10, MPI_INT, 0, MPI_COMM_WORLD);
if(myid == 0){
for(int i = 0; i < 10*numprocs; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
}
free(recvArray);
*/
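// Expected result with 4 processes: rank i contributes 10*i .. 10*i+9, so the root
// prints 0..39 in rank order.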
// MPI_Gatherv
/*
int sendArray[myid+1]; // variable-length array: a compiler extension in C++, accepted by GCC/mpicxx
for(int i = 0; i < myid+1; i++){
sendArray[i] = myid * 10 + i;
}
int *recvArray = (int*)malloc(10*sizeof(int));
int recvCounts[4];
recvCounts[0] = 1;
recvCounts[1] = 2;
recvCounts[2] = 3;
recvCounts[3] = 4;
int displs[4];
displs[0] = 0;
for(int i = 1; i < 4; i++){
displs[i] = displs[i-1] + recvCounts[i-1];
}
MPI_Gatherv(sendArray, myid+1, MPI_INT, recvArray, recvCounts, displs, MPI_INT, 0, MPI_COMM_WORLD);
if(myid == 0){
for(int i = 0; i < 10; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
}
free(recvArray);
*/
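// Expected result with 4 processes: rank i contributes i+1 elements, so the root
// prints 0, 10, 11, 20, 21, 22, 30, 31, 32, 33.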
// Scatter
// MPI_Scatter
/*
int sendArray[40];
for(int i = 0; i < 40; i++){
sendArray[i] = i + 1;
}
int recvArray[10];
MPI_Scatter(sendArray, 10, MPI_INT, recvArray, 10, MPI_INT, 0, MPI_COMM_WORLD);
for(int i = 0; i < 10; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
*/
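// Expected result with 4 processes: rank i receives the i-th block of 10 elements,
// i.e. the values 10*i+1 .. 10*i+10.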
// MPI_Scatterv
/*
int sendArray[10];
for(int i = 0; i < 10; i++){
sendArray[i] = i + 1;
}
int sendCounts[4];
sendCounts[0] = 1;
sendCounts[1] = 2;
sendCounts[2] = 3;
sendCounts[3] = 4;
int displs[4];
displs[0] = 0;
for(int i = 1; i < 4; i++){
displs[i] = displs[i-1] + sendCounts[i-1];
}
int recvArray[myid+1];
MPI_Scatterv(sendArray, sendCounts, displs, MPI_INT, recvArray, myid+1, MPI_INT, 0, MPI_COMM_WORLD);
for(int i = 0; i < myid+1; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
*/
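// Expected result with 4 processes: rank 0 receives {1}, rank 1 {2, 3},
// rank 2 {4, 5, 6}, rank 3 {7, 8, 9, 10}.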
// Gather-to-all
// MPI_Allgather
/*
int sendArray[10];
for(int i = 0; i < 10; i++){
sendArray[i] = myid * 10 + i;
}
int recvArray[40];
MPI_Allgather(sendArray, 10, MPI_INT, recvArray, 10, MPI_INT, MPI_COMM_WORLD);
for(int i = 0; i < 40; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
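// Expected result with 4 processes: every rank ends up with the full array 0..39,
// as if each rank had acted as the root of an MPI_Gather call.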
*/
// MPI_Allgatherv
/*
int sendArray[myid+1];
for(int i = 0; i < myid+1; i++){
sendArray[i] = myid * 10 + i;
}
int recvCounts[4];
recvCounts[0] = 1;
recvCounts[1] = 2;
recvCounts[2] = 3;
recvCounts[3] = 4;
int recvArray[10];
int displs[4];
displs[0] = 0;
for(int i = 1; i < 4; i++){
displs[i] = displs[i-1] + recvCounts[i-1];
}
MPI_Allgatherv(sendArray, myid+1, MPI_INT, recvArray, recvCounts, displs, MPI_INT, MPI_COMM_WORLD);
for(int i = 0; i < 10; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
*/
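// Expected result with 4 processes: every rank ends up with
// 0, 10, 11, 20, 21, 22, 30, 31, 32, 33 (the same layout the root sees in the
// MPI_Gatherv example above).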
// All-to-all
// MPI_Alltoall
/*
int sendArray[40];
for(int i = 0; i < 40; i++){
sendArray[i] = myid;
}
int recvArray[40];
MPI_Alltoall(sendArray, 10, MPI_INT, recvArray, 10, MPI_INT, MPI_COMM_WORLD);
for(int i = 0; i < 40; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
*/
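// Expected result with 4 processes: block j of each rank's recvArray comes from
// rank j, so every rank prints ten 0s, ten 1s, ten 2s, ten 3s.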
// MPI_Alltoallv
/*
int sendArray[(myid+1) * 4];
for(int i = 0; i < (myid+1) * 4; i++){
sendArray[i] = myid;
}
int sendCounts[4];
for(int i = 0; i < 4; i++){
sendCounts[i] = myid + 1;
}
int sdispls[4];
sdispls[0] = 0;
for(int i = 1; i < 4; i++){
sdispls[i] = sdispls[i-1] + sendCounts[i-1];
}
int recvArray[10];
int recvCounts[4];
recvCounts[0] = 1;
recvCounts[1] = 2;
recvCounts[2] = 3;
recvCounts[3] = 4;
int rdispls[4];
rdispls[0] = 0;
for(int i = 1; i < 4; i++){
rdispls[i] = rdispls[i-1] + recvCounts[i-1];
}
MPI_Alltoallv(sendArray, sendCounts, sdispls, MPI_INT, recvArray, recvCounts, rdispls, MPI_INT, MPI_COMM_WORLD);
for(int i = 0; i < 10; i++){
cout << "Task: " << myid << " value: " << recvArray[i] << endl;
}
*/
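// Expected result with 4 processes: rank j sends j+1 copies of the value j to
// every rank, so each rank prints 0, 1, 1, 2, 2, 2, 3, 3, 3, 3.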
// Reduction: maximum
// MPI_Reduce
/*
int value = myid;
int getValue;
MPI_Reduce(&value, &getValue, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
if(myid == 0){
cout << "Task: " << myid << " value: " << value << " getValue: " << getValue << endl;
}
*/
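// Expected result with 4 processes: each rank contributes its rank id, so the root
// prints value 0 and getValue 3 (the maximum rank).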
// MPI_Allreduce
/*
int value = myid;
int getValue;
MPI_Allreduce(&value, &getValue, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
cout << "Task: " << myid << " value: " << value << " getValue: " << getValue << endl;
*/
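// Expected result with 4 processes: every rank prints its own rank as value and
// getValue 3, since the reduced maximum is delivered to all ranks.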
// Reduction: sum
// MPI_Reduce
/*
int value[4];
for(int i = 0; i < 4; i++){
value[i] = myid * 10 + i;
}
int getValue[4];
MPI_Reduce(value, getValue, 4, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if(myid == 0){
for(int i = 0; i < 4; i++){
cout << "Task: " << myid << " value: " << value[i] << " getValue: " << getValue[i] << endl;
}
}
*/
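// Expected result with 4 processes: getValue[i] is the sum over all ranks of
// (rank*10 + i), so the root prints getValue = 60, 64, 68, 72.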
// MPI_Allreduce
/*
int value[4];
for(int i = 0; i < 4; i++){
value[i] = myid * 10 + i;
}
int getValue[4];
MPI_Allreduce(value, getValue, 4, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
for(int i = 0; i < 4; i++){
cout << "Task: " << myid << " value: " << value[i] << " getValue: " << getValue[i] << endl;
}
*/
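// Expected result with 4 processes: every rank prints getValue = 60, 64, 68, 72,
// the same sums the root sees in the MPI_Reduce example above.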
// MPI_Reduce_scatter
/*
int value[4];
for(int i = 0; i < 4; i++){
value[i] = myid * 10 + i;
}
int getValue[4];
for(int i = 0; i < 4; i++){
getValue[i] = -1;
}
int recvCounts[4];
for(int i = 0; i < 4; i++){
recvCounts[i] = 1;
}
MPI_Reduce_scatter(value, getValue, recvCounts, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
for(int i = 0; i < 4; i++){
cout << "Task: " << myid << " value: " << value[i] << " getValue: " << getValue[i] << endl;
}
*/
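// Expected result with 4 processes: the element-wise sums are {60, 64, 68, 72};
// with recvCounts all 1, rank i receives only the i-th sum (60 + 4*i) in
// getValue[0], and the remaining entries keep their initial value -1.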
// MPI_Scan
/*
int value[4];
for(int i = 0; i < 4; i++){
value[i] = myid * 10 + i;
}
int getValue[4];
for(int i = 0; i < 4; i++){
getValue[i] = -1;
}
MPI_Scan(value, getValue, 4, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
for(int i = 0; i < 4; i++){
cout << "Task: " << myid << " value: " << value[i] << " getValue: " << getValue[i] << endl;
}
*/
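// Expected result with 4 processes: rank i receives the element-wise prefix sum
// over ranks 0..i, so rank 0 prints 0, 1, 2, 3; rank 1 prints 10, 12, 14, 16;
// rank 2 prints 30, 33, 36, 39; rank 3 prints 60, 64, 68, 72.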
MPI_Barrier(MPI_COMM_WORLD);
endTime = MPI_Wtime();
if(myid == 0){
cout << "Tasks over" << endl;
cout << "Cost time: " << endTime - startTime << " s." << endl;
}
MPI_Finalize();
return 0;
}
# Makefile
all:
mpicxx -o testapp mpitest.cpp
run:
mpirun -np 4 ./testapp
clean:
rm -f testapp