1. main.cpp
//main.cpp
#include <iostream>
#include <mpi.h>
#include <QtCore>
#include <QThread>
enum MyTag
{
    TAG_TEST = 1,
    TAG_TEST_RSP,
    TAG_FORECAST,
    TAG_COLWARN
};
void doMaster()
{
    MPI_Status status;
    char message[100] = {0};
    int nCoreSize;
    MPI_Comm_size(MPI_COMM_WORLD, &nCoreSize);
    for (int i = 1; i < nCoreSize; ++i)
    {
        sprintf(message, "Message-%05d", i);
        //+1 so the terminating '\0' travels with the text
        MPI_Send(message, strlen(message) + 1, MPI_CHAR, i, TAG_TEST, MPI_COMM_WORLD);
    }
    std::cout << "Master Sent Over\n";
    int nCoreId;
    int nRspCount = 0;
    while (nRspCount < nCoreSize - 1)
    {
        //MPI_ANY_SOURCE: accept whichever slave's response arrives first each round
        int ret = MPI_Recv(&nCoreId, 1, MPI_INT, MPI_ANY_SOURCE, TAG_TEST_RSP, MPI_COMM_WORLD, &status);
        if (ret != MPI_SUCCESS) std::cout << "Master recv ret: " << ret << "," << status.MPI_ERROR << std::endl;
        if (status.MPI_ERROR == 0)
        {
            nRspCount++;
            printf("Master recv response from #%d, %d/%d\n", nCoreId, nRspCount, nCoreSize - 1);
        }
    }
    std::cout << "Master END" << std::endl;
}
void doSlave(int nCoreId)
{
    MPI_Status status;
    char message[100] = {0};
    qsrand(nCoreId); //seed per rank, otherwise every process draws the same delays
    while (true)
    {
        QThread::msleep(100 + qrand() % 100); //random delay so the replies arrive in a random order
        MPI_Recv(message, 100, MPI_CHAR, 0, TAG_TEST, MPI_COMM_WORLD, &status);
        if (status.MPI_ERROR != 0)
        {
            QThread::msleep(10);
            continue;
        }
        std::cout << "Slave recv message (" << message << ") by core " << nCoreId << std::endl;
        MPI_Send(&nCoreId, 1, MPI_INT, 0, TAG_TEST_RSP, MPI_COMM_WORLD);
        break;
    }
}
int main(int argc, char* argv[])
{
    int nCoreId;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &nCoreId);
    if (nCoreId == 0)
    {
        doMaster();
    }
    else
    {
        doSlave(nCoreId);
    }
    MPI_Finalize();
    return 0;
}
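A caveat on the error checks above: the default error handler on MPI_COMM_WORLD is MPI_ERRORS_ARE_FATAL, so a failing call aborts the job rather than returning a code, and the ret != MPI_SUCCESS branches will normally never fire. Also, the MPI standard only guarantees that status.MPI_ERROR is filled in by the multi-request completion calls (e.g. MPI_Waitall); after a plain MPI_Recv its value is implementation-specific. Below is a minimal standalone sketch (not part of the program above) of switching the communicator to MPI_ERRORS_RETURN so that return-code checks become meaningful:

//Sketch: make MPI return error codes instead of aborting, then provoke one.
#include <mpi.h>
#include <cstdio>

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    int nSize = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &nSize);

    int nDummy = 0;
    //rank nSize is out of range, so this MPI_Send fails and returns a code
    int ret = MPI_Send(&nDummy, 1, MPI_INT, nSize, 0, MPI_COMM_WORLD);
    if (ret != MPI_SUCCESS)
    {
        char msg[MPI_MAX_ERROR_STRING];
        int nLen = 0;
        MPI_Error_string(ret, msg, &nLen);
        std::printf("MPI_Send failed: %s\n", msg);
    }

    MPI_Finalize();
    return 0;
}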
2. TestCpp.pro
#pro file
TEMPLATE = app
CONFIG += console c++11
CONFIG -= app_bundle
#CONFIG -= qt
QT -= gui

SOURCES += \
    main.cpp

INCLUDEPATH += /usr/mpi/gcc/openmpi-4.0.4rc3/include/
LIBS += -L/usr/mpi/gcc/openmpi-4.0.4rc3/lib64/ -lmpi
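As an alternative to hard-coding the Open MPI paths, the build can lean on the MPI compiler wrapper instead. A sketch, assuming mpicxx from the same Open MPI installation is on PATH:

# Sketch: let the MPI compiler wrapper inject the include and library paths,
# replacing the INCLUDEPATH/LIBS lines above. Assumes mpicxx is on PATH.
QMAKE_CXX  = mpicxx
QMAKE_LINK = mpicxx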
3. Sample run output
$ mpirun -np 8 TestCpp
Master Sent Over
Slave recv message (Message-00006) by core 6
Master recv response from #6, 1/7
Slave recv message (Message-00001) by core 1
Master recv response from #1, 2/7
Slave recv message (Message-00007) by core 7
Master recv response from #7, 3/7
Slave recv message (Message-00002) by core 2
Master recv response from #2, 4/7
Slave recv message (Message-00005) by core 5
Slave recv message (Message-00004) by core 4
Master recv response from #4, 5/7
Master recv response from #5, 6/7
Slave recv message (Message-00003) by core 3
Master recv response from #3, 7/7
Master END
4. [Improved] main.cpp: receiving a variable number of elements per receive
The key is the MPI_Get_elements call, which reports how many elements (here, MPI_INTs) the message described by an MPI_Status actually carried. Note that each MPI_Recv still matches exactly one message; what varies is how many elements that message contains, so the master sizes its buffer for the worst case and then asks the status what actually arrived.
#include <iostream>
#include <mpi.h>
#include <QtCore>
#include <QThread>

enum MyTag
{
    TAG_TEST = 1,
    TAG_TEST_RSP,
    TAG_FORECAST,
    TAG_COLWARN
};

void doMaster()
{
    MPI_Status status;
    char message[100] = {0};
    int nCoreSize;
    MPI_Comm_size(MPI_COMM_WORLD, &nCoreSize);
    for (int i = 1; i < nCoreSize; ++i)
    {
        sprintf(message, "Message-%05d", i);
        MPI_Send(message, strlen(message) + 1, MPI_CHAR, i, TAG_TEST, MPI_COMM_WORLD);
    }
    std::cout << "Master Sent Over\n";
    int *nCoreIds = new int[nCoreSize];
    int nRspCount = 0;
    //each slave replies with 2 ints, so expect 2*(nCoreSize-1) elements in total
    while (nRspCount < 2 * (nCoreSize - 1))
    {
        //MPI_ANY_SOURCE: take whichever response arrives first; one MPI_Recv
        //matches exactly one message, but that message may carry several elements
        int ret = MPI_Recv(nCoreIds, nCoreSize, MPI_INT, MPI_ANY_SOURCE, TAG_TEST_RSP, MPI_COMM_WORLD, &status);
        if (ret != MPI_SUCCESS || status.MPI_ERROR != 0)
        {
            std::cout << "Master recv ret: " << ret << "," << status.MPI_ERROR << std::endl;
            break;
        }
        //ask the status how many elements this message actually carried
        int nCount = 0;
        ret = MPI_Get_elements(&status, MPI_INT, &nCount);
        if (ret != MPI_SUCCESS)
        {
            std::cout << "MPI_Get_elements failed ret: " << ret << std::endl;
            break;
        }
        //accumulate the total element count
        nRspCount += nCount;
        printf("Master recv %d elements, %d/%d\n", nCount, nRspCount, 2 * (nCoreSize - 1));
        //print the ranks just received
        for (int i = 0; i < nCount; ++i)
        {
            printf("#%d ", nCoreIds[i]);
        }
        printf("\n");
    }
    delete[] nCoreIds; //buffer was heap-allocated above
    std::cout << "Master END" << std::endl;
}

void doSlave(int nCoreId)
{
    MPI_Status status;
    char message[100] = {0};
    while (true)
    {
        MPI_Recv(message, 100, MPI_CHAR, 0, TAG_TEST, MPI_COMM_WORLD, &status);
        if (status.MPI_ERROR != 0)
        {
            QThread::msleep(10);
            continue;
        }
        std::cout << "Slave recv message (" << message << ") by core " << nCoreId << std::endl;
        //MPI_Send(&nCoreId, 1, MPI_INT, 0, TAG_TEST_RSP, MPI_COMM_WORLD);
        //reply with the rank twice, i.e. one message holding 2 ints
        int data[2] = {nCoreId, nCoreId};
        MPI_Send(data, 2, MPI_INT, 0, TAG_TEST_RSP, MPI_COMM_WORLD);
        break;
    }
}

int main(int argc, char* argv[])
{
    int nCoreId;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &nCoreId);
    if (nCoreId == 0)
    {
        doMaster();
    }
    else
    {
        doSlave(nCoreId);
    }
    MPI_Finalize();
    return 0;
}
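A related pattern is worth knowing when the reply length is not bounded in advance: probe first, then allocate. MPI_Get_count is equivalent to MPI_Get_elements for a basic datatype such as MPI_INT. A minimal sketch, assuming the same MPI_COMM_WORLD and TAG_TEST_RSP as in the listing above (and an extra #include <vector>):

//Sketch: probe the pending message, size the buffer from its status,
//then receive exactly that many ints.
MPI_Status st;
MPI_Probe(MPI_ANY_SOURCE, TAG_TEST_RSP, MPI_COMM_WORLD, &st);
int nCount = 0;
MPI_Get_count(&st, MPI_INT, &nCount); //element count of the probed message
std::vector<int> buf(nCount);
MPI_Recv(buf.data(), nCount, MPI_INT, st.MPI_SOURCE, TAG_TEST_RSP,
         MPI_COMM_WORLD, MPI_STATUS_IGNORE);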
5. Output of the improved version:
$ mpirun -np 16 TestCpp
Slave recv message (Message-00001) by core 1
Slave recv message (Message-00002) by core 2
Slave recv message (Message-00003) by core 3
Slave recv message (Message-00005) by core 5
Slave recv message (Message-00004) by core 4
Slave recv message (Message-00006) by core 6
Slave recv message (Message-00008) by core 8
Slave recv message (Message-00007) by core 7
Slave recv message (Message-00009) by core 9
Slave recv message (Message-00010) by core 10
Slave recv message (Message-00011) by core 11
Master Sent Over
Slave recv message (Message-00013) by core 13
Master recv 2 elements, 2/30
#1 #1
Master recv 2 elements, 4/30
#2 #2
Master recv 2 elements, 6/30
#3 #3
Master recv 2 elements, 8/30
#8 #8
Master recv 2 elements, 10/30
#4 #4
Master recv 2 elements, 12/30
#6 #6
Master recv 2 elements, 14/30
#5 #5
Master recv 2 elements, 16/30
#7 #7
Master recv 2 elements, 18/30
#9 #9
Master recv 2 elements, 20/30
#10 #10
Master recv 2 elements, 22/30
#11 #11
Master recv 2 elements, 24/30
#13 #13
Slave recv message (Message-00012) by core 12
Master recv 2 elements, 26/30
#12 #12
Slave recv message (Message-00015) by core 15
Slave recv message (Message-00014) by core 14
Master recv 2 elements, 28/30
#15 #15
Master recv 2 elements, 30/30
#14 #14
Master END