编译:mpicc -o 目标文件 源文件.c (代码是 C 语言,源文件扩展名应为 .c)
运行:mpirun -n 进程数 -machinefile ./machinefile ./目标文件
带注释版(但是有点小的语法错误)
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>//exit
#include <malloc.h>
#include <memory.h>
#include <time.h>
void merge(int mykeys[], int receive[], int n, int flag);
void odd_even_sort(int a[], int n);
void doSort(int myid, int local_n, int np);
void printMatrix(int array[], int n);
void init(int n, int myid, int np);
int getPartner(int phase, int myid, int comm_sz);
int cmp(const void *a, const void *b);
int *array, *mykeys, *receive;
int n, partner;
/* Read the problem size n on rank 0 and distribute it to every rank.
 * my_rank  - this process's rank in MPI_COMM_WORLD
 * comm_sz  - total number of processes (kept for interface compatibility)
 * n_p      - out: receives n on every rank
 * The original looped MPI_Send to each rank; MPI_Bcast is the idiomatic
 * (and typically tree-structured) collective for this. */
void Get_input(int my_rank, int comm_sz, int *n_p)
{
    (void)comm_sz; /* no longer needed with a collective broadcast */
    if (my_rank == 0)
    {
        printf("Enter n\n");
        if (scanf("%d", n_p) != 1)
        {
            *n_p = 0; /* malformed input: fall back to an empty problem */
        }
    }
    MPI_Bcast(n_p, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
/* Merge two sorted runs of length n (this rank's keys and the partner's)
 * and keep exactly half the result in mykeys: the smallest n keys when
 * flag <= 0, the largest n keys when flag > 0. flag is myid - partner,
 * so the lower-ranked process keeps the low half. receive is unchanged. */
void merge(int mykeys[], int receive[], int n, int flag)
{
    int *merged = malloc(sizeof(int) * 2 * n);
    if (merged == NULL)
    {
        exit(-1); /* out of memory: abort the run */
    }
    int a = 0, b = 0;
    for (int out = 0; out < 2 * n; out++)
    {
        /* Take from mykeys only while it is strictly smaller; ties go to
         * receive, matching the original's >= comparison. */
        if (b >= n || (a < n && mykeys[a] < receive[b]))
            merged[out] = mykeys[a++];
        else
            merged[out] = receive[b++];
    }
    int base = (flag > 0) ? n : 0; /* which half this rank keeps */
    for (int k = 0; k < n; k++)
        mykeys[k] = merged[base + k];
    free(merged);
}
/* Print the first n elements of array to stdout, one per line. */
void printMatrix(int array[], int n)
{
    for (int idx = 0; idx < n; idx++)
    {
        printf("%d\n", array[idx]);
    }
}
void doSort(int myid, int local_n, int np) 4. 进行np轮排序。
{
int i;
for (i = 0; i < np; i++)
{
partner = getPartner(i, myid, np);
if (partner != MPI_PROC_NULL) // 不是空进程
{
MPI_Sendrecv(mykeys, local_n, MPI_INT, partner, 0, receive, local_n, MPI_INT, partner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
merge(mykeys, receive, local_n, myid - partner);
}
}//接受和发送消息标签都为0
}
//进程间的通信需要通过一个通信器来完成。MPI 环境在初始化时会自动创建两个通信器,一个称为 MPI_COMM_WORLD,它包含程序中的所有进程
// MPI_COMM_SELF,它是每个进程独自构成的、仅包含自己的通信器。
//MPI 系统提供了一个特殊进程号 MPI_PROC_NULL,它代表空进程 (不存在的进程),
//与 MPI_PROC_NULL 进行通信相当于一个空操作,对程序的运行没有任何影响。
//若单纯的利用MPI_Send, MPI_Recv函数进行通讯的话,容易造成死锁,下面介绍MPI_Sendrecv的来解决这个问题。
//MPI_Sendrecv表示的作用是将本进程的信息发送出去,并接收其他进程的信息
//MPI_Send和MPI_Recv两个函数是MPI里基本的点对点通信例程。
//两个函数都会阻塞调用进程,直到通信操作完成。阻塞可能会造成死锁。
/* Allocate per-rank buffers and, on rank 0, build the padded input array.
 * n is padded up to `total`, the next multiple of np, so the data divides
 * evenly; padding slots hold 0x7fffffff (INT_MAX) so they sort to the end
 * and are never printed (main prints only the first n).
 * Fixes: the original seeded with srand() but drew numbers with random(),
 * which srand() does not seed (srandom() does) — so the seed had no
 * effect; the comment on the rank test said "if NOT process 0" although
 * !myid is true exactly for rank 0; mallocs were unchecked. */
void init(int n, int myid, int np)
{
    int i;
    int total = n + np - n % np; /* padded element count (always > n) */
    if (!myid) /* rank 0 holds the full input array */
    {
        /* Seed rand() from the clock so each run gets different data. */
        srand((unsigned)time(NULL));
        array = (int *)malloc(sizeof(int) * total);
        if (array == NULL)
        {
            exit(-1);
        }
        for (i = 0; i < n; i++)
        {
            array[i] = rand(); /* rand(), not random(): srand seeds rand */
        }
        for (i = n; i < total; i++)
        {
            array[i] = 0x7fffffff; /* sentinel; assumes keys < INT_MAX */
        }
    }
    receive = (int *)malloc(sizeof(int) * (total / np));
    mykeys = (int *)malloc(sizeof(int) * (total / np));
    if (receive == NULL || mykeys == NULL)
    {
        exit(-1);
    }
}
/* Return this rank's exchange partner for the given phase, or
 * MPI_PROC_NULL when the rank sits at an edge and idles this phase.
 * Even phases pair (0,1)(2,3)...; odd phases pair (1,2)(3,4)...
 * Fix: the original had stray prose ("0和1 2和3") after a statement,
 * outside any comment, which broke compilation. */
int getPartner(int phase, int myid, int comm_sz)
{
    int partner;
    if (phase % 2 == 0)
    {
        /* even phase: odd ranks look left, even ranks look right */
        partner = (myid % 2 != 0) ? myid - 1 : myid + 1;
    }
    else
    {
        /* odd phase: the pairing shifts by one */
        partner = (myid % 2 != 0) ? myid + 1 : myid - 1;
    }
    if (partner == -1 || partner == comm_sz)
    {
        partner = MPI_PROC_NULL; /* no neighbor beyond either edge */
    }
    return partner;
}
/* qsort comparator for ints, ascending.
 * Fix: the original returned only 0 or 1 (`*a > *b`), never a negative
 * value, so cmp(x,y) and cmp(y,x) could both claim "not less" — an
 * inconsistent ordering, which makes qsort's behavior undefined.
 * Return the conventional negative/zero/positive instead; the
 * subtraction-of-comparisons form cannot overflow. */
int cmp(const void *a, const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}
/* Driver for the parallel odd-even transposition sort.
 * Fixes: removed unused locals i and j; comments translated/clarified. */
int main(void)
{
    int myid, np, namelen; /* np = number of processes */
    char proc_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Get_processor_name(proc_name, &namelen);
    double beginTime = MPI_Wtime();
    /* 1. Rank 0 reads n and distributes it to every rank. */
    Get_input(myid, np, &n);
    /* Pad n up to a multiple of np so the data scatters evenly; the
     * padding slots are filled with INT_MAX sentinels in init(). */
    int local_n = (n + (np - n % np)) / np;
    init(n, myid, np);
    /* 2. Scatter the padded array: each rank receives local_n keys. */
    MPI_Scatter(array, local_n, MPI_INT, mykeys, local_n, MPI_INT, 0, MPI_COMM_WORLD);
    /* 3. Each rank sorts its own block locally. */
    qsort(mykeys, local_n, sizeof(int), cmp);
    /* 4. np rounds of odd-even transposition between neighbor ranks. */
    doSort(myid, local_n, np);
    /* 5. Rank 0 gathers the globally sorted keys. */
    MPI_Gather(mykeys, local_n, MPI_INT, array, local_n, MPI_INT, 0, MPI_COMM_WORLD);
    double endTime = MPI_Wtime();
    if (myid == 0)
    {
        printMatrix(array, n); /* only the first n; sentinels sort last */
        printf("spent time = %lf second\n", endTime - beginTime);
    }
    free(array); /* NULL on non-root ranks: free(NULL) is a no-op */
    free(mykeys);
    free(receive);
    MPI_Finalize();
    return 0;
}
无注释版,无语法错误,编译通过
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <memory.h>
#include <time.h>
#include <stdlib.h>
//int array[] = {15, 11, 9, 16, 3, 14, 8, 7, 4, 6, 12, 10, 5, 2, 13, 1};
void merge(int mykeys[], int receive[], int n, int flag);
void odd_even_sort(int a[], int n);
void doSort(int myid, int local_n, int np);
void printMatrix(int array[], int n);
void init(int n, int myid, int np);
int getPartner(int phase, int myid, int comm_sz);
int cmp(const void *a, const void *b);
int *array, *mykeys, *receive;
int n, partner;
/* Read the problem size n on rank 0 and distribute it to every rank.
 * my_rank  - this process's rank in MPI_COMM_WORLD
 * comm_sz  - total number of processes (kept for interface compatibility)
 * n_p      - out: receives n on every rank
 * The original looped MPI_Send to each rank; MPI_Bcast is the idiomatic
 * (and typically tree-structured) collective for this. */
void Get_input(int my_rank, int comm_sz, int *n_p)
{
    (void)comm_sz; /* no longer needed with a collective broadcast */
    if (my_rank == 0)
    {
        printf("Enter n\n");
        if (scanf("%d", n_p) != 1)
        {
            *n_p = 0; /* malformed input: fall back to an empty problem */
        }
    }
    MPI_Bcast(n_p, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
int main(void)
{
int i, j,n;
int myid, np, namelen;
char proc_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
MPI_Comm_size(MPI_COMM_WORLD, &np);
MPI_Get_processor_name(proc_name, &namelen);
double beginTime, endTime;
beginTime = MPI_Wtime();
Get_input(myid, np, &n);
int local_n = (n + (np - n % np)) / np; // fill with the matrix
//初始化随机数。
init(n, myid, np);
// 2. 把数据广播出去。
MPI_Scatter(array, local_n, MPI_INT, mykeys, local_n, MPI_INT, 0, MPI_COMM_WORLD);
// 3. local sort
qsort(mykeys, local_n, sizeof(int), cmp);
// 4. 进行np轮排序。
doSort(myid, local_n, np);
// 5. 0号进程收集排序好的数据
MPI_Gather(mykeys, local_n, MPI_INT, array, local_n, MPI_INT, 0, MPI_COMM_WORLD);
endTime = MPI_Wtime();
if (myid == 0)
{
printMatrix(array, n);
printf("spent time = %lf second\n", endTime - beginTime);
}
free(array);
free(mykeys);
free(receive);
MPI_Finalize();
}
/* Merge two sorted runs of length n and keep exactly half the result in
 * mykeys: the smallest n keys when flag <= 0, the largest n keys when
 * flag > 0 (flag is myid - partner, so the lower rank keeps the low
 * half). receive is left unchanged. */
void merge(int mykeys[], int receive[], int n, int flag)
{
    int *scratch = (int *)malloc(sizeof(int) * 2 * n);
    if (scratch == NULL)
    {
        exit(-1); /* out of memory: abort the run */
    }
    int mine = 0, theirs = 0, w = 0;
    while (w < 2 * n)
    {
        /* Ties go to receive, matching the original's >= comparison. */
        if (theirs >= n || (mine < n && mykeys[mine] < receive[theirs]))
            scratch[w++] = mykeys[mine++];
        else
            scratch[w++] = receive[theirs++];
    }
    int start = (flag > 0) ? n : 0; /* which half this rank keeps */
    for (int k = 0; k < n; k++)
    {
        mykeys[k] = scratch[start + k];
    }
    free(scratch);
}
/* Print the first n elements of array to stdout, one per line. */
void printMatrix(int array[], int n)
{
    for (int idx = 0; idx < n; idx++)
    {
        printf("%d\n", array[idx]);
    }
}
/* Run np phases of odd-even transposition sort. Each phase this rank
 * swaps its whole local block with its phase partner (if any) and keeps
 * the appropriate half after merging. Stores the partner in the
 * file-scope `partner`, as the original does. */
void doSort(int myid, int local_n, int np)
{
    for (int phase = 0; phase < np; phase++)
    {
        partner = getPartner(phase, myid, np);
        if (partner == MPI_PROC_NULL)
        {
            continue; /* edge rank idles this phase */
        }
        /* One combined send+receive (tag 0 both ways) avoids the
         * deadlock risk of paired blocking MPI_Send/MPI_Recv. */
        MPI_Sendrecv(mykeys, local_n, MPI_INT, partner, 0,
                     receive, local_n, MPI_INT, partner, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /* myid - partner selects which half this rank keeps. */
        merge(mykeys, receive, local_n, myid - partner);
    }
}
/* Allocate per-rank buffers and, on rank 0, build the padded input array.
 * n is padded up to `total`, the next multiple of np, so the data divides
 * evenly; padding slots hold 0x7fffffff (INT_MAX) so they sort to the end
 * and are never printed (main prints only the first n).
 * Fix: the original seeded with srand() but drew numbers with random(),
 * which srand() does not seed (srandom() does) — so the seed had no
 * effect; mallocs were also unchecked. */
void init(int n, int myid, int np)
{
    int i;
    int total = n + np - n % np; /* padded element count (always > n) */
    if (!myid) /* rank 0 holds the full input array */
    {
        /* Seed rand() from the clock so each run gets different data. */
        srand((unsigned)time(NULL));
        array = (int *)malloc(sizeof(int) * total);
        if (array == NULL)
        {
            exit(-1);
        }
        for (i = 0; i < n; i++)
        {
            array[i] = rand(); /* rand(), not random(): srand seeds rand */
        }
        for (i = n; i < total; i++)
        {
            array[i] = 0x7fffffff; /* sentinel; assumes keys < INT_MAX */
        }
    }
    receive = (int *)malloc(sizeof(int) * (total / np));
    mykeys = (int *)malloc(sizeof(int) * (total / np));
    if (receive == NULL || mykeys == NULL)
    {
        exit(-1);
    }
}
/* Return this rank's exchange partner for the given phase, or
 * MPI_PROC_NULL when the rank sits at an edge and idles this phase.
 * Even phases pair (0,1)(2,3)...; odd phases pair (1,2)(3,4)...
 * Equivalent to the original's four-way branch: when phase and myid have
 * the same parity the neighbor is to the right, otherwise to the left. */
int getPartner(int phase, int myid, int comm_sz)
{
    int neighbor = ((phase + myid) % 2 == 0) ? myid + 1 : myid - 1;
    if (neighbor < 0 || neighbor >= comm_sz)
    {
        neighbor = MPI_PROC_NULL; /* no neighbor beyond either edge */
    }
    return neighbor;
}
/* qsort comparator for ints, ascending.
 * Fix: the original returned only 0 or 1 (`*a > *b`), never a negative
 * value, so cmp(x,y) and cmp(y,x) could both claim "not less" — an
 * inconsistent ordering, which makes qsort's behavior undefined.
 * Return the conventional negative/zero/positive instead; the
 * subtraction-of-comparisons form cannot overflow. */
int cmp(const void *a, const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}