1. 熟悉MPI定义声明
/* first.c */
#include "mpi.h" /*MPI的头函数,提供基本的MPI定义和类型*/
#include <stdio.h>
int main( argc, **argv )
{
int rank, size, tag=333;
int buf[20]
MPI_Status status
MPI_Init( &argc, &argv ); /*MPI的初始化函数*/
MPI_Comm_rank( MPI_COMM_WORLD, &rank ); /*该进程的编号*/
MPI_Comm_size( MPI_COMM_WORLD, &size ); /*总的进程数目*/
if (rank==0)
MPI_Send( buf, 20, MPI_Int, 1, tag, MPI_COMM_WORLD); /*发送buf到进程1*/
if (rank==0)
MPI_Recv( buf, 20, MPI_Int, 0, tag, MPI_COMM_WORLD, &status); /*从进程0接收buf*/
MPI_Finalize(); /*MPI的结束函数*/
return 0;
}
#include "mpi.h"
#include <stdio.h>
#define T_SIZE 2000
int main(argc,**argv)
{
int ierr, prev, next, tag, rank, size;
MPI_Status status;
double send_buf[T_SIZE], recv_buf[T_SIZE];
MPI_Init(&argc,&argv);
MPI_Comm_rank( MPI_COMM_WORLD, &rank);
MPI_Comm_size( MPI_COMM_WORLD, &size);
next = rank + 1;
if (next > size)
next = 0;
prev = rank – 1;
if (prev < 0)
prev = size – 1;
if (rank == 0) {
MPI_Send(send_buf, T_SIZE, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
MPI_Recv(recv_buf, T_SIZE, MPI_DOUBLE, prev, tag+1, MPI_COMM_WORLD, status);
}else{
MPI_Recv(recv_buf, T_SIZE, MPI_DOUBLE, prev, tag, MPI_COMM_WORLD, status);
MPI_Send(recv_buf, T_SIZE, MPI_DOUBLE, next, tag+1, MPI_COMM_WORLD);
}
MPI_Finalize();
}
2. 熟悉MPI基本通信函数
#include "mpi.h"
#include <stdio.h>
#include <math.h>
/*
 * Integrand for the pi computation: f(a) = 4 / (1 + a^2).
 * The integral of f over [0, 1] equals pi.
 * Converted from the obsolete K&R parameter style to an ANSI prototype.
 */
double f(double a)
{
    return (4.0 / (1.0 + a * a));
}
int main(argc,**argv)
{
int done = 0, n=100, myid, numprocs, i;
double PI25DT = 3.141592653589793238462643;
double mypi, pi, h, sum, x, a,startwtime, endwtime;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
if (myid == 0)
startwtime = MPI_Wtime();
h = 1.0 / (double) n;
sum = 0.0;
for (i = myid + 1; i <= n; i += numprocs){
x = h * ((double)i - 0.5);
sum += f(x);
}
mypi = h * sum;
MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (myid == 0){
printf("pi is %.16f, Error is %.16f\n", pi, fabs(pi - PI25DT));
endwtime = MPI_Wtime();
printf("wall clock time = %f\n", endwtime-startwtime);
}
MPI_Finalize();
}
3. 点对点通信
#include <stdio.h>
#include "mpi.h"
int main(argc, **argv)
{
int locId,data[100], tag=8888;
MPI_Status status;
MPI_Init(&argc, &argv) ;
MPI_Comm_rank(MPI_COMM_WORLD, &locId) ;
if(locId == 0) {
MPI_Request events;
MPI_Isend(data, 100, MPI_INT, 1, tag , MPI_COMM_WORLD, &events) ;
MPI_Wait(&events, &status) ;
}
if(locId == 1) {
MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
if (status.MPI_SOURCE==0)
MPI_Recv(data, 100, MPI_INT, 0, tag, MPI_COMM_WORLD, &status) ;
}
MPI_Finalize() ;
}
4. 群体通信
#include <stdlib.h>

#include <mpi.h>
int main(argc,**argv)
{
int i,myrank,size,root,full_domain_length,sub_domain_length;
double global_max,local_max,*full_domain,sub_domain;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
root = 0;
if (myrank == root)
get_full_domain(&full_domain, &full_domain_length);
MPI_Bcast(&full_domain_length, 1, MPI_INT, root, MPI_COMM_WORLD);
sub_domain_length = full_domain_length / size;
sub_domain = (double *) malloc(sub_domain_length * sizeof(double));
MPI_Scatter(full_domain, sub_domain_length, MPI_DOUBLE, sub_domain, sub_domain_length, MPI_DOUBLE, root, MPI_COMM_WORLD);
compute(sub_domain, sub_domain_length, &local_max);
MPI_Reduce(&local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, root, MPI_COMM_WORLD);
MPI_Gather(sub_domain, sub_domain_length, MPI_DOUBLE, full_domain, sub_domain_length, MPI_DOUBLE, root, MPI_COMM_WORLD);
}