MPI 在Windows10 上安装,使用VS2013编译生成可执行程序
系统环境:
Windows10 (Windows7及以上均可以)
64bit
VS2013
1. 下载并安装MPICH For Windows
进入http://www.mpich.org/downloads/站点根据操作系统下载。由于我们使用的是Windows,拉到下载网页最底部,最新的MPICH实现已经由微软官网托管,我们直接进去下载。
然后,选择最新的V8下载,包含两个文件:msmpisdk.msi和MSMpiSetup.exe。
下载完毕直接分别安装这两个程序 msmpisdk.msi 和 MSMpiSetup.exe
我安装在了D盘:
2. VS配置以及demo演示
新建一个VC++项目
在项目属性中配置修改如下:
包含目录里面添加:C:\Program Files (x86)\Microsoft SDKs\MPI\Include;
库目录的里面添加:C:\Program Files (x86)\Microsoft SDKs\MPI\Lib\x64;
其他必要配置:
配置管理器,选择x64编译平台;
C/C++ -> 预处理器,添加:MPICH_SKIP_MPICXX;
C/C++ -> 代码生成 -> 运行库,选择:多线程调试(/MTd);
链接器 -> 输入 -> 附加依赖项,添加:msmpi.lib;
demo测试,新建一个C++文件,命名为main.cpp
#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>
#include<time.h>
int main(int argc, char* argv[])
{
    int rank, size, name_len;
    char host[MPI_MAX_PROCESSOR_NAME];

    /* Bring up the MPI runtime before any other MPI call. */
    MPI_Init(&argc, &argv);

    /* Identify this process and the total number of processes. */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Get_processor_name(host, &name_len);

    /* Only rank 0 reports the process count. */
    if (rank == 0) {
        printf("number of processes: %d\n...", size);
    }
    printf("%s: Hello world from process %d \n", host, rank);

    MPI_Finalize();
    return 0;
}
编译整个项目,将编译(1.项目右键重新生成 或者 2.使用编辑器编译得到)得到的 exe文件(debug文件夹下)放在安装的MS-MPI的bin目录(默认为:C:\Program Files\Microsoft MPI\Bin 我安装在了D盘)下,在这个Bin目录下按住shift键于空白处右键单击,打开命令行窗口,输入 mpiexec -n 10 MPI-demo.exe 得到运行结果,如下图:
三个实例与结果:
// MPI-demo.cpp : 定义控制台应用程序的入口点。
#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
/*
MPI parallel programming
1. the usual functions of MPI
MPI environment will create two communicators,MPI_COMM_WORLD includes all processes,MPI_COMM_SELF include itself processes
MPI_Init(&argc,&argv) //initiate MPI environment maybe global variable ,it must be the start.
MPI_Comm_rank(communicator,&myid)//to get the id of the current process in the communicator, so each process can identify itself, work in parallel and cooperate
MPI_Comm_size(communicator,&numprocs)//to get the process number included communicator
MPI_Finalize()//to finalize the MPI environment, it must be the last MPI call
int MPI_Send(void* buf,int count,MPI_Datatype datatype,int dest,int tag,MPI_Comm comm)
int MPI_Recv(void* buf,int count,MPI_Datatype datatype,int source,int tag,MPI_Comm comm,MPI_Status *status)
dest/source appoints:the id of process
MPI_Status fields: status.MPI_SOURCE, status.MPI_TAG, status.MPI_ERROR
MPI_PROC_NULL: the virtual (null) process - facilitates writing uniform communication statements
MPI_SENDRECV(sendbuf,sendcount,sendtype,dest,sendtag,recvbuf,recvcount,recvtype,source,recvtag,comm,status)
MPI predefined datatype:
MPI_CHAR\MPI_SHORT\MPI_INT\MPI_LONG\MPI_UNSIGNED_CHAR\MPI_UNSIGNED_SHORT\MPI_UNSIGNED\MPI_UNSIGNED_LONG\MPI_FLOAT\MPI_DOUBLE\MPI_LONG_DOUBLE
note:if the communication of having datatype must require the same datatype
if the communication of not having datatype must require the same (MPI_BYTE) datatype
if the communication of having package must require the same (MPI_PACKED) datatype
double MPI_Wtime(void)-return double seconds
int MPI_Get_processor_name(char *name,int *result)
*/
/*
int main(int argc, char* argv[])
{
int myid, numprocs, namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv); // starts MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myid); // get current process id
MPI_Comm_size(MPI_COMM_WORLD, &numprocs); // get number of processes
MPI_Get_processor_name(processor_name, &namelen);
if (myid == 0) printf("number of processes: %d\n...", numprocs);
printf("%s: Hello world from process %d \n", processor_name, myid);
MPI_Finalize();
return 0;
}
*/
/*
int main(int argc, char **argv)
{
int numpros,pid,namelen,buf[1],i;
MPI_Status status;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &numpros);
MPI_Get_processor_name(processor_name, &namelen);
if (pid == 0){
for (i = 0; i < 2 * (numpros - 1); i++){
MPI_Recv(buf, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
printf("Msg=%d from %d with tag %d\n", buf[0], status.MPI_SOURCE, status.MPI_TAG);
}
}
else{
for (i = 0; i < 2; i++){
buf[0] = pid + i;
MPI_Send(buf,1,MPI_INT,0,i,MPI_COMM_WORLD);
}
}
MPI_Finalize();
return 0;
}
*/
/*2. reduction function
int MPI_Reduce(
void *input_data,//send buffer - pointer to the data to be reduced
void *output_data,//receive buffer - pointer where the result is stored (significant only at dest)
int count,
MPI_Datatype datatype,
MPI_Op operator,
int dest,//the process id (root) that receives the reduced result
MPI_Comm comm
)
operator:
MPI_MAX/MPI_MIN
MPI_SUM/MPI_PROD(product)
MPI_LAND\MPI_LOR\MPI_LXOR
MPI_BAND\MPI_BOR\MPI_BXOR
MPI_MAXLOC/MPI_MINLOC
*/
//T method to get sin(x) [0,3.1415926]
// Integration bounds for the demo: integrate sin(x) over [a, b].
const double a = 0.0;
const double b = 3.1415926; // approximation of pi; the exact integral of sin over [0, pi] is 2
int n = 100; // total number of trapezoid subintervals, split across all ranks
// Step width of one subinterval.
// NOTE(review): this file-scope initializer is not a constant expression, so
// this only compiles as C++ (the tutorial builds main.cpp with VS2013); a
// strict C compiler would reject it.
double h = (b-a) / n;
double trap(double a, double b,int num)
{
double y0 = sin(a);
double yn = sin(b);
int i;
double sum = (y0 + yn) / 2;
for (i = 1; i < num; i++){
sum += sin(a + i*h);
}
double area = sum*h;
return area;
}
/*
 * Parallel trapezoidal integration of sin(x) over [a, b] using n
 * subintervals of width h, with the partial sums combined on rank 0
 * via MPI_Reduce(MPI_SUM).
 *
 * Fix: the original computed num = n / numprocs and gave every rank the
 * same count, silently dropping the last n % numprocs subintervals
 * whenever numprocs did not divide n (e.g. 8 processes covered only 96
 * of 100 intervals).  The first n % numprocs ranks now take one extra
 * subinterval so the whole range is always covered.  Also appended the
 * missing newline to the result printf.
 */
int main(int argc, char **argv)
{
	int myid, numprocs, num, name_len;
	int extra, start;
	double local_a, local_b;
	double integral, total_integral;
	char processor_name[MPI_MAX_PROCESSOR_NAME];

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &myid);
	MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

	/* Distribute n subintervals: ranks [0, extra) get num+1, the rest num. */
	num = n / numprocs;
	extra = n % numprocs;
	if (myid < extra) {
		num += 1;
		start = myid * num;               /* all earlier ranks also hold num */
	} else {
		start = myid * num + extra;       /* earlier ranks held extra spares */
	}
	local_a = a + start * h;
	local_b = local_a + num * h;

	integral = trap(local_a, local_b, num);

	/* Sum all partial integrals into total_integral on rank 0. */
	MPI_Reduce(&integral, &total_integral, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Get_processor_name(processor_name, &name_len);
	if (myid == 0) {
		printf("%s processor of id=0 compute integral output is %lf\n", processor_name, total_integral);
	}
	MPI_Finalize();
	return 0;
}