linux基于MPI的多进程编程,矩阵乘法(C++语言)
1.MPI的下载
下载网址:https://www.mpich.org/downloads/
2.安装和测试安装是否成功
tar -zxvf mpich-3.3.2.tar.gz
cd mpich-3.3.2
$ ./configure --prefix=/usr/local/mpich --disable-fortran
sudo make && make install
失败(原因:&& 之后的 make install 不会继承 sudo 权限,应写成 sudo make && sudo make install,或者先切换到 root)
sudo su
sudo make && make install
vim ~/.bashrc
export PATH=/usr/local/mpich/bin:$PATH
source ~/.bashrc
测试
root@lthpc:/home/peng/mpich-3.3.2# mpirun -np 10 ./examples/cpi
Process 9 of 10 is on lthpc
Process 7 of 10 is on lthpc
Process 3 of 10 is on lthpc
Process 1 of 10 is on lthpc
Process 0 of 10 is on lthpc
Process 6 of 10 is on lthpc
Process 8 of 10 is on lthpc
Process 2 of 10 is on lthpc
Process 4 of 10 is on lthpc
Process 5 of 10 is on lthpc
pi is approximately 3.1415926544231256, Error is 0.0000000008333325
wall clock time = 0.000274
root@lthpc:/home/peng/mpich-3.3.2#
3.MPI矩阵乘法
代码 C++:来自:https://www.bbsmax.com/A/A2dmnX7Wze/
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
void initMatrixWithRV(float* A, int rows, int cols);
void matMultiplyWithSingleThread(float* A, float* B, float* matResult, int m, int p, int n);
int main(int argc, char** argv)
{
    // Parallel dense matrix multiply C = A(m x p) * B(p x n).
    // Row blocks of A are scattered across ranks, B is broadcast,
    // each rank multiplies its block, and the blocks of C are gathered
    // back on rank 0. Usage: mpirun -np K ./mpi m p n
    float* A = NULL;   // full m x p matrix, allocated on rank 0 only
    float* B = NULL;   // full p x n matrix, broadcast to every rank
    float* C = NULL;   // full m x n result, gathered on rank 0 only
    float* bA = NULL;  // this rank's row block of A (bm x p)
    float* bC = NULL;  // this rank's row block of C (bm x n)
    double starttime, endtime;
    double totaltime;
    int myrank, numprocs;

    MPI_Init(&argc, &argv); // start of the parallel region

    // Parse the command line AFTER MPI_Init: the MPI implementation may
    // add or strip argv entries, and bare "./mpi" must not segfault.
    if (argc < 4) {
        if (argc > 0) fprintf(stderr, "usage: %s m p n\n", argv[0]);
        MPI_Finalize();
        return 1;
    }
    int m = atoi(argv[1]);
    int p = atoi(argv[2]);
    int n = atoi(argv[3]);

    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

    int bm = m / numprocs; // rows per rank; m % numprocs leftovers handled by rank 0

    starttime = MPI_Wtime();

    bA = new float[bm * p];
    B = new float[p * n];
    bC = new float[bm * n];
    if (myrank == 0) {
        A = new float[m * p];
        C = new float[m * n];
        initMatrixWithRV(A, m, p);
        initMatrixWithRV(B, p, n);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    /* step 1: distribute the data. On non-root ranks A/C stay NULL,
     * which is fine: MPI ignores the root-only buffer arguments there
     * (the original passed uninitialized pointers, which is UB). */
    MPI_Scatter(A, bm * p, MPI_FLOAT, bA, bm * p, MPI_FLOAT, 0, MPI_COMM_WORLD);
    MPI_Bcast(B, p * n, MPI_FLOAT, 0, MPI_COMM_WORLD);

    /* step 2: each rank multiplies its block of rows */
    matMultiplyWithSingleThread(bA, B, bC, bm, p, n);
    MPI_Barrier(MPI_COMM_WORLD);

    /* step 3: gather the partial results on rank 0 */
    MPI_Gather(bC, bm * n, MPI_FLOAT, C, bm * n, MPI_FLOAT, 0, MPI_COMM_WORLD);

    /* step 3-1: rank 0 computes the leftover rows (when numprocs does
     * not divide m evenly) */
    int remainRowsStartId = bm * numprocs;
    if (myrank == 0 && remainRowsStartId < m) {
        int remainRows = m - remainRowsStartId;
        matMultiplyWithSingleThread(A + remainRowsStartId * p, B,
                                    C + remainRowsStartId * n, remainRows, p, n);
    }

    // Stop the clock before teardown so the reported time measures the
    // distribute/compute/gather work, not the delete[] calls.
    endtime = MPI_Wtime();
    totaltime = endtime - starttime;

    delete[] bA;
    delete[] B;
    delete[] bC;
    if (myrank == 0) {
        delete[] A;
        delete[] C;
    }

    printf("total time:%f s.\n", totaltime);
    MPI_Finalize(); // end of the parallel region
    return 0;
}
/* Fill A (rows*cols elements, row-major) with uniform random floats in
 * [0, 1].
 *
 * Bug fix: the original re-seeded with srand(time(NULL)) on EVERY call.
 * Two calls within the same second (exactly what rank 0 does for A and
 * B) therefore produced identical "random" matrices. Seed exactly once
 * per process instead. */
void initMatrixWithRV(float* A, int rows, int cols)
{
    static int seeded = 0;
    if (!seeded) {
        srand((unsigned)time(NULL));
        seeded = 1;
    }
    for (int i = 0; i < rows * cols; i++) {
        A[i] = (float)rand() / RAND_MAX;
    }
}
/* Naive O(m*p*n) single-threaded dense multiply: matResult = A * B.
 * A is m x p, B is p x n, matResult is m x n, all stored row-major.
 * Each output element is accumulated in a local before being stored. */
void matMultiplyWithSingleThread(float* A, float* B, float* matResult, int m, int p, int n)
{
    for (int row = 0; row < m; row++) {
        const float* aRow = A + row * p;          /* row-th row of A */
        float* outRow = matResult + row * n;      /* row-th row of result */
        for (int col = 0; col < n; col++) {
            float acc = 0.0f;
            for (int k = 0; k < p; k++) {
                acc += aRow[k] * B[k * n + col];
            }
            outRow[col] = acc;
        }
    }
}
编译
root用户sudo su
mpic++,mpicc,gcc都不行
mpicxx可以
如果找不到mpi.h
#include "/usr/local/mpich/include/mpi.h"或者(-I表示include, -L表示lib)
g++ -o mpi mpi.cpp -I/home/peng/mpich-3.3.2/include -L/home/peng/mpich-3.3.2/lib -lmpi(注意还需要 -l 链接 MPI 库,否则只指定 -L 会链接失败)
运行结果
root@lthpc:/home/peng# mpicxx -o mpi mpi.c
root@lthpc:/home/peng# mpirun -np 8 ./mpi 512 512 512
total time:0.131861 s.
total time:0.131989 s.
total time:0.132338 s.
total time:0.130865 s.
total time:0.131200 s.
total time:0.132584 s.
total time:0.131860 s.
total time:0.133085 s.
root@lthpc:/home/peng# mpirun -np 4 ./mpi 512 512 512
total time:0.252703 s.
total time:0.252134 s.
total time:0.253157 s.
total time:0.254014 s.
root@lthpc:/home/peng# mpirun -np 2 ./mpi 512 512 512
total time:0.487757 s.
total time:0.488377 s.
root@lthpc:/home/peng# mpirun -np 1 ./mpi 512 512 512
total time:0.846030 s.
root@lthpc:/home/peng# mpirun -np 8 ./mpi 1024 1024 1024
total time:1.096087 s.
total time:1.095107 s.
total time:1.096667 s.
total time:1.096917 s.
total time:1.096226 s.
total time:1.098098 s.
total time:1.097944 s.
total time:1.099281 s.
root@lthpc:/home/peng# mpirun -np 4 ./mpi 1024 1024 1024
total time:1.862266 s.
total time:1.862621 s.
total time:1.864353 s.
total time:1.864560 s.
root@lthpc:/home/peng# mpirun -np 2 ./mpi 1024 1024 1024
total time:3.493375 s.
total time:3.494138 s.
root@lthpc:/home/peng# mpirun -np 1 ./mpi 1024 1024 1024
total time:6.363792 s.
root@lthpc:/home/peng#