1. compute_pi.cpp
#include <mpi.h>
#include <math.h>
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
int main(int argc, char **argv) {
    // Approximate pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
    // splitting the n subintervals cyclically across the MPI ranks and
    // summing the partial results onto rank 0 with MPI_Reduce.
    //
    // Usage: mpi_pi [n]   — optional subinterval count (default 10^7).
    const double PI25DT = 3.141592653589793238462643; // 25-digit reference value

    MPI_Init(&argc, &argv);

    int numprocs, myid;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    std::cout << "Process " << getpid() << " is " << myid
              << " of " << numprocs << " processes" << std::endl;

    // Number of subintervals: optional first command-line argument,
    // falling back to the original hard-coded 10^7 when absent or invalid.
    long n = 10000000;
    if (argc > 1) {
        long requested = std::strtol(argv[1], 0, 10);
        if (requested > 0) {
            n = requested;
        }
    }

    const double h = 1.0 / (double) n; // width of each subinterval
    double sum = 0.0;
    // Rank r evaluates subintervals r+1, r+1+P, r+1+2P, ... (cyclic split).
    // 'long' index so a user-supplied n larger than INT_MAX cannot overflow.
    for (long i = myid + 1; i <= n; i += numprocs) {
        const double x = h * ((double) i - 0.5); // midpoint of subinterval i
        sum += 4.0 / (1.0 + x * x);
    }
    const double mypi = h * sum; // this rank's share of the integral

    // Combine the partial integrals; only rank 0 receives the total.
    double pi = 0.0;
    MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (myid == 0) {
        std::printf("pi is approximately %.16f, Error is %.16f\n",
                    pi, std::fabs(pi - PI25DT));
    }

    MPI_Finalize();
    return 0;
}
2. Makefile
# Build the MPI pi-approximation example with the MPI C++ wrapper compiler.
CC := mpicxx
CFLAGS := -Wall
SRC := compute_pi.cpp
EXE := mpi_pi

# 'release', 'debug', and 'clean' never create files by those names, so they
# must be phony or a stray file called e.g. 'release' would mask them.
.PHONY: release debug clean

# Default goal: optimized build (kept first so a bare 'make' builds it).
release: $(EXE)

# Real file target — relinks only when the source is newer than the binary.
$(EXE): $(SRC)
	$(CC) $(CFLAGS) -o $@ $<

debug: $(EXE)_debug

$(EXE)_debug: $(SRC)
	$(CC) $(CFLAGS) -g -o $@ $<

clean:
	$(RM) $(EXE) $(EXE)_debug
3. mpi_job.slurm
#!/bin/bash
#SBATCH --nodes=3
#SBATCH --ntasks-per-node=8    # 8 MPI processes per node (24 ranks total)
#SBATCH --time=0-00:10:00      # 10 minutes of wall time
#SBATCH --mem=1G               # 1 GB RAM per node
#SBATCH --output=job_%j.log

# Abort the job on the first failing command so a broken build does not
# fall through to srun-ing a stale or missing binary.
set -e

# Load the latest GCC compiler and OpenMPI
module load GCC OpenMPI

# Build executable file, print node list, and run MPI program
make
echo "$SLURM_JOB_NODELIST"
srun ./mpi_pi   # srun is SLURM's version of mpirun/mpiexec