Slurm MPI Job

1. compute_pi.cpp

// Estimate pi across MPI ranks by midpoint-rule integration of
// 4/(1+x^2) over [0,1]: each rank sums a cyclic subset of the
// subintervals, and MPI_Reduce combines the partial sums on rank 0.
#include <mpi.h>
#include <math.h>
#include <stdio.h>       // printf — was only available transitively via <iostream>
#include <sys/types.h>
#include <unistd.h>
#include <iostream>

int main(int argc, char **argv) {

	int n, myid, numprocs, i;
	double PI25DT = 3.141592653589793238462643;  // 25-digit reference value of pi
	// pi is initialized defensively: MPI_Reduce only writes it on root (rank 0).
	double mypi, pi = 0.0, h, sum, x;

	MPI_Init(&argc,&argv);
	MPI_Comm_size(MPI_COMM_WORLD,&numprocs);  // total number of ranks
	MPI_Comm_rank(MPI_COMM_WORLD,&myid);      // this rank's id (0-based)

	std::cout << "Process " << getpid() << " is " << myid
	<< " of " << numprocs << " processes" << std::endl;

	n = 10000000;          // number of integration subintervals
	h = 1.0 / (double) n;  // width of each subinterval
	sum = 0.0;
	// Cyclic work split: rank r handles subintervals r+1, r+1+numprocs, ...
	for (i = myid + 1; i <= n; i += numprocs) {
		x = h * ((double)i - 0.5);     // midpoint of subinterval i
		sum += 4.0 / (1.0 + x*x);
	}
	mypi = h * sum;  // this rank's partial integral

	// Sum all partial results into pi on rank 0.
	MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

	if (myid == 0) {
		printf("pi is approximately %.16f, Error is %.16f\n"
               ,pi, fabs(pi - PI25DT));
	}
	MPI_Finalize();
	return 0;

}

2. Makefile

# Build the MPI pi-estimation example with the MPI C++ compiler wrapper.
# Conventional C++ variable names so users can override from the command
# line, e.g. `make CXX=mpiicpc CXXFLAGS=-O2`.
CXX := mpicxx
CXXFLAGS := -Wall

SRC := compute_pi.cpp
EXE := mpi_pi

# release/debug are command-style aliases; the real products are the file
# targets below, so an up-to-date binary is not relinked on every `make`.
.PHONY: release debug clean

release : ${EXE}

debug : ${EXE}_debug

${EXE}: ${SRC}
	$(CXX) -o $@ $< $(CXXFLAGS)

${EXE}_debug: ${SRC}
	$(CXX) -o $@ $< $(CXXFLAGS) -g

clean:
	$(RM) ${EXE} ${EXE}_debug

3. mpi_job.slurm

#!/bin/bash
# Slurm batch script: build and run the MPI pi program on
# 3 nodes x 8 ranks = 24 MPI processes.
#SBATCH --nodes=3
#SBATCH --ntasks-per-node=8    # 8 MPI processes per node (current name; --tasks-per-node is legacy)
#SBATCH --time=0-00:10:00      # 10 minutes of wall time
#SBATCH --mem=1G               # 1 GB RAM per node
#SBATCH --output=job_%j.log    # %j expands to the Slurm job ID

set -e  # abort the job if any step fails, instead of running srun on a stale/missing binary

# Load the latest GCC compiler and OpenMPI
module load GCC OpenMPI

# Build executable file, print node list, and run MPI program
make
echo "$SLURM_JOB_NODELIST"
srun ./mpi_pi  # srun is SLURM's version of mpirun/mpiexec

 

 

 

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值