VS2019: computing PI serially and in parallel with OpenMP and MPI

#include <stdio.h>
#include <stdlib.h>

static long npoints = 10000;   /* number of random samples */
double circle_count = 0;       /* samples that fall inside the circle */

int main()
{
	int i;
	double x, y, pi;

	/* Monte Carlo: throw points into the unit square and count how many land
	   inside the inscribed circle of radius 0.5 centred at (0.5, 0.5). */
	for (i = 1; i <= npoints; i++)
	{
		x = rand() / (RAND_MAX + 1.0);
		y = rand() / (RAND_MAX + 1.0);
		if ((x - 0.5) * (x - 0.5) + (y - 0.5) * (y - 0.5) <= 0.25)
		{
			circle_count = circle_count + 1;
		}
	}

	/* area ratio circle : square = pi/4 */
	pi = 4.0 * circle_count / npoints;
	printf("%lf\n", pi);
	return 0;
}

VS-2019 OpenMP


#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NUM_THREADS 4

static long npoints = 10;      /* kept small so every iteration's thread id gets printed */
double circle_count = 0;

int main()
{
	int tid = 0;
	int i;
	double diff = 0.0;
	double x, y, pi, PII = 3.14159265358979323846;

	/* x, y and tid are per-thread scratch variables; circle_count is combined
	   across threads with a sum reduction. rand() is not thread-safe, which is
	   tolerable here only because this is a small demonstration. */
#pragma omp parallel for num_threads(NUM_THREADS) private(x, y, tid) reduction(+:circle_count)
	for (i = 0; i < npoints; i++)
	{
		x = rand() / (RAND_MAX + 1.0);
		y = rand() / (RAND_MAX + 1.0);
		if ((x - 0.5) * (x - 0.5) + (y - 0.5) * (y - 0.5) <= 0.25)
		{
			circle_count = circle_count + 1;
		}
		tid = omp_get_thread_num();

		if (tid == 0)
			printf("i is %d, thread %d is the master: it forks the team and also computes circle_count.\n", i, tid);
		else
			printf("i is %d, thread %d is a worker.\n", i, tid);
	}
	pi = 4.0 * circle_count / npoints;
	diff = PII - pi;
	printf("the value is %lf and the abs error is %lf\n", pi, diff);
	return 0;
}


//To parallelize a region, begin it with #pragma omp followed by the specific directive you need.

//The most common directives are:
//parallel: placed before a block of independent work to execute it with multiple threads
//parallel for: combines parallel and for so the loop iterations are divided among the threads (data parallelism)
//sections: task parallelism; each block of code is marked with a section directive
//parallel sections: combines parallel and sections, analogous to parallel for
//single: the enclosed code segment is executed by exactly one thread
//critical: only one OpenMP thread may be inside the critical region at a time (see the sketch after this list)
//flush: guarantees a consistent view of shared data across threads
//barrier: synchronizes all threads in the team
//atomic: the specified memory operation is performed atomically
//master: the enclosed code segment is executed only by the master thread
//threadprivate: gives each thread its own private copy of one or more global variables
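
//A minimal sketch (not from the original post) combining several of these directives; the message strings and the shared_counter variable are illustrative only:

#include <stdio.h>
#include <omp.h>

int main(void)
{
	int shared_counter = 0;

	#pragma omp parallel num_threads(4)
	{
		/* single: one thread prints the team size, the others wait at the implicit barrier */
		#pragma omp single
		printf("team size is %d\n", omp_get_num_threads());

		/* sections: each section is handed to one thread as an independent task */
		#pragma omp sections
		{
			#pragma omp section
			printf("section A done by thread %d\n", omp_get_thread_num());

			#pragma omp section
			printf("section B done by thread %d\n", omp_get_thread_num());
		}

		/* critical: only one thread at a time updates the shared counter */
		#pragma omp critical
		shared_counter++;

		/* barrier: no thread proceeds until all threads have reached this point */
		#pragma omp barrier

		/* master: only thread 0 executes this (note: no implicit barrier) */
		#pragma omp master
		printf("final counter is %d\n", shared_counter);
	}
	return 0;
}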

//The corresponding OpenMP clauses are:
//private: each thread gets its own uninitialized copy of the listed variables
//firstprivate: like private, but each thread's copy is initialized from the value the variable had before the parallel region or task (see the sketch after this list)
//lastprivate: the value from the thread that executes the last loop iteration of a for (or the last section) is copied back to the variable of the same name in the master thread
//reduction: like private, but when the parallel region ends the per-thread copies are combined with the specified reduction operation and the result is stored back in the master thread's variable
//nowait: the threads skip the implicit barrier at the end of the construct
//num_threads: sets the number of threads for the parallel region
//schedule: selects how the iterations of a for work-sharing loop are distributed (static, dynamic, guided, runtime)
//shared: the listed variables are shared by all threads
//ordered: the marked part of the loop body in a for work-sharing region is executed in iteration order
//copyprivate: used with the single directive; broadcasts the value of a variable from the thread that executed single to the same-named variable in the other threads of the team
//copyin: initializes each thread's threadprivate copy from the master thread's copy of the same name
//default: declares the default data-sharing attribute (usually shared) of variables in the parallel region
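
//A minimal sketch of firstprivate, lastprivate, reduction and schedule on a simple loop; the names offset, last and sum are made up for illustration:

#include <stdio.h>
#include <omp.h>

int main(void)
{
	int offset = 100;   /* copied into each thread by firstprivate */
	int last = -1;      /* written back from the last iteration by lastprivate */
	long sum = 0;       /* combined across threads by reduction */
	int i;

	#pragma omp parallel for num_threads(4) schedule(static, 2) firstprivate(offset) lastprivate(last) reduction(+:sum)
	for (i = 0; i < 16; i++)
	{
		sum += i + offset;   /* each thread accumulates into its private sum */
		last = i;            /* the copy from the thread running i == 15 survives */
	}

	/* expected: sum = 0+1+...+15 + 16*100 = 1720, last = 15 */
	printf("sum = %ld, last = %d\n", sum, last);
	return 0;
}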


//Run-time library routines (API):
//omp_in_parallel      - nonzero when called inside a parallel region
//omp_get_thread_num   - id of the calling thread within its team
//omp_set_num_threads  - set the thread count for later parallel regions (see the sketch after this list)
//omp_get_num_threads  - number of threads in the current team
//omp_get_max_threads  - upper bound on the team size of a new parallel region
//omp_get_num_procs    - number of processors available to the program
//omp_get_dynamic      - query whether dynamic thread adjustment is enabled
//omp_set_dynamic      - enable/disable dynamic thread adjustment
//omp_get_nested       - query whether nested parallelism is enabled
//omp_set_nested       - enable/disable nested parallelism
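
//A minimal sketch exercising a few of these routines; the printed strings are illustrative:

#include <stdio.h>
#include <omp.h>

int main(void)
{
	omp_set_dynamic(0);       /* disable dynamic adjustment so the request is honoured */
	omp_set_num_threads(4);   /* request a team of four threads */

	printf("procs = %d, max threads = %d, in parallel? %d\n",
	       omp_get_num_procs(), omp_get_max_threads(), omp_in_parallel());

	#pragma omp parallel
	{
		printf("thread %d of %d (in parallel? %d)\n",
		       omp_get_thread_num(), omp_get_num_threads(), omp_in_parallel());
	}
	return 0;
}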

//Environment variables:
//OMP_SCHEDULE    - default schedule for loops declared schedule(runtime) (see the sketch below)
//OMP_NUM_THREADS - default number of threads
//OMP_DYNAMIC     - enable/disable dynamic thread adjustment
//OMP_NESTED      - enable/disable nested parallelism
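
//A minimal sketch: with schedule(runtime) the loop schedule is read from OMP_SCHEDULE at run time; the loop body is illustrative:

#include <stdio.h>
#include <omp.h>

/* Set the schedule before running, e.g.
   (Windows cmd)  set OMP_SCHEDULE=dynamic,2
   (bash)         export OMP_SCHEDULE="dynamic,2"   */
int main(void)
{
	int i;
	#pragma omp parallel for schedule(runtime) num_threads(4)
	for (i = 0; i < 8; i++)
		printf("iteration %d on thread %d\n", i, omp_get_thread_num());
	return 0;
}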

VS-2013-MPI

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
	int n = 0;
	int pid, numprocs, j, i;
	double h, sum, x, mypi, pi;
	MPI_Status status;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &pid);
	MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

	/* rank 0 chooses the number of intervals and sends it to every other rank */
	if (pid == 0) {
		n = 10000;
		for (j = 1; j < numprocs; j++)
			MPI_Send(&n, 1, MPI_INT, j, 100, MPI_COMM_WORLD);
	}
	else {
		MPI_Recv(&n, 1, MPI_INT, 0, 100, MPI_COMM_WORLD, &status);
		//printf("%d\n",status.MPI_ERROR);
	}

	/* midpoint rule for the quarter-circle integral of sqrt(1 - x^2) on [0, 1];
	   each rank handles iterations pid, pid + numprocs, pid + 2*numprocs, ... */
	h = 1.0 / (double)n;
	sum = 0.0;
	for (i = pid; i < n; i += numprocs) {
		x = h * ((double)(i + 1) - 0.5);
		sum += sqrt(1 - x * x);
	}
	mypi = sum * h;

	/* sum the partial results onto rank 0 */
	MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

	if (pid == 0) {
		pi = 4 * pi;   /* quarter circle -> full pi */
		printf("the pi = %lf, the abs error = %lf\n", pi, fabs(pi - 3.1415926));
	}
	MPI_Finalize();
	return 0;
}

MPI parallel programming
1. Commonly used MPI functions
The MPI environment predefines two communicators: MPI_COMM_WORLD contains all processes, and MPI_COMM_SELF contains only the calling process.
MPI_Init(&argc, &argv)                  // initializes the MPI environment; it must be the first MPI call
MPI_Comm_rank(communicator, &myid)      // gets the rank of the calling process within the communicator, so each process can identify itself and cooperate with the others
MPI_Comm_size(communicator, &numprocs)  // gets the number of processes in the communicator
MPI_Finalize()                          // shuts down the MPI environment; it must be the last MPI call
int MPI_Send(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
int MPI_Recv(void* buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status)
dest/source specify the rank of the destination/source process
MPI_Status fields: status.MPI_SOURCE, status.MPI_TAG, status.MPI_ERROR
MPI_PROC_NULL: a virtual process rank that makes it easier to write uniform communication statements
MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, recvtag, comm, status)
MPI predefined datatypes:
   MPI_CHAR / MPI_SHORT / MPI_INT / MPI_LONG / MPI_UNSIGNED_CHAR / MPI_UNSIGNED_SHORT / MPI_UNSIGNED / MPI_UNSIGNED_LONG / MPI_FLOAT / MPI_DOUBLE / MPI_LONG_DOUBLE

   note: typed communication requires matching datatypes on both sides
         untyped (byte) communication uses MPI_BYTE on both sides
         packed communication uses MPI_PACKED on both sides

double MPI_Wtime(void)   // returns wall-clock time in seconds as a double
int MPI_Get_processor_name(char *name, int *resultlen)
(a minimal send/receive sketch follows)
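
A minimal point-to-point sketch using these calls; the payload value 42 and tag 7 are arbitrary choices for illustration:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
	int rank, size, msg, namelen;
	char name[MPI_MAX_PROCESSOR_NAME];
	MPI_Status status;
	double t0;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Get_processor_name(name, &namelen);

	t0 = MPI_Wtime();
	if (rank == 0 && size > 1) {
		msg = 42;                                           /* arbitrary payload */
		MPI_Send(&msg, 1, MPI_INT, 1, 7, MPI_COMM_WORLD);   /* tag 7 is arbitrary */
		MPI_Recv(&msg, 1, MPI_INT, 1, 7, MPI_COMM_WORLD, &status);
		printf("rank 0 on %s got %d back after %f s\n", name, msg, MPI_Wtime() - t0);
	} else if (rank == 1) {
		MPI_Recv(&msg, 1, MPI_INT, 0, 7, MPI_COMM_WORLD, &status);
		msg += 1;                                           /* modify and echo back */
		MPI_Send(&msg, 1, MPI_INT, 0, 7, MPI_COMM_WORLD);
	}
	MPI_Finalize();
	return 0;
}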

2. Reduction function
int MPI_Reduce(
    void *input_data,       // send buffer (the local value on each process)
    void *output_data,      // receive buffer (significant only on the root process)
    int count,
    MPI_Datatype datatype,
    MPI_Op operator,        // e.g. MPI_SUM, MPI_MAX, MPI_MIN, MPI_PROD
    int dest,               // rank of the root process that receives the result
    MPI_Comm comm);
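
A small usage sketch of MPI_Reduce, assuming each rank holds one local value to be combined; the variable names are illustrative:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
	int rank;
	double local, total = 0.0, biggest = 0.0;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	local = (double)rank + 1.0;   /* illustrative per-process value */

	/* combine the per-rank values on rank 0 with two different operators */
	MPI_Reduce(&local, &total,   1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce(&local, &biggest, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

	if (rank == 0)
		printf("sum = %f, max = %f\n", total, biggest);

	MPI_Finalize();
	return 0;
}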

 
