- 博客(17)
- 收藏
- 关注
原创 MPI_Send和MPI_Recv影响下程序的执行顺序是怎样的?
#include #include #include"mpi.h" #define N 9 #define M 20 int A[M][N]; int B[N]; int BB[N]; int C[M]; int buf[N]; int bufr[N]; int bufc; int tagB=100;
2016-10-13 16:33:33 2374
原创 MPI常用函数速查表
MPI_Send(buf,count,datatype,dest,tag,comm) MPI_Recv(buf,count,datatype,source,tag,comm,&status) for(int i=1;i { MPI_Send(buf,count,datatype,i,tag,comm) } 与MPI_Bcast(buf,count,datatype,root,com
2016-10-13 14:42:52 1311
原创 MPI并行计算与矩阵(2)
1The body of code #include"mpi.h" #include"stdio.h" #include const int rows = 3; //the rows of matrix const int cols = 2; //the cols of matrix int main(int argc, char **argv) {
2016-10-11 21:22:11 1291
原创 matrix9
//http://www.cnblogs.com/sdxk/p/4093484.html//Oct 9 2016//monte carlo method to calculate pi#include#include#include#include#includevoid read_num(long long int *num_point,int my_rank,MPI_Comm
2016-10-10 22:28:31 740
原创 matrix8
//http://blog.csdn.net/pouloghost/article/details/7904049//gauss //Oct 9 2016#include "mpi.h" #include #include typedef struct{ float value; int rank; } MD; int main(int arg
2016-10-10 22:27:15 522
原创 matrix7
//http://blog.csdn.net/pouloghost/article/details/8824089#include #include #include "mpi.h" /* matrix transposition using mpi linear division author GT 2013.4.18 */ int rank,size;
2016-10-10 22:26:16 410
原创 matrix6
//http://blog.csdn.net/pouloghost/article/details/7913342//oct 9 2016//jaccobi#include "mpi.h" #include #include //broadcast x void bcastx(float *xs,int size) { for(int i=0;i<siz
2016-10-10 22:25:16 638
原创 matrix5
//http://www.linuxidc.com/Linux/2012-08/67662.htm#include "mpi.h" #include #include #define ROOT 0 #define TAG 0 int main(int argc,char *argv[]) { float matrix[][4]={{2,3,4,
2016-10-10 22:24:26 551
原创 matrix4
//Written by YaoPu, Oct 10,2016//if any problem,send email yaopu@iccas.ac.cn#include"mpi.h"#include"stdio.h"#includevoid matgen(float *a,int Width){int i,j;for(i=0;i<Width;i++){for(j=0;j<Wid
2016-10-10 22:23:22 775
原创 matrix3
//Written by YaoPu, Oct 10,2016//if any problem,send email yaopu@iccas.ac.cn//only for 2 processors !!!!!#include"mpi.h"#includeint main(int argc,char **argv){int rows=6,cols=5;int tag=0;MPI
2016-10-10 22:21:36 432
原创 matrix2
//Written by YaoPu, Oct 10,2016//if any question, send email to yaopu@iccas.ac.cn#include"mpi.h"#include"stdio.h"int main(int argc,char **argv){MPI_Init(&argc,&argv);int np,rank;MPI_Status st
2016-10-10 22:18:54 575
原创 matrix1
//Written by YaoPu, Oct 10,2016//connect via Email:yaopu@iccas.ac.cn#include"mpi.h"#include"stdio.h"#includeconst int rows = 400; //the rows of matrixconst int cols = 100; //the cols of matrix
2016-10-10 22:16:43 661
原创 MPI_Bcast
//Written by YaoPu Oct 10,2016//connect via Email:yaopu@iccas.ac.cn#include"mpi.h"#includeint main(int argc,char **argv){int rank,value;MPI_Init(&argc,&argv);MPI_Comm_rank(MPI_COMM_WORLD,&ran
2016-10-10 22:14:59 1155
原创 example1
//Written by YaoPu, Oct 10 2016//Connect via yaopu@iccas.ac.cn#include#include"mpi.h"#includeint master_io(void);int slave_io(void);int main(int argc, char **argv){int rank,size;MPI_Init(&a
2016-10-10 22:12:29 573
原创 MPI_Barrier
//Written by YaoPU 2016 Oct 10//Connect by yaopu@iccas.ac.cn#include"mpi.h"#includeint main(int argc,char **argv){MPI_Init(&argc,&argv);int rank,size,value;MPI_Status status;MPI_Comm_rank(MP
2016-10-10 22:09:58 2416
原创 MPI_Recv
#include"mpi.h"#include"stdio.h"#include"string.h"int main(int argc, char **argv){char message[20];int myrank;MPI_Status status;MPI_Init(&argc,&argv);MPI_Comm_rank(MPI_COMM_WORLD,&myrank);if(myrank==0
2016-10-10 22:06:17 551
原创 MPI并行计算与矩阵1
//Written by YaoPu //connect via email: yaopu@iccas.ac.cn//2016 Oct 10#include#include#includelong n, i;double sum, pi, mypi, x, h;int group_size,my_rank;int main(argc,argv)int argc;
2016-10-10 21:55:45 1214
空空如也
空空如也
TA创建的收藏夹 TA关注的收藏夹
TA关注的人