1. What is MPI
MPI is a language-independent communication protocol used to program parallel computers. It supports both point-to-point and collective (broadcast) communication. MPI is a message-passing application programming interface, together with protocol and semantic specifications that state how its features must behave in any implementation. MPI's goals are high performance, scalability, and portability, and it remains the dominant model in high-performance computing today.
The original MPI-1 model has no shared-memory concept, and MPI-2 has only a limited distributed shared-memory concept. Nevertheless, MPI programs are routinely run on shared-memory machines. Designing programs around the MPI model (rather than around a NUMA architecture) has advantages, because MPI encourages memory locality.
Although MPI belongs to layer 5 or higher of the OSI reference model, implementations may cover most of the layers, with sockets and the Transmission Control Protocol (TCP) used in the transport layer. Most MPI implementations consist of a specific set of routines (an API) directly callable from C, C++, Fortran, or any language able to interface with such libraries, such as C#, Java, or Python. MPI's advantages over older message-passing libraries are portability and speed.
2. A first MPI program
#include <mpi.h>

int main()
{
    int rank;
    int allProcess;
    MPI_Comm worker_comm;
    MPI_Init(nullptr, nullptr);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &allProcess);
    MPI_Finalize();
    return 0;
}
Let us now walk through the MPI functions used in the example above, one by one:
MPI_Init(nullptr, nullptr);
Prototype: int MPI_Init(int *argc, char ***argv)
Initializes MPI. The parameters are the command-line arguments; you may also leave them empty, as I did above by passing nullptr.
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &allProcess);
These retrieve, respectively, the rank of the current process within the communicator and the number of processes the communicator contains. Here we meet the concept of a communicator: you can think of it as a discussion group in which members can post messages (send data) and read what others post (receive data). Within each communicator every process has a unique rank, numbered consecutively starting from 0.
MPI_Finalize();
Tells MPI that all MPI work is finished and that the corresponding resources should be released. If they cannot be released (for example, because MPI operations are still in progress), it returns an error value (an int).
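As a hedged usage note (not part of the original text, and details vary with your MPI installation): with an MPICH- or Open MPI-style toolchain you would typically compile and launch this program with commands such as "mpicc first_mpi.c -o first_mpi" followed by "mpiexec -n 4 ./first_mpi", where first_mpi.c is an assumed file name and -n 4 starts four processes. Each process then sees a different rank between 0 and 3, while allProcess is 4 in every process.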
3. Some commonly used functions
Since MPI is a protocol for passing messages, communication is naturally at its core, so we first introduce a few groups of commonly used communication functions.
int MPI_Send (void *buf, int count, MPI_Datatype datatype,int dest, int tag,MPI_Comm comm)
int MPI_Recv (void *buf, int count, MPI_Datatype datatype,int source, int tag, MPI_Comm comm,MPI_Status *status)
These are blocking calls: the program waits until the data has arrived or the send has returned. Be careful, as this makes it very easy to create deadlocks.
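To make the blocking behaviour concrete, here is a minimal sketch (not from the original text, and assuming the program is run with at least two processes; variable names such as value are illustrative only) in which rank 0 sends one integer to rank 1:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        int value = 42;                       /* data to send */
        /* blocking send: returns once the buffer can safely be reused */
        MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        int value = 0;
        /* blocking receive: waits until a matching message has arrived */
        MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("rank 1 received %d\n", value);
    }

    MPI_Finalize();
    return 0;
}

If both ranks called MPI_Recv first and only then sent, each would wait forever for the other's message; that is exactly the deadlock warned about above.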
Fortunately, we also have a pair of non-blocking functions for the same job.
int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source,int tag, MPI_Comm comm, MPI_Request *request)
int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,MPI_Comm comm, MPI_Request *request)
These are non-blocking calls: the program keeps running regardless of whether the send/receive has completed.
Of course, we can also use a blocking call to guarantee that the communication has finished before the program proceeds.
int MPI_Wait(MPI_Request *request, MPI_Status *status)
When execution reaches MPI_Wait, it blocks until the operation associated with the request handle passed to MPI_Wait has completed.
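The following is a small hedged sketch (not from the original text, assuming at least two processes) that combines MPI_Isend/MPI_Irecv with MPI_Wait; the comments mark where useful work could overlap with the transfer:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        int value = 42;
        MPI_Request req;
        /* non-blocking send: returns immediately with a request handle */
        MPI_Isend(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &req);
        /* ... other computation could overlap with the transfer here ... */
        MPI_Wait(&req, MPI_STATUS_IGNORE);    /* value may only be reused after this */
    } else if (rank == 1) {
        int value = 0;
        MPI_Request req;
        /* non-blocking receive */
        MPI_Irecv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);    /* value is valid only after this */
        printf("rank 1 received %d\n", value);
    }

    MPI_Finalize();
    return 0;
}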
Below is a fairly complete list of functions:
MPIX_Comm_agree
int MPIX_Comm_agree(MPI_Comm comm, int *flag)
Performs an agreement operation over the communicator.
MPI_File_set_errhandler
int MPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler)
Sets the error handler for an MPI file.
Note: this is thread-safe only if no other MPI routine is concurrently updating or freeing the same MPI object.
MPI_Rsend
int MPI_Rsend(const void *buf, int count, MPI_Datatype datatype, int dest,int tag, MPI_Comm comm)
Blocking ready send; the caller must ensure that a matching receive has already been posted. Thread-safe.
MPIX_Comm_failure_ack
int MPIX_Comm_failure_ack( MPI_Comm comm )
Acknowledges the currently known failed processes in the communicator. Thread-safe.
MPI_File_set_info
int MPI_File_set_info(MPI_File fh, MPI_Info info)
Sets new values for the info hints associated with a file.
MPI_Rsend_init
int MPI_Rsend_init(const void *buf, int count, MPI_Datatype datatype, int dest,int tag, MPI_Comm comm, MPI_Request *request)
Initializes a persistent MPI_Rsend request.
MPIX_Comm_failure_get_acked
int MPIX_Comm_failure_get_acked( MPI_Comm comm, MPI_Group *failedgrp )
Returns the group of failed processes that have been acknowledged.
MPI_File_set_size
int MPI_File_set_size(MPI_File fh, MPI_Offset size)
Sets the file size.
MPI_Scan
int MPI_Scan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,MPI_Op op, MPI_Comm comm)
Computes an inclusive scan (partial reduction) of data across a group of processes.
MPIX_Comm_revoke
int MPIX_Comm_revoke(MPI_Comm comm)
Revokes (disables) a communicator.
MPI_File_set_view
int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,MPI_Datatype filetype, ROMIO_CONST char *datarep, MPI_Info info)
Sets the file view
MPI_Scatter
int MPI_Scatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,MPI_Comm comm)
Scatters data from the root process to all processes in the communicator (including the root).
MPIX_Comm_shrink
int MPIX_Comm_shrink(MPI_Comm comm, MPI_Comm *newcomm)
Creates a new communicator from all processes in the communicator that have not failed.
MPI_File_sync
int MPI_File_sync(MPI_File fh)
Flushes all previously written data to the storage device.
MPI_Scatterv
int MPI_Scatterv(const void *sendbuf, const int *sendcounts, const int *displs,MPI_Datatype sendtype, void *recvbuf, int recvcount,MPI_Datatype recvtype,int root, MPI_Comm comm)
Scatters data from the root to all processes in the communicator (including the root); each process may receive a different count.
MPI_Abort
int MPI_Abort(MPI_Comm comm, int errorcode)
Terminates the MPI execution environment (returning errorcode to the invoking environment).
MPI_File_write
int MPI_File_write(MPI_File fh, ROMIO_CONST void *buf, int count, MPI_Datatype datatype, MPI_Status *status)
Writes to a file using the individual file pointer.
MPI_Send
Described above.
MPI_Accumulate
int MPI_Accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win)
Accumulate data into the target process using remote memory access
MPI_File_write_all
int MPI_File_write_all(MPI_File fh, ROMIO_CONST void *buf, int count,MPI_Datatype datatype, MPI_Status *status)
Collective write using the individual file pointer.
MPI_Send_init
int MPI_Send_init(const void *buf, int count, MPI_Datatype datatype, int dest,int tag, MPI_Comm comm, MPI_Request *request)
Initializes a persistent MPI_Send request.
MPI_Add_error_class
int MPI_Add_error_class(int *errorclass)
Adds a new error class to the known error classes.
MPI_File_write_all_begin
int MPI_File_write_all_begin(MPI_File fh, ROMIO_CONST void *buf, int count,MPI_Datatype datatype)
Begins a split collective write using the individual file pointer.
MPI_Sendrecv
int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,int dest, int sendtag,void *recvbuf, int recvcount, MPI_Datatype recvtype,int source, int recvtag,MPI_Comm comm, MPI_Status *status)
Sends and receives a message in a single call.
MPI_Add_error_code
int MPI_Add_error_code(int errorclass, int *errorcode)
Adds a new error code to an error class.
MPI_File_write_all_end
int MPI_File_write_all_end(MPI_File fh, ROMIO_CONST void *buf, MPI_Status *status)
Completes a split collective write using the individual file pointer.
MPI_Sendrecv_replace
int MPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype, int dest, int sendtag, int source, int recvtag, MPI_Comm comm, MPI_Status *status)
Sends and receives using a single buffer.
MPI_Add_error_string
int MPI_Add_error_string(int errorcode, const char *string)
Associates an error string with an error code or error class.
MPI_File_write_at
int MPI_File_write_at(MPI_File fh, MPI_Offset offset, ROMIO_CONST void *buf,int count, MPI_Datatype datatype, MPI_Status *status)
Writes to a file at an explicit offset.
MPI_Ssend
int MPI_Ssend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,MPI_Comm comm)
Blocking synchronous send.
MPI_Address
int MPI_Address(const void *location, MPI_Aint *address)
Gets the address of a location in memory.
MPI_File_write_at_all
int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, ROMIO_CONST void *buf,int count, MPI_Datatype datatype, MPI_Status *status)
Collective write to a file at an explicit offset.
MPI_Ssend_init
int MPI_Ssend_init(const void *buf, int count, MPI_Datatype datatype, int dest,int tag, MPI_Comm comm, MPI_Request *request)
Initializes a persistent MPI_Ssend request.
MPI_Aint_add
MPI_Aint MPI_Aint_add(MPI_Aint base, MPI_Aint disp)
Returns the sum of base and disp.
MPI_File_write_at_all_begin
int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, ROMIO_CONST void *buf,int count, MPI_Datatype datatype)
Begins a split collective write to a file at an explicit offset.
MPI_Start
int MPI_Start(MPI_Request *request)
Starts a communication using a persistent request handle.
MPI_Aint_diff
MPI_Aint MPI_Aint_diff(MPI_Aint addr1, MPI_Aint addr2)
Returns the difference between the two addresses.
MPI_File_write_at_all_end
int MPI_File_write_at_all_end(MPI_File fh, ROMIO_CONST void *buf,MPI_Status *status)
Completes a split collective write to a file at an explicit offset.
MPI_Startall
int MPI_Startall(int count, MPI_Request array_of_requests[])
Starts a collection of persistent requests.
MPI_Allgather
int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,MPI_Comm comm)
Gathers data from all processes and distributes the combined data back to all processes.
MPI_File_write_ordered
int MPI_File_write_ordered(MPI_File fh, ROMIO_CONST void *buf, int count, MPI_Datatype datatype, MPI_Status *status)
Collective write using the shared file pointer.
MPI_Status_set_cancelled
int MPI_Status_set_cancelled(MPI_Status *status, int flag)
Sets the cancelled flag associated with a status object.
MPI_Allgatherv
int MPI_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, const int *recvcounts, const int *displs,MPI_Datatype recvtype, MPI_Comm comm)
Gathers data from all processes and delivers the combined data to all processes; counts may vary per process.
MPI_File_write_ordered_begin
int MPI_File_write_ordered_begin(MPI_File fh, ROMIO_CONST void *buf, int count,MPI_Datatype datatype)
Begins a split collective write using the shared file pointer.
MPI_Status_set_elements
int MPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype, int count)
Sets the element count in a status structure.
MPI_Alloc_mem
int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
Allocates memory for message passing and RMA.
MPI_File_write_ordered_end
int MPI_File_write_ordered_end(MPI_File fh, ROMIO_CONST void *buf, MPI_Status *status)
Completes a split collective write using the shared file pointer.
MPI_Status_set_elements_x
int MPI_Status_set_elements_x(MPI_Status *status, MPI_Datatype datatype, MPI_Count count)
Modifies the opaque part of a status so that MPI_Get_elements returns count.
MPI_Allreduce
int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
Combines values from all processes and distributes the result back to all processes.
MPI_File_write_shared
int MPI_File_write_shared(MPI_File fh, ROMIO_CONST void *buf, int count,MPI_Datatype datatype, MPI_Status *status)
Writes to a file using the shared file pointer.
MPI_T_category_changed
int MPI_T_category_changed(int *stamp)
Gets a timestamp indicating when the categories were last changed.
MPI_Alltoall
int MPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
Sends data from every process to every process.
MPI_Finalize
int MPI_Finalize( void )
See above.
MPI_T_category_get_categories
int MPI_T_category_get_categories(int cat_index, int len, int indices[])
Gets the subcategories of a category.
MPI_Alltoallv
int MPI_Alltoallv(const void *sendbuf, const int *sendcounts, const int *sdispls, MPI_Datatype sendtype, void *recvbuf, const int *recvcounts, const int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)
Sends data from all processes to all processes; each process may send a different amount of data and provide displacements for the input and output data.
MPI_Finalized
int MPI_Finalized( int *flag )
Indicates whether MPI_Finalize has been called.
MPI_T_category_get_cvars
int MPI_T_category_get_cvars(int cat_index, int len, int indices[])
Gets the control variables in a category.
MPI_Alltoallw
int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],const int sdispls[], const MPI_Datatype sendtypes[],void *recvbuf, const int recvcounts[], const int rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm)
Generalized all-to-all communication allowing different datatypes, counts, and displacements for each partner.
MPI_Free_mem
int MPI_Free_mem(void *base)
Frees memory allocated with MPI_Alloc_mem.
MPI_T_category_get_info
int MPI_T_category_get_info(int cat_index, char *name, int *name_len, char *desc, int *desc_len, int *num_cvars, int *num_pvars, int *num_categories)
Gets information about a category.
MPI_Attr_delete
int MPI_Attr_delete(MPI_Comm comm, int keyval)
Deletes an attribute value associated with a key on a communicator.
MPI_Gather
int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
Gathers values from a group of processes to the root.
MPI_T_category_get_num
int MPI_T_category_get_num(int *num_cat)
Gets the number of categories.
MPI_Attr_get
int MPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
Retrieves an attribute value by key.
MPI_Gatherv
int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int *recvcounts, const int *displs,MPI_Datatype recvtype, int root, MPI_Comm comm)
Gathers data from all processes in the group into specified locations; counts may vary per process.
MPI_T_category_get_pvars
int MPI_T_category_get_pvars(int cat_index, int len, int indices[])
Gets the performance variables in a category.
MPI_Attr_put
int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
Stores an attribute value by key.
MPI_Get
int MPI_Get(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win)
Gets data from the memory window of a remote process.
MPI_T_cvar_get_info
int MPI_T_cvar_get_info(int cvar_index, char *name, int *name_len,int *verbosity, MPI_Datatype *datatype, MPI_T_enum *enumtype,char *desc, int *desc_len, int *binding, int *scope)
Gets information about a control variable.
MPI_Barrier
int MPI_Barrier( MPI_Comm comm )
Blocks until all processes in the communicator have reached this call (barrier synchronization).
MPI_Get_accumulate
int MPI_Get_accumulate(const void *origin_addr, int origin_count,MPI_Datatype origin_datatype, void *result_addr, int result_count,MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win)
Performs an atomic, one-sided read-and-accumulate operation: origin_count elements of type origin_datatype from the origin buffer (origin_addr) are combined, using operation op, into the target buffer at displacement target_disp of the window specified by target_rank and win, and the previous contents of the target buffer are returned in the result buffer result_addr.
MPI_T_cvar_get_num
int MPI_T_cvar_get_num(int *num_cvar)
Gets the number of control variables.
MPI_Bcast
int MPI_Bcast( void *buffer, int count, MPI_Datatype datatype, int root,MPI_Comm comm )
Broadcasts a message from the root process to all other processes of the communicator (a usage sketch combining MPI_Bcast and MPI_Reduce appears after this list).
MPI_Get_address
int MPI_Get_address(const void *location, MPI_Aint *address)
Gets the address of a location in memory.
MPI_T_cvar_handle_alloc
int MPI_T_cvar_handle_alloc(int cvar_index, void *obj_handle, MPI_T_cvar_handle *handle, int *count)
Allocates a handle for a control variable.
MPI_Bsend
int MPI_Bsend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
Basic send using a user-provided (attached) buffer.
MPI_Get_count
int MPI_Get_count( const MPI_Status *status, MPI_Datatype datatype, int *count )
Gets the number of top-level elements received, as recorded in a status.
MPI_T_cvar_handle_free
int MPI_T_cvar_handle_free(MPI_T_cvar_handle *handle)
Frees a control-variable handle.
MPI_Bsend_init
int MPI_Bsend_init(const void *buf, int count, MPI_Datatype datatype,int dest, int tag, MPI_Comm comm, MPI_Request *request)
Initializes a persistent MPI_Bsend request.
MPI_Get_elements
int MPI_Get_elements(const MPI_Status *status, MPI_Datatype datatype, int *count)
Gets the number of basic elements of a datatype received, from a status.
MPI_T_cvar_read
int MPI_T_cvar_read(MPI_T_cvar_handle handle, void *buf)
Reads the value of a control variable.
MPI_Buffer_attach
int MPI_Buffer_attach(void *buffer, int size)
Attaches a user-provided buffer to be used for buffered sends.
MPI_Get_elements_x
int MPI_Get_elements_x(const MPI_Status *status, MPI_Datatype datatype, MPI_Count *count)
Gets the number of basic elements received, using an MPI_Count (large-count variant of MPI_Get_elements).
MPI_T_cvar_write
int MPI_T_cvar_write(MPI_T_cvar_handle handle, const void *buf)
Writes the value of a control variable.
MPI_Buffer_detach
int MPI_Buffer_detach(void *buffer_addr, int *size)
Removes (detaches) an existing attached buffer.
MPI_Get_library_version
int MPI_Get_library_version(char *version, int *resultlen)
Returns the version string of the MPI library.
MPI_T_enum_get_info
int MPI_T_enum_get_info(MPI_T_enum enumtype, int *num, char *name, int *name_len)
Gets information about an enumeration.
MPI_Cancel
int MPI_Cancel(MPI_Request *request)
Cancels a communication request.
MPI_Get_processor_name
int MPI_Get_processor_name( char *name, int *resultlen )
Gets the name of the processor.
MPI_T_enum_get_item
int MPI_T_enum_get_item(MPI_T_enum enumtype, int index, int *value, char *name, int *name_len)
Gets information about an item in an enumeration.
MPI_Cart_coords
int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int coords[])
Determines the Cartesian coordinates of a process given its rank.
MPI_Get_version
int MPI_Get_version( int *version, int *subversion )
Returns the version of MPI.
MPI_T_finalize
int MPI_T_finalize(void)
Finalizes the MPI tool information interface.
MPI_Cart_create
int MPI_Cart_create(MPI_Comm comm_old, int ndims, const int dims[], const int periods[], int reorder, MPI_Comm *comm_cart)
Creates a new communicator with Cartesian topology information attached.
MPI_Graph_create
int MPI_Graph_create(MPI_Comm comm_old, int nnodes, const int indx[], const int edges[], int reorder, MPI_Comm *comm_graph)
Creates a new communicator with graph topology information attached.
MPI_T_init_thread
int MPI_T_init_thread(int required, int *provided)
Initializes the MPI tool information interface (MPI_T).
MPI_Cart_get
int MPI_Cart_get(MPI_Comm comm, int maxdims, int dims[], int periods[], int coords[])
Retrieves the Cartesian topology information associated with a communicator.
MPI_Graph_get
int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int indx[], int edges[])
Retrieves the graph topology information associated with a communicator.
MPI_T_pvar_get_info
int MPI_T_pvar_get_info(int pvar_index, char *name, int *name_len, int *verbosity, int *var_class, MPI_Datatype *datatype, MPI_T_enum *enumtype, char *desc,int *desc_len, int *binding, int *readonly, int *continuous, int *atomic)
Gets information about a performance variable.
MPI_Cart_map
int MPI_Cart_map(MPI_Comm comm, int ndims, const int dims[], const int periods[], int *newrank)
Maps process ranks to a Cartesian topology.
MPI_Graph_map
int MPI_Graph_map(MPI_Comm comm, int nnodes, const int indx[], const int edges[],int *newrank)
Maps process ranks to a graph topology.
MPI_T_pvar_get_num
int MPI_T_pvar_get_num(int *num_pvar)
Gets the number of performance variables.
MPI_Cart_rank
int MPI_Cart_rank(MPI_Comm comm, const int coords[], int *rank)
Determines the rank of a process in the communicator given its Cartesian coordinates.
MPI_Graph_neighbors
int MPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, int neighbors[])
Returns the neighbors of a node in a graph topology.
MPI_T_pvar_handle_alloc
int MPI_T_pvar_handle_alloc(MPI_T_pvar_session session, int pvar_index,void *obj_handle, MPI_T_pvar_handle *handle, int *count)
Allocates a handle for a performance variable.
MPI_Cart_shift
int MPI_Cart_shift(MPI_Comm comm, int direction, int disp, int *rank_source,int *rank_dest)
Given a shift direction and displacement, returns the shifted source and destination ranks.
MPI_Graph_neighbors_count
int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, int *nneighbors)
Returns the number of neighbors of a node in a graph topology.
MPI_T_pvar_handle_free
int MPI_T_pvar_handle_free(MPI_T_pvar_session session, MPI_T_pvar_handle *handle)
Frees a performance-variable handle.
MPI_Cart_sub
int MPI_Cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm)
Partitions a communicator into subgroups that form lower-dimensional Cartesian subgrids.
MPI_Graphdims_get
int MPI_Graphdims_get(MPI_Comm comm, int *nnodes, int *nedges)
Retrieves the graph topology dimensions (numbers of nodes and edges) associated with a communicator.
MPI_T_pvar_read
int MPI_T_pvar_read(MPI_T_pvar_session session, MPI_T_pvar_handle handle, void *buf)
Reads the value of a performance variable through its handle.
MPI_Cartdim_get
int MPI_Cartdim_get(MPI_Comm comm, int *ndims)
Retrieves the number of Cartesian dimensions associated with a communicator.
MPI_Grequest_complete
int MPI_Grequest_complete( MPI_Request request )
Informs MPI that a user-defined (generalized) request is complete.
MPI_T_pvar_readreset
int MPI_T_pvar_readreset(MPI_T_pvar_session session, MPI_T_pvar_handle handle, void *buf)
Reads the value of a performance variable and resets it.
MPI_Close_port
int MPI_Close_port(const char *port_name)
Closes a port.
MPI_Grequest_start
int MPI_Grequest_start( MPI_Grequest_query_function *query_fn,MPI_Grequest_free_function *free_fn,MPI_Grequest_cancel_function *cancel_fn, void *extra_state, MPI_Request *request )
Creates a generalized request and returns a handle to it.
MPI_T_pvar_reset
int MPI_T_pvar_reset(MPI_T_pvar_session session, MPI_T_pvar_handle handle)
Resets a performance variable.
MPI_Comm_accept
int MPI_Comm_accept(const char *port_name, MPI_Info info, int root, MPI_Comm comm,MPI_Comm *newcomm)
Accepts a connection request from another group of processes, forming a new intercommunicator.
MPI_Group_compare
int MPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result)
Compares two groups.
MPI_T_pvar_session_create
int MPI_T_pvar_session_create(MPI_T_pvar_session *session)
Creates a new session for accessing performance variables.
MPI_Comm_call_errhandler
int MPI_Comm_call_errhandler(MPI_Comm comm, int errorcode)
Invokes the error handler installed on a communicator.
MPI_Group_difference
int MPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group *newgroup)
Creates a new group from the difference of two groups.
MPI_T_pvar_session_free
int MPI_T_pvar_session_free(MPI_T_pvar_session *session)
Frees an existing performance-variable session.
MPI_Comm_compare
int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result)
Compares two communicators.
MPI_Group_excl
int MPI_Group_excl(MPI_Group group, int n, const int ranks[], MPI_Group *newgroup)
Produces a group by reordering an existing group and taking only the members that are not listed.
MPI_T_pvar_start
int MPI_T_pvar_start(MPI_T_pvar_session session, MPI_T_pvar_handle handle)
Starts a performance variable.
MPI_Comm_connect
int MPI_Comm_connect(const char *port_name, MPI_Info info, int root, MPI_Comm comm,MPI_Comm *newcomm)
Makes a connection request to form a new intercommunicator.
MPI_Group_free
int MPI_Group_free(MPI_Group *group)
Frees a group.
MPI_T_pvar_stop
int MPI_T_pvar_stop(MPI_T_pvar_session session, MPI_T_pvar_handle handle)
Stops a performance variable.
MPI_Comm_create
int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm)
Creates a new communicator from a group.
MPI_Group_incl
int MPI_Group_incl(MPI_Group group, int n, const int ranks[], MPI_Group *newgroup)
Produces a group by reordering an existing group and taking only the listed members.
MPI_T_pvar_write
int MPI_T_pvar_write(MPI_T_pvar_session session, MPI_T_pvar_handle handle, const void *buf)
Writes the value of a performance variable.
MPI_Comm_create_errhandler
int MPI_Comm_create_errhandler(MPI_Comm_errhandler_function *comm_errhandler_fn, MPI_Errhandler *errhandler)
Creates a communicator error handler.
MPI_Group_intersection
int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group *newgroup)
Produces a group as the intersection of two existing groups.
MPI_Test
int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status)
Tests whether a request has completed.
MPI_Comm_create_group
int MPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm * newcomm)
Creates a new communicator from a group.
MPI_Group_range_excl
int MPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group *newgroup)
Produces a group by excluding ranges of processes from an existing group.
MPI_Test_cancelled
int MPI_Test_cancelled(const MPI_Status *status, int *flag)
Tests whether a request was cancelled.
MPI_Comm_create_keyval
int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn, MPI_Comm_delete_attr_function *comm_delete_attr_fn, int *comm_keyval, void *extra_state)
Creates a new attribute key for communicators.
MPI_Group_range_incl
int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group *newgroup)
Creates a new group from given ranges of ranks in an existing group.
MPI_Testall
int MPI_Testall(int count, MPI_Request array_of_requests[], int *flag, MPI_Status array_of_statuses[])
Tests whether all of the given requests have completed.
MPI_Comm_delete_attr
int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval)
Deletes an attribute value by key.
MPI_Group_rank
int MPI_Group_rank(MPI_Group group, int *rank)
Returns the rank of the calling process in the group.
MPI_Testany
int MPI_Testany(int count, MPI_Request array_of_requests[], int *indx, int *flag, MPI_Status *status)
Tests whether any of the given requests has completed.
MPI_Comm_disconnect
int MPI_Comm_disconnect(MPI_Comm * comm)
Disconnects a communicator, waiting for pending communication to finish.
MPI_Group_size
int MPI_Group_size(MPI_Group group, int *size)
Returns the size of a group.
MPI_Testsome
int MPI_Testsome(int incount, MPI_Request array_of_requests[], int *outcount, int array_of_indices[], MPI_Status array_of_statuses[])
Tests which of the given requests have completed.
MPI_Comm_dup
int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
Duplicates an existing communicator together with all its cached information.
MPI_Group_translate_ranks
int MPI_Group_translate_ranks(MPI_Group group1, int n, const int ranks1[],MPI_Group group2, int ranks2[])
Translates process ranks in one group into the corresponding ranks in another group.
MPI_Topo_test
int MPI_Topo_test(MPI_Comm comm, int *status)
Determines the type of topology (if any) associated with a communicator.
MPI_Comm_dup_with_info
int MPI_Comm_dup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm * newcomm)
Duplicates an existing communicator, along with its cached information, using the supplied info hints.
MPI_Group_union
int MPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group *newgroup)
Produces a group as the union of two groups.
MPI_Type_commit
int MPI_Type_commit(MPI_Datatype *datatype)
Commits a datatype so it can be used for communication.
MPI_Comm_free
int MPI_Comm_free(MPI_Comm *comm)
Frees a communicator.
MPI_Iallgather
int MPI_Iallgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Gathers data from all tasks and distributes the combined data to all tasks, in a non-blocking way.
MPI_Type_contiguous
int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype)
Creates a contiguous datatype.
MPI_Comm_free_keyval
int MPI_Comm_free_keyval(int *comm_keyval)
Frees an attribute key for communicators.
MPI_Iallgatherv
int MPI_Iallgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Gathers data from all tasks and delivers the combined data to all tasks, in a non-blocking way.
MPI_Type_create_darray
int MPI_Type_create_darray(int size,int rank,int ndims,const int array_of_gsizes[],const int array_of_distribs[],const int array_of_dargs[], const int array_of_psizes[],int order,MPI_Datatype oldtype, MPI_Datatype *newtype)
Creates a datatype representing a distributed array.
MPI_Comm_get_attr
int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
Retrieves an attribute value by key.
MPI_Iallreduce
int MPI_Iallreduce(const void *sendbuf, void *recvbuf, int count,MPI_Datatype datatype, MPI_Op op, MPI_Comm comm,MPI_Request *request)
Combines values from all processes and distributes the result back to all processes, in a non-blocking way.
MPI_Type_create_hindexed
int MPI_Type_create_hindexed(int count,const int array_of_blocklengths[],const MPI_Aint array_of_displacements[],MPI_Datatype oldtype, MPI_Datatype *newtype)
Creates an indexed datatype with displacements given in bytes.
MPI_Comm_get_errhandler
int MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *errhandler)
Gets the error handler associated with a communicator.
MPI_Ialltoall
int MPI_Ialltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,MPI_Comm comm, MPI_Request *request)
Sends data from all processes to all processes, in a non-blocking way.
MPI_Type_create_hindexed_block
int MPI_Type_create_hindexed_block(int count,int blocklength,const MPI_Aint array_of_displacements[], MPI_Datatype oldtype, MPI_Datatype * newtype)
Creates an indexed datatype with constant-sized blocks and byte displacements.
MPI_Comm_get_info
int MPI_Comm_get_info(MPI_Comm comm, MPI_Info * info_used)
Returns a new info object containing the hints of the communicator associated with comm. The current settings of all hints actually used by the system for this communicator are returned in info_used. If no such hints exist, a handle to a newly created info object containing no key/value pairs is returned. The user is responsible for freeing info_used via MPI_INFO_FREE.
MPI_Ialltoallv
int MPI_Ialltoallv(const void *sendbuf, const int sendcounts[], const int sdispls[],MPI_Datatype sendtype, void *recvbuf, const int recvcounts[],const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Sends data from all processes to all processes, in a non-blocking way; each process may send a different amount of data and provide displacements for the input and output data.
MPI_Type_create_hvector
int MPI_Type_create_hvector(int count, int blocklength,MPI_Aint stride,MPI_Datatype oldtype,MPI_Datatype *newtype)
Creates a vector datatype with a constant stride given in bytes.
MPI_Comm_get_name
int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen)
Gets the name of a communicator.
MPI_Ialltoallw
int MPI_Ialltoallw(const void *sendbuf, const int sendcounts[], const int sdispls[],const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[], const int rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Request *request)
Non-blocking generalized all-to-all communication that allows different datatypes, counts, and displacements for each partner.
MPI_Type_create_indexed_block
int MPI_Type_create_indexed_block(int count,int blocklength,const int array_of_displacements[],MPI_Datatype oldtype,MPI_Datatype *newtype)
Creates an indexed datatype with constant-sized blocks.
MPI_Comm_get_parent
int MPI_Comm_get_parent(MPI_Comm *parent)
Returns the parent intercommunicator of the current process.
MPI_Ibarrier
int MPI_Ibarrier(MPI_Comm comm, MPI_Request *request)
Notifies that the process has reached the barrier and returns immediately (non-blocking barrier).
MPI_Type_create_keyval
int MPI_Type_create_keyval(MPI_Type_copy_attr_function *type_copy_attr_fn, MPI_Type_delete_attr_function *type_delete_attr_fn, int *type_keyval, void *extra_state)
Creates an attribute keyval for MPI datatypes.
MPI_Comm_group
int MPI_Comm_group(MPI_Comm comm, MPI_Group *group)
Accesses the group associated with a communicator.
MPI_Ibcast
int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm, MPI_Request *request)
Broadcasts data from the root process to all other processes, in a non-blocking way.
MPI_Type_create_resized
int MPI_Type_create_resized(MPI_Datatype oldtype,MPI_Aint lb,MPI_Aint extent, MPI_Datatype *newtype)
Creates a new datatype from an existing one, with a new lower bound and extent.
MPI_Comm_idup
int MPI_Comm_idup(MPI_Comm comm, MPI_Comm *newcomm, MPI_Request *request)
Duplicates a communicator in a non-blocking way.
MPI_Ibsend
int MPI_Ibsend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,MPI_Comm comm, MPI_Request *request)
Starts a non-blocking buffered send.
MPI_Type_create_struct
int MPI_Type_create_struct(int count,const int array_of_blocklengths[], const MPI_Aint array_of_displacements[], const MPI_Datatype array_of_types[], MPI_Datatype *newtype)
Creates an MPI datatype from general datatypes, displacements, and block sizes.
MPI_Comm_join
int MPI_Comm_join(int fd, MPI_Comm *intercomm)
Creates an intercommunicator from two processes connected by a socket.
MPI_Iexscan
int MPI_Iexscan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,MPI_Op op, MPI_Comm comm, MPI_Request *request)
Computes an exclusive scan (partial reduction) of data across a group of processes, in a non-blocking way.
MPI_Type_create_subarray
int MPI_Type_create_subarray(int ndims,const int array_of_sizes[], const int array_of_subsizes[], const int array_of_starts[],int order,MPI_Datatype oldtype,MPI_Datatype *newtype)
Creates a datatype describing a subarray of a regular multidimensional array.
MPI_Comm_rank
int MPI_Comm_rank( MPI_Comm comm, int *rank )
MPI_Igather
int MPI_Igather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,int root, MPI_Comm comm, MPI_Request *request)
Gathers values from a group of processes to the root, in a non-blocking way.
MPI_Type_delete_attr
int MPI_Type_delete_attr(MPI_Datatype datatype, int type_keyval)
Deletes an attribute value by key.
MPI_Comm_remote_group
int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group)
Accesses the remote group associated with an intercommunicator.
MPI_Igatherv
int MPI_Igatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,const int recvcounts[], const int displs[], MPI_Datatype recvtype, int root,MPI_Comm comm, MPI_Request *request)
Gathers data from all processes in the group into specified locations, in a non-blocking way.
MPI_Type_dup
int MPI_Type_dup(MPI_Datatype oldtype, MPI_Datatype *newtype)
Duplicates a datatype.
MPI_Comm_remote_size
int MPI_Comm_remote_size(MPI_Comm comm, int *size)
Determines the size of the remote group associated with an intercommunicator.
MPI_Improbe
int MPI_Improbe(int source, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status)
Non-blocking matched probe.
MPI_Type_extent
int MPI_Type_extent(MPI_Datatype datatype, MPI_Aint *extent)
Returns the extent of a datatype.
MPI_Comm_set_attr
int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val)
Stores an attribute value by key.
MPI_Imrecv
int MPI_Imrecv(void *buf, int count, MPI_Datatype datatype, MPI_Message *message, MPI_Request *request)
Non-blocking receive of a message matched by MPI_Mprobe or MPI_Improbe.
MPI_Type_free
int MPI_Type_free(MPI_Datatype *datatype)
Frees a datatype.
MPI_Comm_set_errhandler
int MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler)
Sets the error handler for a communicator.
MPI_Ineighbor_allgather
int MPI_Ineighbor_allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Neighbor_allgather.
MPI_Type_free_keyval
int MPI_Type_free_keyval(int *type_keyval)
Frees an attribute key for datatypes.
MPI_Comm_set_info
int MPI_Comm_set_info(MPI_Comm comm, MPI_Info info)
Sets new values for the hints of the communicator associated with comm. This call is collective over the communicator. The info object may differ on each process, but any info entries that the implementation requires to be the same on all processes must appear with the same value in every process's info object.
MPI_Ineighbor_allgatherv
int MPI_Ineighbor_allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Neighbor_allgatherv.
MPI_Type_get_attr
int MPI_Type_get_attr(MPI_Datatype datatype, int type_keyval, void *attribute_val,int *flag)
Retrieves an attribute value by key.
MPI_Comm_set_name
int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name)
Sets the name of a communicator.
MPI_Ineighbor_alltoall
int MPI_Ineighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Neighbor_alltoall.
MPI_Type_get_contents
int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int array_of_integers[], MPI_Aint array_of_addresses[], MPI_Datatype array_of_datatypes[])
Gets the contents of a datatype (the arguments used to create it).
MPI_Comm_size
int MPI_Comm_size( MPI_Comm comm, int *size )
MPI_Ineighbor_alltoallv
int MPI_Ineighbor_alltoallv(const void *sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Neighbor_alltoallv.
MPI_Type_get_envelope
int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner)
Gets the envelope of a datatype (the combiner and argument counts used to create it).
MPI_Comm_spawn
int MPI_Comm_spawn(const char *command, char *argv[], int maxprocs, MPI_Info info,int root, MPI_Comm comm, MPI_Comm *intercomm,int array_of_errcodes[])
Spawns up to maxprocs instances of an MPI application.
MPI_Ineighbor_alltoallw
int MPI_Ineighbor_alltoallw(const void *sendbuf, const int sendcounts[], const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Neighbor_alltoallw.
MPI_Type_get_extent
int MPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint *lb, MPI_Aint *extent)
Gets the lower bound and extent of a datatype.
MPI_Comm_spawn_multiple
int MPI_Comm_spawn_multiple(int count, char *array_of_commands[],char **array_of_argv[], const int array_of_maxprocs[],const MPI_Info array_of_info[], int root, MPI_Comm comm,MPI_Comm *intercomm, int array_of_errcodes[])
Spawns multiple MPI applications (or the same application with different arguments).
MPI_Info_create
int MPI_Info_create( MPI_Info *info )
Creates a new info object.
MPI_Type_get_extent_x
int MPI_Type_get_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent)
Gets the lower bound and extent of a datatype, as MPI_Count values.
MPI_Comm_split
int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm)
Creates new communicators based on colors and keys.
MPI_Info_delete
int MPI_Info_delete( MPI_Info info, const char *key )
Deletes a key/value pair from an info object.
MPI_Type_get_name
int MPI_Type_get_name(MPI_Datatype datatype, char *type_name, int *resultlen)
Gets the name of a datatype.
MPI_Comm_split_type
int MPI_Comm_split_type(MPI_Comm comm, int split_type, int key, MPI_Info info,MPI_Comm * newcomm)
Creates new communicators based on split types and keys.
MPI_Info_dup
int MPI_Info_dup( MPI_Info info, MPI_Info *newinfo )
Returns a duplicate of an info object.
MPI_Type_get_true_extent
int MPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint *true_lb,MPI_Aint *true_extent)
Gets the true lower bound and extent of a datatype.
MPI_Comm_test_inter
int MPI_Comm_test_inter(MPI_Comm comm, int *flag)
Tests whether a communicator is an intercommunicator.
MPI_Info_free
int MPI_Info_free( MPI_Info *info )
Frees an info object.
MPI_Type_get_true_extent_x
int MPI_Type_get_true_extent_x(MPI_Datatype datatype, MPI_Count *true_lb, MPI_Count *true_extent)
Gets the true lower bound and extent of a datatype, as MPI_Count values.
MPI_Compare_and_swap
int MPI_Compare_and_swap(const void *origin_addr, const void *compare_addr,void *result_addr, MPI_Datatype datatype, int target_rank,MPI_Aint target_disp, MPI_Win win)
Performs a one-sided atomic compare-and-swap.
MPI_Info_get
int MPI_Info_get(MPI_Info info, const char *key, int valuelen, char *value, int *flag)
Retrieves the value associated with a key.
MPI_Type_hindexed
int MPI_Type_hindexed(int count,const int *array_of_blocklengths, const MPI_Aint *array_of_displacements,MPI_Datatype oldtype, MPI_Datatype *newtype)
Creates an indexed datatype with byte displacements.
MPI_Dims_create
int MPI_Dims_create(int nnodes, int ndims, int dims[])
Creates a division of processes among the dimensions of a Cartesian grid.
MPI_Info_get_nkeys
int MPI_Info_get_nkeys( MPI_Info info, int *nkeys )
Returns the number of keys in an info object.
MPI_Type_hvector
int MPI_Type_hvector(int count,int blocklength, MPI_Aint stride,MPI_Datatype oldtype, MPI_Datatype *newtype)
Creates a vector (strided) datatype with the stride given in bytes.
MPI_Dist_graph_create
int MPI_Dist_graph_create(MPI_Comm comm_old, int n, const int sources[],const int degrees[], const int destinations[],const int weights[],MPI_Info info, int reorder, MPI_Comm *comm_dist_graph)
Returns a handle to a new communicator with distributed graph topology information attached.
MPI_Info_get_nthkey
int MPI_Info_get_nthkey( MPI_Info info, int n, char *key )
Returns the n-th key of an info object.
MPI_Type_indexed
int MPI_Type_indexed(int count,const int *array_of_blocklengths, const int *array_of_displacements,MPI_Datatype oldtype,MPI_Datatype *newtype)
Creates an indexed datatype.
MPI_Dist_graph_create_adjacent
int MPI_Dist_graph_create_adjacent(MPI_Comm comm_old,int indegree, const int sources[],const int sourceweights[],int outdegree, const int destinations[], const int destweights[], MPI_Info info, int reorder, MPI_Comm *comm_dist_graph)
Returns a handle to a new communicator with adjacent distributed graph topology information attached.
MPI_Info_get_valuelen
int MPI_Info_get_valuelen( MPI_Info info, const char *key, int *valuelen, int *flag )
Retrieves the length of the value associated with a key.
MPI_Type_lb
int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint *displacement)
Returns the lower bound of a datatype.
MPI_Dist_graph_neighbors
int MPI_Dist_graph_neighbors(MPI_Comm comm,int maxindegree, int sources[], int sourceweights[],int maxoutdegree, int destinations[], int destweights[])
Provides the adjacency information (neighbors) of a distributed graph topology.
MPI_Info_set
int MPI_Info_set( MPI_Info info, const char *key, const char *value )
Adds a key/value pair to an info object.
MPI_Type_match_size
int MPI_Type_match_size(int typeclass, int size, MPI_Datatype *datatype)
Returns a datatype of the given type class matching the specified size.
MPI_Dist_graph_neighbors_count
int MPI_Dist_graph_neighbors_count(MPI_Comm comm, int *indegree, int *outdegree, int *weighted)
Provides the neighbor counts (in-degree and out-degree) of a distributed graph topology.
MPI_Init
int MPI_Init( int *argc, char ***argv )
MPI_Type_set_attr
int MPI_Type_set_attr(MPI_Datatype datatype, int type_keyval, void *attribute_val)
Stores an attribute value by key.
MPI_Errhandler_create
int MPI_Errhandler_create(MPI_Handler_function *function, MPI_Errhandler *errhandler)
Creates an error handler.
MPI_Init_thread
int MPI_Init_thread( int *argc, char ***argv, int required, int *provided )
Initializes the MPI execution environment with the requested level of thread support.
MPI_Type_set_name
int MPI_Type_set_name(MPI_Datatype datatype, const char *type_name)
Sets the name of a datatype.
MPI_Errhandler_free
int MPI_Errhandler_free(MPI_Errhandler *errhandler)
Frees an MPI-style error handler.
MPI_Initialized
int MPI_Initialized( int *flag )
Indicates whether MPI_Init has been called.
MPI_Type_size
int MPI_Type_size(MPI_Datatype datatype, int *size)
Returns the number of bytes occupied by a datatype.
MPI_Errhandler_get
int MPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler *errhandler)
Gets the error handler of a communicator.
MPI_Intercomm_create
int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
Creates an intercommunicator from two intracommunicators.
MPI_Type_size_x
int MPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size)
Returns the size of a datatype in bytes, as an MPI_Count.
MPI_Errhandler_set
int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler)
Sets the error handler for a communicator.
MPI_Intercomm_merge
int MPI_Intercomm_merge(MPI_Comm intercomm, int high, MPI_Comm *newintracomm)
Creates an intracommunicator from an intercommunicator.
MPI_Type_struct
int MPI_Type_struct(int count,const int *array_of_blocklengths,const MPI_Aint *array_of_displacements,const MPI_Datatype *array_of_types,MPI_Datatype *newtype)
Creates a struct datatype.
MPI_Error_class
int MPI_Error_class(int errorcode, int *errorclass)
Converts an error code into an error class.
MPI_Iprobe
int MPI_Iprobe(int source, int tag, MPI_Comm comm, int *flag, MPI_Status *status)
Non-blocking test for a message.
MPI_Type_ub
int MPI_Type_ub(MPI_Datatype datatype, MPI_Aint *displacement)
Returns the upper bound of a datatype.
MPI_Error_string
int MPI_Error_string(int errorcode, char *string, int *resultlen)
Returns a string for a given error code.
MPI_Irecv
int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source,int tag, MPI_Comm comm, MPI_Request *request)
MPI_Type_vector
int MPI_Type_vector(int count, int blocklength,int stride, MPI_Datatype oldtype,MPI_Datatype *newtype)
Creates a vector (strided) datatype.
MPI_Exscan
int MPI_Exscan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,MPI_Op op, MPI_Comm comm)
Computes an exclusive scan (partial reduction) of data across a group of processes.
MPI_Ireduce
int MPI_Ireduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,MPI_Op op, int root, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Reduce.
MPI_Unpack
int MPI_Unpack(const void *inbuf, int insize, int *position,void *outbuf, int outcount, MPI_Datatype datatype,MPI_Comm comm)
Unpacks data from a contiguous buffer according to a datatype.
MPI_Fetch_and_op
int MPI_Fetch_and_op(const void *origin_addr, void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp,MPI_Op op, MPI_Win win)
Perform one-sided read-modify-write
MPI_Ireduce_scatter
int MPI_Ireduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[],MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Reduce_scatter.
MPI_Unpack_external
int MPI_Unpack_external(const char datarep[],const void *inbuf,MPI_Aint insize, MPI_Aint *position,void *outbuf, int outcount,MPI_Datatype datatype)
Unpacks a buffer (packed with MPI_Pack_external) according to a datatype.
MPI_File_c2f
MPI_Fint MPI_File_c2f(MPI_File fh)
Converts a C file handle to a Fortran file handle.
MPI_Ireduce_scatter_block
int MPI_Ireduce_scatter_block(const void *sendbuf, void *recvbuf,int recvcount,MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Reduce_scatter_block.
MPI_Unpublish_name
int MPI_Unpublish_name(const char *service_name, MPI_Info info, const char *port_name)
Unpublishes a service name published with MPI_Publish_name.
MPI_File_call_errhandler
int MPI_File_call_errhandler(MPI_File fh, int errorcode)
Invokes the error handler installed on a file.
MPI_Irsend
int MPI_Irsend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Rsend.
MPI_Wait
int MPI_Wait(MPI_Request *request, MPI_Status *status)
MPI_File_close
int MPI_File_close(MPI_File *fh)
Closes a file.
MPI_Is_thread_main
int MPI_Is_thread_main( int *flag )
Returns a flag indicating whether this thread is the one that called MPI_Init or MPI_Init_thread.
MPI_Waitall
int MPI_Waitall(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
Waits for all of the given requests to complete.
MPI_File_create_errhandler
int MPI_File_create_errhandler(MPI_File_errhandler_function *file_errhandler_fn, MPI_Errhandler *errhandler)
Creates a file error handler.
MPI_Iscan
int MPI_Iscan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Scan.
MPI_Waitany
int MPI_Waitany(int count, MPI_Request array_of_requests[], int *indx,MPI_Status *status)
Waits for any one of the given requests to complete.
MPI_File_delete
int MPI_File_delete(ROMIO_CONST char *filename, MPI_Info info)
Deletes a file.
MPI_Iscatter
int MPI_Iscatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Scatter.
MPI_Waitsome
int MPI_Waitsome(int incount, MPI_Request array_of_requests[], int *outcount, int array_of_indices[],MPI_Status array_of_statuses[])
Waits for at least one of the given requests to complete.
MPI_File_f2c
MPI_File MPI_File_f2c(MPI_Fint fh)
Converts a Fortran file handle to a C file handle.
MPI_Iscatterv
int MPI_Iscatterv(const void *sendbuf, const int sendcounts[], const int displs[],MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,int root, MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Scatterv.
MPI_Win_allocate
int MPI_Win_allocate(MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *baseptr, MPI_Win *win)
Creates and allocates an MPI window object for one-sided communication.
MPI_File_get_amode
int MPI_File_get_amode(MPI_File fh, int *amode)
Gets the access mode of a file.
MPI_Isend
int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Send.
MPI_Win_allocate_shared
int MPI_Win_allocate_shared(MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *baseptr, MPI_Win *win)
Creates an MPI window object for one-sided communication and shared memory access, allocating memory on each process.
MPI_File_get_atomicity
int MPI_File_get_atomicity(MPI_File fh, int *flag)
Returns the current atomicity mode.
MPI_Issend
int MPI_Issend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,MPI_Comm comm, MPI_Request *request)
Non-blocking version of MPI_Ssend.
MPI_Win_attach
int MPI_Win_attach(MPI_Win win, void *base, MPI_Aint size)
Attaches memory to a dynamic window.
MPI_File_get_byte_offset
int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset, MPI_Offset *disp)
Returns the absolute byte position in the file corresponding to "offset" etypes relative to the current view.
MPI_Keyval_create
int MPI_Keyval_create(MPI_Copy_function *copy_fn, MPI_Delete_function *delete_fn, int *keyval, void *extra_state)
Creates a new attribute key.
MPI_Win_call_errhandler
int MPI_Win_call_errhandler(MPI_Win win, int errorcode)
Invokes the error handler installed on a window object.
MPI_File_get_errhandler
int MPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler)
Gets the error handler of a file.
MPI_Keyval_free
int MPI_Keyval_free(int *keyval)
Frees an attribute key.
MPI_Win_complete
int MPI_Win_complete(MPI_Win win)
Completes an RMA access epoch begun with MPI_Win_start.
MPI_File_get_group
int MPI_File_get_group(MPI_File fh, MPI_Group *group)
Returns the group of processes that opened the file.
MPI_Lookup_name
int MPI_Lookup_name(const char *service_name, MPI_Info info, char *port_name)
Looks up a port name given a service name.
MPI_Win_create
int MPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win)
Creates an MPI window object for one-sided communication.
MPI_File_get_info
int MPI_File_get_info(MPI_File fh, MPI_Info *info_used)
Returns the hints actually used by MPI for the file.
MPI_Mprobe
int MPI_Mprobe(int source, int tag, MPI_Comm comm, MPI_Message *message, MPI_Status *status)
Blocking matched probe.
MPI_Win_create_dynamic
int MPI_Win_create_dynamic(MPI_Info info, MPI_Comm comm, MPI_Win *win)
Creates an MPI window object for one-sided communication. This window allows memory to be dynamically exposed and un-exposed for RMA operations.
MPI_File_get_position
int MPI_File_get_position(MPI_File fh, MPI_Offset *offset)
Returns the current position of the individual file pointer, in etype units relative to the current view.
MPI_Mrecv
int MPI_Mrecv(void *buf, int count, MPI_Datatype datatype, MPI_Message *message, MPI_Status *status)
Blocking receive of a message matched by MPI_Mprobe or MPI_Improbe.
MPI_Win_create_errhandler
int MPI_Win_create_errhandler(MPI_Win_errhandler_function *win_errhandler_fn, MPI_Errhandler *errhandler)
Creates an error handler for window objects.
MPI_File_get_position_shared
int MPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset)
Returns the current position of the shared file pointer, in etype units relative to the current view.
MPI_Neighbor_allgather
int MPI_Neighbor_allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
In this function, each process i gathers data items from every process j for which an edge (j,i) exists in the topology graph, and sends the same data item to all processes j for which an edge (i,j) exists. The send buffer is sent to each neighboring process, and the block received from the l-th neighbor is stored in the l-th block of the receive buffer.
MPI_Win_create_keyval
int MPI_Win_create_keyval(MPI_Win_copy_attr_function *win_copy_attr_fn, MPI_Win_delete_attr_function *win_delete_attr_fn, int *win_keyval, void *extra_state)
Creates an attribute keyval for MPI window objects.
MPI_File_get_size
int MPI_File_get_size(MPI_File fh, MPI_Offset *size)
Returns the size of a file.
MPI_Neighbor_allgatherv
int MPI_Neighbor_allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm)
Vector variant of MPI_Neighbor_allgather.
MPI_Win_delete_attr
int MPI_Win_delete_attr(MPI_Win win, int win_keyval)
Deletes the attribute value associated with a key on a window.
MPI_File_get_type_extent
int MPI_File_get_type_extent(MPI_File fh, MPI_Datatype datatype, MPI_Aint *extent)
Returns the extent of a datatype in the file.
MPI_Neighbor_alltoall
int MPI_Neighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
In this function, each process i receives data items from every process j for which an edge (j,i) exists in the topology graph or Cartesian topology, and sends data items to all processes j for which an edge (i,j) exists. This call is more general than MPI_NEIGHBOR_ALLGATHER, because different data items can be sent to each neighbor: the k-th block of the send buffer is sent to the k-th neighboring process, and the block received from the l-th neighbor is stored in the l-th block of the receive buffer.
MPI_Win_detach
int MPI_Win_detach(MPI_Win win, const void *base)
Detaches memory from a dynamic window.
MPI_File_get_view
int MPI_File_get_view(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etype,MPI_Datatype *filetype, char *datarep)
Returns the file view.
MPI_Neighbor_alltoallv
int MPI_Neighbor_alltoallv(const void *sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm)
Vector variant of MPI_Neighbor_alltoall that allows sending/receiving a different number of elements to/from each neighbor.
MPI_Win_fence
int MPI_Win_fence(int assert, MPI_Win win)
Performs an MPI fence synchronization on an MPI window.
MPI_File_iread
int MPI_File_iread(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request)
Non-blocking read using the individual file pointer.
MPI_Neighbor_alltoallw
int MPI_Neighbor_alltoallw(const void *sendbuf, const int sendcounts[], const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm)
Like MPI_Neighbor_alltoallv, but also allows different datatypes to be sent to and received from each neighbor.
MPI_Win_flush
int MPI_Win_flush(int rank, MPI_Win win)
Completes all outstanding RMA operations at the given target.
MPI_File_iread_all
int MPI_File_iread_all(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request)
Non-blocking collective read using the individual file pointer.
MPI_Op_commutative
int MPI_Op_commutative(MPI_Op op, int *commute)
Queries the commutativity of an MPI reduction operation.
MPI_Win_flush_all
int MPI_Win_flush_all(MPI_Win win)
Complete all outstanding RMA operations at all targets
MPI_File_iread_at
int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPIO_Request *request)
Non-blocking read at an explicit offset.
MPI_Op_create
int MPI_Op_create(MPI_User_function *user_fn, int commute, MPI_Op *op)
Creates a handle for a user-defined combination (reduction) function.
MPI_Win_flush_local
int MPI_Win_flush_local(int rank, MPI_Win win)
Complete locally all outstanding RMA operations at the given target
MPI_File_iread_at_all
int MPI_File_iread_at_all(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype,MPI_Request *request)
Non-blocking collective read at an explicit offset.
MPI_Op_free
int MPI_Op_free(MPI_Op *op)
Frees a handle for a user-defined combination function.
MPI_Win_flush_local_all
int MPI_Win_flush_local_all(MPI_Win win)
Complete locally all outstanding RMA operations at all targets
MPI_File_iread_shared
int MPI_File_iread_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Request *request)
Non-blocking read using the shared file pointer.
MPI_Open_port
int MPI_Open_port(MPI_Info info, char *port_name)
Establishes an address (port) that can be used to set up connections between groups of MPI processes.
MPI_Win_free
int MPI_Win_free(MPI_Win *win)
Frees an MPI RMA window.
MPI_File_iwrite
int MPI_File_iwrite(MPI_File fh, ROMIO_CONST void *buf, int count,MPI_Datatype datatype, MPI_Request *request)
Non-blocking write using the individual file pointer.
MPI_Pack
int MPI_Pack(const void *inbuf, int incount,MPI_Datatype datatype,void *outbuf, int outsize,int *position, MPI_Comm comm)
Packs data of a given datatype into contiguous memory.
MPI_Win_free_keyval
int MPI_Win_free_keyval(int *win_keyval)
Frees an attribute key for MPI RMA windows.
MPI_File_iwrite_all
int MPI_File_iwrite_all(MPI_File fh, ROMIO_CONST void *buf, int count,MPI_Datatype datatype, MPI_Request *request)
Non-blocking collective write using the individual file pointer.
MPI_Pack_external
int MPI_Pack_external(const char datarep[], const void *inbuf,int incount,MPI_Datatype datatype,void *outbuf, MPI_Aint outsize, MPI_Aint *position)
Packs data of a given datatype into contiguous memory using the external32 format.
MPI_Win_get_attr
int MPI_Win_get_attr(MPI_Win win, int win_keyval, void *attribute_val, int *flag)
Gets an attribute of a window object.
MPI_File_iwrite_at
int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, ROMIO_CONST void *buf,int count, MPI_Datatype datatype, MPIO_Request *request)
Non-blocking write at an explicit offset.
MPI_Pack_external_size
int MPI_Pack_external_size(const char datarep[],int incount,MPI_Datatype datatype,MPI_Aint *size)
Returns an upper bound on the space needed to pack a message with MPI_Pack_external.
MPI_Win_get_errhandler
int MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler)
Gets the error handler of an MPI RMA window.
MPI_File_iwrite_at_all
int MPI_File_iwrite_at_all(MPI_File fh, MPI_Offset offset, ROMIO_CONST void *buf, int count, MPI_Datatype datatype, MPI_Request *request)
Non-blocking collective write at an explicit offset.
MPI_Pack_size
int MPI_Pack_size(int incount,MPI_Datatype datatype, MPI_Comm comm, int *size)
Returns an upper bound on the space needed to pack a message.
MPI_Win_get_group
int MPI_Win_get_group(MPI_Win win, MPI_Group *group)
Gets the MPI group of a window object.
MPI_File_iwrite_shared
int MPI_File_iwrite_shared(MPI_File fh, ROMIO_CONST void *buf, int count, MPI_Datatype datatype, MPIO_Request *request)
Non-blocking write using the shared file pointer.
MPI_Pcontrol
int MPI_Pcontrol(const int level, ...)
Controls profiling.
MPI_Win_get_info
int MPI_Win_get_info(MPI_Win win, MPI_Info *info_used)
Returns a new info object containing the hints of the window associated with win.
MPI_File_open
int MPI_File_open(MPI_Comm comm, ROMIO_CONST char *filename, int amode,MPI_Info info, MPI_File *fh)
Opens a file.
MPI_Probe
int MPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status)
Blocking test for a message.
MPI_Win_get_name
int MPI_Win_get_name(MPI_Win win, char *win_name, int *resultlen)
Gets the name of an MPI RMA window.
MPI_File_preallocate
int MPI_File_preallocate(MPI_File fh, MPI_Offset size)
Preallocates storage space for a file.
MPI_Publish_name
int MPI_Publish_name(const char *service_name, MPI_Info info, const char *port_name)
Publishes a service name for use with MPI_Comm_connect.
MPI_Win_lock
int MPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win)
Begins an RMA access epoch at the target process.
MPI_File_read
int MPI_File_read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status)
Reads a file using the individual file pointer.
MPI_Put
int MPI_Put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp,int target_count, MPI_Datatype target_datatype, MPI_Win win)
Puts data into the memory window of a remote process.
MPI_Win_lock_all
int MPI_Win_lock_all(int assert, MPI_Win win)
Begins an RMA access epoch to all processes in the window.
MPI_File_read_all
int MPI_File_read_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status)
Collective read using the individual file pointer.
MPI_Query_thread
int MPI_Query_thread( int *provided )
Returns the level of thread support provided by the MPI library.
MPI_Win_post
int MPI_Win_post(MPI_Group group, int assert, MPI_Win win)
Begins an RMA exposure epoch.
MPI_File_read_all_begin
int MPI_File_read_all_begin(MPI_File fh, void *buf, int count, MPI_Datatype datatype)
Begins a split collective read using the individual file pointer.
MPI_Raccumulate
int MPI_Raccumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win,MPI_Request *request)
Accumulates data into the target process using remote memory access and returns a request handle for the operation.
MPI_Win_set_attr
int MPI_Win_set_attr(MPI_Win win, int win_keyval, void *attribute_val)
Stores an attribute value by key.
MPI_File_read_all_end
int MPI_File_read_all_end(MPI_File fh, void *buf, MPI_Status *status)
Completes a split collective read using the individual file pointer.
MPI_Recv
int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,MPI_Comm comm, MPI_Status *status)
MPI_Win_set_errhandler
int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler)
Sets the error handler for a window object.
MPI_File_read_at
int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status)
Reads at an explicit offset.
MPI_Recv_init
int MPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request)
Initializes a persistent MPI_Recv request.
MPI_Win_set_info
int MPI_Win_set_info(MPI_Win win, MPI_Info info)
Sets new values for the hints of the window associated with win.
MPI_File_read_at_all
int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status)
Collective read at an explicit offset.
MPI_Reduce
int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,MPI_Op op, int root, MPI_Comm comm)
Performs a reduction, combining values from all processes and leaving the result at the root.
MPI_Win_set_name
int MPI_Win_set_name(MPI_Win win, const char *win_name)
Sets the name of an MPI RMA window.
MPI_File_read_at_all_begin
int MPI_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype)
Begins a split collective read at an explicit offset.
MPI_Reduce_local
int MPI_Reduce_local(const void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op)
Applies a reduction operator to local arguments.
MPI_Win_shared_query
int MPI_Win_shared_query(MPI_Win win, int rank, MPI_Aint *size, int *disp_unit, void *baseptr)
Queries the size and base pointer of another process's segment of a shared memory window.
MPI_File_read_at_all_end
int MPI_File_read_at_all_end(MPI_File fh, void *buf, MPI_Status *status)
Completes a split collective read at an explicit offset.
MPI_Reduce_scatter
int MPI_Reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[],MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
Performs a reduction and scatters the result.
MPI_Win_start
int MPI_Win_start(MPI_Group group, int assert, MPI_Win win)
Begins an RMA access epoch.
MPI_File_read_ordered
int MPI_File_read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status)
Collective read using the shared file pointer.
MPI_Reduce_scatter_block
int MPI_Reduce_scatter_block(const void *sendbuf, void *recvbuf, int recvcount, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
Combines values and scatters the result.
MPI_Win_sync
int MPI_Win_sync(MPI_Win win)
Synchronizes the public and private copies of the given window.
MPI_File_read_ordered_begin
int MPI_File_read_ordered_begin(MPI_File fh, void *buf, int count,MPI_Datatype datatype)
Begins a split collective read using the shared file pointer.
MPI_Register_datarep
int MPI_Register_datarep(ROMIO_CONST char *datarep,MPI_Datarep_conversion_function *read_conversion_fn, MPI_Datarep_conversion_function *write_conversion_fn, MPI_Datarep_extent_function *dtype_file_extent_fn,void *extra_state)
Registers functions for a user-defined data representation.
MPI_Win_test
int MPI_Win_test(MPI_Win win, int *flag)
Tests whether an RMA exposure epoch has completed.
MPI_File_read_ordered_end
int MPI_File_read_ordered_end(MPI_File fh, void *buf, MPI_Status *status)
Completes a split collective read using the shared file pointer.
MPI_Request_free
int MPI_Request_free(MPI_Request *request)
Frees a request.
MPI_Win_unlock
int MPI_Win_unlock(int rank, MPI_Win win)
Completes an RMA access epoch at the target process
MPI_File_read_shared
int MPI_File_read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status)
Reads using the shared file pointer.
MPI_Request_get_status
int MPI_Request_get_status(MPI_Request request, int *flag, MPI_Status *status)
Non-destructively tests whether a request has completed.
MPI_Win_unlock_all
int MPI_Win_unlock_all(MPI_Win win)
Completes an RMA access epoch at all processes on the given window
MPI_File_seek
int MPI_File_seek(MPI_File fh, MPI_Offset offset, int whence)
Updates the individual file pointer.
MPI_Rget
int MPI_Rget(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request *request)
Gets data from the memory window of a remote process and returns a request handle for the operation.
MPI_Win_wait
int MPI_Win_wait(MPI_Win win)
Completes an RMA exposure epoch begun with MPI_Win_post
MPI_File_seek_shared
int MPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence)
Updates the shared file pointer.
MPI_Rget_accumulate
int MPI_Rget_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win,MPI_Request *request)
Performs an atomic, one-sided read-and-accumulate operation and returns a request handle for the operation.
MPI_Wtick
double MPI_Wtick( void )
Returns the resolution of MPI_Wtime.
MPI_File_set_atomicity
int MPI_File_set_atomicity(MPI_File fh, int flag)
Sets the atomicity mode.
MPI_Rput
int MPI_Rput(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request *request)
Puts data into the memory window of a remote process and returns a request handle for the operation.
MPI_Wtime
double MPI_Wtime( void )
Returns the elapsed wall-clock time on the calling process.
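To close the list, here is the usage sketch referenced at the MPI_Bcast entry above. It is not from the original text and only combines two of the collectives listed here under simple assumptions (run with one or more processes; all variable names are illustrative): the root broadcasts a value with MPI_Bcast, every rank adds its own rank to it, and MPI_Reduce sums the contributions back at the root.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int base = 0;
    if (rank == 0)
        base = 100;                           /* only the root knows the value initially */
    /* every process receives the root's value */
    MPI_Bcast(&base, 1, MPI_INT, 0, MPI_COMM_WORLD);

    int local = base + rank;                  /* each rank derives its own contribution */
    int total = 0;
    /* sum the contributions of all ranks at the root */
    MPI_Reduce(&local, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("sum over %d processes: %d\n", size, total);

    MPI_Finalize();
    return 0;
}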
The above inevitably contains omissions and mistakes; I would be grateful if readers could point them out in the comments.