// Parallel quicksort on 2^n MPI processes (hypercube pivot exchange).
// The process count must be a power of two, and TOTAL_SIZE should be large
// enough that every chunk stays non-empty, since each round picks its pivot
// from the group leader's local data.
#include <iostream>
#include <algorithm>
#include <ctime>
#include <mpi.h>
using namespace std;
const int TOTAL_SIZE = 2000;
int original[TOTAL_SIZE], sorted[TOTAL_SIZE];
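// original is filled and scattered by rank 0; sorted is used only by rank 0
// to gather the final result.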
// Three-way partition of arr[0, e) around base.
// On return arr[0, i) holds the elements <= base and arr[i, e) those > base,
// so i is the number of elements on the "low" side.
void partitionByBase(int* arr, int e, int base, int& i)
{
if (e == 1 && arr[0] <= base) {
i = 1;
return;
}
if (e == 0 || (e == 1 && arr[0] > base)) {
i = 0;
return;
}
int l = -1, r = e;
i = 0;
// Invariants: arr[0, l] < base, arr(l, i) == base, arr[r, e) > base.
while (i < r)
{
if (arr[i] < base)
swap(arr[i++], arr[++l]);
else if (arr[i] > base)
swap(arr[i], arr[--r]); // arr[i] changed, so re-examine it next iteration
else i++;
}
// On exit: arr[0, i) <= base, arr[i, e) > base.
}
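// Illustration (hand-traced, not executed by the program): partitioning
// {7, 2, 9, 4, 4} around base = 4 rearranges the array to {2, 4, 4, 9, 7}
// and sets i = 3: the first three elements are <= 4, the last two are > 4.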
void bubblingSort(int arr[], int n)
{
int i, j, temp;
// Each outer pass bubbles one more maximum to the end; n passes in total.
for (i = 0; i < n; ++i) {
// The last i elements are already in place; stop one early because we
// compare arr[j] with arr[j + 1].
for (j = 0; j < n - i - 1; ++j) {
// Swap adjacent elements that are out of order.
if (arr[j] > arr[j + 1]) {
temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
}
}
}
}
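// Illustration: one outer pass over {3, 1, 2} compares and swaps adjacent
// pairs: {3, 1, 2} -> {1, 3, 2} -> {1, 2, 3}; the maximum is now in place.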
int main(int argc, char* argv[])
{
srand(unsigned(time(0)));
int process_num, my_ID;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &process_num);
MPI_Comm_rank(MPI_COMM_WORLD, &my_ID);
int base;
int* arr = new int[TOTAL_SIZE];
int *subSize = new int[process_num], *border = new int[process_num];
// Assumes TOTAL_SIZE is divisible by process_num.
for (int i = 0; i < process_num; i++) subSize[i] = TOTAL_SIZE / process_num;
int k = 0;
while ((1 << k) < process_num) k++; // process_num == 2^k; integer shifts avoid pow()'s floating point
if (my_ID == 0) // Rank 0 fills the initial array.
{
//printf("Master process: initial sequence:\n");
for (int i = 0; i < TOTAL_SIZE; i++)
{
original[i] = rand() % TOTAL_SIZE;
//printf("%d ", original[i]);
}
printf("\n");
}
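// Distribute the data: every rank receives one contiguous block of
// TOTAL_SIZE / process_num elements into its local arr.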
MPI_Scatter(original, TOTAL_SIZE / process_num, MPI_INT, arr, TOTAL_SIZE / process_num, MPI_INT, 0, MPI_COMM_WORLD);
int cnt = 1;
// Round cnt splits every group of 2^(k - cnt + 1) processes around a shared pivot.
while ((1 << cnt) <= process_num)
{
int x = 1 << (k - cnt + 1); // current group size
if (my_ID % x == 0) // the first process of each group picks the pivot and sends it to the rest
{
base = arr[subSize[my_ID] / 2];
for (int i = 1; i < x; i++)
MPI_Send(&base, 1, MPI_INT, my_ID + i, my_ID, MPI_COMM_WORLD);
}
else
{
MPI_Recv(&base, 1, MPI_INT, my_ID - my_ID % x, my_ID - my_ID % x, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
partitionByBase(arr, subSize[my_ID], base, border[my_ID]); // local partition around the pivot
int dis = process_num >> cnt; // distance to the partner process
// Exchange with the partner dis ranks away: first the chunk sizes and the
// partition boundaries, then the elements. The lower partner keeps the
// "> base" parts, the upper partner keeps the "<= base" parts.
if ((my_ID / dis) % 2 == 0) // lower partner of the pair
{
MPI_Send(&subSize[my_ID], 1, MPI_INT, my_ID + dis, my_ID, MPI_COMM_WORLD);
MPI_Recv(&subSize[my_ID + dis], 1, MPI_INT, my_ID + dis, my_ID + dis, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(&border[my_ID], 1, MPI_INT, my_ID + dis, my_ID, MPI_COMM_WORLD);
MPI_Recv(&border[my_ID + dis], 1, MPI_INT, my_ID + dis, my_ID + dis, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(arr, border[my_ID], MPI_INT, my_ID + dis, my_ID, MPI_COMM_WORLD);
MPI_Recv(arr + subSize[my_ID], subSize[my_ID + dis] - border[my_ID + dis], MPI_INT, my_ID + dis, my_ID + dis, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
subSize[my_ID] = subSize[my_ID] - border[my_ID] + subSize[my_ID + dis] - border[my_ID + dis];
for (int i = 0; i < subSize[my_ID]; i++) // compact the kept data to the front
arr[i] = arr[i + border[my_ID]];
}
else // upper partner of the pair
{
MPI_Recv(&subSize[my_ID - dis], 1, MPI_INT, my_ID - dis, my_ID - dis, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(&subSize[my_ID], 1, MPI_INT, my_ID - dis, my_ID, MPI_COMM_WORLD);
MPI_Recv(&border[my_ID - dis], 1, MPI_INT, my_ID - dis, my_ID - dis, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(&border[my_ID], 1, MPI_INT, my_ID - dis, my_ID, MPI_COMM_WORLD);
MPI_Recv(arr + subSize[my_ID], border[my_ID - dis], MPI_INT, my_ID - dis, my_ID - dis, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(arr + border[my_ID], subSize[my_ID] - border[my_ID], MPI_INT, my_ID - dis, my_ID, MPI_COMM_WORLD);
for (int i = 0; i < border[my_ID - dis]; i++) // append the received data after our own "<= base" part
arr[i + border[my_ID]] = arr[i + subSize[my_ID]];
subSize[my_ID] = border[my_ID] + border[my_ID - dis];
}
cnt++;
}
bubblingSort(arr, subSize[my_ID]); // every process sorts its final chunk locally
if (my_ID != 0) // non-root processes send their chunk size and data to rank 0
{
MPI_Send(&subSize[my_ID], 1, MPI_INT, 0, my_ID, MPI_COMM_WORLD);
MPI_Send(arr, subSize[my_ID], MPI_INT, 0, my_ID, MPI_COMM_WORLD);
}
else
{
for (int i = 1; i < process_num; i++)
MPI_Recv(&subSize[i], 1, MPI_INT, i, i, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
int size = 0;
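// The lower partner of each pair kept the "> base" part, so rank 0 holds
// the largest values and rank process_num - 1 the smallest; concatenating
// chunks from the highest rank down (rank 0's own chunk last) is ascending.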
for (int i = process_num - 1; i > 0; i--)
{
MPI_Recv(sorted + size, subSize[i], MPI_INT, i, i, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
size += subSize[i];
}
for (int i = 0; i < subSize[0]; i++)
sorted[i + size] = arr[i];
printf("Master process: sorted sequence:\n");
for (auto p : sorted) printf("%d ", p);
puts("");
}
delete[] arr;
delete[] subSize;
delete[] border;
MPI_Finalize();
return 0;
}
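Build and run (a minimal sketch; "psort.cpp" is a placeholder file name, and the mpic++ / mpirun wrapper names vary with the MPI installation):

mpic++ -O2 -o psort psort.cpp
mpirun -np 8 ./psort

The count given to -np must be a power of two, matching the algorithm's assumption.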