
c - MPI point-to-point to collective communication: MPI_Scatterv trouble


I am working on a project that converts point-to-point communication to collective communication.

Basically, what I want to do is use MPI_Scatterv in place of MPI_Send and MPI_Recv. I am having trouble determining the correct arguments for Scatterv.
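
For reference, the C binding of MPI_Scatterv looks like this (the send-side arguments sendbuf, sendcounts, displs, and sendtype are significant only at the root; the const qualifiers are as of MPI-3, older versions omit them):

int MPI_Scatterv(const void *sendbuf, const int sendcounts[],
                 const int displs[], MPI_Datatype sendtype,
                 void *recvbuf, int recvcount, MPI_Datatype recvtype,
                 int root, MPI_Comm comm);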

Here is the function I am working with:

void read_block_vector (
    char *s,              /* IN - File name */
    void **v,             /* OUT - Subvector */
    MPI_Datatype dtype,   /* IN - Element type */
    int *n,               /* OUT - Vector length */
    MPI_Comm comm)        /* IN - Communicator */
{
    int datum_size;       /* Bytes per element */
    int i;
    FILE *infileptr;      /* Input file pointer */
    int local_els;        /* Elements on this proc */
    MPI_Status status;    /* Result of receive */
    int id;               /* Process rank */
    int p;                /* Number of processes */
    int x;                /* Result of read */

    datum_size = get_size (dtype);
    MPI_Comm_size(comm, &p);
    MPI_Comm_rank(comm, &id);

    /* Process p-1 opens file, determines number of vector
       elements, and broadcasts this value to the other
       processes. */

    if (id == (p-1)) {
        infileptr = fopen (s, "r");
        if (infileptr == NULL) *n = 0;
        else fread (n, sizeof(int), 1, infileptr);
    }
    MPI_Bcast (n, 1, MPI_INT, p-1, comm);
    if (! *n) {
        if (!id) {
            printf ("Input file '%s' cannot be opened\n", s);
            fflush (stdout);
        }
    }

    /* Block mapping of vector elements to processes */

    local_els = BLOCK_SIZE(id,p,*n);

    /* Dynamically allocate vector. */

    *v = my_malloc (id, local_els * datum_size);
    if (id == (p-1)) {
        for (i = 0; i < p-1; i++) {
            x = fread (*v, datum_size, BLOCK_SIZE(i,p,*n), infileptr);
            MPI_Send (*v, BLOCK_SIZE(i,p,*n), dtype, i, DATA_MSG, comm);
        }
        x = fread (*v, datum_size, BLOCK_SIZE(id,p,*n), infileptr);
        fclose (infileptr);
    } else {
        MPI_Recv (*v, BLOCK_SIZE(id,p,*n), dtype, p-1, DATA_MSG, comm, &status);
    }

    // My attempt at making this collective communication:
    if (id == (p-1))
    {
        x = fread(*v, datum_size, *n, infileptr);

        for (i = 0; i < p; i++)
        {
            size[i] = BLOCK_SIZE(i,p,*n);
        }
        //x = fread(*v, datum_size, BLOCK_SIZE(id,p,*n), infileptr);
        fclose(infileptr);
    }
    MPI_Scatterv(v, send_count, send_disp, dtype, storage, size[id], dtype, p-1, comm);
}

Any help would be greatly appreciated.

Thanks

Best Answer

It would be easier for people to answer your question if you posted a small, self-contained, reproducible example.

For Scatterv, you need to supply the list of counts to be sent to each process (which appears to be your size[] array) and the displacements within the data being sent from. The mechanics of Scatter vs. Scatterv are described in some detail in this answer. Trying to infer what all of the variables and unsupplied functions/macros do, the example below scatters a file among the processes.
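
Before the full example, a minimal sketch of just the counts/displacements computation may help. It assumes the Quinn-style BLOCK_LOW/BLOCK_HIGH/BLOCK_SIZE macro definitions; the question does not show them, so treat these definitions as an assumption:

#include <stdio.h>
#include <stdlib.h>

/* Assumed block-decomposition macros (Quinn-textbook style) */
#define BLOCK_LOW(id,p,n)  ((id)*(n)/(p))
#define BLOCK_HIGH(id,p,n) (BLOCK_LOW((id)+1,p,n)-1)
#define BLOCK_SIZE(id,p,n) (BLOCK_HIGH(id,p,n)-BLOCK_LOW(id,p,n)+1)

int main(void) {
    int p = 4, n = 10;   /* example: 4 processes, 10 total elements */
    int *counts = malloc(p * sizeof(int));
    int *disps  = malloc(p * sizeof(int));

    /* counts[i] = number of elements owned by rank i;
       disps[i]  = offset of rank i's block within the send buffer.
       For this block mapping, the displacement is simply BLOCK_LOW. */
    for (int i = 0; i < p; i++) {
        counts[i] = BLOCK_SIZE(i, p, n);
        disps[i]  = BLOCK_LOW(i, p, n);
    }

    for (int i = 0; i < p; i++)
        printf("rank %d: count=%d disp=%d\n", i, counts[i], disps[i]);

    free(counts);
    free(disps);
    return 0;
}

These counts and disps arrays, plus the root's full buffer as sendbuf, are exactly what the question's MPI_Scatterv call is missing.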

But note also that if you are doing this, it is not much harder to actually use MPI-IO, coordinating the file access directly and avoiding the need to have one process read all of the data first. Code for that is also provided below.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv) {

    int id, p;
    int *block_size;
    int datasize = 0;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);

    block_size = malloc(p * sizeof(int));
    for (int i=0; i<p; i++) {
        block_size[i] = i + 1;
        datasize += block_size[i];
    }

    /* create file for reading */
    if (id == p-1) {
        char *data = malloc(datasize * sizeof(char));
        for (int i=0; i<datasize; i++)
            data[i] = 'a' + i;

        FILE *f = fopen("data.dat","wb");
        fwrite(data, sizeof(char), datasize, f);
        fclose(f);

        printf("Initial data: ");
        for (int i=0; i<datasize; i++)
            printf("%c", data[i]);
        printf("\n");
        free(data);
    }

    if (id == 0) printf("---Using MPI-Scatterv---\n");

    /* using scatterv */

    int local_els = block_size[id];
    char *v = malloc((local_els + 1) * sizeof(char));
    char *all = NULL;   /* send buffer; allocated (and used) only on the root */

    int *counts, *disps;
    counts = malloc(p * sizeof(int));
    disps  = malloc(p * sizeof(int));

    /* counts.. */
    for (int i = 0; i < p; i++)
        counts[i] = block_size[i];

    /* and displacements (where the data starts within the send buffer) */
    disps[0] = 0;
    for (int i = 1; i < p; i++)
        disps[i] = disps[i-1] + counts[i-1];

    if (id == (p-1))
    {
        all = malloc(datasize * sizeof(char));

        FILE *f = fopen("data.dat","rb");
        int x = fread(all, sizeof(char), datasize, f);
        fclose(f);
    }

    MPI_Scatterv(all, counts, disps, MPI_CHAR, v, local_els, MPI_CHAR,
                 p-1, MPI_COMM_WORLD);

    if (id == (p-1)) {
        free(all);
    }

    v[local_els] = '\0';
    printf("[%d]: %s\n", id, v);

    /* using MPI I/O */

    fflush(stdout);
    MPI_Barrier(MPI_COMM_WORLD);   /* only for syncing output to screen */

    if (id == 0) printf("---Using MPI-IO---\n");

    for (int i=0; i<local_els; i++)
        v[i] = 'X';

    /* create the file layout - the subarrays within the 1d array of data */
    MPI_Datatype myview;
    MPI_Type_create_subarray(1, &datasize, &local_els, &(disps[id]),
                             MPI_ORDER_C, MPI_CHAR, &myview);
    MPI_Type_commit(&myview);

    MPI_File mpif;
    MPI_Status status;

    MPI_File_open(MPI_COMM_WORLD, "data.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &mpif);

    MPI_File_set_view(mpif, (MPI_Offset)0, MPI_CHAR, myview, "native", MPI_INFO_NULL);
    MPI_File_read_all(mpif, v, local_els, MPI_CHAR, &status);

    MPI_File_close(&mpif);
    MPI_Type_free(&myview);

    v[local_els] = '\0';
    printf("[%d]: %s\n", id, v);

    free(v);
    free(counts);
    free(disps);
    free(block_size);

    MPI_Finalize();
    return 0;
}

Running this gives (output reordered for clarity):

$ mpirun -np 6 ./foo
Initial data: abcdefghijklmnopqrstu
---Using MPI-Scatterv---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu
---Using MPI-IO---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu

Regarding c - MPI point-to-point to collective communication: MPI_Scatterv trouble, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/15509653/
