
MPI Matrix Computation


Matrix multiplication with MPI

Using MPI_Send and MPI_Recv

// comm_sz is expected to be 4*k+1 for an integer k>=1; n must be divisible by comm_sz-1

#include "mpi.h"
#include <iostream>
#include <cstdlib> // atoi, rand
using namespace std;

void matMulti(int *A,int *B,int *C,int row,int n);
void matGene(int *A,int size,int actual_size);
void printRecv(int *Matrix,int my_rank,int length,int n);

int main(int argc,char* argv[])
{
    int n=atoi(argv[1]); // matrix dimension n, read from the command line

    int my_rank=0,comm_sz=0;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
    MPI_Comm_size(MPI_COMM_WORLD,&comm_sz);
    MPI_Status status;

    int each_row=n/(comm_sz-1),beginRow,endRow; // rows per worker; assumes n%(comm_sz-1)==0
    double beginTime=MPI_Wtime();

    if (my_rank==0)
    {
        int* A =new int [n*n];
        int* B =new int [n*n];
        int* C =new int [n*n];
        cout<<"create A matrix:"<<endl;
        matGene(A,n,n);
        cout<<"create B matrix:"<<endl;
        matGene(B,n,n);

        for (int i=0;i<comm_sz-1;i++)
        {
            beginRow=each_row*i,endRow=each_row*(i+1);
            MPI_Send(&A[beginRow*n+0],each_row*n,MPI_INT,i+1,1,MPI_COMM_WORLD);
            MPI_Send(&B[0*n+0],n*n,MPI_INT,i+1,2,MPI_COMM_WORLD);
        }

        // Recv: C[beginRow: endRow,: ]
        for (int i=0;i<comm_sz-1;i++)
        {
            beginRow=each_row*i,endRow=each_row*(i+1);
            MPI_Recv(&C[beginRow*n+0],each_row*n,MPI_INT,i+1,3,MPI_COMM_WORLD,&status);
            cout << "I am alive in receiving data from process "<<i+1<<endl;
        }
        printRecv(C,my_rank,n*n,n);

        cout<<"cost of main process time is "<<MPI_Wtime()-beginTime<<endl;
    }
    if (my_rank!=0)
    {
        // other processes: calculate A*B
        // beginRow = each_row*(my_rank-1), endRow = each_row * my_rank;

        int* A=new int[each_row * n]; //A[beginRow:endRow,:]
        int* B=new int [n*n];
        int* C= new int[each_row*n]; //C[beginRow:endRow, :]
        MPI_Recv(&A[0*n+0],each_row*n,MPI_INT,0,1,MPI_COMM_WORLD,&status); // receive this worker's block of rows of A
        // printRecv(A,my_rank,each_row*n,n);
        MPI_Recv(&B[0*n+0],n*n,MPI_INT,0,2,MPI_COMM_WORLD,&status); // receive a full copy of B
        // printRecv(B,my_rank,n*n,n);
        matMulti(A,B,C,each_row,n);
        printRecv(C,my_rank,each_row*n,n);
        MPI_Send(&C[0*n+0],each_row*n,MPI_INT,0,3,MPI_COMM_WORLD); // send back this worker's each_row*n block of C
    
        cout<<"cost of time is "<<MPI_Wtime()-beginTime<<" by process rank "<<my_rank<<endl;
    }

    MPI_Finalize();

    return 0;
}

void matMulti(int *A,int *B,int *C,int row,int n)
{
    for (int i=0;i<row;i++)
    {
        for (int j=0;j<n;j++)
        {
            C[i*n+j]=0;
            for (int k=0;k<n;k++)
                C[i*n+j]+=A[i*n+k]*B[k*n+j]; // accumulate the dot product of row i of A and column j of B
        }
    }
}

void matGene(int *A,int size,int actual_size)
{
    // actual_size: the allocated matrix may be larger than size x size; entries outside the size x size block are zero-padded
    for (int i=0;i<actual_size;i++)
    {
        for (int j=0;j<actual_size;j++)
        {
            if (i<size && j<size)
                A[i*actual_size+j]=rand()%10;
            else
                A[i*actual_size+j]=0;
            cout<<A[i*actual_size+j]<<" ";
        }
        cout<<endl;
    }
}

void printRecv(int *Matrix,int my_rank,int length,int n)
{
    cout<<"process "<<my_rank<<" receive Matrix"<<endl;
    for (int i=0;i<length;i++)
    {
        cout<<Matrix[i]<<" ";
        if ((i+1)%n==0)
            cout<<endl;
    }
    cout<<endl;
}
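
Assuming the listing above is saved as matmul_sendrecv.cpp (an illustrative file name), it can be compiled and launched with the usual MPI wrappers, e.g. with 5 processes so that comm_sz-1=4 divides n=8:

mpicxx matmul_sendrecv.cpp -o matmul_sendrecv
mpirun -np 5 ./matmul_sendrecv 8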

Explanation

  1. Process 0 initializes matrices A and B.
  2. Process 0 splits A evenly by rows and sends one block to each of the other processes, together with a full copy of B (process 0 itself does not take part in the computation).
  3. Each worker multiplies its block of A by B and sends the resulting rows of C back to process 0. A sketch of a more general row split is given below.
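
Step 2 relies on comm_sz-1 dividing n exactly: each_row=n/(comm_sz-1) truncates, so any remainder rows are silently dropped. A minimal sketch of a more general split (the helper name rows_for is hypothetical, not part of the listing above):

// Spread n rows over p workers when p need not divide n:
// the first n%p workers each take one extra row.
int rows_for(int worker,int n,int p) // worker index in [0,p)
{
    int base=n/p,rem=n%p;
    return base+(worker<rem?1:0);
}

Rank 0 would then send rows_for(i,n,comm_sz-1)*n elements to worker i+1, advancing a running row offset instead of using each_row*i.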

Using MPI_Bcast, MPI_Scatter, and MPI_Gather

#include "mpi.h"
#include <iostream>
#include <cstdlib> // atoi, rand
using namespace std;

void matMulti(int *A,int *B,int *C,int row,int n);
void matGene(int *A,int size,int actual_size);
void printRecv(int *Matrix,int my_rank,int length,int n);

int main(int argc,char* argv[])
{
    int n=atoi(argv[1]); // matrix dimension n, read from the command line

    int my_rank=0,comm_sz=0;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&my_rank);
    MPI_Comm_size(MPI_COMM_WORLD,&comm_sz);
    MPI_Status status;

    int each_row=n/comm_sz,beginRow,endRow; // rows per process; assumes n%comm_sz==0
    double beginTime=MPI_Wtime();

    int* A =new int [n*n];
    int* B =new int [n*n];
    int* C =new int [n*n];
    int* partA=new int [each_row*n];
    int* partC=new int [each_row*n];

    if (my_rank==0)
    {
        cout<<"create A matrix:"<<endl;
        matGene(A,n,n);
        cout<<"create B matrix:"<<endl;
        matGene(B,n,n);

        cout<<"Bcast and Scatter are finished!"<<endl;
    }

    MPI_Scatter(&A[0*n+0],each_row*n,MPI_INT,&partA[0*n+0],each_row*n,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&B[0*n+0],n*n,MPI_INT,0,MPI_COMM_WORLD);
    
    cout<<"partA: "<<endl;
    printRecv(partA,my_rank,n*each_row,n);
    matMulti(partA,B,partC,each_row,n);
    cout<<"partC: "<<endl;
    printRecv(partC,my_rank,n*each_row,n);
    // MPI_Barrier(MPI_COMM_WORLD);
    MPI_Gather(&partC[0*n+0],each_row*n,MPI_INT,&C[0*n+0],each_row*n,MPI_INT,0,MPI_COMM_WORLD);

    if (my_rank==0)
    {
        cout<<"cost of time is "<<MPI_Wtime()-beginTime<<" by process rank "<<my_rank<<endl;
        printRecv(C,my_rank,n*n,n);
    }
        

    MPI_Finalize();

    return 0;
}

void matMulti(int *A,int *B,int *C,int row,int n)
{
    for (int i=0;i<row;i++)
    {
        for (int j=0;j<n;j++)
        {
            C[i*n+j]=0;
            for (int k=0;k<n;k++)
                C[i*n+j]+=A[i*n+k]*B[k*n+j]; // accumulate the dot product of row i of A and column j of B
        }
    }
}

void matGene(int *A,int size,int actual_size)
{
    // actual_size: the allocated matrix may be larger than size x size; entries outside the size x size block are zero-padded
    for (int i=0;i<actual_size;i++)
    {
        for (int j=0;j<actual_size;j++)
        {
            if (i<size && j<size)
                A[i*actual_size+j]=rand()%10;
            else
                A[i*actual_size+j]=0;
            cout<<A[i*actual_size+j]<<" ";
        }
        cout<<endl;
    }
}

void printRecv(int *Matrix,int my_rank,int length,int n)
{
    cout<<"process "<<my_rank<<" receive Matrix"<<endl;
    for (int i=0;i<length;i++)
    {
        cout<<Matrix[i]<<" ";
        if ((i+1)%n==0)
            cout<<endl;
    }
    cout<<endl;
}
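
One caveat about the timings printed above: beginTime is taken before the data is distributed, and each rank reports its own clock. A common pattern (a sketch, not part of the original listings) is to synchronize before and after the timed region:

MPI_Barrier(MPI_COMM_WORLD); // all ranks start the clock together
double t0=MPI_Wtime();
// ... scatter, multiply, gather ...
MPI_Barrier(MPI_COMM_WORLD); // wait until every rank has finished
if (my_rank==0)
    cout<<"elapsed: "<<MPI_Wtime()-t0<<" s"<<endl;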

Tips

  • MPI_Bcast and MPI_Scatter are collective operations: every process in the communicator must call them. If they are placed only inside the if (my_rank==0) branch, the other processes never receive the data and their buffers stay uninitialized, so the calls belong in the common path of main(). The sketch below also lifts the restriction that comm_sz must divide n.
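
A minimal sketch using MPI_Scatterv/MPI_Gatherv so that n need not be divisible by comm_sz. It reuses A, B, C, partA, partC, n, my_rank, and comm_sz from the listing above (with partA and partC allocated for at least (n/comm_sz+1)*n elements); counts, displs, and my_rows are illustrative names:

// Element counts and starting offsets per rank;
// the first n%comm_sz ranks take one extra row.
int* counts=new int[comm_sz];
int* displs=new int[comm_sz];
for (int r=0,offset=0;r<comm_sz;r++)
{
    int rows=n/comm_sz+(r<n%comm_sz?1:0);
    counts[r]=rows*n;
    displs[r]=offset;
    offset+=counts[r];
}
int my_rows=counts[my_rank]/n;
MPI_Scatterv(A,counts,displs,MPI_INT,partA,counts[my_rank],MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&B[0*n+0],n*n,MPI_INT,0,MPI_COMM_WORLD);
matMulti(partA,B,partC,my_rows,n);
MPI_Gatherv(partC,counts[my_rank],MPI_INT,C,counts,displs,MPI_INT,0,MPI_COMM_WORLD);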

From: https://www.cnblogs.com/Frey-Li/p/16818988.html
