Professional Documents
Culture Documents
Arash Bakhtiari
2013-01-13 Sun
Distributed Memory
Advantages:
I Memory is scalable with the number of processors
I Each processor can rapidly access its own memory
without interference
Disadvantages:
I programmer is responsible:
I
I
I
What is MPI?
I
I
Advantages of MPI
I
I
Core Routines
I
int MPI_Init(int *argc, char ***argv)
I
int MPI_Finalize(void)
I
DEMO
#i n c l u d e <mpi . h>
#i n c l u d e < s t d i o . h>
i n t main ( i n t a r g c , char a r g v ) {
i n t my_rank ;
int size ;
MPI_Init (& a r g c , &a r g v ) ; /START MPI /
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank ) ;
MPI_Comm_size (MPI_COMM_WORLD, & s i z e ) ;
p r i n t f ( " H e l l o w o r l d ! I m r a n k %d . \ n" , my_rank ) ;
MPI_Finalize ( ) ;
}
/ EXIT MPI /
Communication Routines
MPI_Send:
MPI_Recv:
int MPI_Bcast(void *buffer, int count,
              MPI_Datatype datatype,
              int root, MPI_Comm comm)
I
DEMO
#i n c l u d e " mpi . h"
#i n c l u d e < s t d i o . h>
i n t main ( i n t a r g c , char a r g v [ ] )
{
i n t rank , s i z e , i ;
i n t b u f f e r [ 10 ] ;
MPI_Status s t a t u s ;
MPI_Init (& a r g c , &a r g v ) ;
MPI_Comm_size (MPI_COMM_WORLD, & s i z e ) ;
MPI_Comm_rank(MPI_COMM_WORLD, &r a n k ) ;
i f ( r a n k == 0 )
{
f o r ( i =0; i <10; i ++)
buffer [ i ] = i ;
MPI_Send ( b u f f e r , 1 0 , MPI_INT ,
1 , 1 2 3 , MPI_COMM_WORLD) ;
}
DEMO
i f ( r a n k == 1 )
{
f o r ( i =0; i <10; i ++)
b u f f e r [ i ] = 1;
MPI_Recv ( b u f f e r , 1 0 , MPI_INT ,
0 , 1 2 3 , MPI_COMM_WORLD,
&s t a t u s ) ;
f o r ( i =0; i <10; i ++)
{
p r i n t f ( " b u f f e r [%d ] = %d\n" , i , b u f f e r [ i ] ) ;
}
f f l u s h ( stdout );
}
MPI_Finalize ( ) ;
return 0;
References