I've got a one-to-all broadcast routine for a hypercube, written using MPI:
one2allbcast(int n, int rank, void *data, int count, MPI_Datatype dtype)
{
  MPI_Status status;
  int mask, partner;
  int mask2 = ((1 << n) - 1) ^ (1 << n-1);

  for (mask = (1 << n-1); mask; mask >>= 1, mask2 >>= 1)
  {
    if (rank & mask2 == 0)
    {
      partner = rank ^ mask;
      if (rank & mask)
        MPI_Recv(data, count, dtype, partner, 99, MPI_COMM_WORLD, &status);
      else
        MPI_Send(data, count, dtype, partner, 99, MPI_COMM_WORLD);
    }
  }
}
Upon calling it from main:
int main( int argc, char **argv )
{
  int n, rank;

  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &n);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  one2allbcast(floor(log(n) / log (2)), rank, "message", sizeof(message), MPI_CHAR);

  MPI_Finalize();
  return 0;
}
Compiling and executing on 8 nodes, I get a series of errors reporting that processes 1, 3, 5, and 7 were stopped before ever receiving any data:
MPI_Recv: process in local group is dead (rank 1, MPI_COMM_WORLD)
Rank (1, MPI_COMM_WORLD): Call stack within LAM:
Rank (1, MPI_COMM_WORLD): - MPI_Recv()
Rank (1, MPI_COMM_WORLD): - main()
MPI_Recv: process in local group is dead (rank 3, MPI_COMM_WORLD)
Rank (3, MPI_COMM_WORLD): Call stack within LAM:
Rank (3, MPI_COMM_WORLD): - MPI_Recv()
Rank (3, MPI_COMM_WORLD): - main()
MPI_Recv: process in local group is dead (rank 5, MPI_COMM_WORLD)
Rank (5, MPI_COMM_WORLD): Call stack within LAM:
Rank (5, MPI_COMM_WORLD): - MPI_Recv()
Rank (5, MPI_COMM_WORLD): - main()
MPI_Recv: process in local group is dead (rank 7, MPI_COMM_WORLD)
Rank (7, MPI_COMM_WORLD): Call stack within LAM:
Rank (7, MPI_COMM_WORLD): - MPI_Recv()
Rank (7, MPI_COMM_WORLD): - main()
Where am I going wrong?
It turns out that the error was in the line
if (rank & mask2 == 0)
where I hadn't accounted for operator precedence. In C, == binds more tightly than &, so the test is parsed as rank & (mask2 == 0): that is false for every rank while mask2 is non-zero, and in the last iteration (when mask2 reaches 0) it is true only for the odd ranks, which then block in MPI_Recv while the even ranks never send and run on to MPI_Finalize, producing the "process is dead" errors on ranks 1, 3, 5 and 7. The correct and working way of writing it is
if ((rank & mask2) == 0)
where the parentheses force the bitwise & to be evaluated first.
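To see the precedence pitfall in isolation, here is a small stand-alone C snippet (my own illustration; rank = 4 and mask2 = 3 are chosen purely as example values) that prints how the two forms differ:

#include <stdio.h>

int main(void)
{
  int rank  = 4;  /* binary 100 */
  int mask2 = 3;  /* binary 011 */

  /* Parsed as rank & (mask2 == 0): mask2 is non-zero, so this is 4 & 0, i.e. 0. */
  int buggy = rank & mask2 == 0;

  /* The parentheses make the bitwise AND happen first: (4 & 3) == 0 is 1 (true). */
  int correct = (rank & mask2) == 0;

  printf("buggy = %d, correct = %d\n", buggy, correct);  /* prints: buggy = 0, correct = 1 */
  return 0;
}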
This is also a common error when an MPI communication is requested after MPI_Finalize has been called. Before calling MPI_Finalize, check that all MPI calls have completed.
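As a minimal sketch of that failure mode (my own two-process example, not code from the question): one rank blocks in MPI_Recv for a message that is never sent, its partner falls straight through to MPI_Finalize and exits, and the runtime then reports the blocked receiver's peer as dead, much like the output above.

#include <mpi.h>

int main(int argc, char **argv)
{
  int rank;
  char buf[8];
  MPI_Status status;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 1)
    /* Rank 1 waits for a message that rank 0 never sends... */
    MPI_Recv(buf, 8, MPI_CHAR, 0, 99, MPI_COMM_WORLD, &status);

  /* ...so rank 0 reaches MPI_Finalize and exits while rank 1 is still
     blocked in the receive, and the MPI runtime reports its peer as dead. */
  MPI_Finalize();
  return 0;
}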
// A C# (MPI.NET) implementation of one-to-all broadcast on a hypercube
using System;
using MPI;

class Program
{
  static void Main(string[] args)
  {
    using (new MPI.Environment(ref args))
    {
      string strMsg = "msg";
      Intracommunicator comm = Communicator.world;
      int my_id = comm.Rank;

      // Dimension of the hypercube: d = log2(number of processes).
      int d = Convert.ToInt32(Math.Log(comm.Size, 2));

      int power2i = 0;
      int msgDestination = 0, msgSource = 0;

      // Start with the d low-order bits all set; one bit is cleared per step.
      int mask = Convert.ToInt32(Math.Pow(2, d)) - 1;

      comm.Barrier();
      for (int i = d - 1; i >= 0; i--)
      {
        power2i = Convert.ToInt32(Math.Pow(2, i));
        mask = mask ^ power2i;  // clear bit i of the mask

        // Only processes whose i low-order bits are all zero take part in this step.
        if ((my_id & mask) == 0)
        {
          if ((my_id & power2i) == 0)
          {
            // Bit i is 0: this process already holds the message; forward it
            // to its partner across dimension i.
            msgDestination = my_id ^ power2i;
            comm.Send<string>(strMsg, msgDestination, 1);
            Console.WriteLine("process: " + my_id + "- sent: " + strMsg + " to: " + msgDestination + "-@: " + DateTime.Now.Millisecond);
          }
          else
          {
            // Bit i is 1: receive the message from the partner across dimension i.
            msgSource = my_id ^ power2i;
            strMsg = comm.Receive<string>(msgSource, 1);
            //Console.WriteLine("process: " + my_id + "- received: " + strMsg + " from: " + msgSource);
          }
        }
      }
    }
  }
}
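Assuming MPI.NET is installed, a program like this is typically launched through mpiexec, for example mpiexec -n 8 Program.exe (the executable name here is just a placeholder). The process count should be a power of two so that d = log2(comm.Size) is exact and every rank has a partner in each dimension.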