506      int nGlobalMols = info->getNGlobalMolecules();
507      std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
508
509 <    MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
509 >    nProcessors = MPI::COMM_WORLD.Get_size();
510
511      if (nProcessors > nGlobalMols) {
512        sprintf(painCave.errMsg,
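The hunk at line 509 swaps the C binding for the MPI-2 C++ binding when querying the communicator size. Below is a minimal sketch, not part of the patch, contrasting the two calls; it assumes an MPI installation that still ships the C++ bindings (they were deprecated in MPI-2.2 and removed in MPI-3).

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);

      // New style used by the patch: C++ binding, returns the size directly.
      int nProcessors = MPI::COMM_WORLD.Get_size();

      // Old style being replaced: C binding, writes the size through a pointer.
      int nProcessorsC = 0;
      MPI_Comm_size(MPI_COMM_WORLD, &nProcessorsC);

      MPI::Finalize();
      return nProcessors == nProcessorsC ? 0 : 1;
    }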
544      nTarget = (int)(precast + 0.5);
545
546      for(i = 0; i < nGlobalMols; i++) {
547 +
548        done = 0;
549        loops = 0;
550
569        // and be done with it.
570
571        if (loops > 100) {
572 +
573          sprintf(painCave.errMsg,
574 <                "I've tried 100 times to assign molecule %d to a "
575 <                " processor, but can't find a good spot.\n"
576 <                "I'm assigning it at random to processor %d.\n",
577 <                i, which_proc);
578 <
574 >                "There have been 100 attempts to assign molecule %d to an\n"
575 >                "\tunderworked processor, but there's no good place to\n"
576 >                "\tleave it. OpenMD is assigning it at random to processor %d.\n",
577 >                i, which_proc);
578 >
579          painCave.isFatal = 0;
580 +        painCave.severity = OPENMD_INFO;
581          simError();
582
583          molToProcMap[i] = which_proc;
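For orientation, the retry logic these hunks touch follows a simple pattern on processor 0: try to place molecule i on a candidate processor, and after 100 failed attempts keep the last random candidate and report it as a non-fatal informational message. The sketch below is hedged and not from the patch; acceptsMolecule() is a hypothetical stand-in for OpenMD's actual load-balance test.

    #include <cstdlib>

    // Sketch only: mirrors the loops/done/which_proc flow visible in the hunk,
    // with a hypothetical acceptance test in place of the real criterion.
    int assignMolecule(int i, int nProcessors,
                       bool (*acceptsMolecule)(int proc, int mol)) {
      int which_proc = 0;
      int loops = 0;
      bool done = false;

      while (!done) {
        which_proc = std::rand() % nProcessors;  // candidate processor
        done = acceptsMolecule(which_proc, i);   // hypothetical test
        loops++;

        if (loops > 100) {
          // Give up: accept the random candidate (the patched code reports
          // this through painCave/simError() as an OPENMD_INFO message).
          done = true;
        }
      }
      return which_proc;
    }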
622      }
623
624      delete myRandom;
625 <
625 >
626      // Spray out this nonsense to all other processors:
627 <
625 <    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
627 >    MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
628    } else {
629
630      // Listen to your marching orders from processor 0:
631 <
632 <    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
631 >    MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
632 >
633    }
634
635    info->setMolToProcMap(molToProcMap);
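The two Bcast hunks implement the usual root-broadcast pattern: processor 0 builds the molecule-to-processor map and every other rank receives it through the same collective call. A minimal sketch of that pattern, assuming the C++ bindings used by the new code (shareMolToProcMap is an illustrative name, not a function from the patch):

    #include <mpi.h>
    #include <vector>

    // Sketch only: every rank calls the same Bcast; rank 0 supplies the data,
    // all other ranks overwrite their local copy with it.
    void shareMolToProcMap(std::vector<int>& molToProcMap) {
      int nGlobalMols = static_cast<int>(molToProcMap.size());

      if (MPI::COMM_WORLD.Get_rank() == 0) {
        // ... rank 0 fills molToProcMap here (assignment loop shown above) ...
      }

      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
    }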
853    // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
854    // docs said we could.
855    std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
856 <  MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
857 <                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
856 >  MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
857 >                            &tmpGroupMembership[0], nGlobalAtoms,
858 >                            MPI::INT, MPI::SUM);
859    info->setGlobalGroupMembership(tmpGroupMembership);
860  #else
861    info->setGlobalGroupMembership(globalGroupMembership);
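The comment at line 853 refers to MPI_IN_PLACE, which would remove the temporary receive buffer entirely. A hedged sketch of that alternative, assuming an MPI-2-conforming implementation (shown with the C binding, where the in-place constant is the familiar MPI_IN_PLACE; the function name is illustrative only):

    #include <mpi.h>
    #include <vector>

    // Sketch only: with MPI_IN_PLACE the reduction result lands directly in
    // the caller's vector, so no tmpGroupMembership copy is needed.
    void sumGroupMembershipInPlace(std::vector<int>& globalGroupMembership) {
      MPI_Allreduce(MPI_IN_PLACE, &globalGroupMembership[0],
                    static_cast<int>(globalGroupMembership.size()),
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    }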
872
873  #ifdef IS_MPI
874    std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
875 <
876 <  MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
877 <                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
875 >  MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
876 >                            nGlobalAtoms,
877 >                            MPI::INT, MPI::SUM);
878
879    info->setGlobalMolMembership(tmpMolMembership);
880  #else
891
892  #ifdef IS_MPI
893    std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
894 <  MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
895 <                info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
894 >  MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
895 >                            info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
896  #else
897    std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
898  #endif