| 112 |   | AtomCommIntRow->gather(idents, identsRow); |
| 113 |   | AtomCommIntColumn->gather(idents, identsCol); |
| 114 |   | |
| 115 | + | // allocate memory for the parallel objects |
| 116 | + | AtomRowToGlobal.resize(nAtomsInRow_); |
| 117 | + | AtomColToGlobal.resize(nAtomsInCol_); |
| 118 | + | cgRowToGlobal.resize(nGroupsInRow_); |
| 119 | + | cgColToGlobal.resize(nGroupsInCol_); |
| 120 | + | massFactorsRow.resize(nAtomsInRow_); |
| 121 | + | massFactorsCol.resize(nAtomsInCol_); |
| 122 | + | pot_row.resize(nAtomsInRow_); |
| 123 | + | pot_col.resize(nAtomsInCol_); |
| 124 | + | |
| 125 |   | AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal); |
| 126 |   | AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal); |
| 127 |   | |
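The added `resize` calls at lines 115-123 pre-size the row- and column-distributed index, mass-factor, and potential arrays before the gathers at lines 125-126 populate the global-index maps. A minimal standalone sketch of that resize-then-gather pattern, using plain `std::vector` and an invented `gatherRow` stand-in rather than OpenMD's communicator classes:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

// Stand-in for a row communicator's gather: copies the local identifiers into
// the pre-sized destination (a real gather would collect from every processor
// that shares this row of the force matrix).
void gatherRow(const std::vector<int>& local, std::vector<int>& row) {
  // The caller is expected to have resized `row` already, mirroring the
  // AtomRowToGlobal.resize(nAtomsInRow_) calls in the diff.
  std::copy(local.begin(), local.end(), row.begin());
}

int main() {
  const std::size_t nAtomsLocal = 4;
  const std::size_t nAtomsInRow = 4;  // pretend one processor owns this row

  std::vector<int> atomLocalToGlobal(nAtomsLocal);
  std::iota(atomLocalToGlobal.begin(), atomLocalToGlobal.end(), 100);

  std::vector<int> atomRowToGlobal;
  atomRowToGlobal.resize(nAtomsInRow);  // allocate before the gather
  gatherRow(atomLocalToGlobal, atomRowToGlobal);

  for (int id : atomRowToGlobal) std::cout << id << ' ';
  std::cout << '\n';
  return 0;
}
```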
| 341 |   | |
| 342 |   | // Now we find the maximum group cutoff value present in the simulation |
| 343 |   | |
| 344 | < | RealType groupMax = *max_element(gTypeCutoffs.begin(), gTypeCutoffs.end()); |
| 344 | > | RealType groupMax = *max_element(gTypeCutoffs.begin(), |
| 345 | > |                                  gTypeCutoffs.end()); |
| 346 |   | |
| 347 |   | #ifdef IS_MPI |
| 348 | < | MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE, MPI::MAX); |
| 348 | > | MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE, |
| 349 | > |                           MPI::MAX); |
| 350 |   | #endif |
| 351 |   | |
| 352 |   | RealType tradRcut = groupMax; |
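The reflowed `Allreduce` at lines 348-349 is the same max-reduction as before, only wrapped to fit the line width: after it, every processor holds the largest group cutoff found anywhere in the simulation. For reference, a hedged sketch of the same reduction written against the C MPI interface, assuming `RealType` maps to `double`; `MPI_IN_PLACE` avoids passing the same address as both send and receive buffer, which the two-buffer form of the standard does not permit:

```cpp
#include <mpi.h>

#include <iostream>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Each rank proposes its own local maximum group cutoff (made-up values).
  double groupMax = 2.5 + 0.1 * rank;

  // After the in-place max reduction, every rank holds the global maximum.
  MPI_Allreduce(MPI_IN_PLACE, &groupMax, 1, MPI_DOUBLE, MPI_MAX,
                MPI_COMM_WORLD);

  if (rank == 0) std::cout << "global groupMax = " << groupMax << '\n';

  MPI_Finalize();
  return 0;
}
```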
| 442 |   |        Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); |
| 443 |   | |
| 444 |   | if (storageLayout_ & DataStorage::dslParticlePot) { |
| 445 | < |   fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0); |
| 446 | < |   fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0); |
| 445 | > |   fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), |
| 446 | > |        0.0); |
| 447 | > |   fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), |
| 448 | > |        0.0); |
| 449 |   | } |
| 450 |   | |
| 451 |   | if (storageLayout_ & DataStorage::dslDensity) { |
| 454 |   | } |
| 455 |   | |
| 456 |   | if (storageLayout_ & DataStorage::dslFunctional) { |
| 457 | < |   fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0); |
| 458 | < |   fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0); |
| 457 | > |   fill(atomRowData.functional.begin(), atomRowData.functional.end(), |
| 458 | > |        0.0); |
| 459 | > |   fill(atomColData.functional.begin(), atomColData.functional.end(), |
| 460 | > |        0.0); |
| 461 |   | } |
| 462 |   | |
| 463 |   | if (storageLayout_ & DataStorage::dslFunctionalDerivative) { |
| 474 |   |        atomColData.skippedCharge.end(), 0.0); |
| 475 |   | } |
| 476 |   | |
| 477 | < | #else |
| 478 | < | |
| 477 | > | #endif |
| 478 | > | // even in parallel, we need to zero out the local arrays: |
| 479 | > | |
| 480 |   | if (storageLayout_ & DataStorage::dslParticlePot) { |
| 481 |   |   fill(snap_->atomData.particlePot.begin(), |
| 482 |   |        snap_->atomData.particlePot.end(), 0.0); |
| 498 |   |   fill(snap_->atomData.skippedCharge.begin(), |
| 499 |   |        snap_->atomData.skippedCharge.end(), 0.0); |
| 500 |   | } |
| 484 | – | #endif |
| 501 |   | |
| 502 |   | } |
| 503 |   | |
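Replacing `#else` with `#endif` at line 477 (and dropping the matching `#endif` at old line 484) closes the `IS_MPI` block before the local zero-fills, so `snap_->atomData` is cleared in parallel builds as well as serial ones, as the new comment at line 478 notes. A compilable sketch of the corrected structure, using simplified stand-in types rather than OpenMD's `DataStorage`:

```cpp
#include <algorithm>
#include <vector>

// Simplified stand-ins for the local and row/column data blocks.
struct LocalData  { std::vector<double> particlePot; };
struct RowColData { std::vector<double> particlePot; };

void zeroWorkArrays(LocalData& local, RowColData& row, RowColData& col) {
#ifdef IS_MPI
  // Row- and column-distributed scratch arrays exist only in parallel builds.
  std::fill(row.particlePot.begin(), row.particlePot.end(), 0.0);
  std::fill(col.particlePot.begin(), col.particlePot.end(), 0.0);
#endif
  // even in parallel, we need to zero out the local arrays:
  std::fill(local.particlePot.begin(), local.particlePot.end(), 0.0);
}
```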
| 534 |   |   AtomCommMatrixColumn->gather(snap_->atomData.electroFrame, |
| 535 |   |                                atomColData.electroFrame); |
| 536 |   | } |
| 537 | + | |
| 538 |   | #endif |
| 539 |   | } |
| 540 |   | |
| 601 |   | AtomCommVectorColumn->scatter(atomColData.force, frc_tmp); |
| 602 |   | for (int i = 0; i < n; i++) |
| 603 |   |   snap_->atomData.force[i] += frc_tmp[i]; |
| 604 | < | |
| 588 | < | |
| 604 | > | |
| 605 |   | if (storageLayout_ & DataStorage::dslTorque) { |
| 606 |   | |
| 607 |   |   int nt = snap_->atomData.torque.size(); |
| 625 |   | |
| 626 |   | AtomCommRealRow->scatter(atomRowData.skippedCharge, skch_tmp); |
| 627 |   | for (int i = 0; i < ns; i++) { |
| 628 | < |   snap_->atomData.skippedCharge[i] = skch_tmp[i]; |
| 628 | > |   snap_->atomData.skippedCharge[i] += skch_tmp[i]; |
| 629 |   |   skch_tmp[i] = 0.0; |
| 630 |   | } |
| 631 |   | |
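The change at line 628 switches the skipped-charge scatter from assignment to accumulation, matching the `+=` already used for the force scatter at line 603: if another contribution (for example a column scatter following the same pattern) is folded into `snap_->atomData.skippedCharge`, plain assignment would silently overwrite it. A toy illustration of the difference, with made-up values:

```cpp
#include <iostream>
#include <vector>

int main() {
  const int ns = 3;
  std::vector<double> local(ns, 0.0);
  std::vector<double> rowContribution{0.1, 0.2, 0.3};
  std::vector<double> colContribution{0.4, 0.5, 0.6};

  // Fold in the row scatter result.
  for (int i = 0; i < ns; i++) local[i] += rowContribution[i];
  // Fold in the column scatter result; with '=' this would erase the row part.
  for (int i = 0; i < ns; i++) local[i] += colContribution[i];

  for (double q : local) std::cout << q << ' ';
  std::cout << '\n';  // prints 0.5 0.7 0.9
  return 0;
}
```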