| 72 |  | forceField_(ff), simParams_(simParams), | 
| 73 |  | ndf_(0), fdf_local(0), ndfRaw_(0), ndfTrans_(0), nZconstraint_(0), | 
| 74 |  | nGlobalMols_(0), nGlobalAtoms_(0), nGlobalCutoffGroups_(0), | 
| 75 | < | nGlobalIntegrableObjects_(0), nGlobalRigidBodies_(0), | 
| 75 | > | nGlobalIntegrableObjects_(0), nGlobalRigidBodies_(0), nGlobalFluctuatingCharges_(0), | 
| 76 |  | nAtoms_(0), nBonds_(0),  nBends_(0), nTorsions_(0), nInversions_(0), | 
| 77 |  | nRigidBodies_(0), nIntegrableObjects_(0), nCutoffGroups_(0), | 
| 78 | < | nConstraints_(0), sman_(NULL), topologyDone_(false), | 
| 78 | > | nConstraints_(0), nFluctuatingCharges_(0), sman_(NULL), topologyDone_(false), | 
| 79 |  | calcBoxDipole_(false), useAtomicVirial_(true) { | 
| 80 |  |  | 
| 81 |  | MoleculeStamp* molStamp; | 
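The two new initializer-list entries above (`nGlobalFluctuatingCharges_` and `nFluctuatingCharges_`) assume matching integer counters declared in SimInfo.hpp. A minimal standalone sketch of that zero-initialization pattern, using the member names from the diff but an assumed `int` type and a toy class rather than the actual SimInfo declaration:

```cpp
// Sketch only: mirrors the counter names added in the diff, but the struct,
// the int type, and main() are illustrative assumptions, not OpenMD code.
struct FlucQCounters {
  FlucQCounters()
    : nFluctuatingCharges_(0),       // fluctuating-charge sites owned by this processor
      nGlobalFluctuatingCharges_(0)  // sum over all processors, filled in by calcNdf()
  {}

  int nFluctuatingCharges_;
  int nGlobalFluctuatingCharges_;
};

int main() {
  FlucQCounters c;
  return c.nGlobalFluctuatingCharges_;  // still 0 until the MPI reduction runs
}
```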
| 225 |  |  | 
| 226 |  |  | 
| 227 |  | void SimInfo::calcNdf() { | 
| 228 | < | int ndf_local; | 
| 228 | > | int ndf_local, nfq_local; | 
| 229 |  | MoleculeIterator i; | 
| 230 |  | vector<StuntDouble*>::iterator j; | 
| 231 | + | vector<Atom*>::iterator k; | 
| 232 | + |  | 
| 233 |  | Molecule* mol; | 
| 234 |  | StuntDouble* integrableObject; | 
| 235 | + | Atom* atom; | 
| 236 |  |  | 
| 237 |  | ndf_local = 0; | 
| 238 | + | nfq_local = 0; | 
| 239 |  |  | 
| 240 |  | for (mol = beginMolecule(i); mol != NULL; mol = nextMolecule(i)) { | 
| 241 |  | for (integrableObject = mol->beginIntegrableObject(j); integrableObject != NULL; | 
| 250 |  | ndf_local += 3; | 
| 251 |  | } | 
| 252 |  | } | 
| 249 | – |  | 
| 253 |  | } | 
| 254 | + | for (atom = mol->beginFluctuatingCharge(k); atom != NULL; | 
| 255 | + | atom = mol->nextFluctuatingCharge(k)) { | 
| 256 | + | if (atom->isFluctuatingCharge()) { | 
| 257 | + | nfq_local++; | 
| 258 | + | } | 
| 259 | + | } | 
| 260 |  | } | 
| 261 |  |  | 
| 262 |  | // n_constraints is local, so subtract them on each processor | 
| 264 |  |  | 
| 265 |  | #ifdef IS_MPI | 
| 266 |  | MPI_Allreduce(&ndf_local,&ndf_,1,MPI_INT,MPI_SUM, MPI_COMM_WORLD); | 
| 267 | + | MPI_Allreduce(&nfq_local,&nGlobalFluctuatingCharges_,1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 268 |  | #else | 
| 269 |  | ndf_ = ndf_local; | 
| 270 | + | nGlobalFluctuatingCharges_ = nfq_local; | 
| 271 |  | #endif | 
| 272 |  |  | 
| 273 |  | // nZconstraints_ is global, as are the 3 COM translations for the | 
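The calcNdf() hunk follows a count-then-reduce pattern: each processor tallies the fluctuating-charge sites it owns, and `MPI_Allreduce` with `MPI_SUM` gives every rank the global total. A self-contained sketch of that pattern, with a stand-in `Site` type replacing the molecule/atom iteration (none of the names below are OpenMD API):

```cpp
// Standalone sketch of the counting + MPI_SUM reduction used in calcNdf().
// Site and localSites are assumptions standing in for the per-molecule atom loop;
// the isFluctuatingCharge check mirrors the predicate in the diff.
#include <mpi.h>
#include <vector>

struct Site {
  bool isFluctuatingCharge;
};

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  // Stand-in for the atoms owned by this processor.
  std::vector<Site> localSites = { {true}, {false}, {true} };

  int nfq_local = 0;
  for (const Site& s : localSites) {
    if (s.isFluctuatingCharge) {
      nfq_local++;
    }
  }

  // Every rank receives the global count of fluctuating-charge sites.
  int nGlobalFluctuatingCharges = 0;
  MPI_Allreduce(&nfq_local, &nGlobalFluctuatingCharges, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}
```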
| 791 |  | int usesElectrostatic = 0; | 
| 792 |  | int usesMetallic = 0; | 
| 793 |  | int usesDirectional = 0; | 
| 794 | + | int usesFluctuatingCharges = 0; | 
| 795 |  | //loop over all of the atom types | 
| 796 |  | for (i = atomTypes.begin(); i != atomTypes.end(); ++i) { | 
| 797 |  | usesElectrostatic |= (*i)->isElectrostatic(); | 
| 798 |  | usesMetallic |= (*i)->isMetal(); | 
| 799 |  | usesDirectional |= (*i)->isDirectional(); | 
| 800 | + | usesFluctuatingCharges |= (*i)->isFluctuatingCharge(); | 
| 801 |  | } | 
| 802 |  |  | 
| 803 |  | #ifdef IS_MPI | 
| 810 |  |  | 
| 811 |  | temp = usesElectrostatic; | 
| 812 |  | MPI_Allreduce(&temp, &usesElectrostaticAtoms_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); | 
| 813 | + |  | 
| 814 | + | temp = usesFluctuatingCharges; | 
| 815 | + | MPI_Allreduce(&temp, &usesFluctuatingCharges_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); | 
| 816 |  | #else | 
| 817 |  |  | 
| 818 |  | usesDirectionalAtoms_ = usesDirectional; | 
| 819 |  | usesMetallicAtoms_ = usesMetallic; | 
| 820 |  | usesElectrostaticAtoms_ = usesElectrostatic; | 
| 821 | + | usesFluctuatingCharges_ = usesFluctuatingCharges; | 
| 822 |  |  | 
| 823 |  | #endif | 
| 824 |  |  |
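The atom-type hunk accumulates a capability flag with bitwise OR over the local atom types, then combines the per-processor answers with `MPI_LOR`, so every rank learns whether any processor has a fluctuating-charge atom type. A self-contained sketch of that flag-aggregation pattern, with an assumed `AtomTypeInfo` stand-in for the real AtomType:

```cpp
// Standalone sketch of the flag-OR + MPI_LOR reduction used above.
// AtomTypeInfo and localTypes are assumptions; only the overall pattern
// (bitwise OR locally, logical OR globally) comes from the diff.
#include <mpi.h>
#include <vector>

struct AtomTypeInfo {
  bool fluctuatingCharge;
};

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  // Stand-in for the atom types present on this processor.
  std::vector<AtomTypeInfo> localTypes = { {false}, {true} };

  int usesFluctuatingCharges = 0;
  for (const AtomTypeInfo& t : localTypes) {
    usesFluctuatingCharges |= t.fluctuatingCharge;  // bool promotes to int, as in the diff
  }

  // Logical OR across processors: nonzero if any rank saw such an atom type.
  int usesFluctuatingChargesGlobal = 0;
  MPI_Allreduce(&usesFluctuatingCharges, &usesFluctuatingChargesGlobal, 1,
                MPI_INT, MPI_LOR, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}
```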