# | Line 36 | Line 36 | |
---|---|---|
36 | * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005). | |
37 | * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006). | |
38 | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008). | |
39 | < | * [4] Vardeman & Gezelter, in progress (2009). |
39 | > | * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010). |
40 | > | * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011). |
41 | */ | |
42 | ||
43 | /** | |
# | Line 54 | Line 55 | |
55 | #include "math/Vector3.hpp" | |
56 | #include "primitives/Molecule.hpp" | |
57 | #include "primitives/StuntDouble.hpp" | |
57 | – | #include "UseTheForce/DarkSide/neighborLists_interface.h" |
58 | – | #include "UseTheForce/doForces_interface.h" |
58 | #include "utils/MemoryUtils.hpp" | |
59 | #include "utils/simError.h" | |
60 | #include "selection/SelectionManager.hpp" | |
61 | #include "io/ForceFieldOptions.hpp" | |
62 | #include "UseTheForce/ForceField.hpp" | |
63 | #include "nonbonded/SwitchingFunction.hpp" | |
65 | – | |
64 | #ifdef IS_MPI | |
65 | < | #include "UseTheForce/mpiComponentPlan.h" |
66 | < | #include "UseTheForce/DarkSide/simParallel_interface.h" |
69 | < | #endif |
65 | > | #include <mpi.h> |
66 | > | #endif |
67 | ||
68 | using namespace std; | |
69 | namespace OpenMD { | |
# | Line 75 | Line 72 | namespace OpenMD { | |
72 | forceField_(ff), simParams_(simParams), | |
73 | ndf_(0), fdf_local(0), ndfRaw_(0), ndfTrans_(0), nZconstraint_(0), | |
74 | nGlobalMols_(0), nGlobalAtoms_(0), nGlobalCutoffGroups_(0), | |
75 | < | nGlobalIntegrableObjects_(0), nGlobalRigidBodies_(0), |
75 | > | nGlobalIntegrableObjects_(0), nGlobalRigidBodies_(0), nGlobalFluctuatingCharges_(0), |
76 | nAtoms_(0), nBonds_(0), nBends_(0), nTorsions_(0), nInversions_(0), | |
77 | nRigidBodies_(0), nIntegrableObjects_(0), nCutoffGroups_(0), | |
78 | < | nConstraints_(0), sman_(NULL), fortranInitialized_(false), |
78 | > | nConstraints_(0), nFluctuatingCharges_(0), sman_(NULL), topologyDone_(false), |
79 | calcBoxDipole_(false), useAtomicVirial_(true) { | |
80 | ||
81 | MoleculeStamp* molStamp; | |
# | Line 132 | Line 129 | namespace OpenMD { | |
129 | //equal to the total number of atoms minus the number of atoms belonging | |
130 | //to cutoff groups defined in the meta-data file, plus the number of | |
131 | //cutoff groups defined in the meta-data file | |
135 | – | std::cerr << "nGA = " << nGlobalAtoms_ << "\n"; |
136 | – | std::cerr << "nCA = " << nCutoffAtoms << "\n"; |
137 | – | std::cerr << "nG = " << nGroups << "\n"; |
132 | ||
133 | nGlobalCutoffGroups_ = nGlobalAtoms_ - nCutoffAtoms + nGroups; | |
140 | – | |
141 | – | std::cerr << "nGCG = " << nGlobalCutoffGroups_ << "\n"; |
134 | ||
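To make the bookkeeping concrete with hypothetical numbers: if nGlobalAtoms_ = 100 and the meta-data file defines 10 cutoff groups covering 30 atoms, then nCutoffAtoms = 30, nGroups = 10, and nGlobalCutoffGroups_ = 100 - 30 + 10 = 80; the 70 atoms outside any defined group each count as a group of one, plus the 10 explicit groups.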
135 | //every free atom (an atom that does not belong to a rigid body) is an | |
136 | //integrable object; therefore the total number of integrable objects | |
# | Line 233 | Line 225 | namespace OpenMD { | |
225 | ||
226 | ||
227 | void SimInfo::calcNdf() { | |
228 | < | int ndf_local; |
228 | > | int ndf_local, nfq_local; |
229 | MoleculeIterator i; | |
230 | vector<StuntDouble*>::iterator j; | |
231 | + | vector<Atom*>::iterator k; |
232 | + | |
233 | Molecule* mol; | |
234 | StuntDouble* integrableObject; | |
235 | + | Atom* atom; |
236 | ||
237 | ndf_local = 0; | |
238 | + | nfq_local = 0; |
239 | ||
240 | for (mol = beginMolecule(i); mol != NULL; mol = nextMolecule(i)) { | |
241 | for (integrableObject = mol->beginIntegrableObject(j); integrableObject != NULL; | |
# | Line 254 | Line 250 | namespace OpenMD { | |
250 | ndf_local += 3; | |
251 | } | |
252 | } | |
257 | – | |
253 | } | |
254 | + | for (atom = mol->beginFluctuatingCharge(k); atom != NULL; |
255 | + | atom = mol->nextFluctuatingCharge(k)) { |
256 | + | if (atom->isFluctuatingCharge()) { |
257 | + | nfq_local++; |
258 | + | } |
259 | + | } |
260 | } | |
261 | ||
262 | // n_constraints is local, so subtract them on each processor | |
# | Line 263 | Line 264 | namespace OpenMD { | |
264 | ||
265 | #ifdef IS_MPI | |
266 | MPI_Allreduce(&ndf_local,&ndf_,1,MPI_INT,MPI_SUM, MPI_COMM_WORLD); | |
267 | + | MPI_Allreduce(&nfq_local,&nGlobalFluctuatingCharges_,1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
268 | #else | |
269 | ndf_ = ndf_local; | |
270 | + | nGlobalFluctuatingCharges_ = nfq_local; |
271 | #endif | |
272 | ||
273 | // nZconstraints_ is global, as are the 3 COM translations for the | |
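The same hypothetical-numbers treatment for the degrees-of-freedom tally: a rank owning 100 point particles with 50 local distance constraints contributes ndf_local = 3*100 - 50 = 250. MPI_Allreduce with MPI_SUM then adds the per-rank totals (for both ndf_ and the new nGlobalFluctuatingCharges_), and, as the truncated comment above suggests, system-wide quantities such as the 3 center-of-mass translations and the Z-constraints are removed only once, after the reduction, because they belong to the whole system rather than to any single processor.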
# | Line 281 | Line 284 | namespace OpenMD { | |
284 | #endif | |
285 | return fdf_; | |
286 | } | |
287 | + | |
288 | + | unsigned int SimInfo::getNLocalCutoffGroups(){ |
289 | + | int nLocalCutoffAtoms = 0; |
290 | + | Molecule* mol; |
291 | + | MoleculeIterator mi; |
292 | + | CutoffGroup* cg; |
293 | + | Molecule::CutoffGroupIterator ci; |
294 | ||
295 | + | for (mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { |
296 | + | |
297 | + | for (cg = mol->beginCutoffGroup(ci); cg != NULL; |
298 | + | cg = mol->nextCutoffGroup(ci)) { |
299 | + | nLocalCutoffAtoms += cg->getNumAtom(); |
300 | + | |
301 | + | } |
302 | + | } |
303 | + | |
304 | + | return nAtoms_ - nLocalCutoffAtoms + nCutoffGroups_; |
305 | + | } |
306 | + | |
307 | void SimInfo::calcNdfRaw() { | |
308 | int ndfRaw_local; | |
309 | ||
# | Line 687 | Line 709 | namespace OpenMD { | |
709 | Atom* atom; | |
710 | set<AtomType*> atomTypes; | |
711 | ||
712 | < | for(mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { |
713 | < | for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) { |
712 | > | for(mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { |
713 | > | for(atom = mol->beginAtom(ai); atom != NULL; |
714 | > | atom = mol->nextAtom(ai)) { |
715 | atomTypes.insert(atom->getAtomType()); | |
716 | } | |
717 | } | |
718 | < | |
718 | > | |
719 | #ifdef IS_MPI | |
720 | ||
721 | // loop over the found atom types on this processor, and add their | |
722 | // numerical idents to a vector: | |
723 | < | |
723 | > | |
724 | vector<int> foundTypes; | |
725 | set<AtomType*>::iterator i; | |
726 | for (i = atomTypes.begin(); i != atomTypes.end(); ++i) | |
# | Line 706 | Line 729 | namespace OpenMD { | |
729 | // count_local holds the number of found types on this processor | |
730 | int count_local = foundTypes.size(); | |
731 | ||
709 | – | // count holds the total number of found types on all processors |
710 | – | // (some will be redundant with the ones found locally): |
711 | – | int count; |
712 | – | MPI::COMM_WORLD.Allreduce(&count_local, &count, 1, MPI::INT, MPI::SUM); |
713 | – | |
714 | – | // create a vector to hold the globally found types, and resize it: |
715 | – | vector<int> ftGlobal; |
716 | – | ftGlobal.resize(count); |
717 | – | vector<int> counts; |
718 | – | |
732 | int nproc = MPI::COMM_WORLD.Get_size(); | |
720 | – | counts.resize(nproc); |
721 | – | vector<int> disps; |
722 | – | disps.resize(nproc); |
733 | ||
734 | < | // now spray out the foundTypes to all the other processors: |
734 | > | // we need arrays to hold the counts and displacement vectors for |
735 | > | // all processors |
736 | > | vector<int> counts(nproc, 0); |
737 | > | vector<int> disps(nproc, 0); |
738 | > | |
739 | > | // fill the counts array |
740 | > | MPI::COMM_WORLD.Allgather(&count_local, 1, MPI::INT, &counts[0], |
741 | > | 1, MPI::INT); |
742 | > | |
743 | > | // use the processor counts to compute the displacement array |
744 | > | disps[0] = 0; |
745 | > | int totalCount = counts[0]; |
746 | > | for (int iproc = 1; iproc < nproc; iproc++) { |
747 | > | disps[iproc] = disps[iproc-1] + counts[iproc-1]; |
748 | > | totalCount += counts[iproc]; |
749 | > | } |
750 | > | |
751 | > | // we need a (possibly redundant) set of all found types: |
752 | > | vector<int> ftGlobal(totalCount); |
753 | ||
754 | + | // now spray out the foundTypes to all the other processors: |
755 | MPI::COMM_WORLD.Allgatherv(&foundTypes[0], count_local, MPI::INT, | |
756 | < | &ftGlobal[0], &counts[0], &disps[0], MPI::INT); |
756 | > | &ftGlobal[0], &counts[0], &disps[0], |
757 | > | MPI::INT); |
758 | ||
759 | + | vector<int>::iterator j; |
760 | + | |
761 | // foundIdents is a stl set, so inserting an already found ident | |
762 | // will have no effect. | |
763 | set<int> foundIdents; | |
764 | < | vector<int>::iterator j; |
764 | > | |
765 | for (j = ftGlobal.begin(); j != ftGlobal.end(); ++j) | |
766 | foundIdents.insert((*j)); | |
767 | ||
768 | // now iterate over the foundIdents and get the actual atom types | |
769 | // that correspond to these: | |
770 | set<int>::iterator it; | |
771 | < | for (it = foundIdents.begin(); it != foundIdents.end(); ++it) |
771 | > | for (it = foundIdents.begin(); it != foundIdents.end(); ++it) |
772 | atomTypes.insert( forceField_->getAtomType((*it)) ); | |
773 | ||
774 | #endif | |
775 | < | |
775 | > | |
776 | return atomTypes; | |
777 | } | |
778 | ||
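The hunk above is the standard variable-length all-gather: Allgather publishes each rank's count, the displacements are the prefix sums of those counts, and Allgatherv concatenates everybody's list on every rank. A self-contained sketch of the same bookkeeping (function name hypothetical; written against the plain C API rather than the MPI-2 C++ bindings used above):

```cpp
#ifdef IS_MPI
#include <mpi.h>
#endif
#include <cstddef>
#include <vector>

// Gather differently-sized int lists from all ranks; every rank gets
// the concatenation (possibly with duplicates, to be deduplicated by
// the caller, e.g. with a std::set as in getSimulatedAtomTypes()).
std::vector<int> gatherAll(std::vector<int> local) {
#ifdef IS_MPI
  int nproc;
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);

  // publish each rank's contribution size
  int countLocal = (int)local.size();
  std::vector<int> counts(nproc, 0);
  MPI_Allgather(&countLocal, 1, MPI_INT, &counts[0], 1, MPI_INT,
                MPI_COMM_WORLD);

  // displacements are the running prefix sum of the counts
  std::vector<int> disps(nproc, 0);
  int total = counts[0];
  for (int i = 1; i < nproc; i++) {
    disps[i] = disps[i - 1] + counts[i - 1];
    total += counts[i];
  }

  // concatenate every rank's list into one global vector
  std::vector<int> global(total);
  MPI_Allgatherv(local.empty() ? NULL : &local[0], countLocal, MPI_INT,
                 global.empty() ? NULL : &global[0], &counts[0],
                 &disps[0], MPI_INT, MPI_COMM_WORLD);
  return global;
#else
  return local;
#endif
}
```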
# | Line 752 | Line 784 | namespace OpenMD { | |
784 | if ( simParams_->getAccumulateBoxDipole() ) { | |
785 | calcBoxDipole_ = true; | |
786 | } | |
787 | < | |
787 | > | |
788 | set<AtomType*>::iterator i; | |
789 | set<AtomType*> atomTypes; | |
790 | atomTypes = getSimulatedAtomTypes(); | |
791 | int usesElectrostatic = 0; | |
792 | int usesMetallic = 0; | |
793 | int usesDirectional = 0; | |
794 | + | int usesFluctuatingCharges = 0; |
795 | //loop over all of the atom types | |
796 | for (i = atomTypes.begin(); i != atomTypes.end(); ++i) { | |
797 | usesElectrostatic |= (*i)->isElectrostatic(); | |
798 | usesMetallic |= (*i)->isMetal(); | |
799 | usesDirectional |= (*i)->isDirectional(); | |
800 | + | usesFluctuatingCharges |= (*i)->isFluctuatingCharge(); |
801 | } | |
802 | < | |
802 | > | |
803 | #ifdef IS_MPI | |
804 | int temp; | |
805 | temp = usesDirectional; | |
806 | MPI_Allreduce(&temp, &usesDirectionalAtoms_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); | |
807 | < | |
807 | > | |
808 | temp = usesMetallic; | |
809 | MPI_Allreduce(&temp, &usesMetallicAtoms_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); | |
810 | < | |
810 | > | |
811 | temp = usesElectrostatic; | |
812 | MPI_Allreduce(&temp, &usesElectrostaticAtoms_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); | |
813 | + | |
814 | + | temp = usesFluctuatingCharges; |
815 | + | MPI_Allreduce(&temp, &usesFluctuatingCharges_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); |
816 | + | #else |
817 | + | |
818 | + | usesDirectionalAtoms_ = usesDirectional; |
819 | + | usesMetallicAtoms_ = usesMetallic; |
820 | + | usesElectrostaticAtoms_ = usesElectrostatic; |
821 | + | usesFluctuatingCharges_ = usesFluctuatingCharges; |
822 | + | |
823 | #endif | |
824 | < | fInfo_.SIM_uses_PBC = usesPeriodicBoundaries_; |
825 | < | fInfo_.SIM_uses_DirectionalAtoms = usesDirectionalAtoms_; |
826 | < | fInfo_.SIM_uses_MetallicAtoms = usesMetallicAtoms_; |
827 | < | fInfo_.SIM_requires_SkipCorrection = usesElectrostaticAtoms_; |
784 | < | fInfo_.SIM_requires_SelfCorrection = usesElectrostaticAtoms_; |
785 | < | fInfo_.SIM_uses_AtomicVirial = usesAtomicVirial_; |
824 | > | |
825 | > | requiresPrepair_ = usesMetallicAtoms_ ? true : false; |
826 | > | requiresSkipCorrection_ = usesElectrostaticAtoms_ ? true : false; |
827 | > | requiresSelfCorrection_ = usesElectrostaticAtoms_ ? true : false; |
828 | } | |
829 | ||
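setupSimVariables() collapses each per-atom-type capability into a single int with bitwise |=, then (under MPI) promotes it with a logical-OR reduction so the flag is set on every rank if it is set on any. A minimal sketch of that promotion, with a hypothetical helper name:

```cpp
#ifdef IS_MPI
#include <mpi.h>
#endif

// True on every rank if true on any rank: the MPI_LOR idiom used for
// usesDirectionalAtoms_, usesMetallicAtoms_, usesElectrostaticAtoms_,
// and the new usesFluctuatingCharges_.
int trueOnAnyRank(int localFlag) {
  int globalFlag = localFlag;   // serial build: global == local
#ifdef IS_MPI
  MPI_Allreduce(&localFlag, &globalFlag, 1, MPI_INT, MPI_LOR,
                MPI_COMM_WORLD);
#endif
  return globalFlag;
}
```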
788 | – | void SimInfo::setupFortran() { |
789 | – | int isError; |
790 | – | int nExclude, nOneTwo, nOneThree, nOneFour; |
791 | – | vector<int> fortranGlobalGroupMembership; |
792 | – | |
793 | – | isError = 0; |
830 | ||
831 | < | //globalGroupMembership_ is filled by SimCreator |
832 | < | for (int i = 0; i < nGlobalAtoms_; i++) { |
833 | < | fortranGlobalGroupMembership.push_back(globalGroupMembership_[i] + 1); |
831 | > | vector<int> SimInfo::getGlobalAtomIndices() { |
832 | > | SimInfo::MoleculeIterator mi; |
833 | > | Molecule* mol; |
834 | > | Molecule::AtomIterator ai; |
835 | > | Atom* atom; |
836 | > | |
837 | > | vector<int> GlobalAtomIndices(getNAtoms(), 0); |
838 | > | |
839 | > | for (mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { |
840 | > | |
841 | > | for (atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) { |
842 | > | GlobalAtomIndices[atom->getLocalIndex()] = atom->getGlobalIndex(); |
843 | > | } |
844 | } | |
845 | + | return GlobalAtomIndices; |
846 | + | } |
847 | ||
848 | + | |
849 | + | vector<int> SimInfo::getGlobalGroupIndices() { |
850 | + | SimInfo::MoleculeIterator mi; |
851 | + | Molecule* mol; |
852 | + | Molecule::CutoffGroupIterator ci; |
853 | + | CutoffGroup* cg; |
854 | + | |
855 | + | vector<int> GlobalGroupIndices; |
856 | + | |
857 | + | for (mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { |
858 | + | |
859 | + | //the local index of a cutoff group is trivial; it only depends on the |
860 | + | //order of traversal |
861 | + | for (cg = mol->beginCutoffGroup(ci); cg != NULL; |
862 | + | cg = mol->nextCutoffGroup(ci)) { |
863 | + | GlobalGroupIndices.push_back(cg->getGlobalIndex()); |
864 | + | } |
865 | + | } |
866 | + | return GlobalGroupIndices; |
867 | + | } |
868 | + | |
869 | + | |
870 | + | void SimInfo::prepareTopology() { |
871 | + | int nExclude, nOneTwo, nOneThree, nOneFour; |
872 | + | |
873 | //calculate mass ratio of cutoff group | |
801 | – | vector<RealType> mfact; |
874 | SimInfo::MoleculeIterator mi; | |
875 | Molecule* mol; | |
876 | Molecule::CutoffGroupIterator ci; | |
# | Line 807 | Line 879 | namespace OpenMD { | |
879 | Atom* atom; | |
880 | RealType totalMass; | |
881 | ||
882 | < | //to avoid memory reallocation, reserve enough space for mfact |
883 | < | mfact.reserve(getNCutoffGroups()); |
882 | > | /** |
883 | > | * The mass factor is the relative mass of an atom to the total |
884 | > | * mass of the cutoff group it belongs to. By default, all atoms |
885 | > | * are their own cutoff groups, and therefore have mass factors of |
886 | > | * 1. We need some special handling for massless atoms, which |
887 | > | * will be treated as carrying the entire mass of the cutoff |
888 | > | * group. |
889 | > | */ |
890 | > | massFactors_.clear(); |
891 | > | massFactors_.resize(getNAtoms(), 1.0); |
892 | ||
893 | for(mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { | |
894 | < | for (cg = mol->beginCutoffGroup(ci); cg != NULL; cg = mol->nextCutoffGroup(ci)) { |
894 | > | for (cg = mol->beginCutoffGroup(ci); cg != NULL; |
895 | > | cg = mol->nextCutoffGroup(ci)) { |
896 | ||
897 | totalMass = cg->getMass(); | |
898 | for(atom = cg->beginAtom(ai); atom != NULL; atom = cg->nextAtom(ai)) { | |
899 | // Check for massless groups - set the mass factor to 1 for them | |
900 | < | if (totalMass != 0) |
901 | < | mfact.push_back(atom->getMass()/totalMass); |
900 | > | if (totalMass != 0) |
901 | > | massFactors_[atom->getLocalIndex()] = atom->getMass()/totalMass; |
902 | else | |
903 | < | mfact.push_back( 1.0 ); |
903 | > | massFactors_[atom->getLocalIndex()] = 1.0; |
904 | } | |
905 | } | |
906 | } | |
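As a throwaway illustration of the doc comment above (not OpenMD code): for a hypothetical three-site water cutoff group, the factors are each atom's share of the group mass and sum to 1 within the group, while a group whose total mass is zero takes the else branch and every member gets a factor of 1.

```cpp
#include <cstdio>

int main() {
  double mO = 15.999, mH = 1.008;   // atomic masses in amu
  double M  = mO + 2.0 * mH;        // group mass, ~18.015 amu
  std::printf("fO = %.3f, fH = %.3f each\n",
              mO / M, mH / M);      // ~0.888 and ~0.056; sums to 1
  return 0;
}
```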
# | Line 833 | Line 914 | namespace OpenMD { | |
914 | identArray_.push_back(atom->getIdent()); | |
915 | } | |
916 | } | |
836 | – | |
837 | – | //fill molMembershipArray |
838 | – | //molMembershipArray is filled by SimCreator |
839 | – | vector<int> molMembershipArray(nGlobalAtoms_); |
840 | – | for (int i = 0; i < nGlobalAtoms_; i++) { |
841 | – | molMembershipArray[i] = globalMolMembership_[i] + 1; |
842 | – | } |
917 | ||
918 | < | //setup fortran simulation |
918 | > | //scan topology |
919 | ||
920 | nExclude = excludedInteractions_.getSize(); | |
921 | nOneTwo = oneTwoInteractions_.getSize(); | |
# | Line 853 | Line 927 | namespace OpenMD { | |
927 | int* oneThreeList = oneThreeInteractions_.getPairList(); | |
928 | int* oneFourList = oneFourInteractions_.getPairList(); | |
929 | ||
930 | < | setFortranSim( &fInfo_, &nGlobalAtoms_, &nAtoms_, &identArray[0], |
857 | < | &nExclude, excludeList, |
858 | < | &nOneTwo, oneTwoList, |
859 | < | &nOneThree, oneThreeList, |
860 | < | &nOneFour, oneFourList, |
861 | < | &molMembershipArray[0], &mfact[0], &nCutoffGroups_, |
862 | < | &fortranGlobalGroupMembership[0], &isError); |
863 | < | |
864 | < | if( isError ){ |
865 | < | |
866 | < | sprintf( painCave.errMsg, |
867 | < | "There was an error setting the simulation information in fortran.\n" ); |
868 | < | painCave.isFatal = 1; |
869 | < | painCave.severity = OPENMD_ERROR; |
870 | < | simError(); |
871 | < | } |
872 | < | |
873 | < | |
874 | < | sprintf( checkPointMsg, |
875 | < | "succesfully sent the simulation information to fortran.\n"); |
876 | < | |
877 | < | errorCheckPoint(); |
878 | < | |
879 | < | // Setup number of neighbors in neighbor list if present |
880 | < | if (simParams_->haveNeighborListNeighbors()) { |
881 | < | int nlistNeighbors = simParams_->getNeighborListNeighbors(); |
882 | < | setNeighbors(&nlistNeighbors); |
883 | < | } |
884 | < | |
885 | < | #ifdef IS_MPI |
886 | < | //SimInfo is responsible for creating localToGlobalAtomIndex and |
887 | < | //localToGlobalGroupIndex |
888 | < | vector<int> localToGlobalAtomIndex(getNAtoms(), 0); |
889 | < | vector<int> localToGlobalCutoffGroupIndex; |
890 | < | mpiSimData parallelData; |
891 | < | |
892 | < | for (mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) { |
893 | < | |
894 | < | //local index(index in DataStorge) of atom is important |
895 | < | for (atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) { |
896 | < | localToGlobalAtomIndex[atom->getLocalIndex()] = atom->getGlobalIndex() + 1; |
897 | < | } |
898 | < | |
899 | < | //local index of cutoff group is trivial, it only depends on the order of travesing |
900 | < | for (cg = mol->beginCutoffGroup(ci); cg != NULL; cg = mol->nextCutoffGroup(ci)) { |
901 | < | localToGlobalCutoffGroupIndex.push_back(cg->getGlobalIndex() + 1); |
902 | < | } |
903 | < | |
904 | < | } |
905 | < | |
906 | < | //fill up mpiSimData struct |
907 | < | parallelData.nMolGlobal = getNGlobalMolecules(); |
908 | < | parallelData.nMolLocal = getNMolecules(); |
909 | < | parallelData.nAtomsGlobal = getNGlobalAtoms(); |
910 | < | parallelData.nAtomsLocal = getNAtoms(); |
911 | < | parallelData.nGroupsGlobal = getNGlobalCutoffGroups(); |
912 | < | parallelData.nGroupsLocal = getNCutoffGroups(); |
913 | < | parallelData.myNode = worldRank; |
914 | < | MPI_Comm_size(MPI_COMM_WORLD, &(parallelData.nProcessors)); |
915 | < | |
916 | < | //pass mpiSimData struct and index arrays to fortran |
917 | < | setFsimParallel(¶llelData, &(parallelData.nAtomsLocal), |
918 | < | &localToGlobalAtomIndex[0], &(parallelData.nGroupsLocal), |
919 | < | &localToGlobalCutoffGroupIndex[0], &isError); |
920 | < | |
921 | < | if (isError) { |
922 | < | sprintf(painCave.errMsg, |
923 | < | "mpiRefresh errror: fortran didn't like something we gave it.\n"); |
924 | < | painCave.isFatal = 1; |
925 | < | simError(); |
926 | < | } |
927 | < | |
928 | < | sprintf(checkPointMsg, " mpiRefresh successful.\n"); |
929 | < | errorCheckPoint(); |
930 | < | #endif |
931 | < | |
932 | < | initFortranFF(&isError); |
933 | < | if (isError) { |
934 | < | sprintf(painCave.errMsg, |
935 | < | "initFortranFF errror: fortran didn't like something we gave it.\n"); |
936 | < | painCave.isFatal = 1; |
937 | < | simError(); |
938 | < | } |
939 | < | fortranInitialized_ = true; |
930 | > | topologyDone_ = true; |
931 | } | |
932 | ||
933 | void SimInfo::addProperty(GenericData* genData) { | |
# | Line 1220 | Line 1211 | namespace OpenMD { | |
1211 | ||
1212 | det = intTensor.determinant(); | |
1213 | sysconstants = geomCnst/(RealType)nGlobalIntegrableObjects_; | |
1214 | < | volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,3.0/2.0)*sqrt(det); |
1214 | > | volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,geomCnst)*sqrt(det); |
1215 | return; | |
1216 | } | |
1217 | ||
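Assuming geomCnst retains the 3.0/2.0 value of the literal it replaces, both gyrational-volume routines evaluate

$$ V = \frac{4}{3}\,\pi\left(\frac{3}{2N}\right)^{3/2}\sqrt{\det\mathsf{I}} $$

with N = nGlobalIntegrableObjects_ and I the inertia tensor; the edit reuses the named constant as the exponent instead of repeating the literal.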
# | Line 1236 | Line 1227 | namespace OpenMD { | |
1227 | ||
1228 | detI = intTensor.determinant(); | |
1229 | sysconstants = geomCnst/(RealType)nGlobalIntegrableObjects_; | |
1230 | < | volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,3.0/2.0)*sqrt(detI); |
1230 | > | volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,geomCnst)*sqrt(detI); |
1231 | return; | |
1232 | } | |
1233 | /* |
– | Removed lines |
+ | Added lines |
< | Changed lines |
> | Changed lines |