!! do_Forces.F90

!! module do_Forces
!! Calculates Long Range forces.

!! @author Charles F. Vardeman II
!! @author Matthew Meineke
!! @version $Id: do_Forces.F90,v 1.66 2004-05-28 15:21:37 gezelter Exp $, $Date: 2004-05-28 15:21:37 $, $Name: not supported by cvs2svn $, $Revision: 1.66 $

module do_Forces

  use force_globals
  use simulation
  use definitions
  use atype_module
  use switcheroo
  use neighborLists
  use lj
  use sticky_pair
  use dipole_dipole
  use charge_charge
  use reaction_field
  use gb_pair
  use vector_class
  use eam
  use status
#ifdef IS_MPI
  use mpiSimulation
#endif

  implicit none
  PRIVATE

#define __FORTRAN90
#include "fForceField.h"
#include "fSwitchingFunction.h"

  INTEGER, PARAMETER :: PREPAIR_LOOP = 1
  INTEGER, PARAMETER :: PAIR_LOOP    = 2

  logical, save :: haveRlist = .false.
  logical, save :: haveNeighborList = .false.
  logical, save :: havePolicies = .false.
  logical, save :: haveSIMvariables = .false.
  logical, save :: havePropertyMap = .false.
  logical, save :: haveSaneForceField = .false.

  logical, save :: FF_uses_LJ
  logical, save :: FF_uses_sticky
  logical, save :: FF_uses_charges
  logical, save :: FF_uses_dipoles
  logical, save :: FF_uses_RF
  logical, save :: FF_uses_GB
  logical, save :: FF_uses_EAM

  logical, save :: SIM_uses_LJ
  logical, save :: SIM_uses_sticky
  logical, save :: SIM_uses_charges
  logical, save :: SIM_uses_dipoles
  logical, save :: SIM_uses_RF
  logical, save :: SIM_uses_GB
  logical, save :: SIM_uses_EAM
  logical, save :: SIM_requires_postpair_calc
  logical, save :: SIM_requires_prepair_calc
  logical, save :: SIM_uses_directional_atoms
  logical, save :: SIM_uses_PBC
  logical, save :: SIM_uses_molecular_cutoffs

  real(kind=dp), save :: rlist, rlistsq

  public :: init_FF
  public :: do_force_loop
  public :: setRlistDF

#ifdef PROFILE
  public :: getforcetime
  real, save :: forceTime = 0
  real :: forceTimeInitial, forceTimeFinal
  integer :: nLoops
#endif

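  !! Per-atype property cache.  createPropertyMap copies the interaction
  !! flags (is_LJ, is_Charge, is_DP, ...) and the charge / dipole moment of
  !! every atype out of atype_module once, so the inner pair loops can test
  !! them with a simple array lookup instead of repeated calls to
  !! getElementProperty.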
  type :: Properties
     logical :: is_lj     = .false.
     logical :: is_sticky = .false.
     logical :: is_dp     = .false.
     logical :: is_gb     = .false.
     logical :: is_eam    = .false.
     logical :: is_charge = .false.
     real(kind=DP) :: charge        = 0.0_DP
     real(kind=DP) :: dipole_moment = 0.0_DP
  end type Properties

  type(Properties), dimension(:), allocatable :: PropertyMap

contains

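  !! Stores the neighbor-list cutoff radius (and its square) supplied by the
  !! caller.  doReadyCheck refuses to run the force loop until this has been
  !! set.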
  subroutine setRlistDF( this_rlist )

    real(kind=dp) :: this_rlist

    rlist = this_rlist
    rlistsq = rlist * rlist

    haveRlist = .true.

  end subroutine setRlistDF

  subroutine createPropertyMap(status)
    integer :: nAtypes
    integer :: status
    integer :: i
    logical :: thisProperty
    real (kind=DP) :: thisDPproperty

    status = 0

    nAtypes = getSize(atypes)

    if (nAtypes == 0) then
      status = -1
      return
    end if

    if (.not. allocated(PropertyMap)) then
      allocate(PropertyMap(nAtypes))
    endif

    do i = 1, nAtypes
      call getElementProperty(atypes, i, "is_LJ", thisProperty)
      PropertyMap(i)%is_LJ = thisProperty

      call getElementProperty(atypes, i, "is_Charge", thisProperty)
      PropertyMap(i)%is_Charge = thisProperty

      if (thisProperty) then
        call getElementProperty(atypes, i, "charge", thisDPproperty)
        PropertyMap(i)%charge = thisDPproperty
      endif

      call getElementProperty(atypes, i, "is_DP", thisProperty)
      PropertyMap(i)%is_DP = thisProperty

      if (thisProperty) then
        call getElementProperty(atypes, i, "dipole_moment", thisDPproperty)
        PropertyMap(i)%dipole_moment = thisDPproperty
      endif

      call getElementProperty(atypes, i, "is_Sticky", thisProperty)
      PropertyMap(i)%is_Sticky = thisProperty

      call getElementProperty(atypes, i, "is_GB", thisProperty)
      PropertyMap(i)%is_GB = thisProperty

      call getElementProperty(atypes, i, "is_EAM", thisProperty)
      PropertyMap(i)%is_EAM = thisProperty
    end do

    havePropertyMap = .true.

  end subroutine createPropertyMap

  subroutine setSimVariables()
    SIM_uses_LJ = SimUsesLJ()
    SIM_uses_sticky = SimUsesSticky()
    SIM_uses_charges = SimUsesCharges()
    SIM_uses_dipoles = SimUsesDipoles()
    SIM_uses_RF = SimUsesRF()
    SIM_uses_GB = SimUsesGB()
    SIM_uses_EAM = SimUsesEAM()
    SIM_requires_postpair_calc = SimRequiresPostpairCalc()
    SIM_requires_prepair_calc = SimRequiresPrepairCalc()
    SIM_uses_directional_atoms = SimUsesDirectionalAtoms()
    SIM_uses_PBC = SimUsesPBC()
    !SIM_uses_molecular_cutoffs = SimUsesMolecularCutoffs()

    haveSIMvariables = .true.

    return
  end subroutine setSimVariables

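  !! Lazily builds the PropertyMap and the SIM_* flags, then verifies that
  !! the cutoff radius, LJ mixing policy, neighbor list, and force field
  !! have all been initialized (and, under MPI, that the parallel
  !! decomposition is set up) before any force evaluation is allowed to
  !! proceed.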
  subroutine doReadyCheck(error)
    integer, intent(out) :: error

    integer :: myStatus

    error = 0

    if (.not. havePropertyMap) then

      myStatus = 0

      call createPropertyMap(myStatus)

      if (myStatus .ne. 0) then
        write(default_error, *) 'createPropertyMap failed in do_Forces!'
        error = -1
        return
      endif
    endif

    if (.not. haveSIMvariables) then
      call setSimVariables()
    endif

    if (.not. haveRlist) then
      write(default_error, *) 'rList has not been set in do_Forces!'
      error = -1
      return
    endif

    if (SIM_uses_LJ .and. FF_uses_LJ) then
      if (.not. havePolicies) then
        write(default_error, *) 'LJ mixing Policies have not been set in do_Forces!'
        error = -1
        return
      endif
    endif

    if (.not. haveNeighborList) then
      write(default_error, *) 'neighbor list has not been initialized in do_Forces!'
      error = -1
      return
    end if

    if (.not. haveSaneForceField) then
      write(default_error, *) 'Force Field is not sane in do_Forces!'
      error = -1
      return
    end if

#ifdef IS_MPI
    if (.not. isMPISimSet()) then
      write(default_error,*) "ERROR: mpiSimulation has not been initialized!"
      error = -1
      return
    endif
#endif
    return
  end subroutine doReadyCheck

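  !! Called once, after every atom type has been registered with
  !! atype_module.  Scans the known atypes to decide which interactions the
  !! force field actually uses, sanity-checks the reaction-field / dipole
  !! combination, initializes the individual interaction modules (LJ
  !! mixing, sticky, EAM, Gay-Berne), and allocates the neighbor list.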
  subroutine init_FF(LJMIXPOLICY, use_RF_c, thisStat)

    integer, intent(in) :: LJMIXPOLICY
    logical, intent(in) :: use_RF_c

    integer, intent(out) :: thisStat
    integer :: my_status, nMatches
    integer, pointer :: MatchList(:) => null()
    real(kind=dp) :: rcut, rrf, rt, dielect

    !! assume things are copacetic, unless they aren't
    thisStat = 0

    !! Fortran's version of a cast:
    FF_uses_RF = use_RF_c

    !! init_FF is called *after* all of the atom types have been
    !! defined in atype_module using the new_atype subroutine.
    !!
    !! this will scan through the known atypes and figure out what
    !! interactions are used by the force field.

    FF_uses_LJ = .false.
    FF_uses_sticky = .false.
    FF_uses_charges = .false.
    FF_uses_dipoles = .false.
    FF_uses_GB = .false.
    FF_uses_EAM = .false.

    call getMatchingElementList(atypes, "is_LJ", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_LJ = .true.

    call getMatchingElementList(atypes, "is_Charge", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_charges = .true.

    call getMatchingElementList(atypes, "is_DP", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_dipoles = .true.

    call getMatchingElementList(atypes, "is_Sticky", .true., nMatches, &
         MatchList)
    if (nMatches .gt. 0) FF_uses_Sticky = .true.

    call getMatchingElementList(atypes, "is_GB", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_GB = .true.

    call getMatchingElementList(atypes, "is_EAM", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_EAM = .true.

    !! Assume sanity (for the sake of argument)
    haveSaneForceField = .true.

    !! check to make sure the FF_uses_RF setting makes sense

    if (FF_uses_dipoles) then
      if (FF_uses_RF) then
        dielect = getDielect()
        call initialize_rf(dielect)
      endif
    else
      if (FF_uses_RF) then
        write(default_error,*) 'Using Reaction Field with no dipoles? Huh?'
        thisStat = -1
        haveSaneForceField = .false.
        return
      endif
    endif

    if (FF_uses_LJ) then

      select case (LJMIXPOLICY)
      case (LB_MIXING_RULE)
        call init_lj_FF(LB_MIXING_RULE, my_status)
      case (EXPLICIT_MIXING_RULE)
        call init_lj_FF(EXPLICIT_MIXING_RULE, my_status)
      case default
        write(default_error,*) 'unknown LJ Mixing Policy!'
        thisStat = -1
        haveSaneForceField = .false.
        return
      end select
      if (my_status /= 0) then
        thisStat = -1
        haveSaneForceField = .false.
        return
      end if
      havePolicies = .true.
    endif

    if (FF_uses_sticky) then
      call check_sticky_FF(my_status)
      if (my_status /= 0) then
        thisStat = -1
        haveSaneForceField = .false.
        return
      end if
    endif

    if (FF_uses_EAM) then
      call init_EAM_FF(my_status)
      if (my_status /= 0) then
        write(default_error, *) "init_EAM_FF returned a bad status"
        thisStat = -1
        haveSaneForceField = .false.
        return
      end if
    endif

    if (FF_uses_GB) then
      call check_gb_pair_FF(my_status)
      if (my_status .ne. 0) then
        thisStat = -1
        haveSaneForceField = .false.
        return
      endif
    endif

    if (FF_uses_GB .and. FF_uses_LJ) then
    endif

    if (.not. haveNeighborList) then
      !! Create neighbor lists
      call expandNeighborList(nLocal, my_status)
      if (my_Status /= 0) then
        write(default_error,*) "SimSetup: ExpandNeighborList returned error."
        thisStat = -1
        return
      endif
      haveNeighborList = .true.
    endif

  end subroutine init_FF
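
  !! A typical calling sequence from the wrapping simulation code (a
  !! schematic sketch only; the driver below is not part of this module):
  !!
  !!   call setRlistDF(rcut + skin)                   ! neighbor-list cutoff
  !!   call init_FF(LB_MIXING_RULE, .false., status)  ! once, after the new_atype calls
  !!   do step = 1, nSteps
  !!      call do_force_loop(q, q_group, A, u_l, f, t, tau, pot, &
  !!                         do_pot, do_stress, error)
  !!   end do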

  !! Does the force loop over i,j pairs.  Calls do_pair to calculate forces.
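  !! The loop is made in (up to) two passes.  If the force field needs a
  !! prepair calculation (currently only EAM), a PREPAIR_LOOP pass first
  !! accumulates the electron densities via do_prepair / do_preforce; the
  !! PAIR_LOOP pass then evaluates the actual pair forces.  The neighbor
  !! list is checked and, if necessary, rebuilt only on the first pass.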
  !-------------------------------------------------------------->
  subroutine do_force_loop(q, q_group, A, u_l, f, t, tau, pot, &
       do_pot_c, do_stress_c, error)
    !! Position array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3, nLocal) :: q
    !! molecular center-of-mass position array
    real ( kind = dp ), dimension(3, nGroups) :: q_group
    !! Rotation Matrix for each long range particle in simulation.
    real ( kind = dp ), dimension(9, nLocal) :: A
    !! Unit vectors for dipoles (lab frame)
    real ( kind = dp ), dimension(3, nLocal) :: u_l
    !! Force array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3, nLocal) :: f
    !! Torque array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3, nLocal) :: t

    !! Stress Tensor
    real ( kind = dp ), dimension(9) :: tau
    real ( kind = dp ) :: pot
    logical ( kind = 2) :: do_pot_c, do_stress_c
    logical :: do_pot
    logical :: do_stress
    logical :: in_switching_region
#ifdef IS_MPI
    real( kind = DP ) :: pot_local
    integer :: nAtomsInRow
    integer :: nAtomsInCol
    integer :: nprocs
    integer :: nGroupsInRow
    integer :: nGroupsInCol
#endif
    integer :: natoms
    logical :: update_nlist
    integer :: i, j, jstart, jend, jnab
    integer :: istart, iend
    integer :: ia, jb, atom1, atom2
    integer :: nlist
    real( kind = DP ) :: ratmsq, rgrpsq, rgrp, vpair, vij
    real( kind = DP ) :: sw, dswdr, swderiv, mf
    real(kind=dp), dimension(3) :: d_atm, d_grp, fpair, fij
    real(kind=dp) :: rfpot, mu_i, virial
    integer :: me_i, me_j, n_in_i, n_in_j
    logical :: is_dp_i
    integer :: neighborListSize
    integer :: listerror, error
    integer :: localError
    integer :: propPack_i, propPack_j
    integer :: loopStart, loopEnd, loop

    real(kind=dp) :: listSkin = 1.0

    !! initialize local variables

#ifdef IS_MPI
    pot_local = 0.0_dp
    nAtomsInRow = getNatomsInRow(plan_atom_row)
    nAtomsInCol = getNatomsInCol(plan_atom_col)
    nGroupsInRow = getNgroupsInRow(plan_group_row)
    nGroupsInCol = getNgroupsInCol(plan_group_col)
    write(*,*) nAtomsInRow, nAtomsInCol, nGroupsInRow, nGroupsInCol
    write(*,*) pot_local
#else
    natoms = nlocal
#endif

    call doReadyCheck(localError)
    if ( localError .ne. 0 ) then
      call handleError("do_force_loop", "Not Initialized")
      error = -1
      return
    end if
    call zero_work_arrays()

    do_pot = do_pot_c
    do_stress = do_stress_c

    ! Gather all information needed by all force loops:

#ifdef IS_MPI

    call gather(q, q_Row, plan_atom_row_3d)
    call gather(q, q_Col, plan_atom_col_3d)

    call gather(q_group, q_group_Row, plan_group_row_3d)
    call gather(q_group, q_group_Col, plan_group_col_3d)

    if (FF_UsesDirectionalAtoms() .and. SIM_uses_directional_atoms) then
      call gather(u_l, u_l_Row, plan_atom_row_3d)
      call gather(u_l, u_l_Col, plan_atom_col_3d)

      call gather(A, A_Row, plan_atom_row_rotation)
      call gather(A, A_Col, plan_atom_col_rotation)
    endif

#endif

    !! Begin force loop timing:
#ifdef PROFILE
    call cpu_time(forceTimeInitial)
    nloops = nloops + 1
#endif

    loopEnd = PAIR_LOOP
    if (FF_RequiresPrepairCalc() .and. SIM_requires_prepair_calc) then
      loopStart = PREPAIR_LOOP
    else
      loopStart = PAIR_LOOP
    endif

    do loop = loopStart, loopEnd
      write(*,*) 'doing loop ', loop

      ! See if we need to update neighbor lists
      ! (but only on the first time through):
      if (loop .eq. loopStart) then
#ifdef IS_MPI
        call checkNeighborList(nGroupsInRow, q_group_row, listSkin, &
             update_nlist)
#else
        call checkNeighborList(nGroups, q_group, listSkin, &
             update_nlist)
#endif
      endif

      if (update_nlist) then
        !! save current configuration and construct neighbor list
#ifdef IS_MPI
        call saveNeighborList(nGroupsInRow, q_group_row)
#else
        call saveNeighborList(nGroups, q_group)
#endif
        neighborListSize = size(list)
        nlist = 0
      endif

      istart = 1
#ifdef IS_MPI
      iend = nGroupsInRow
#else
      iend = nGroups - 1
#endif
      outer: do i = istart, iend

        write(*,*) 'doing ', i

        if (update_nlist) point(i) = nlist + 1

        n_in_i = groupStartRow(i+1) - groupStartRow(i)

        if (update_nlist) then
#ifdef IS_MPI
          jstart = 1
          jend = nGroupsInCol
#else
          jstart = i+1
          jend = nGroups
#endif
        else
          jstart = point(i)
          jend = point(i+1) - 1
          ! make sure group i has neighbors
          if (jstart .gt. jend) cycle outer
        endif

        do jnab = jstart, jend
          if (update_nlist) then
            j = jnab
          else
            j = list(jnab)
          endif

#ifdef IS_MPI
          call get_interatomic_vector(q_group_Row(:,i), &
               q_group_Col(:,j), d_grp, rgrpsq)
#else
          call get_interatomic_vector(q_group(:,i), &
               q_group(:,j), d_grp, rgrpsq)
#endif

          if (rgrpsq < rlistsq) then
            if (update_nlist) then
              nlist = nlist + 1

              if (nlist > neighborListSize) then
#ifdef IS_MPI
                call expandNeighborList(nGroupsInRow, listerror)
#else
                call expandNeighborList(nGroups, listerror)
#endif
                if (listerror /= 0) then
                  error = -1
                  write(DEFAULT_ERROR,*) "ERROR: nlist > list size and max allocations exceeded."
                  return
                end if
                neighborListSize = size(list)
              endif

              list(nlist) = j
            endif

            if (loop .eq. PAIR_LOOP) then
              vij = 0.0d0
              fij(1:3) = 0.0d0
            endif

            call get_switch(rgrpsq, sw, dswdr, rgrp, group_switch, &
                 in_switching_region)

            n_in_j = groupStartCol(j+1) - groupStartCol(j)

            do ia = groupStartRow(i), groupStartRow(i+1)-1

              atom1 = groupListRow(ia)

              inner: do jb = groupStartCol(j), groupStartCol(j+1)-1

                atom2 = groupListCol(jb)

                if (skipThisPair(atom1, atom2)) cycle inner

                if ((n_in_i .eq. 1) .and. (n_in_j .eq. 1)) then
                  d_atm(1:3) = d_grp(1:3)
                  ratmsq = rgrpsq
                else
#ifdef IS_MPI
                  call get_interatomic_vector(q_Row(:,atom1), &
                       q_Col(:,atom2), d_atm, ratmsq)
#else
                  call get_interatomic_vector(q(:,atom1), &
                       q(:,atom2), d_atm, ratmsq)
#endif
                endif
                if (loop .eq. PREPAIR_LOOP) then
#ifdef IS_MPI
                  call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                       rgrpsq, d_grp, do_pot, do_stress, &
                       u_l, A, f, t, pot_local)
#else
                  call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                       rgrpsq, d_grp, do_pot, do_stress, &
                       u_l, A, f, t, pot)
#endif
                else
#ifdef IS_MPI
                  call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                       do_pot, &
                       u_l, A, f, t, pot_local, vpair, fpair)
#else
                  call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                       do_pot, &
                       u_l, A, f, t, pot, vpair, fpair)
#endif
                  vij = vij + vpair
                  fij(1:3) = fij(1:3) + fpair(1:3)
                endif
              enddo inner
            enddo

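            ! If the group-group separation lies in the switching region,
            ! the switching function S(r) scales the pair energy, so the
            ! force picks up an extra term vij * dS/dr along the group-group
            ! vector; that term is distributed over the atoms of each group
            ! according to their mass factors (mfact).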
            if (loop .eq. PAIR_LOOP) then
              if (in_switching_region) then
                swderiv = vij*dswdr/rgrp
                fij(1) = fij(1) + swderiv*d_grp(1)
                fij(2) = fij(2) + swderiv*d_grp(2)
                fij(3) = fij(3) + swderiv*d_grp(3)

                do ia = groupStartRow(i), groupStartRow(i+1)-1
                  atom1 = groupListRow(ia)
                  mf = mfactRow(atom1)
#ifdef IS_MPI
                  f_Row(1,atom1) = f_Row(1,atom1) + swderiv*d_grp(1)*mf
                  f_Row(2,atom1) = f_Row(2,atom1) + swderiv*d_grp(2)*mf
                  f_Row(3,atom1) = f_Row(3,atom1) + swderiv*d_grp(3)*mf
#else
                  f(1,atom1) = f(1,atom1) + swderiv*d_grp(1)*mf
                  f(2,atom1) = f(2,atom1) + swderiv*d_grp(2)*mf
                  f(3,atom1) = f(3,atom1) + swderiv*d_grp(3)*mf
#endif
                enddo

                do jb = groupStartCol(j), groupStartCol(j+1)-1
                  atom2 = groupListCol(jb)
                  mf = mfactCol(atom2)
#ifdef IS_MPI
                  f_Col(1,atom2) = f_Col(1,atom2) - swderiv*d_grp(1)*mf
                  f_Col(2,atom2) = f_Col(2,atom2) - swderiv*d_grp(2)*mf
                  f_Col(3,atom2) = f_Col(3,atom2) - swderiv*d_grp(3)*mf
#else
                  f(1,atom2) = f(1,atom2) - swderiv*d_grp(1)*mf
                  f(2,atom2) = f(2,atom2) - swderiv*d_grp(2)*mf
                  f(3,atom2) = f(3,atom2) - swderiv*d_grp(3)*mf
#endif
                enddo
              endif

              if (do_stress) call add_stress_tensor(d_grp, fij)
            endif
          end if
        enddo
      enddo outer

      if (update_nlist) then
#ifdef IS_MPI
        point(nGroupsInRow + 1) = nlist + 1
#else
        point(nGroups) = nlist + 1
#endif
        if (loop .eq. PREPAIR_LOOP) then
          ! we just did the neighbor list update on the first pass,
          ! so we don't need to do it again on the second pass
          update_nlist = .false.
        endif
      endif

      if (loop .eq. PREPAIR_LOOP) then
        call do_preforce(nlocal, pot)
      endif

    enddo

    !! Do timing
#ifdef PROFILE
    call cpu_time(forceTimeFinal)
    forceTime = forceTime + forceTimeFinal - forceTimeInitial
#endif

#ifdef IS_MPI
    !! distribute forces

    f_temp = 0.0_dp
    call scatter(f_Row, f_temp, plan_atom_row_3d)
    do i = 1, nlocal
      f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
    end do

    f_temp = 0.0_dp
    call scatter(f_Col, f_temp, plan_atom_col_3d)
    do i = 1, nlocal
      f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
    end do

    if (FF_UsesDirectionalAtoms() .and. SIM_uses_directional_atoms) then
      t_temp = 0.0_dp
      call scatter(t_Row, t_temp, plan_atom_row_3d)
      do i = 1, nlocal
        t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
      end do
      t_temp = 0.0_dp
      call scatter(t_Col, t_temp, plan_atom_col_3d)

      do i = 1, nlocal
        t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
      end do
    endif

    if (do_pot) then
      ! scatter/gather pot_row into the members of my column
      call scatter(pot_Row, pot_Temp, plan_atom_row)

      ! scatter/gather pot_local into all other procs
      ! add resultant to get total pot
      do i = 1, nlocal
        pot_local = pot_local + pot_Temp(i)
      enddo

      pot_Temp = 0.0_DP

      call scatter(pot_Col, pot_Temp, plan_atom_col)
      do i = 1, nlocal
        pot_local = pot_local + pot_Temp(i)
      enddo

    endif
#endif

    if (FF_RequiresPostpairCalc() .and. SIM_requires_postpair_calc) then

      if (FF_uses_RF .and. SIM_uses_RF) then

#ifdef IS_MPI
        call scatter(rf_Row, rf, plan_atom_row_3d)
        call scatter(rf_Col, rf_Temp, plan_atom_col_3d)
        do i = 1, nlocal
          rf(1:3,i) = rf(1:3,i) + rf_Temp(1:3,i)
        end do
#endif

        do i = 1, nLocal

          rfpot = 0.0_DP
#ifdef IS_MPI
          me_i = atid_row(i)
#else
          me_i = atid(i)
#endif

          if (PropertyMap(me_i)%is_DP) then

            mu_i = PropertyMap(me_i)%dipole_moment

            !! The reaction field needs to include a self contribution
            !! to the field:
            call accumulate_self_rf(i, mu_i, u_l)
            !! Get the reaction field contribution to the
            !! potential and torques:
            call reaction_field_final(i, mu_i, u_l, rfpot, t, do_pot)
#ifdef IS_MPI
            pot_local = pot_local + rfpot
#else
            pot = pot + rfpot
#endif
          endif
        enddo
      endif
    endif

#ifdef IS_MPI

    if (do_pot) then
      pot = pot + pot_local
      !! we assume the c code will do the allreduce to get the total potential
      !! we could do it right here if we needed to...
    endif

    if (do_stress) then
      call mpi_allreduce(tau_Temp, tau, 9, mpi_double_precision, mpi_sum, &
           mpi_comm_world, mpi_err)
      call mpi_allreduce(virial_Temp, virial, 1, mpi_double_precision, mpi_sum, &
           mpi_comm_world, mpi_err)
    endif

#else

    if (do_stress) then
      tau = tau_Temp
      virial = virial_Temp
    endif

#endif

  end subroutine do_force_loop

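  !! Computes the interaction between a single pair of atoms.  The
  !! interatomic distance is recovered from rijsq, and each interaction
  !! module (LJ, charge-charge, dipole-dipole, sticky, Gay-Berne, EAM) is
  !! called only when the force field and the simulation use it and both
  !! atoms carry the corresponding property flag in the PropertyMap.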
  subroutine do_pair(i, j, rijsq, d, sw, do_pot, &
       u_l, A, f, t, pot, vpair, fpair)

    real ( kind = dp ) :: pot, vpair, sw
    real ( kind = dp ), dimension(3) :: fpair
    real ( kind = dp ), dimension(nLocal) :: mfact
    real ( kind = dp ), dimension(3, nLocal) :: u_l
    real ( kind = dp ), dimension(9, nLocal) :: A
    real ( kind = dp ), dimension(3, nLocal) :: f
    real ( kind = dp ), dimension(3, nLocal) :: t

    logical, intent(inout) :: do_pot
    integer, intent(in) :: i, j
    real ( kind = dp ), intent(inout) :: rijsq
    real ( kind = dp ) :: r
    real ( kind = dp ), intent(inout) :: d(3)
    integer :: me_i, me_j

    r = sqrt(rijsq)
    vpair = 0.0d0
    fpair(1:3) = 0.0d0

#ifdef IS_MPI
    if (AtomRowToGlobal(i) .eq. AtomColToGlobal(j)) then
      write(0,*) 'do_pair is doing', i, j, AtomRowToGlobal(i), AtomColToGlobal(j)
    endif
    me_i = atid_row(i)
    me_j = atid_col(j)
#else
    me_i = atid(i)
    me_j = atid(j)
#endif

    if (FF_uses_LJ .and. SIM_uses_LJ) then

      if ( PropertyMap(me_i)%is_LJ .and. PropertyMap(me_j)%is_LJ ) then
        !write(*,*) 'calling lj with'
        !write(*,*) i, j, r, rijsq
        !write(*,'(3es12.3)') d(1), d(2), d(3)
        !write(*,'(3es12.3)') sw, vpair, pot
        !write(*,*)

        call do_lj_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, f, do_pot)
      endif

    endif

    if (FF_uses_charges .and. SIM_uses_charges) then

      if (PropertyMap(me_i)%is_Charge .and. PropertyMap(me_j)%is_Charge) then
        call do_charge_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, f, do_pot)
      endif

    endif

    if (FF_uses_dipoles .and. SIM_uses_dipoles) then

      if ( PropertyMap(me_i)%is_DP .and. PropertyMap(me_j)%is_DP ) then
        call do_dipole_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, u_l, f, t, &
             do_pot)
        if (FF_uses_RF .and. SIM_uses_RF) then
          call accumulate_rf(i, j, r, u_l, sw)
          call rf_correct_forces(i, j, d, r, u_l, sw, f, fpair)
        endif
      endif

    endif

    if (FF_uses_Sticky .and. SIM_uses_sticky) then

      if ( PropertyMap(me_i)%is_Sticky .and. PropertyMap(me_j)%is_Sticky ) then
        call do_sticky_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, A, f, t, &
             do_pot)
      endif

    endif

    if (FF_uses_GB .and. SIM_uses_GB) then

      if ( PropertyMap(me_i)%is_GB .and. PropertyMap(me_j)%is_GB ) then
        call do_gb_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, u_l, f, t, &
             do_pot)
      endif

    endif

    if (FF_uses_EAM .and. SIM_uses_EAM) then

      if ( PropertyMap(me_i)%is_EAM .and. PropertyMap(me_j)%is_EAM ) then
        call do_eam_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, f, &
             do_pot)
      endif

    endif

  end subroutine do_pair

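  !! Prepair pass for interactions that need information accumulated over
  !! all neighbors before forces can be evaluated.  At present this only
  !! collects the EAM electron densities (calc_EAM_prepair_rho); the
  !! embedding energy itself is evaluated afterwards in do_preforce.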
  subroutine do_prepair(i, j, rijsq, d, sw, rcijsq, dc, &
       do_pot, do_stress, u_l, A, f, t, pot)

    real ( kind = dp ) :: pot, sw
    real ( kind = dp ), dimension(3, nLocal) :: u_l
    real (kind=dp), dimension(9, nLocal) :: A
    real (kind=dp), dimension(3, nLocal) :: f
    real (kind=dp), dimension(3, nLocal) :: t

    logical, intent(inout) :: do_pot, do_stress
    integer, intent(in) :: i, j
    real ( kind = dp ), intent(inout) :: rijsq, rcijsq
    real ( kind = dp ) :: r, rc
    real ( kind = dp ), intent(inout) :: d(3), dc(3)

    logical :: is_EAM_i, is_EAM_j

    integer :: me_i, me_j

    r = sqrt(rijsq)
    if (SIM_uses_molecular_cutoffs) then
      rc = sqrt(rcijsq)
    else
      rc = r
    endif

#ifdef IS_MPI
    if (AtomRowToGlobal(i) .eq. AtomColToGlobal(j)) then
      write(0,*) 'do_prepair is doing', i, j, AtomRowToGlobal(i), AtomColToGlobal(j)
    endif

    me_i = atid_row(i)
    me_j = atid_col(j)

#else

    me_i = atid(i)
    me_j = atid(j)

#endif

    if (FF_uses_EAM .and. SIM_uses_EAM) then

      if (PropertyMap(me_i)%is_EAM .and. PropertyMap(me_j)%is_EAM) &
           call calc_EAM_prepair_rho(i, j, d, r, rijsq)

    endif

  end subroutine do_prepair


  subroutine do_preforce(nlocal, pot)
    integer :: nlocal
    real ( kind = dp ) :: pot

    if (FF_uses_EAM .and. SIM_uses_EAM) then
      call calc_EAM_preforce_Frho(nlocal, pot)
    endif

  end subroutine do_preforce


  subroutine get_interatomic_vector(q_i, q_j, d, r_sq)

    real (kind = dp), dimension(3) :: q_i
    real (kind = dp), dimension(3) :: q_j
    real ( kind = dp ), intent(out) :: r_sq
    real ( kind = dp ) :: d(3), scaled(3)
    integer i

    d(1:3) = q_j(1:3) - q_i(1:3)

    ! Wrap back into periodic box if necessary
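    ! (minimum image convention: for a non-orthorhombic box the separation
    ! is converted to fractional coordinates with HmatInv, shifted to the
    ! nearest periodic image with anint, and converted back with Hmat; an
    ! orthorhombic box only needs the diagonal elements of those matrices)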
    if ( SIM_uses_PBC ) then

      if ( .not. boxIsOrthorhombic ) then
        ! calc the scaled coordinates.

        scaled = matmul(HmatInv, d)

        ! wrap the scaled coordinates

        scaled = scaled - anint(scaled)

        ! calc the wrapped real coordinates from the wrapped scaled
        ! coordinates

        d = matmul(Hmat, scaled)

      else
        ! calc the scaled coordinates.

        do i = 1, 3
          scaled(i) = d(i) * HmatInv(i,i)

          ! wrap the scaled coordinates

          scaled(i) = scaled(i) - anint(scaled(i))

          ! calc the wrapped real coordinates from the wrapped scaled
          ! coordinates

          d(i) = scaled(i)*Hmat(i,i)
        enddo
      endif

    endif

    r_sq = dot_product(d,d)

  end subroutine get_interatomic_vector

  subroutine zero_work_arrays()

#ifdef IS_MPI

    q_Row = 0.0_dp
    q_Col = 0.0_dp

    q_group_Row = 0.0_dp
    q_group_Col = 0.0_dp

    u_l_Row = 0.0_dp
    u_l_Col = 0.0_dp

    A_Row = 0.0_dp
    A_Col = 0.0_dp

    f_Row = 0.0_dp
    f_Col = 0.0_dp
    f_Temp = 0.0_dp

    t_Row = 0.0_dp
    t_Col = 0.0_dp
    t_Temp = 0.0_dp

    pot_Row = 0.0_dp
    pot_Col = 0.0_dp
    pot_Temp = 0.0_dp

    rf_Row = 0.0_dp
    rf_Col = 0.0_dp
    rf_Temp = 0.0_dp

#endif

    if (FF_uses_EAM .and. SIM_uses_EAM) then
      call clean_EAM()
    endif

    rf = 0.0_dp
    tau_Temp = 0.0_dp
    virial_Temp = 0.0_dp
  end subroutine zero_work_arrays

  function skipThisPair(atom1, atom2) result(skip_it)
    integer, intent(in) :: atom1
    integer, intent(in), optional :: atom2
    logical :: skip_it
    integer :: unique_id_1, unique_id_2
    integer :: me_i, me_j
    integer :: i

    skip_it = .false.

    !! there are a number of reasons to skip a pair or a particle
    !! mostly we do this to exclude atoms who are involved in short
    !! range interactions (bonds, bends, torsions), but we also need
    !! to exclude some overcounted interactions that result from
    !! the parallel decomposition

#ifdef IS_MPI
    !! in MPI, we have to look up the unique IDs for each atom
    unique_id_1 = AtomRowToGlobal(atom1)
#else
    !! in the normal loop, the atom numbers are unique
    unique_id_1 = atom1
#endif

    !! We were called with only one atom, so just check the global exclude
    !! list for this atom
    if (.not. present(atom2)) then
      do i = 1, nExcludes_global
        if (excludesGlobal(i) == unique_id_1) then
          skip_it = .true.
          return
        end if
      end do
      return
    end if

#ifdef IS_MPI
    unique_id_2 = AtomColToGlobal(atom2)
#else
    unique_id_2 = atom2
#endif

#ifdef IS_MPI
    !! this situation should only arise in MPI simulations
    if (unique_id_1 == unique_id_2) then
      skip_it = .true.
      return
    end if

    !! this prevents us from doing the pair on multiple processors
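    !! (the parity of the id sum decides which processor owns the pair:
    !! even sums are kept when id_1 > id_2 and odd sums when id_1 < id_2,
    !! so each i,j pair is computed exactly once across the row/column
    !! decomposition)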
    if (unique_id_1 < unique_id_2) then
      if (mod(unique_id_1 + unique_id_2, 2) == 0) then
        skip_it = .true.
        return
      endif
    else
      if (mod(unique_id_1 + unique_id_2, 2) == 1) then
        skip_it = .true.
        return
      endif
    endif
#endif

    !! the rest of these situations can happen in all simulations:
    do i = 1, nExcludes_global
      if ((excludesGlobal(i) == unique_id_1) .or. &
           (excludesGlobal(i) == unique_id_2)) then
        skip_it = .true.
        return
      endif
    enddo

    do i = 1, nSkipsForAtom(atom1)
      if (skipsForAtom(atom1, i) .eq. unique_id_2) then
        skip_it = .true.
        return
      endif
    end do

    return
  end function skipThisPair

  function FF_UsesDirectionalAtoms() result(doesit)
    logical :: doesit
    doesit = FF_uses_dipoles .or. FF_uses_sticky .or. &
         FF_uses_GB .or. FF_uses_RF
  end function FF_UsesDirectionalAtoms

  function FF_RequiresPrepairCalc() result(doesit)
    logical :: doesit
    doesit = FF_uses_EAM
  end function FF_RequiresPrepairCalc

  function FF_RequiresPostpairCalc() result(doesit)
    logical :: doesit
    doesit = FF_uses_RF
  end function FF_RequiresPostpairCalc

#ifdef PROFILE
  function getforcetime() result(totalforcetime)
    real(kind=dp) :: totalforcetime
    totalforcetime = forcetime
  end function getforcetime
#endif

  !! This cleans components of force arrays belonging only to fortran

  subroutine add_stress_tensor(dpair, fpair)

    real ( kind = dp ), dimension(3), intent(in) :: dpair, fpair

    ! because the d vector is the rj - ri vector, and
    ! because fx, fy, fz are the force on atom i, we need a
    ! negative sign here:
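    ! tau_Temp stores the 3x3 stress contribution as a flattened, row-major
    ! outer product of -dpair with fpair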
    tau_Temp(1) = tau_Temp(1) - dpair(1) * fpair(1)
    tau_Temp(2) = tau_Temp(2) - dpair(1) * fpair(2)
    tau_Temp(3) = tau_Temp(3) - dpair(1) * fpair(3)
    tau_Temp(4) = tau_Temp(4) - dpair(2) * fpair(1)
    tau_Temp(5) = tau_Temp(5) - dpair(2) * fpair(2)
    tau_Temp(6) = tau_Temp(6) - dpair(2) * fpair(3)
    tau_Temp(7) = tau_Temp(7) - dpair(3) * fpair(1)
    tau_Temp(8) = tau_Temp(8) - dpair(3) * fpair(2)
    tau_Temp(9) = tau_Temp(9) - dpair(3) * fpair(3)

    !write(*,'(6es12.3)') fpair(1:3), tau_Temp(1), tau_Temp(5), tau_temp(9)
    virial_Temp = virial_Temp + &
         (tau_Temp(1) + tau_Temp(5) + tau_Temp(9))

  end subroutine add_stress_tensor

end module do_Forces