!! do_Forces.F90
!! module do_Forces
!! Calculates Long Range forces.

!! @author Charles F. Vardeman II
!! @author Matthew Meineke
!! @version $Id: do_Forces.F90,v 1.61 2004-05-24 21:03:25 gezelter Exp $, $Date: 2004-05-24 21:03:25 $, $Name: not supported by cvs2svn $, $Revision: 1.61 $

module do_Forces
  use force_globals
  use simulation
  use definitions
  use atype_module
  use switcheroo
  use neighborLists
  use lj
  use sticky_pair
  use dipole_dipole
  use charge_charge
  use reaction_field
  use gb_pair
  use vector_class
  use eam
  use status
#ifdef IS_MPI
  use mpiSimulation
#endif

  implicit none
  PRIVATE

#define __FORTRAN90
#include "fForceField.h"
#include "fSwitchingFunction.h"

  logical, save :: haveRlist = .false.
  logical, save :: haveNeighborList = .false.
  logical, save :: havePolicies = .false.
  logical, save :: haveSIMvariables = .false.
  logical, save :: havePropertyMap = .false.
  logical, save :: haveSaneForceField = .false.
  logical, save :: FF_uses_LJ
  logical, save :: FF_uses_sticky
  logical, save :: FF_uses_charges
  logical, save :: FF_uses_dipoles
  logical, save :: FF_uses_RF
  logical, save :: FF_uses_GB
  logical, save :: FF_uses_EAM
  logical, save :: SIM_uses_LJ
  logical, save :: SIM_uses_sticky
  logical, save :: SIM_uses_charges
  logical, save :: SIM_uses_dipoles
  logical, save :: SIM_uses_RF
  logical, save :: SIM_uses_GB
  logical, save :: SIM_uses_EAM
  logical, save :: SIM_requires_postpair_calc
  logical, save :: SIM_requires_prepair_calc
  logical, save :: SIM_uses_directional_atoms
  logical, save :: SIM_uses_PBC
  logical, save :: SIM_uses_molecular_cutoffs

  real(kind=dp), save :: rlist, rlistsq

  public :: init_FF
  public :: do_force_loop
  public :: setRlistDF

#ifdef PROFILE
  public :: getforcetime
  real, save :: forceTime = 0
  real :: forceTimeInitial, forceTimeFinal
  integer :: nLoops
#endif

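  !! Cached per-atom-type interaction flags and parameters, looked up once
  !! from atype_module so the pair loops can avoid repeated property queries.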
  type :: Properties
    logical :: is_lj = .false.
    logical :: is_sticky = .false.
    logical :: is_dp = .false.
    logical :: is_gb = .false.
    logical :: is_eam = .false.
    logical :: is_charge = .false.
    real(kind=DP) :: charge = 0.0_DP
    real(kind=DP) :: dipole_moment = 0.0_DP
  end type Properties

  type(Properties), dimension(:), allocatable :: PropertyMap

contains

  subroutine setRlistDF( this_rlist )

    real(kind=dp) :: this_rlist

    rlist = this_rlist
    rlistsq = rlist * rlist

    haveRlist = .true.

  end subroutine setRlistDF

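  !! Builds the PropertyMap table: for each known atom type, queries
  !! atype_module for the is_LJ, is_Charge, is_DP, is_Sticky, is_GB and
  !! is_EAM flags (plus charge and dipole moment where relevant).
  !! Returns a non-zero status if no atom types have been defined yet.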
  subroutine createPropertyMap(status)
    integer :: nAtypes
    integer :: status
    integer :: i
    logical :: thisProperty
    real (kind=DP) :: thisDPproperty

    status = 0

    nAtypes = getSize(atypes)

    if (nAtypes == 0) then
      status = -1
      return
    end if

    if (.not. allocated(PropertyMap)) then
      allocate(PropertyMap(nAtypes))
    endif

    do i = 1, nAtypes
      call getElementProperty(atypes, i, "is_LJ", thisProperty)
      PropertyMap(i)%is_LJ = thisProperty

      call getElementProperty(atypes, i, "is_Charge", thisProperty)
      PropertyMap(i)%is_Charge = thisProperty

      if (thisProperty) then
        call getElementProperty(atypes, i, "charge", thisDPproperty)
        PropertyMap(i)%charge = thisDPproperty
      endif

      call getElementProperty(atypes, i, "is_DP", thisProperty)
      PropertyMap(i)%is_DP = thisProperty

      if (thisProperty) then
        call getElementProperty(atypes, i, "dipole_moment", thisDPproperty)
        PropertyMap(i)%dipole_moment = thisDPproperty
      endif

      call getElementProperty(atypes, i, "is_Sticky", thisProperty)
      PropertyMap(i)%is_Sticky = thisProperty
      call getElementProperty(atypes, i, "is_GB", thisProperty)
      PropertyMap(i)%is_GB = thisProperty
      call getElementProperty(atypes, i, "is_EAM", thisProperty)
      PropertyMap(i)%is_EAM = thisProperty
    end do

    havePropertyMap = .true.

  end subroutine createPropertyMap

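  !! Caches the per-simulation interaction flags (SIM_uses_*) by querying
  !! the simulation module once, so they do not have to be re-queried in
  !! the force loops.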
  subroutine setSimVariables()
    SIM_uses_LJ = SimUsesLJ()
    SIM_uses_sticky = SimUsesSticky()
    SIM_uses_charges = SimUsesCharges()
    SIM_uses_dipoles = SimUsesDipoles()
    SIM_uses_RF = SimUsesRF()
    SIM_uses_GB = SimUsesGB()
    SIM_uses_EAM = SimUsesEAM()
    SIM_requires_postpair_calc = SimRequiresPostpairCalc()
    SIM_requires_prepair_calc = SimRequiresPrepairCalc()
    SIM_uses_directional_atoms = SimUsesDirectionalAtoms()
    SIM_uses_PBC = SimUsesPBC()
    !SIM_uses_molecular_cutoffs = SimUsesMolecularCutoffs()

    haveSIMvariables = .true.

    return
  end subroutine setSimVariables

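  !! Sanity check called at the top of do_force_loop: makes sure the
  !! property map, SIM variables, cutoff radius, LJ mixing policy,
  !! neighbor list, and force field (and, under MPI, the mpiSimulation
  !! plan) have all been initialized before any forces are computed.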
  subroutine doReadyCheck(error)
    integer, intent(out) :: error

    integer :: myStatus

    error = 0

    if (.not. havePropertyMap) then

      myStatus = 0

      call createPropertyMap(myStatus)

      if (myStatus .ne. 0) then
        write(default_error, *) 'createPropertyMap failed in do_Forces!'
        error = -1
        return
      endif
    endif

    if (.not. haveSIMvariables) then
      call setSimVariables()
    endif

    if (.not. haveRlist) then
      write(default_error, *) 'rList has not been set in do_Forces!'
      error = -1
      return
    endif

    if (SIM_uses_LJ .and. FF_uses_LJ) then
      if (.not. havePolicies) then
        write(default_error, *) 'LJ mixing Policies have not been set in do_Forces!'
        error = -1
        return
      endif
    endif

    if (.not. haveNeighborList) then
      write(default_error, *) 'neighbor list has not been initialized in do_Forces!'
      error = -1
      return
    end if

    if (.not. haveSaneForceField) then
      write(default_error, *) 'Force Field is not sane in do_Forces!'
      error = -1
      return
    end if

#ifdef IS_MPI
    if (.not. isMPISimSet()) then
      write(default_error,*) "ERROR: mpiSimulation has not been initialized!"
      error = -1
      return
    endif
#endif
    return
  end subroutine doReadyCheck

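  !! Initializes the force field module. Scans the registered atom types
  !! to decide which interaction types (LJ, charge, dipole, sticky, GB,
  !! EAM) the force field uses, checks that the reaction field setting is
  !! consistent, sets up the LJ mixing policy, and makes sure a neighbor
  !! list has been allocated.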
  subroutine init_FF(LJMIXPOLICY, use_RF_c, thisStat)

    integer, intent(in) :: LJMIXPOLICY
    logical, intent(in) :: use_RF_c

    integer, intent(out) :: thisStat
    integer :: my_status, nMatches
    integer, pointer :: MatchList(:) => null()
    real(kind=dp) :: rcut, rrf, rt, dielect

    !! assume things are copacetic, unless they aren't
    thisStat = 0

    !! Fortran's version of a cast:
    FF_uses_RF = use_RF_c

    !! init_FF is called *after* all of the atom types have been
    !! defined in atype_module using the new_atype subroutine.
    !!
    !! this will scan through the known atypes and figure out what
    !! interactions are used by the force field.

    FF_uses_LJ = .false.
    FF_uses_sticky = .false.
    FF_uses_charges = .false.
    FF_uses_dipoles = .false.
    FF_uses_GB = .false.
    FF_uses_EAM = .false.

    call getMatchingElementList(atypes, "is_LJ", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_LJ = .true.

    call getMatchingElementList(atypes, "is_Charge", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_charges = .true.

    call getMatchingElementList(atypes, "is_DP", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_dipoles = .true.

    call getMatchingElementList(atypes, "is_Sticky", .true., nMatches, &
         MatchList)
    if (nMatches .gt. 0) FF_uses_Sticky = .true.

    call getMatchingElementList(atypes, "is_GB", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_GB = .true.

    call getMatchingElementList(atypes, "is_EAM", .true., nMatches, MatchList)
    if (nMatches .gt. 0) FF_uses_EAM = .true.

    !! Assume sanity (for the sake of argument)
    haveSaneForceField = .true.

    !! check to make sure the FF_uses_RF setting makes sense

    if (FF_uses_dipoles) then
      if (FF_uses_RF) then
        dielect = getDielect()
        call initialize_rf(dielect)
      endif
    else
      if (FF_uses_RF) then
        write(default_error,*) 'Using Reaction Field with no dipoles? Huh?'
        thisStat = -1
        haveSaneForceField = .false.
        return
      endif
    endif

    if (FF_uses_LJ) then

      select case (LJMIXPOLICY)
      case (LB_MIXING_RULE)
        call init_lj_FF(LB_MIXING_RULE, my_status)
      case (EXPLICIT_MIXING_RULE)
        call init_lj_FF(EXPLICIT_MIXING_RULE, my_status)
      case default
        write(default_error,*) 'unknown LJ Mixing Policy!'
        thisStat = -1
        haveSaneForceField = .false.
        return
      end select
      if (my_status /= 0) then
        thisStat = -1
        haveSaneForceField = .false.
        return
      end if
      havePolicies = .true.
    endif

    if (FF_uses_sticky) then
      call check_sticky_FF(my_status)
      if (my_status /= 0) then
        thisStat = -1
        haveSaneForceField = .false.
        return
      end if
    endif

    if (FF_uses_EAM) then
      call init_EAM_FF(my_status)
      if (my_status /= 0) then
        write(default_error, *) "init_EAM_FF returned a bad status"
        thisStat = -1
        haveSaneForceField = .false.
        return
      end if
    endif

    if (FF_uses_GB) then
      call check_gb_pair_FF(my_status)
      if (my_status .ne. 0) then
        thisStat = -1
        haveSaneForceField = .false.
        return
      endif
    endif

    if (FF_uses_GB .and. FF_uses_LJ) then
    endif
    if (.not. haveNeighborList) then
      !! Create neighbor lists
      call expandNeighborList(nLocal, my_status)
      if (my_Status /= 0) then
        write(default_error,*) "SimSetup: ExpandNeighborList returned error."
        thisStat = -1
        return
      endif
      haveNeighborList = .true.
    endif

  end subroutine init_FF


  !! Does force loop over i,j pairs. Calls do_pair to calculate forces.
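  !! The loop is organized around cutoff groups: the neighbor list is
  !! built on group centers-of-mass (q_group), a switching function is
  !! evaluated on the group-group distance, and the atom-atom pair terms
  !! inside a group pair are scaled by that switch.  An EAM prepair pass
  !! (density accumulation) runs before the main pair loop when required,
  !! and a reaction-field correction runs afterwards when required.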
  !-------------------------------------------------------------->
  subroutine do_force_loop(q, q_group, A, u_l, f, t, tau, pot, &
       do_pot_c, do_stress_c, error)
    !! Position array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3, nLocal) :: q
    !! molecular center-of-mass position array
    real ( kind = dp ), dimension(3, nGroup) :: q_group
    !! Rotation Matrix for each long range particle in simulation.
    real( kind = dp), dimension(9, nLocal) :: A
    !! Unit vectors for dipoles (lab frame)
    real( kind = dp ), dimension(3,nLocal) :: u_l
    !! Force array provided by C, dimensioned by getNlocal
    real ( kind = dp ), dimension(3,nLocal) :: f
    !! Torque array provided by C, dimensioned by getNlocal
    real( kind = dp ), dimension(3,nLocal) :: t

    !! Stress Tensor
    real( kind = dp), dimension(9) :: tau
    real ( kind = dp ) :: pot
    logical ( kind = 2) :: do_pot_c, do_stress_c
    logical :: do_pot
    logical :: do_stress
    logical :: in_switching_region
#ifdef IS_MPI
    real( kind = DP ) :: pot_local
    integer :: nrow
    integer :: ncol
    integer :: nprocs
    integer :: nrow_group
    integer :: ncol_group
#endif
    integer :: natoms
    logical :: update_nlist
    integer :: i, j, jbeg, jend, jnab
    integer :: istart, iend, jstart
    integer :: ia, jb, atom1, atom2
    integer :: nlist
    real( kind = DP ) :: ratmsq, rgrpsq, rgrp, vpair, vij
    real( kind = DP ) :: sw, dswdr, swderiv, mf
    real(kind=dp),dimension(3) :: d_atm, d_grp, fpair, fij
    real(kind=dp) :: rfpot, mu_i, virial
    integer :: me_i, me_j, n_in_i, n_in_j
    logical :: is_dp_i
    integer :: neighborListSize
    integer :: listerror, error
    integer :: localError
    integer :: propPack_i, propPack_j

    real(kind=dp) :: listSkin = 1.0

    !! initialize local variables

#ifdef IS_MPI
    pot_local = 0.0_dp
    nrow = getNrow(plan_row)
    ncol = getNcol(plan_col)
    nrow_group = getNrowGroup(plan_row)
    ncol_group = getNcolGroup(plan_col)
#else
    natoms = nlocal
#endif

    call doReadyCheck(localError)
    if ( localError .ne. 0 ) then
      call handleError("do_force_loop", "Not Initialized")
      error = -1
      return
    end if
    call zero_work_arrays()

    do_pot = do_pot_c
    do_stress = do_stress_c

    ! Gather all information needed by all force loops:

#ifdef IS_MPI

    call gather(q, q_Row, plan_row3d)
    call gather(q, q_Col, plan_col3d)

    call gather(q_group, q_group_Row, plan_row_Group_3d)
    call gather(q_group, q_group_Col, plan_col_Group_3d)

    if (FF_UsesDirectionalAtoms() .and. SIM_uses_directional_atoms) then
      call gather(u_l,u_l_Row,plan_row3d)
      call gather(u_l,u_l_Col,plan_col3d)

      call gather(A,A_Row,plan_row_rotation)
      call gather(A,A_Col,plan_col_rotation)
    endif

#endif

    !! Begin force loop timing:
#ifdef PROFILE
    call cpu_time(forceTimeInitial)
    nloops = nloops + 1
#endif

    if (FF_RequiresPrepairCalc() .and. SIM_requires_prepair_calc) then
      !! See if we need to update neighbor lists

      call checkNeighborList(nGroup, q_group, listSkin, update_nlist)

      !! if_mpi_gather_stuff_for_prepair
      !! do_prepair_loop_if_needed
      !! if_mpi_scatter_stuff_from_prepair
      !! if_mpi_gather_stuff_from_prepair_to_main_loop

      !--------------------PREFORCE LOOP----------->>>>>>>>>>>>>>>>>>>>>>>>>>>

      if (update_nlist) then

        !! save current configuration, construct neighbor list,
        !! and calculate forces

        call saveNeighborList(nGroup, q_group)

        neighborListSize = size(list)
        nlist = 0

        istart = 1
#ifdef IS_MPI
        iend = nrow_group
#else
        iend = nGroup - 1
#endif
        do i = istart, iend

          point(i) = nlist + 1

          n_in_i = groupStart(i+1) - groupStart(i)

#ifdef IS_MPI
          jstart = 1
          jend = ncol_group
#else
          jstart = i+1
          jend = nGroup
#endif
          do j = jstart, jend

#ifdef IS_MPI
            call get_interatomic_vector(q_group_Row(:,i), &
                 q_group_Col(:,j), d_grp, rgrpsq)
#else
            call get_interatomic_vector(q_group(:,i), &
                 q_group(:,j), d_grp, rgrpsq)
#endif
            if (rgrpsq < rlistsq) then
              nlist = nlist + 1

              if (nlist > neighborListSize) then
                call expandNeighborList(nGroup, listerror)
                if (listerror /= 0) then
                  error = -1
                  write(DEFAULT_ERROR,*) "ERROR: nlist > list size and max allocations exceeded."
                  return
                end if
                neighborListSize = size(list)
              endif

              list(nlist) = j

              call get_switch(rgrpsq, sw, dswdr, rgrp, group_switch, &
                   in_switching_region)

              n_in_j = groupStart(j+1) - groupStart(j)

              do ia = groupStart(i), groupStart(i+1)-1
                atom1 = groupList(ia)

                prepair_inner1: do jb = groupStart(j), groupStart(j+1)-1
                  atom2 = groupList(jb)

                  if (skipThisPair(atom1, atom2)) cycle prepair_inner1

                  if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
                    d_atm(1:3) = d_grp(1:3)
                    ratmsq = rgrpsq
                  else
#ifdef IS_MPI
                    call get_interatomic_vector(q_Row(:,atom1), &
                         q_Col(:,atom2), d_atm, ratmsq)
#else
                    call get_interatomic_vector(q(:,atom1), &
                         q(:,atom2), d_atm, ratmsq)
#endif
                  endif
#ifdef IS_MPI
                  call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                       rgrpsq, d_grp, do_pot, do_stress, &
                       u_l, A, f, t, pot_local)
#else
                  call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                       rgrpsq, d_grp, do_pot, do_stress, &
                       u_l, A, f, t, pot)
#endif
                enddo prepair_inner1
              enddo

            end if
          enddo
        enddo

#ifdef IS_MPI
        point(nrow_group + 1) = nlist + 1
#else
        point(nGroup) = nlist + 1
#endif

      else !! (of update_check)

        ! use the list to find the neighbors

        istart = 1
#ifdef IS_MPI
        iend = nrow_group
#else
        iend = nGroup - 1
#endif

        do i = istart, iend

          n_in_i = groupStart(i+1) - groupStart(i)

          JBEG = POINT(i)
          JEND = POINT(i+1) - 1
          ! check that group i has neighbors
          if (jbeg .le. jend) then

            do jnab = jbeg, jend
              j = list(jnab)

#ifdef IS_MPI
              call get_interatomic_vector(q_group_Row(:,i), &
                   q_group_Col(:,j), d_grp, rgrpsq)
#else
              call get_interatomic_vector(q_group(:,i), &
                   q_group(:,j), d_grp, rgrpsq)
#endif
              call get_switch(rgrpsq, sw, dswdr, rgrp, group_switch, &
                   in_switching_region)

              n_in_j = groupStart(j+1) - groupStart(j)

              do ia = groupStart(i), groupStart(i+1)-1
                atom1 = groupList(ia)

                prepair_inner2: do jb = groupStart(j), groupStart(j+1)-1

                  atom2 = groupList(jb)

                  if (skipThisPair(atom1, atom2)) cycle prepair_inner2

                  if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
                    d_atm(1:3) = d_grp(1:3)
                    ratmsq = rgrpsq
                  else
#ifdef IS_MPI
                    call get_interatomic_vector(q_Row(:,atom1), &
                         q_Col(:,atom2), d_atm, ratmsq)
#else
                    call get_interatomic_vector(q(:,atom1), &
                         q(:,atom2), d_atm, ratmsq)
#endif
                  endif

#ifdef IS_MPI
                  call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                       rgrpsq, d_grp, do_pot, do_stress, &
                       u_l, A, f, t, pot_local)
#else
                  call do_prepair(atom1, atom2, ratmsq, d_atm, sw, &
                       rgrpsq, d_grp, do_pot, do_stress, &
                       u_l, A, f, t, pot)
#endif
                enddo prepair_inner2
              enddo
            enddo
          endif
        enddo
      endif
      !! do the rest of the necessary preforce calculations
      call do_preforce(nlocal,pot)
      ! we have already updated the neighbor list, so set it to false...
      update_nlist = .false.
    else
      !! See if we need to update neighbor lists for the non-prepair case
      call checkNeighborList(nGroup, q_group, listSkin, update_nlist)
    end if

    !---------------------------------MAIN Pair LOOP->>>>>>>>>>>>>

    if (update_nlist) then

      !! save current configuration, construct neighbor list,
      !! and calculate forces

      call saveNeighborList(nGroup, q_group)

      neighborListSize = size(list)
      nlist = 0

      istart = 1
#ifdef IS_MPI
      iend = nrow_group
#else
      iend = nGroup - 1
#endif
      do i = istart, iend

        point(i) = nlist + 1

        n_in_i = groupStart(i+1) - groupStart(i)

#ifdef IS_MPI
        jstart = 1
        jend = ncol_group
#else
        jstart = i+1
        jend = nGroup
#endif
        do j = jstart, jend

#ifdef IS_MPI
          call get_interatomic_vector(q_group_Row(:,i), &
               q_group_Col(:,j), d_grp, rgrpsq)
#else
          call get_interatomic_vector(q_group(:,i), &
               q_group(:,j), d_grp, rgrpsq)
#endif
          if (rgrpsq < rlistsq) then
            nlist = nlist + 1

            if (nlist > neighborListSize) then
              call expandNeighborList(nGroup, listerror)
              if (listerror /= 0) then
                error = -1
                write(DEFAULT_ERROR,*) "ERROR: nlist > list size and max allocations exceeded."
                return
              end if
              neighborListSize = size(list)
            endif

            list(nlist) = j

            vij = 0.0d0
            fij(1:3) = 0.0d0

            call get_switch(rgrpsq, sw, dswdr, rgrp, group_switch, &
                 in_switching_region)

            n_in_j = groupStart(j+1) - groupStart(j)

            do ia = groupStart(i), groupStart(i+1)-1
              atom1 = groupList(ia)

              inner1: do jb = groupStart(j), groupStart(j+1)-1
                atom2 = groupList(jb)

                if (skipThisPair(atom1, atom2)) cycle inner1

                if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
                  d_atm(1:3) = d_grp(1:3)
                  ratmsq = rgrpsq
                else
#ifdef IS_MPI
                  call get_interatomic_vector(q_Row(:,atom1), &
                       q_Col(:,atom2), d_atm, ratmsq)
#else
                  call get_interatomic_vector(q(:,atom1), &
                       q(:,atom2), d_atm, ratmsq)
#endif
                endif
#ifdef IS_MPI
                call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                     do_pot, &
                     u_l, A, f, t, pot_local, vpair, fpair)
#else
                call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                     do_pot, &
                     u_l, A, f, t, pot, vpair, fpair)
#endif
                vij = vij + vpair
                fij(1:3) = fij(1:3) + fpair(1:3)

              enddo inner1
            enddo

            if (in_switching_region) then
              swderiv = vij*dswdr/rgrp
              fij(1) = fij(1) + swderiv*d_grp(1)
              fij(2) = fij(2) + swderiv*d_grp(2)
              fij(3) = fij(3) + swderiv*d_grp(3)

              do ia=groupStart(i), groupStart(i+1)-1
                atom1=groupList(ia)
                mf = mfact(atom1)
#ifdef IS_MPI
                f_Row(1,atom1) = f_Row(1,atom1) + swderiv*d_grp(1)*mf
                f_Row(2,atom1) = f_Row(2,atom1) + swderiv*d_grp(2)*mf
                f_Row(3,atom1) = f_Row(3,atom1) + swderiv*d_grp(3)*mf
#else
                f(1,atom1) = f(1,atom1) + swderiv*d_grp(1)*mf
                f(2,atom1) = f(2,atom1) + swderiv*d_grp(2)*mf
                f(3,atom1) = f(3,atom1) + swderiv*d_grp(3)*mf
#endif
              enddo

              do jb=groupStart(j), groupStart(j+1)-1
                atom2=groupList(jb)
                mf = mfact(atom2)
#ifdef IS_MPI
                f_Col(1,atom2) = f_Col(1,atom2) - swderiv*d_grp(1)*mf
                f_Col(2,atom2) = f_Col(2,atom2) - swderiv*d_grp(2)*mf
                f_Col(3,atom2) = f_Col(3,atom2) - swderiv*d_grp(3)*mf
#else
                f(1,atom2) = f(1,atom2) - swderiv*d_grp(1)*mf
                f(2,atom2) = f(2,atom2) - swderiv*d_grp(2)*mf
                f(3,atom2) = f(3,atom2) - swderiv*d_grp(3)*mf
#endif
              enddo
            endif

            if (do_stress) call add_stress_tensor(d_grp, fij)

          end if
        enddo
      enddo

#ifdef IS_MPI
      point(nrow_group + 1) = nlist + 1
#else
      point(nGroup) = nlist + 1
#endif

    else !! (of update_check)

      ! use the list to find the neighbors

      istart = 1
#ifdef IS_MPI
      iend = nrow_group
#else
      iend = nGroup - 1
#endif

      do i = istart, iend

        n_in_i = groupStart(i+1) - groupStart(i)

        JBEG = POINT(i)
        JEND = POINT(i+1) - 1
        ! check that group i has neighbors
        if (jbeg .le. jend) then

          do jnab = jbeg, jend
            j = list(jnab)

#ifdef IS_MPI
            call get_interatomic_vector(q_group_Row(:,i), &
                 q_group_Col(:,j), d_grp, rgrpsq)
#else
            call get_interatomic_vector(q_group(:,i), &
                 q_group(:,j), d_grp, rgrpsq)
#endif
            vij = 0.0d0
            fij(1:3) = 0.0d0

            call get_switch(rgrpsq, sw, dswdr, rgrp, group_switch, &
                 in_switching_region)

            n_in_j = groupStart(j+1) - groupStart(j)

            do ia = groupStart(i), groupStart(i+1)-1
              atom1 = groupList(ia)

              inner2: do jb = groupStart(j), groupStart(j+1)-1

                atom2 = groupList(jb)

                if (skipThisPair(atom1, atom2)) cycle inner2

                if ((n_in_i .eq. 1).and.(n_in_j .eq. 1)) then
                  d_atm(1:3) = d_grp(1:3)
                  ratmsq = rgrpsq
                else
#ifdef IS_MPI
                  call get_interatomic_vector(q_Row(:,atom1), &
                       q_Col(:,atom2), d_atm, ratmsq)
#else
                  call get_interatomic_vector(q(:,atom1), &
                       q(:,atom2), d_atm, ratmsq)
#endif
                endif
#ifdef IS_MPI
                call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                     do_pot, &
                     u_l, A, f, t, pot_local, vpair, fpair)
#else
                call do_pair(atom1, atom2, ratmsq, d_atm, sw, &
                     do_pot, &
                     u_l, A, f, t, pot, vpair, fpair)
#endif
                vij = vij + vpair
                fij(1:3) = fij(1:3) + fpair(1:3)

              enddo inner2
            enddo

            if (in_switching_region) then
              swderiv = vij*dswdr/rgrp
              fij(1) = fij(1) + swderiv*d_grp(1)
              fij(2) = fij(2) + swderiv*d_grp(2)
              fij(3) = fij(3) + swderiv*d_grp(3)

              do ia=groupStart(i), groupStart(i+1)-1
                atom1=groupList(ia)
                mf = mfact(atom1)
#ifdef IS_MPI
                f_Row(1,atom1) = f_Row(1,atom1) + swderiv*d_grp(1)*mf
                f_Row(2,atom1) = f_Row(2,atom1) + swderiv*d_grp(2)*mf
                f_Row(3,atom1) = f_Row(3,atom1) + swderiv*d_grp(3)*mf
#else
                f(1,atom1) = f(1,atom1) + swderiv*d_grp(1)*mf
                f(2,atom1) = f(2,atom1) + swderiv*d_grp(2)*mf
                f(3,atom1) = f(3,atom1) + swderiv*d_grp(3)*mf
#endif
              enddo

              do jb=groupStart(j), groupStart(j+1)-1
                atom2=groupList(jb)
                mf = mfact(atom2)
#ifdef IS_MPI
                f_Col(1,atom2) = f_Col(1,atom2) - swderiv*d_grp(1)*mf
                f_Col(2,atom2) = f_Col(2,atom2) - swderiv*d_grp(2)*mf
                f_Col(3,atom2) = f_Col(3,atom2) - swderiv*d_grp(3)*mf
#else
                f(1,atom2) = f(1,atom2) - swderiv*d_grp(1)*mf
                f(2,atom2) = f(2,atom2) - swderiv*d_grp(2)*mf
                f(3,atom2) = f(3,atom2) - swderiv*d_grp(3)*mf
#endif
              enddo
            endif

            if (do_stress) call add_stress_tensor(d_grp, fij)

          enddo
        endif
      enddo
    endif

    ! phew, done with main loop.

    !! Do timing
#ifdef PROFILE
    call cpu_time(forceTimeFinal)
    forceTime = forceTime + forceTimeFinal - forceTimeInitial
#endif

#ifdef IS_MPI
    !! distribute forces

    f_temp = 0.0_dp
    call scatter(f_Row,f_temp,plan_row3d)
    do i = 1,nlocal
      f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
    end do

    f_temp = 0.0_dp
    call scatter(f_Col,f_temp,plan_col3d)
    do i = 1,nlocal
      f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
    end do

    if (FF_UsesDirectionalAtoms() .and. SIM_uses_directional_atoms) then
      t_temp = 0.0_dp
      call scatter(t_Row,t_temp,plan_row3d)
      do i = 1,nlocal
        t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
      end do
      t_temp = 0.0_dp
      call scatter(t_Col,t_temp,plan_col3d)

      do i = 1,nlocal
        t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
      end do
    endif

    if (do_pot) then
      ! scatter/gather pot_row into the members of my column
      call scatter(pot_Row, pot_Temp, plan_row)

      ! scatter/gather pot_local into all other procs
      ! add resultant to get total pot
      do i = 1, nlocal
        pot_local = pot_local + pot_Temp(i)
      enddo

      pot_Temp = 0.0_DP

      call scatter(pot_Col, pot_Temp, plan_col)
      do i = 1, nlocal
        pot_local = pot_local + pot_Temp(i)
      enddo

    endif
#endif

    if (FF_RequiresPostpairCalc() .and. SIM_requires_postpair_calc) then

      if (FF_uses_RF .and. SIM_uses_RF) then

#ifdef IS_MPI
        call scatter(rf_Row,rf,plan_row3d)
        call scatter(rf_Col,rf_Temp,plan_col3d)
        do i = 1,nlocal
          rf(1:3,i) = rf(1:3,i) + rf_Temp(1:3,i)
        end do
#endif

        do i = 1, nLocal

          rfpot = 0.0_DP
#ifdef IS_MPI
          me_i = atid_row(i)
#else
          me_i = atid(i)
#endif

          if (PropertyMap(me_i)%is_DP) then

            mu_i = PropertyMap(me_i)%dipole_moment

            !! The reaction field needs to include a self contribution
            !! to the field:
            call accumulate_self_rf(i, mu_i, u_l)
            !! Get the reaction field contribution to the
            !! potential and torques:
            call reaction_field_final(i, mu_i, u_l, rfpot, t, do_pot)
#ifdef IS_MPI
            pot_local = pot_local + rfpot
#else
            pot = pot + rfpot

#endif
          endif
        enddo
      endif
    endif


#ifdef IS_MPI

    if (do_pot) then
      pot = pot + pot_local
      !! we assume the c code will do the allreduce to get the total potential
      !! we could do it right here if we needed to...
    endif

    if (do_stress) then
      call mpi_allreduce(tau_Temp, tau, 9,mpi_double_precision,mpi_sum, &
           mpi_comm_world,mpi_err)
      call mpi_allreduce(virial_Temp, virial,1,mpi_double_precision,mpi_sum, &
           mpi_comm_world,mpi_err)
    endif

#else

    if (do_stress) then
      tau = tau_Temp
      virial = virial_Temp
    endif

#endif


  end subroutine do_force_loop

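  !! Dispatches a single atom-atom interaction to the appropriate pair
  !! routine (LJ, charge-charge, dipole-dipole, sticky, Gay-Berne, EAM)
  !! based on the PropertyMap flags of the two atom types, accumulating
  !! the pair potential, pair force, and torques.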
  subroutine do_pair(i, j, rijsq, d, sw, do_pot, &
       u_l, A, f, t, pot, vpair, fpair)

    real( kind = dp ) :: pot, vpair, sw
    real( kind = dp ), dimension(3) :: fpair
    real( kind = dp ), dimension(nLocal) :: mfact
    real( kind = dp ), dimension(3,nLocal) :: u_l
    real( kind = dp ), dimension(9,nLocal) :: A
    real( kind = dp ), dimension(3,nLocal) :: f
    real( kind = dp ), dimension(3,nLocal) :: t

    logical, intent(inout) :: do_pot
    integer, intent(in) :: i, j
    real ( kind = dp ), intent(inout) :: rijsq
    real ( kind = dp ) :: r
    real ( kind = dp ), intent(inout) :: d(3)
    integer :: me_i, me_j

    r = sqrt(rijsq)
    vpair = 0.0d0
    fpair(1:3) = 0.0d0

#ifdef IS_MPI
    if (tagRow(i) .eq. tagColumn(j)) then
      write(0,*) 'do_pair is doing', i , j, tagRow(i), tagColumn(j)
    endif
    me_i = atid_row(i)
    me_j = atid_col(j)
#else
    me_i = atid(i)
    me_j = atid(j)
#endif

    if (FF_uses_LJ .and. SIM_uses_LJ) then

      if ( PropertyMap(me_i)%is_LJ .and. PropertyMap(me_j)%is_LJ ) then
        !write(*,*) 'calling lj with'
        !write(*,*) i, j, r, rijsq
        !write(*,'(3es12.3)') d(1), d(2), d(3)
        !write(*,'(3es12.3)') sw, vpair, pot
        !write(*,*)

        call do_lj_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, f, do_pot)
      endif

    endif

    if (FF_uses_charges .and. SIM_uses_charges) then

      if (PropertyMap(me_i)%is_Charge .and. PropertyMap(me_j)%is_Charge) then
        call do_charge_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, f, do_pot)
      endif

    endif

    if (FF_uses_dipoles .and. SIM_uses_dipoles) then

      if ( PropertyMap(me_i)%is_DP .and. PropertyMap(me_j)%is_DP) then
        call do_dipole_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, u_l, f, t, &
             do_pot)
        if (FF_uses_RF .and. SIM_uses_RF) then
          call accumulate_rf(i, j, r, u_l, sw)
          call rf_correct_forces(i, j, d, r, u_l, sw, f, fpair)
        endif
      endif

    endif

    if (FF_uses_Sticky .and. SIM_uses_sticky) then

      if ( PropertyMap(me_i)%is_Sticky .and. PropertyMap(me_j)%is_Sticky) then
        call do_sticky_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, A, f, t, &
             do_pot)
      endif

    endif


    if (FF_uses_GB .and. SIM_uses_GB) then

      if ( PropertyMap(me_i)%is_GB .and. PropertyMap(me_j)%is_GB) then
        call do_gb_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, u_l, f, t, &
             do_pot)
      endif

    endif

    if (FF_uses_EAM .and. SIM_uses_EAM) then

      if ( PropertyMap(me_i)%is_EAM .and. PropertyMap(me_j)%is_EAM) then
        call do_eam_pair(i, j, d, r, rijsq, sw, vpair, fpair, pot, f, &
             do_pot)
      endif

    endif

  end subroutine do_pair

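  !! Pre-pair pass over an atom pair.  Currently this only accumulates the
  !! EAM electron density (calc_EAM_prepair_rho) for pairs in which both
  !! atom types are EAM types; other interactions need no pre-pair work.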
  subroutine do_prepair(i, j, rijsq, d, sw, rcijsq, dc, &
       do_pot, do_stress, u_l, A, f, t, pot)

    real( kind = dp ) :: pot, sw
    real( kind = dp ), dimension(3,nLocal) :: u_l
    real (kind=dp), dimension(9,nLocal) :: A
    real (kind=dp), dimension(3,nLocal) :: f
    real (kind=dp), dimension(3,nLocal) :: t

    logical, intent(inout) :: do_pot, do_stress
    integer, intent(in) :: i, j
    real ( kind = dp ), intent(inout) :: rijsq, rcijsq
    real ( kind = dp ) :: r, rc
    real ( kind = dp ), intent(inout) :: d(3), dc(3)

    logical :: is_EAM_i, is_EAM_j

    integer :: me_i, me_j

    r = sqrt(rijsq)
    if (SIM_uses_molecular_cutoffs) then
      rc = sqrt(rcijsq)
    else
      rc = r
    endif

#ifdef IS_MPI
    if (tagRow(i) .eq. tagColumn(j)) then
      write(0,*) 'do_prepair is doing', i , j, tagRow(i), tagColumn(j)
    endif

    me_i = atid_row(i)
    me_j = atid_col(j)

#else

    me_i = atid(i)
    me_j = atid(j)

#endif

    if (FF_uses_EAM .and. SIM_uses_EAM) then

      if (PropertyMap(me_i)%is_EAM .and. PropertyMap(me_j)%is_EAM) &
           call calc_EAM_prepair_rho(i, j, d, r, rijsq )

    endif

  end subroutine do_prepair

  subroutine do_preforce(nlocal,pot)
    integer :: nlocal
    real( kind = dp ) :: pot

    if (FF_uses_EAM .and. SIM_uses_EAM) then
      call calc_EAM_preforce_Frho(nlocal,pot)
    endif

  end subroutine do_preforce

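  !! Computes the minimum-image separation vector d = q_j - q_i and its
  !! squared length.  Under periodic boundary conditions the vector is
  !! wrapped through the full H matrix for non-orthorhombic boxes, or
  !! component-by-component for orthorhombic ones.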
  subroutine get_interatomic_vector(q_i, q_j, d, r_sq)

    real (kind = dp), dimension(3) :: q_i
    real (kind = dp), dimension(3) :: q_j
    real ( kind = dp ), intent(out) :: r_sq
    real( kind = dp ) :: d(3), scaled(3)
    integer i

    d(1:3) = q_j(1:3) - q_i(1:3)

    ! Wrap back into periodic box if necessary
    if ( SIM_uses_PBC ) then

      if( .not.boxIsOrthorhombic ) then
        ! calc the scaled coordinates.

        scaled = matmul(HmatInv, d)

        ! wrap the scaled coordinates

        scaled = scaled - anint(scaled)

        ! calc the wrapped real coordinates from the wrapped scaled
        ! coordinates

        d = matmul(Hmat,scaled)

      else
        ! calc the scaled coordinates.

        do i = 1, 3
          scaled(i) = d(i) * HmatInv(i,i)

          ! wrap the scaled coordinates

          scaled(i) = scaled(i) - anint(scaled(i))

          ! calc the wrapped real coordinates from the wrapped scaled
          ! coordinates

          d(i) = scaled(i)*Hmat(i,i)
        enddo
      endif

    endif

    r_sq = dot_product(d,d)

  end subroutine get_interatomic_vector

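  !! Zeroes the per-call accumulators (and, under MPI, the row/column
  !! work arrays) before a force evaluation, and clears the EAM density
  !! arrays when EAM is in use.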
  subroutine zero_work_arrays()

#ifdef IS_MPI

    q_Row = 0.0_dp
    q_Col = 0.0_dp

    q_group_Row = 0.0_dp
    q_group_Col = 0.0_dp

    u_l_Row = 0.0_dp
    u_l_Col = 0.0_dp

    A_Row = 0.0_dp
    A_Col = 0.0_dp

    f_Row = 0.0_dp
    f_Col = 0.0_dp
    f_Temp = 0.0_dp

    t_Row = 0.0_dp
    t_Col = 0.0_dp
    t_Temp = 0.0_dp

    pot_Row = 0.0_dp
    pot_Col = 0.0_dp
    pot_Temp = 0.0_dp

    rf_Row = 0.0_dp
    rf_Col = 0.0_dp
    rf_Temp = 0.0_dp

#endif

    if (FF_uses_EAM .and. SIM_uses_EAM) then
      call clean_EAM()
    endif

    rf = 0.0_dp
    tau_Temp = 0.0_dp
    virial_Temp = 0.0_dp
  end subroutine zero_work_arrays

  function skipThisPair(atom1, atom2) result(skip_it)
    integer, intent(in) :: atom1
    integer, intent(in), optional :: atom2
    logical :: skip_it
    integer :: unique_id_1, unique_id_2
    integer :: me_i,me_j
    integer :: i

    skip_it = .false.

    !! there are a number of reasons to skip a pair or a particle
    !! mostly we do this to exclude atoms that are involved in short
    !! range interactions (bonds, bends, torsions), but we also need
    !! to exclude some overcounted interactions that result from
    !! the parallel decomposition

#ifdef IS_MPI
    !! in MPI, we have to look up the unique IDs for each atom
    unique_id_1 = tagRow(atom1)
#else
    !! in the normal loop, the atom numbers are unique
    unique_id_1 = atom1
#endif

    !! We were called with only one atom, so just check the global exclude
    !! list for this atom
    if (.not. present(atom2)) then
      do i = 1, nExcludes_global
        if (excludesGlobal(i) == unique_id_1) then
          skip_it = .true.
          return
        end if
      end do
      return
    end if

#ifdef IS_MPI
    unique_id_2 = tagColumn(atom2)
#else
    unique_id_2 = atom2
#endif

#ifdef IS_MPI
    !! this situation should only arise in MPI simulations
    if (unique_id_1 == unique_id_2) then
      skip_it = .true.
      return
    end if

    !! this prevents us from doing the pair on multiple processors
    if (unique_id_1 < unique_id_2) then
      if (mod(unique_id_1 + unique_id_2,2) == 0) then
        skip_it = .true.
        return
      endif
    else
      if (mod(unique_id_1 + unique_id_2,2) == 1) then
        skip_it = .true.
        return
      endif
    endif
#endif

    !! the rest of these situations can happen in all simulations:
    do i = 1, nExcludes_global
      if ((excludesGlobal(i) == unique_id_1) .or. &
           (excludesGlobal(i) == unique_id_2)) then
        skip_it = .true.
        return
      endif
    enddo

    do i = 1, nSkipsForAtom(unique_id_1)
      if (skipsForAtom(unique_id_1, i) .eq. unique_id_2) then
        skip_it = .true.
        return
      endif
    end do

    return
  end function skipThisPair

  function FF_UsesDirectionalAtoms() result(doesit)
    logical :: doesit
    doesit = FF_uses_dipoles .or. FF_uses_sticky .or. &
         FF_uses_GB .or. FF_uses_RF
  end function FF_UsesDirectionalAtoms

  function FF_RequiresPrepairCalc() result(doesit)
    logical :: doesit
    doesit = FF_uses_EAM
  end function FF_RequiresPrepairCalc

  function FF_RequiresPostpairCalc() result(doesit)
    logical :: doesit
    doesit = FF_uses_RF
  end function FF_RequiresPostpairCalc

#ifdef PROFILE
  function getforcetime() result(totalforcetime)
    real(kind=dp) :: totalforcetime
    totalforcetime = forcetime
  end function getforcetime
#endif

  !! This cleans components of force arrays belonging only to fortran

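  !! Accumulates one pair's contribution to the stress tensor (stored
  !! row-major in the 9-element tau_Temp) and to the running virial.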
  subroutine add_stress_tensor(dpair, fpair)

    real( kind = dp ), dimension(3), intent(in) :: dpair, fpair

    ! because the d vector is the rj - ri vector, and
    ! because fx, fy, fz are the force on atom i, we need a
    ! negative sign here:

    tau_Temp(1) = tau_Temp(1) - dpair(1) * fpair(1)
    tau_Temp(2) = tau_Temp(2) - dpair(1) * fpair(2)
    tau_Temp(3) = tau_Temp(3) - dpair(1) * fpair(3)
    tau_Temp(4) = tau_Temp(4) - dpair(2) * fpair(1)
    tau_Temp(5) = tau_Temp(5) - dpair(2) * fpair(2)
    tau_Temp(6) = tau_Temp(6) - dpair(2) * fpair(3)
    tau_Temp(7) = tau_Temp(7) - dpair(3) * fpair(1)
    tau_Temp(8) = tau_Temp(8) - dpair(3) * fpair(2)
    tau_Temp(9) = tau_Temp(9) - dpair(3) * fpair(3)

    !write(*,'(6es12.3)') fpair(1:3), tau_Temp(1), tau_Temp(5), tau_temp(9)
    virial_Temp = virial_Temp + &
         (tau_Temp(1) + tau_Temp(5) + tau_Temp(9))

  end subroutine add_stress_tensor

end module do_Forces