   4
   5    !! @author Charles F. Vardeman II
   6    !! @author Matthew Meineke
   7 <  !! @version $Id: do_Forces.F90,v 1.6 2003-03-31 22:09:39 chuckv Exp $, $Date: 2003-03-31 22:09:39 $, $Name: not supported by cvs2svn $, $Revision: 1.6 $
   7 >  !! @version $Id: do_Forces.F90,v 1.21 2003-07-16 21:30:55 mmeineke Exp $, $Date: 2003-07-16 21:30:55 $, $Name: not supported by cvs2svn $, $Revision: 1.21 $
   8
   9    module do_Forces
  10    use force_globals
  17    use dipole_dipole
  18    use reaction_field
  19    use gb_pair
  20 +  use vector_class
  21    #ifdef IS_MPI
  22    use mpiSimulation
  23    #endif
  28    #define __FORTRAN90
  29    #include "fForceField.h"
  30
  31 <  logical, save :: do_forces_initialized = .false.
  31 >  logical, save :: do_forces_initialized = .false., haveRlist = .false.
  32 >  logical, save :: havePolicies = .false.
  33    logical, save :: FF_uses_LJ
  34    logical, save :: FF_uses_sticky
  35    logical, save :: FF_uses_dipoles
  37    logical, save :: FF_uses_GB
  38    logical, save :: FF_uses_EAM
  39
  40 +  real(kind=dp), save :: rlist, rlistsq
  41 +
  42    public :: init_FF
  43    public :: do_force_loop
  44 +  public :: setRlistDF
  45
  46    contains
  47
  48 +  subroutine setRlistDF( this_rlist )
  49 +
  50 +  real(kind=dp) :: this_rlist
  51 +
  52 +  rlist = this_rlist
  53 +  rlistsq = rlist * rlist
  54 +
  55 +  haveRlist = .true.
  56 +  if( havePolicies ) do_forces_initialized = .true.
  57 +
  58 +  end subroutine setRlistDF
  59 +
  60    subroutine init_FF(LJMIXPOLICY, use_RF_c, thisStat)
  61
  62    integer, intent(in) :: LJMIXPOLICY
 104    !! check to make sure the FF_uses_RF setting makes sense
 105
 106    if (FF_uses_dipoles) then
  90 -  rrf = getRrf()
  91 -  rt = getRt()
  92 -  call initialize_dipole(rrf, rt)
 107    if (FF_uses_RF) then
 108    dielect = getDielect()
 109 <  call initialize_rf(rrf, rt, dielect)
 109 >  call initialize_rf(dielect)
 110    endif
 111    else
 112    if (FF_uses_RF) then
 114    thisStat = -1
 115    return
 116    endif
 117 <  endif
 117 >  endif
 118
 119    if (FF_uses_LJ) then
 120
 107 -  call getRcut(rcut)
 108 -
 121    select case (LJMIXPOLICY)
 122    case (LB_MIXING_RULE)
 123 <  call init_lj_FF(LB_MIXING_RULE, rcut, my_status)
 123 >  call init_lj_FF(LB_MIXING_RULE, my_status)
 124    case (EXPLICIT_MIXING_RULE)
 125 <  call init_lj_FF(EXPLICIT_MIXING_RULE, rcut, my_status)
 125 >  call init_lj_FF(EXPLICIT_MIXING_RULE, my_status)
 126    case default
 127    write(default_error,*) 'unknown LJ Mixing Policy!'
 128    thisStat = -1
 152
 153    if (FF_uses_GB .and. FF_uses_LJ) then
 154    endif
 155 +  if (.not. do_forces_initialized) then
 156 +  !! Create neighbor lists
 157 +  call expandNeighborList(getNlocal(), my_status)
 158 +  if (my_Status /= 0) then
 159 +  write(default_error,*) "SimSetup: ExpandNeighborList returned error."
 160 +  thisStat = -1
 161 +  return
 162 +  endif
 163 +  endif
 164
 165 <
 166 <  do_forces_initialized = .true.
 167 <
 165 >  havePolicies = .true.
 166 >  if( haveRlist ) do_forces_initialized = .true.
 167 >
 168    end subroutine init_FF
 169
 170
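
Note on this hunk: init_FF no longer sets do_forces_initialized unconditionally. The cutoff bookkeeping (rlist, rlistsq) now arrives through the new setRlistDF entry point, and the module is only marked initialized once both the mixing policies (havePolicies) and the list radius (haveRlist) have been supplied, in either order. A minimal driver-side sketch (variable names and the literal cutoff value are illustrative, not taken from this diff):

        call init_FF(LB_MIXING_RULE, use_RF, status)   ! sets havePolicies
        call setRlistDF(11.0_dp)                       ! sets rlist, rlistsq, haveRlist
        ! do_forces_initialized becomes .true. only after both calls have happened
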
 189    logical :: do_pot
 190    logical :: do_stress
 191    #ifdef IS_MPI
 192 <  real( kind = DP ) :: pot_local = 0.0_dp
 192 >  real( kind = DP ) :: pot_local
 193    integer :: nrow
 194    integer :: ncol
 195    #endif
 198    logical :: update_nlist
 199    integer :: i, j, jbeg, jend, jnab
 200    integer :: nlist
 201 <  real( kind = DP ) :: rijsq, rlistsq, rcutsq, rlist, rcut
 201 >  real( kind = DP ) :: rijsq
 202    real(kind=dp),dimension(3) :: d
 203    real(kind=dp) :: rfpot, mu_i, virial
 204    integer :: me_i
 207    integer :: listerror, error
 208    integer :: localError
 209
 210 +  real(kind=dp) :: listSkin = 1.0
 211 +
 212 +
 213    !! initialize local variables
 214
 215    #ifdef IS_MPI
 216 +  pot_local = 0.0_dp
 217    nlocal = getNlocal()
 218    nrow = getNrow(plan_row)
 219    ncol = getNcol(plan_col)
 221    nlocal = getNlocal()
 222    natoms = nlocal
 223    #endif
 224 <
 200 <  call getRcut(rcut,rc2=rcutsq)
 201 <  call getRlist(rlist,rlistsq)
 202 <
 224 >
 225    call check_initialization(localError)
 226    if ( localError .ne. 0 ) then
 227    error = -1
 251
 252    if (FF_RequiresPrepairCalc() .and. SimRequiresPrepairCalc()) then
 253    !! See if we need to update neighbor lists
 254 <  call checkNeighborList(nlocal, q, rcut, rlist, update_nlist)
 254 >  call checkNeighborList(nlocal, q, listSkin, update_nlist)
 255    !! if_mpi_gather_stuff_for_prepair
 256    !! do_prepair_loop_if_needed
 257    !! if_mpi_scatter_stuff_from_prepair
 258    !! if_mpi_gather_stuff_from_prepair_to_main_loop
 259    else
 260    !! See if we need to update neighbor lists
 261 <  call checkNeighborList(nlocal, q, rcut, rlist, update_nlist)
 261 >  call checkNeighborList(nlocal, q, listSkin, update_nlist)
 262    endif
 263
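
Note on this hunk: two related cleanups. First, pot_local lost its inline initializer; in Fortran 90 an initialization in a declaration implies the SAVE attribute, so "= 0.0_dp" would only have zeroed it on the first entry to do_force_loop, and it is now reset explicitly at the top of every call. Second, the cutoff and list radii are no longer fetched locally (the getRcut/getRlist calls are gone); rlist and rlistsq live at module scope via setRlistDF, and checkNeighborList is passed just a skin thickness (listSkin = 1.0). Presumably the list is rebuilt once atoms have drifted by an appreciable fraction of that skin, but the actual criterion is inside checkNeighborList and is not shown in this diff.
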
 264    #ifdef IS_MPI
 267
 268    !! save current configuration, construct neighbor list,
 269    !! and calculate forces
 270 <  call saveNeighborList(q)
 270 >  call saveNeighborList(nlocal, q)
 271
 272    neighborListSize = size(list)
 273    nlist = 0
 281
 282    call get_interatomic_vector(q_Row(:,i), q_Col(:,j), d, rijsq)
 283
 284 <  if (rijsq < rlistsq) then
 284 >  if (rijsq < rlistsq) then
 285
 286    nlist = nlist + 1
 287
 297
 298    list(nlist) = j
 299
 300 <  if (rijsq < rcutsq) then
 301 <  call do_pair(i, j, rijsq, d, do_pot, do_stress, &
 302 <  u_l, A, f, t,pot)
 281 <  endif
 300 >  call do_pair(i, j, rijsq, d, do_pot, do_stress, &
 301 >  u_l, A, f, t, pot_local)
 302 >
 303    endif
 304    enddo inner
 305    enddo
 320
 321    call get_interatomic_vector(q_Row(:,i), q_Col(:,j), d, rijsq)
 322    call do_pair(i, j, rijsq, d, do_pot, do_stress, &
 323 <  u_l, A, f, t,pot)
 323 >  u_l, A, f, t, pot_local)
 324
 325    enddo
 326    endif
 333
 334    ! save current configuration, construct neighbor list,
 335    ! and calculate forces
 336 <  call saveNeighborList(q)
 336 >  call saveNeighborList(natoms, q)
 337
 338    neighborListSize = size(list)
 339
 349    call get_interatomic_vector(q(:,i), q(:,j), d, rijsq)
 350
 351
 352 <  if (rijsq < rlistsq) then
 352 >  if (rijsq < rlistsq) then
 353
 354    nlist = nlist + 1
 355
 365
 366    list(nlist) = j
 367
 368 <  if (rijsq < rcutsq) then
 369 <  call do_pair(i, j, rijsq, d, do_pot, do_stress, &
 370 <  u_l, A, f, t,pot)
 350 <  endif
 368 >  call do_pair(i, j, rijsq, d, do_pot, do_stress, &
 369 >  u_l, A, f, t, pot)
 370 >
 371    endif
 372    enddo inner
 373    enddo
 388
 389    call get_interatomic_vector(q(:,i), q(:,j), d, rijsq)
 390    call do_pair(i, j, rijsq, d, do_pot, do_stress, &
 391 <  u_l, A, f, t,pot)
 391 >  u_l, A, f, t, pot)
 392
 393    enddo
 394    endif
 492    #ifdef IS_MPI
 493
 494    if (do_pot) then
 495 <  pot = pot_local
 495 >  pot = pot + pot_local
 496    !! we assume the c code will do the allreduce to get the total potential
 497    !! we could do it right here if we needed to...
 498    endif
 499
 500    if (do_stress) then
 501 <  call mpi_allreduce(tau, tau_Temp,9,mpi_double_precision,mpi_sum, &
 501 >  call mpi_allreduce(tau_Temp, tau, 9,mpi_double_precision,mpi_sum, &
 502    mpi_comm_world,mpi_err)
 503 <  call mpi_allreduce(virial, virial_Temp,1,mpi_double_precision,mpi_sum, &
 503 >  call mpi_allreduce(virial_Temp, virial,1,mpi_double_precision,mpi_sum, &
 504    mpi_comm_world,mpi_err)
 505    endif
 506
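
Note on this hunk: two MPI fixes. The per-rank pair energy is now accumulated in pot_local and folded back with pot = pot + pot_local, so anything already sitting in pot is preserved instead of being overwritten. The mpi_allreduce calls also had their buffers swapped: the standard Fortran binding is

        call MPI_ALLREDUCE(sendbuf, recvbuf, count, datatype, op, comm, ierror)

so with the corrected argument order the summed stress tensor and virial land in tau and virial, with tau_Temp and virial_Temp acting as the per-rank send buffers, rather than the reverse.
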
 515
 516    end subroutine do_force_loop
 517
 518 <  subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t,pot)
 518 >  subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t, pot)
 519
 520    real( kind = dp ) :: pot
 521 <  real( kind = dp ), dimension(:,:) :: u_l
 522 <  real (kind=dp), dimension(:,:) :: A
 523 <  real (kind=dp), dimension(:,:) :: f
 524 <  real (kind=dp), dimension(:,:) :: t
 521 >  real( kind = dp ), dimension(3,getNlocal()) :: u_l
 522 >  real (kind=dp), dimension(9,getNlocal()) :: A
 523 >  real (kind=dp), dimension(3,getNlocal()) :: f
 524 >  real (kind=dp), dimension(3,getNlocal()) :: t
 525
 526    logical, intent(inout) :: do_pot, do_stress
 527    integer, intent(in) :: i, j
 537    r = sqrt(rijsq)
 538
 539    #ifdef IS_MPI
 540 +  if (tagRow(i) .eq. tagColumn(j)) then
 541 +  write(0,*) 'do_pair is doing', i , j, tagRow(i), tagColumn(j)
 542 +  endif
 543
 544    me_i = atid_row(i)
 545    me_j = atid_col(j)
 598    endif
 599    endif
 600
 601 +
 602 +
 603    end subroutine do_pair
 604
 605
 608    real (kind = dp), dimension(3) :: q_i
 609    real (kind = dp), dimension(3) :: q_j
 610    real ( kind = dp ), intent(out) :: r_sq
 611 <  real( kind = dp ) :: d(3)
 612 <  real( kind = dp ) :: d_old(3)
 613 <  d(1:3) = q_i(1:3) - q_j(1:3)
 614 <  d_old = d
 611 >  real( kind = dp ) :: d(3), scaled(3)
 612 >  integer i
 613 >
 614 >  d(1:3) = q_j(1:3) - q_i(1:3)
 615 >
 616    ! Wrap back into periodic box if necessary
 617    if ( SimUsesPBC() ) then
 618
 619 <  d(1:3) = d(1:3) - box(1:3) * sign(1.0_dp,d(1:3)) * &
 620 <  int(abs(d(1:3)/box(1:3)) + 0.5_dp)
 619 >  if( .not.boxIsOrthorhombic ) then
 620 >  ! calc the scaled coordinates.
 621 >
 622 >  scaled = matmul(HmatInv, d)
 623 >
 624 >  ! wrap the scaled coordinates
 625 >
 626 >  scaled = scaled - anint(scaled)
 627 >
 628 >
 629 >  ! calc the wrapped real coordinates from the wrapped scaled
 630 >  ! coordinates
 631 >
 632 >  d = matmul(Hmat,scaled)
 633 >
 634 >  else
 635 >  ! calc the scaled coordinates.
 636 >
 637 >  do i = 1, 3
 638 >  scaled(i) = d(i) * HmatInv(i,i)
 639 >
 640 >  ! wrap the scaled coordinates
 641 >
 642 >  scaled(i) = scaled(i) - anint(scaled(i))
 643 >
 644 >  ! calc the wrapped real coordinates from the wrapped scaled
 645 >  ! coordinates
 646 >
 647 >  d(i) = scaled(i)*Hmat(i,i)
 648 >  enddo
 649 >  endif
 650
 651    endif
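
Note on this hunk: get_interatomic_vector now handles non-orthorhombic cells by doing the minimum-image wrap in fractional coordinates. With Hmat the cell matrix, the separation is reduced as s = HmatInv * d, then s -> s - anint(s) (leaving each component in [-1/2, 1/2]), and finally d = Hmat * s. When boxIsOrthorhombic is set, Hmat and HmatInv are diagonal, so the same reduction is done component-wise (scaled(i) = d(i)*HmatInv(i,i)) without the two 3x3 matmuls. Note also that the sign convention flipped from q_i - q_j to q_j - q_i, and the old box(1:3)/sign/int construction is gone.
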
 652 +
 653    r_sq = dot_product(d,d)
 654 <
 654 >
 655    end subroutine get_interatomic_vector
 656 <
 656 >
 657    subroutine check_initialization(error)
 658    integer, intent(out) :: error
 659
 710    rf = 0.0_dp
 711    tau_Temp = 0.0_dp
 712    virial_Temp = 0.0_dp
 657 -
 713    end subroutine zero_work_arrays
 714
 715    function skipThisPair(atom1, atom2) result(skip_it)
 753    #else
 754    unique_id_2 = atom2
 755    #endif
 756 <
 756 >
 757    #ifdef IS_MPI
 758    !! this situation should only arise in MPI simulations
 759    if (unique_id_1 == unique_id_2) then
 763
 764    !! this prevents us from doing the pair on multiple processors
 765    if (unique_id_1 < unique_id_2) then
 766 <  if (mod(unique_id_1 + unique_id_2,2) == 0) skip_it = .true.
 767 <  return
 766 >  if (mod(unique_id_1 + unique_id_2,2) == 0) then
 767 >  skip_it = .true.
 768 >  return
 769 >  endif
 770    else
 771 <  if (mod(unique_id_1 + unique_id_2,2) == 1) skip_it = .true.
 772 <  return
 771 >  if (mod(unique_id_1 + unique_id_2,2) == 1) then
 772 >  skip_it = .true.
 773 >  return
 774 >  endif
 775    endif
 776    #endif
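
Note on this hunk: the parity test is how the MPI decomposition splits pair work so that each (unique_id_1, unique_id_2) pair is computed exactly once. When the two ids sum to an odd number, the ordering with unique_id_1 < unique_id_2 keeps the pair and the reverse ordering skips it; for an even sum it is the other way around. For example, ids 3 and 8 (sum 11) are kept where id1 < id2 and skipped where id1 > id2, while ids 2 and 6 (sum 8) are skipped where id1 < id2 and kept where id1 > id2. The rewrite from the one-line if to a block also fixes the control flow: previously the return executed unconditionally, so orderings that were not supposed to be skipped still bailed out before reaching the exclusion checks below; now the early return happens only when skip_it has actually been set.
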
 777 <
 777 >
 778    !! the rest of these situations can happen in all simulations:
 779    do i = 1, nExcludes_global
 780    if ((excludesGlobal(i) == unique_id_1) .or. &
 783    return
 784    endif
 785    enddo
 786 <
 786 >
 787    do i = 1, nExcludes_local
 788    if (excludesLocal(1,i) == unique_id_1) then
 789    if (excludesLocal(2,i) == unique_id_2) then