4 |
|
|
5 |
|
!! @author Charles F. Vardeman II |
6 |
|
!! @author Matthew Meineke |
7 |
< |
!! @version $Id: do_Forces.F90,v 1.5 2003-03-31 21:50:59 chuckv Exp $, $Date: 2003-03-31 21:50:59 $, $Name: not supported by cvs2svn $, $Revision: 1.5 $ |
7 |
> |
!! @version $Id: do_Forces.F90,v 1.14 2003-04-10 16:22:00 mmeineke Exp $, $Date: 2003-04-10 16:22:00 $, $Name: not supported by cvs2svn $, $Revision: 1.14 $ |
8 |
|
|
9 |
|
module do_Forces |
10 |
|
use force_globals |
140 |
|
|
141 |
|
if (FF_uses_GB .and. FF_uses_LJ) then |
142 |
|
endif |
143 |
< |
|
143 |
> |
if (.not. do_forces_initialized) then |
144 |
> |
!! Create neighbor lists |
145 |
> |
call expandNeighborList(getNlocal(), my_status) |
146 |
> |
if (my_Status /= 0) then |
147 |
> |
write(default_error,*) "SimSetup: ExpandNeighborList returned error." |
148 |
> |
thisStat = -1 |
149 |
> |
return |
150 |
> |
endif |
151 |
> |
endif |
152 |
|
|
153 |
|
do_forces_initialized = .true. |
154 |
|
|
175 |
|
logical ( kind = 2) :: do_pot_c, do_stress_c |
176 |
|
logical :: do_pot |
177 |
|
logical :: do_stress |
178 |
< |
#ifdef IS_MPI |
178 |
> |
#ifdef IS_MPI |
179 |
|
real( kind = DP ) :: pot_local |
180 |
|
integer :: nrow |
181 |
|
integer :: ncol |
197 |
|
!! initialize local variables |
198 |
|
|
199 |
|
#ifdef IS_MPI |
200 |
+ |
pot_local = 0.0_dp |
201 |
|
nlocal = getNlocal() |
202 |
|
nrow = getNrow(plan_row) |
203 |
|
ncol = getNcol(plan_col) |
205 |
|
nlocal = getNlocal() |
206 |
|
natoms = nlocal |
207 |
|
#endif |
208 |
< |
|
208 |
> |
|
209 |
|
call getRcut(rcut,rc2=rcutsq) |
210 |
|
call getRlist(rlist,rlistsq) |
211 |
|
|
254 |
|
|
255 |
|
!! save current configuration, construct neighbor list, |
256 |
|
!! and calculate forces |
257 |
< |
call saveNeighborList(q) |
257 |
> |
call saveNeighborList(nlocal, q) |
258 |
|
|
259 |
|
neighborListSize = size(list) |
260 |
|
nlist = 0 |
286 |
|
|
287 |
|
if (rijsq < rcutsq) then |
288 |
|
call do_pair(i, j, rijsq, d, do_pot, do_stress, & |
289 |
< |
u_l, A, f, t,pot) |
289 |
> |
u_l, A, f, t, pot_local) |
290 |
|
endif |
291 |
|
endif |
292 |
|
enddo inner |
308 |
|
|
309 |
|
call get_interatomic_vector(q_Row(:,i), q_Col(:,j), d, rijsq) |
310 |
|
call do_pair(i, j, rijsq, d, do_pot, do_stress, & |
311 |
< |
u_l, A, f, t,pot) |
311 |
> |
u_l, A, f, t, pot_local) |
312 |
|
|
313 |
|
enddo |
314 |
|
endif |
321 |
|
|
322 |
|
! save current configuration, construct neighbor list, |
323 |
|
! and calculate forces |
324 |
< |
call saveNeighborList(q) |
324 |
> |
call saveNeighborList(natoms, q) |
325 |
|
|
326 |
|
neighborListSize = size(list) |
327 |
|
|
355 |
|
|
356 |
|
if (rijsq < rcutsq) then |
357 |
|
call do_pair(i, j, rijsq, d, do_pot, do_stress, & |
358 |
< |
u_l, A, f, t,pot) |
358 |
> |
u_l, A, f, t, pot) |
359 |
|
endif |
360 |
|
endif |
361 |
|
enddo inner |
377 |
|
|
378 |
|
call get_interatomic_vector(q(:,i), q(:,j), d, rijsq) |
379 |
|
call do_pair(i, j, rijsq, d, do_pot, do_stress, & |
380 |
< |
u_l, A, f, t,pot) |
380 |
> |
u_l, A, f, t, pot) |
381 |
|
|
382 |
|
enddo |
383 |
|
endif |
420 |
|
if (do_pot) then |
421 |
|
! scatter/gather pot_row into the members of my column |
422 |
|
call scatter(pot_Row, pot_Temp, plan_row) |
423 |
< |
|
423 |
> |
|
424 |
|
! scatter/gather pot_local into all other procs |
425 |
|
! add resultant to get total pot |
426 |
|
do i = 1, nlocal |
427 |
|
pot_local = pot_local + pot_Temp(i) |
428 |
|
enddo |
429 |
+ |
|
430 |
+ |
pot_Temp = 0.0_DP |
431 |
|
|
421 |
– |
pot_Temp = 0.0_DP |
422 |
– |
|
432 |
|
call scatter(pot_Col, pot_Temp, plan_col) |
433 |
|
do i = 1, nlocal |
434 |
|
pot_local = pot_local + pot_Temp(i) |
435 |
|
enddo |
436 |
< |
|
436 |
> |
|
437 |
|
endif |
438 |
|
#endif |
439 |
|
|
481 |
|
#ifdef IS_MPI |
482 |
|
|
483 |
|
if (do_pot) then |
484 |
< |
write(*,*) "Fortran is on pot:, pot, pot_local ", pot,pot_local |
476 |
< |
pot = pot_local |
484 |
> |
pot = pot + pot_local |
485 |
|
!! we assume the c code will do the allreduce to get the total potential |
486 |
|
!! we could do it right here if we needed to... |
487 |
|
endif |
488 |
|
|
489 |
|
if (do_stress) then |
490 |
< |
call mpi_allreduce(tau, tau_Temp,9,mpi_double_precision,mpi_sum, & |
490 |
> |
call mpi_allreduce(tau_Temp, tau,9,mpi_double_precision,mpi_sum, & |
491 |
|
mpi_comm_world,mpi_err) |
492 |
< |
call mpi_allreduce(virial, virial_Temp,1,mpi_double_precision,mpi_sum, & |
492 |
> |
call mpi_allreduce(virial_Temp, virial,1,mpi_double_precision,mpi_sum, & |
493 |
|
mpi_comm_world,mpi_err) |
494 |
|
endif |
495 |
|
|
504 |
|
|
505 |
|
end subroutine do_force_loop |
506 |
|
|
507 |
< |
subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t,pot) |
507 |
> |
subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t, pot) |
508 |
|
|
509 |
|
real( kind = dp ) :: pot |
510 |
< |
real( kind = dp ), dimension(:,:) :: u_l |
511 |
< |
real (kind=dp), dimension(:,:) :: A |
512 |
< |
real (kind=dp), dimension(:,:) :: f |
513 |
< |
real (kind=dp), dimension(:,:) :: t |
510 |
> |
real( kind = dp ), dimension(3,getNlocal()) :: u_l |
511 |
> |
real (kind=dp), dimension(9,getNlocal()) :: A |
512 |
> |
real (kind=dp), dimension(3,getNlocal()) :: f |
513 |
> |
real (kind=dp), dimension(3,getNlocal()) :: t |
514 |
|
|
515 |
|
logical, intent(inout) :: do_pot, do_stress |
516 |
|
integer, intent(in) :: i, j |
594 |
|
real ( kind = dp ), intent(out) :: r_sq |
595 |
|
real( kind = dp ) :: d(3) |
596 |
|
real( kind = dp ) :: d_old(3) |
597 |
< |
d(1:3) = q_i(1:3) - q_j(1:3) |
597 |
> |
d(1:3) = q_j(1:3) - q_i(1:3) |
598 |
|
d_old = d |
599 |
|
! Wrap back into periodic box if necessary |
600 |
|
if ( SimUsesPBC() ) then |
663 |
|
rf = 0.0_dp |
664 |
|
tau_Temp = 0.0_dp |
665 |
|
virial_Temp = 0.0_dp |
658 |
– |
|
666 |
|
end subroutine zero_work_arrays |
667 |
|
|
668 |
|
function skipThisPair(atom1, atom2) result(skip_it) |
706 |
|
#else |
707 |
|
unique_id_2 = atom2 |
708 |
|
#endif |
709 |
< |
|
709 |
> |
|
710 |
|
#ifdef IS_MPI |
711 |
|
!! this situation should only arise in MPI simulations |
712 |
|
if (unique_id_1 == unique_id_2) then |
716 |
|
|
717 |
|
!! this prevents us from doing the pair on multiple processors |
718 |
|
if (unique_id_1 < unique_id_2) then |
719 |
< |
if (mod(unique_id_1 + unique_id_2,2) == 0) skip_it = .true. |
720 |
< |
return |
719 |
> |
if (mod(unique_id_1 + unique_id_2,2) == 0) then |
720 |
> |
skip_it = .true. |
721 |
> |
return |
722 |
> |
endif |
723 |
|
else |
724 |
< |
if (mod(unique_id_1 + unique_id_2,2) == 1) skip_it = .true. |
725 |
< |
return |
724 |
> |
if (mod(unique_id_1 + unique_id_2,2) == 1) then |
725 |
> |
skip_it = .true. |
726 |
> |
return |
727 |
> |
endif |
728 |
|
endif |
729 |
|
#endif |
730 |
< |
|
730 |
> |
|
731 |
|
!! the rest of these situations can happen in all simulations: |
732 |
|
do i = 1, nExcludes_global |
733 |
|
if ((excludesGlobal(i) == unique_id_1) .or. & |
736 |
|
return |
737 |
|
endif |
738 |
|
enddo |
739 |
< |
|
739 |
> |
|
740 |
|
do i = 1, nExcludes_local |
741 |
|
if (excludesLocal(1,i) == unique_id_1) then |
742 |
|
if (excludesLocal(2,i) == unique_id_2) then |