  4 |
  5 |   !! @author Charles F. Vardeman II
  6 |   !! @author Matthew Meineke
  7 | < !! @version $Id: do_Forces.F90,v 1.1.1.1 2003-03-21 17:42:12 mmeineke Exp $, $Date: 2003-03-21 17:42:12 $, $Name: not supported by cvs2svn $, $Revision: 1.1.1.1 $
  7 | > !! @version $Id: do_Forces.F90,v 1.17 2003-07-02 21:26:55 mmeineke Exp $, $Date: 2003-07-02 21:26:55 $, $Name: not supported by cvs2svn $, $Revision: 1.17 $
  8 |
  9 |   module do_Forces
 10 |   use force_globals
140 |
141 |   if (FF_uses_GB .and. FF_uses_LJ) then
142 |   endif
143 | <
143 | > if (.not. do_forces_initialized) then
144 | > !! Create neighbor lists
145 | > call expandNeighborList(getNlocal(), my_status)
146 | > if (my_Status /= 0) then
147 | > write(default_error,*) "SimSetup: ExpandNeighborList returned error."
148 | > thisStat = -1
149 | > return
150 | > endif
151 | > endif
152 |
153 |   do_forces_initialized = .true.
154 |
175 |   logical ( kind = 2) :: do_pot_c, do_stress_c
176 |   logical :: do_pot
177 |   logical :: do_stress
178 | < #ifdef IS_MPI
178 | > #ifdef IS_MPI
179 |   real( kind = DP ) :: pot_local
180 |   integer :: nrow
181 |   integer :: ncol
197 |   !! initialize local variables
198 |
199 |   #ifdef IS_MPI
200 | + pot_local = 0.0_dp
201 |   nlocal = getNlocal()
202 |   nrow = getNrow(plan_row)
203 |   ncol = getNcol(plan_col)
205 |   nlocal = getNlocal()
206 |   natoms = nlocal
207 |   #endif
208 | <
208 | >
209 |   call getRcut(rcut,rc2=rcutsq)
210 |   call getRlist(rlist,rlistsq)
211 |
254 |
255 |   !! save current configuration, construct neighbor list,
256 |   !! and calculate forces
257 | < call saveNeighborList(q)
257 | > call saveNeighborList(nlocal, q)
258 |
259 |   neighborListSize = size(list)
260 |   nlist = 0
286 |
287 |   if (rijsq < rcutsq) then
288 |   call do_pair(i, j, rijsq, d, do_pot, do_stress, &
289 | < u_l, A, f, t,pot)
289 | > u_l, A, f, t, pot_local)
290 |   endif
291 |   endif
292 |   enddo inner
308 |
309 |   call get_interatomic_vector(q_Row(:,i), q_Col(:,j), d, rijsq)
310 |   call do_pair(i, j, rijsq, d, do_pot, do_stress, &
311 | < u_l, A, f, t,pot)
311 | > u_l, A, f, t, pot_local)
312 |
313 |   enddo
314 |   endif
321 |
322 |   ! save current configuration, construct neighbor list,
323 |   ! and calculate forces
324 | < call saveNeighborList(q)
324 | > call saveNeighborList(natoms, q)
325 |
326 |   neighborListSize = size(list)
327 |
332 |
333 |   inner: do j = i+1, natoms
334 |
335 | < if (skipThisPair(i,j)) cycle inner
336 | <
335 | > if (skipThisPair(i,j)) cycle inner
336 | >
337 |   call get_interatomic_vector(q(:,i), q(:,j), d, rijsq)
338 |
339 | +
340 |   if (rijsq < rlistsq) then
341 |
342 |   nlist = nlist + 1
355 |
356 |   if (rijsq < rcutsq) then
357 |   call do_pair(i, j, rijsq, d, do_pot, do_stress, &
358 | < u_l, A, f, t,pot)
358 | > u_l, A, f, t, pot)
359 |   endif
360 |   endif
361 |   enddo inner
377 |
378 |   call get_interatomic_vector(q(:,i), q(:,j), d, rijsq)
379 |   call do_pair(i, j, rijsq, d, do_pot, do_stress, &
380 | < u_l, A, f, t,pot)
380 | > u_l, A, f, t, pot)
381 |
382 |   enddo
383 |   endif
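The loops above use two radii: any pair inside rlistsq goes onto the neighbor list, but do_pair is only called for pairs inside the smaller rcutsq, so the list stays valid for several steps between rebuilds. A minimal self-contained sketch of that two-radius pattern follows; the skin value, hard-coded coordinates, and program scaffolding are assumptions for illustration, not taken from do_Forces.F90.

! Illustrative sketch only: a two-radius (Verlet) pair loop over a few
! hard-coded points.  rcutsq/rlistsq play the same roles as in the diff;
! the skin value and coordinates are assumptions.
program verlet_sketch
  implicit none
  integer, parameter :: dp = selected_real_kind(15)
  real(kind=dp), parameter :: rcut = 2.5_dp, skin = 1.0_dp
  real(kind=dp), parameter :: rcutsq = rcut*rcut, rlistsq = (rcut+skin)**2
  real(kind=dp) :: q(3,4), d(3), rijsq
  integer :: list(6), nlist, i, j

  q = reshape( (/ 0.0_dp, 0.0_dp, 0.0_dp,   1.0_dp, 0.0_dp, 0.0_dp, &
                  0.0_dp, 3.0_dp, 0.0_dp,   5.0_dp, 5.0_dp, 5.0_dp /), (/3,4/) )

  nlist = 0
  do i = 1, 3
     inner: do j = i+1, 4
        d = q(:,j) - q(:,i)
        rijsq = dot_product(d,d)
        if (rijsq < rlistsq) then
           nlist = nlist + 1        ! pair goes on the (reusable) neighbor list
           list(nlist) = j
           if (rijsq < rcutsq) then ! forces only for pairs inside the true cutoff
              write(*,*) 'would call do_pair for', i, j
           endif
        endif
     enddo inner
  enddo
end program verlet_sketch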
390 |
391 |   #ifdef IS_MPI
392 |   !!distribute forces
393 | <
394 | < call scatter(f_Row,f,plan_row3d)
393 | >
394 | > f_temp = 0.0_dp
395 | > call scatter(f_Row,f_temp,plan_row3d)
396 | > do i = 1,nlocal
397 | > f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
398 | > end do
399 | >
400 | > f_temp = 0.0_dp
401 |   call scatter(f_Col,f_temp,plan_col3d)
402 |   do i = 1,nlocal
403 |   f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
404 |   end do
405 |
406 |   if (FF_UsesDirectionalAtoms() .and. SimUsesDirectionalAtoms()) then
407 | < call scatter(t_Row,t,plan_row3d)
407 | > t_temp = 0.0_dp
408 | > call scatter(t_Row,t_temp,plan_row3d)
409 | > do i = 1,nlocal
410 | > t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
411 | > end do
412 | > t_temp = 0.0_dp
413 |   call scatter(t_Col,t_temp,plan_col3d)
414 |
415 |   do i = 1,nlocal
420 |   if (do_pot) then
421 |   ! scatter/gather pot_row into the members of my column
422 |   call scatter(pot_Row, pot_Temp, plan_row)
423 | <
423 | >
424 |   ! scatter/gather pot_local into all other procs
425 |   ! add resultant to get total pot
426 |   do i = 1, nlocal
427 |   pot_local = pot_local + pot_Temp(i)
428 |   enddo
429 | +
430 | + pot_Temp = 0.0_DP
431 |
409 | - pot_Temp = 0.0_DP
410 | -
432 |   call scatter(pot_Col, pot_Temp, plan_col)
433 |   do i = 1, nlocal
434 |   pot_local = pot_local + pot_Temp(i)
435 |   enddo
436 | <
436 | >
437 |   endif
438 |   #endif
439 |
481 |   #ifdef IS_MPI
482 |
483 |   if (do_pot) then
484 | < pot = pot_local
484 | > pot = pot + pot_local
485 |   !! we assume the c code will do the allreduce to get the total potential
486 |   !! we could do it right here if we needed to...
487 |   endif
488 |
489 |   if (do_stress) then
490 | < call mpi_allreduce(tau, tau_Temp,9,mpi_double_precision,mpi_sum, &
490 | > call mpi_allreduce(tau_Temp, tau, 9,mpi_double_precision,mpi_sum, &
491 |   mpi_comm_world,mpi_err)
492 | < call mpi_allreduce(virial, virial_Temp,1,mpi_double_precision,mpi_sum, &
492 | > call mpi_allreduce(virial_Temp, virial,1,mpi_double_precision,mpi_sum, &
493 |   mpi_comm_world,mpi_err)
494 |   endif
495 |
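The comments at lines 485-486 leave the reduction of the per-node potential to the C caller but note it could be done here. A minimal sketch of what that in-place reduction might look like, reusing the mpi_allreduce pattern of the tau/virial calls above; the surrounding program and the stand-in value of pot_local are assumptions for illustration.

! Illustrative sketch only: summing the per-node partial potential onto every
! node with the same mpi_allreduce pattern used for tau and virial above.
program pot_reduce_sketch
  implicit none
  include 'mpif.h'
  integer, parameter :: dp = selected_real_kind(15)
  real(kind=dp) :: pot_local, pot
  integer :: rank, mpi_err

  call mpi_init(mpi_err)
  call mpi_comm_rank(mpi_comm_world, rank, mpi_err)

  pot_local = real(rank + 1, dp)   ! stand-in for this node's partial sum

  ! sum the per-node partial potentials onto every node
  call mpi_allreduce(pot_local, pot, 1, mpi_double_precision, mpi_sum, &
       mpi_comm_world, mpi_err)

  if (rank == 0) write(*,*) 'total potential =', pot
  call mpi_finalize(mpi_err)
end program pot_reduce_sketch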
504 |
505 |   end subroutine do_force_loop
506 |
507 | < subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t,pot)
507 | > subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t, pot)
508 |
509 |   real( kind = dp ) :: pot
510 | < real( kind = dp ), dimension(:,:) :: u_l
511 | < real (kind=dp), dimension(:,:) :: A
512 | < real (kind=dp), dimension(:,:) :: f
513 | < real (kind=dp), dimension(:,:) :: t
510 | > real( kind = dp ), dimension(3,getNlocal()) :: u_l
511 | > real (kind=dp), dimension(9,getNlocal()) :: A
512 | > real (kind=dp), dimension(3,getNlocal()) :: f
513 | > real (kind=dp), dimension(3,getNlocal()) :: t
514 |
515 |   logical, intent(inout) :: do_pot, do_stress
516 |   integer, intent(in) :: i, j
525 |
526 |   r = sqrt(rijsq)
527 |
528 | +
529 | +
530 |   #ifdef IS_MPI
531 | + if (tagRow(i) .eq. tagColumn(j)) then
532 | + write(0,*) 'do_pair is doing', i , j, tagRow(i), tagColumn(j)
533 | + endif
534 |
535 |   me_i = atid_row(i)
536 |   me_j = atid_col(j)
558 |
559 |   call do_dipole_pair(i, j, d, r, rijsq, pot, u_l, f, t, &
560 |   do_pot, do_stress)
535 | -
561 |   if (FF_uses_RF .and. SimUsesRF()) then
537 | -
562 |   call accumulate_rf(i, j, r, u_l)
563 |   call rf_correct_forces(i, j, d, r, u_l, f, do_stress)
540 | -
564 |   endif
565 |
566 |   endif
570 |
571 |   call getElementProperty(atypes, me_i, "is_Sticky", is_Sticky_i)
572 |   call getElementProperty(atypes, me_j, "is_Sticky", is_Sticky_j)
573 | <
573 | >
574 |   if ( is_Sticky_i .and. is_Sticky_j ) then
575 |   call do_sticky_pair(i, j, d, r, rijsq, A, pot, f, t, &
576 |   do_pot, do_stress)
597 |   real (kind = dp), dimension(3) :: q_i
598 |   real (kind = dp), dimension(3) :: q_j
599 |   real ( kind = dp ), intent(out) :: r_sq
600 | < real( kind = dp ) :: d(3)
601 | < real( kind = dp ) :: d_old(3)
602 | < d(1:3) = q_i(1:3) - q_j(1:3)
603 | < d_old = d
600 | > real( kind = dp ) :: d(3), scaled(3)
601 | > integer i
602 | >
603 | > d(1:3) = q_j(1:3) - q_i(1:3)
604 | >
605 |   ! Wrap back into periodic box if necessary
606 |   if ( SimUsesPBC() ) then
607 | +
608 | + if( .not.boxIsOrthorhombic ) then
609 | + ! calc the scaled coordinates.
610 | +
611 | + scaled = matmul(HmatInv, d)
612 | +
613 | + ! wrap the scaled coordinates
614 |
615 | < d(1:3) = d(1:3) - box(1:3) * sign(1.0_dp,d(1:3)) * &
616 | < int(abs(d(1:3)/box(1:3)) + 0.5_dp)
615 | > scaled = scaled - anint(scaled)
616 | >
617 |
618 | + ! calc the wrapped real coordinates from the wrapped scaled
619 | + ! coordinates
620 | +
621 | + d = matmul(Hmat,scaled)
622 | +
623 | + else
624 | + ! calc the scaled coordinates.
625 | +
626 | + do i = 1, 3
627 | + scaled(i) = d(i) * HmatInv(i,i)
628 | +
629 | + ! wrap the scaled coordinates
630 | +
631 | + scaled(i) = scaled(i) - anint(scaled(i))
632 | +
633 | + ! calc the wrapped real coordinates from the wrapped scaled
634 | + ! coordinates
635 | +
636 | + d(i) = scaled(i)*Hmat(i,i)
637 | + enddo
638 | + endif
639 | +
640 |   endif
641 | +
642 |   r_sq = dot_product(d,d)
643 | <
643 | >
644 |   end subroutine get_interatomic_vector
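The rewritten get_interatomic_vector wraps displacements through scaled (fractional) coordinates: multiply by HmatInv, shift by anint, and multiply back by Hmat, which handles non-orthorhombic cells that the old box(1:3) wrapping could not. A self-contained sketch of those three steps on a single displacement follows; the triclinic cell and test vector here are made up for illustration.

! Illustrative sketch only: the scaled-coordinate minimum-image wrap used in
! the new get_interatomic_vector, applied to one hard-coded displacement in a
! hand-built (hypothetical) triclinic cell.
program min_image_sketch
  implicit none
  integer, parameter :: dp = selected_real_kind(15)
  real(kind=dp) :: Hmat(3,3), HmatInv(3,3), d(3), scaled(3)

  ! columns of Hmat are the box vectors; HmatInv is written out by hand here
  Hmat    = reshape( (/ 10.0_dp, 0.0_dp, 0.0_dp,   &
                         2.0_dp, 10.0_dp, 0.0_dp,  &
                         0.0_dp, 0.0_dp, 10.0_dp /), (/3,3/) )
  HmatInv = reshape( (/ 0.1_dp, 0.0_dp, 0.0_dp,    &
                       -0.02_dp, 0.1_dp, 0.0_dp,   &
                        0.0_dp, 0.0_dp, 0.1_dp /), (/3,3/) )

  d = (/ 9.0_dp, -7.0_dp, 4.0_dp /)

  ! same three steps as the diff: scale, wrap with anint, unscale
  scaled = matmul(HmatInv, d)
  scaled = scaled - anint(scaled)
  d = matmul(Hmat, scaled)

  write(*,*) 'minimum-image displacement:', d
end program min_image_sketch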
645 | <
645 | >
646 |   subroutine check_initialization(error)
647 |   integer, intent(out) :: error
648 |
699 |   rf = 0.0_dp
700 |   tau_Temp = 0.0_dp
701 |   virial_Temp = 0.0_dp
648 | -
702 |   end subroutine zero_work_arrays
703 |
704 |   function skipThisPair(atom1, atom2) result(skip_it)
652 | -
705 |   integer, intent(in) :: atom1
706 |   integer, intent(in), optional :: atom2
707 |   logical :: skip_it
708 |   integer :: unique_id_1, unique_id_2
709 | + integer :: me_i,me_j
710 |   integer :: i
711 |
712 |   skip_it = .false.
724 |   !! in the normal loop, the atom numbers are unique
725 |   unique_id_1 = atom1
726 |   #endif
727 | <
727 | >
728 |   !! We were called with only one atom, so just check the global exclude
729 |   !! list for this atom
730 |   if (.not. present(atom2)) then
742 |   #else
743 |   unique_id_2 = atom2
744 |   #endif
745 | <
745 | >
746 |   #ifdef IS_MPI
747 |   !! this situation should only arise in MPI simulations
748 |   if (unique_id_1 == unique_id_2) then
752 |
753 |   !! this prevents us from doing the pair on multiple processors
754 |   if (unique_id_1 < unique_id_2) then
755 | < if (mod(unique_id_1 + unique_id_2,2) == 0) skip_it = .true.
756 | < return
755 | > if (mod(unique_id_1 + unique_id_2,2) == 0) then
756 | > skip_it = .true.
757 | > return
758 | > endif
759 |   else
760 | < if (mod(unique_id_1 + unique_id_2,2) == 1) skip_it = .true.
761 | < return
760 | > if (mod(unique_id_1 + unique_id_2,2) == 1) then
761 | > skip_it = .true.
762 | > return
763 | > endif
764 |   endif
765 |   #endif
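The even/odd test above decides which of the two nodes that see a given row/column pair actually computes it, so each pair is done exactly once; the rewritten block also returns early only when the pair is really skipped, so the exclusion checks further down still run. A stand-alone sketch of the same parity rule follows; the driver program and function name are hypothetical.

! Illustrative sketch only: the even/odd rule from the block above, pulled out
! as a stand-alone function.  For any unordered pair, the two orderings give
! complementary answers, so exactly one node keeps the pair.
program parity_sketch
  implicit none
  integer :: i, j

  do i = 1, 4
     do j = 1, 4
        if (i /= j) write(*,*) i, j, ' skipped on this node: ', skip_pair(i, j)
     enddo
  enddo

contains

  logical function skip_pair(unique_id_1, unique_id_2)
    integer, intent(in) :: unique_id_1, unique_id_2
    ! the node that sees the pair with the "wrong" parity skips it;
    ! its partner, which sees the reversed ordering, keeps it
    if (unique_id_1 < unique_id_2) then
       skip_pair = (mod(unique_id_1 + unique_id_2, 2) == 0)
    else
       skip_pair = (mod(unique_id_1 + unique_id_2, 2) == 1)
    endif
  end function skip_pair
end program parity_sketch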
766 | <
766 | >
767 |   !! the rest of these situations can happen in all simulations:
768 |   do i = 1, nExcludes_global
769 |   if ((excludesGlobal(i) == unique_id_1) .or. &
772 |   return
773 |   endif
774 |   enddo
775 | <
775 | >
776 |   do i = 1, nExcludes_local
777 |   if (excludesLocal(1,i) == unique_id_1) then
778 |   if (excludesLocal(2,i) == unique_id_2) then