root/group/trunk/OOPSE/libmdtools/do_Forces.F90

Comparing trunk/OOPSE/libmdtools/do_Forces.F90 (file contents):
Revision 388 by chuckv, Fri Mar 21 22:11:50 2003 UTC vs.
Revision 490 by gezelter, Fri Apr 11 15:16:59 2003 UTC

# Line 4 | Line 4
4  
5   !! @author Charles F. Vardeman II
6   !! @author Matthew Meineke
7 < !! @version $Id: do_Forces.F90,v 1.2 2003-03-21 22:11:50 chuckv Exp $, $Date: 2003-03-21 22:11:50 $, $Name: not supported by cvs2svn $, $Revision: 1.2 $
7 > !! @version $Id: do_Forces.F90,v 1.15 2003-04-11 15:16:59 gezelter Exp $, $Date: 2003-04-11 15:16:59 $, $Name: not supported by cvs2svn $, $Revision: 1.15 $
8  
9   module do_Forces
10    use force_globals
# Line 140 | Line 140 | contains
140  
141      if (FF_uses_GB .and. FF_uses_LJ) then
142      endif
143 +    if (.not. do_forces_initialized) then
144 +       !! Create neighbor lists
145 +       call expandNeighborList(getNlocal(), my_status)
146 +       if (my_Status /= 0) then
147 +          write(default_error,*) "SimSetup: ExpandNeighborList returned error."
148 +          thisStat = -1
149 +          return
150 +       endif
151 +    endif
152  
144
153      do_forces_initialized = .true.    
154  
155    end subroutine init_FF
# Line 167 | Line 175 | contains
175      logical ( kind = 2) :: do_pot_c, do_stress_c
176      logical :: do_pot
177      logical :: do_stress
178 < #ifdef IS_MPI
178 > #ifdef IS_MPI
179      real( kind = DP ) :: pot_local
180      integer :: nrow
181      integer :: ncol
# Line 189 | Line 197 | contains
197      !! initialize local variables  
198  
199   #ifdef IS_MPI
200 +    pot_local = 0.0_dp
201      nlocal = getNlocal()
202      nrow   = getNrow(plan_row)
203      ncol   = getNcol(plan_col)
# Line 196 | Line 205 | contains
205      nlocal = getNlocal()
206      natoms = nlocal
207   #endif
208 <
208 >  
209      call getRcut(rcut,rc2=rcutsq)
210      call getRlist(rlist,rlistsq)
211      
# Line 245 | Line 254 | contains
254        
255         !! save current configuration, construct neighbor list,
256         !! and calculate forces
257 <       call saveNeighborList(q)
257 >       call saveNeighborList(nlocal, q)
258        
259         neighborListSize = size(list)
260         nlist = 0      
# Line 277 | Line 286 | contains
286                                  
287                  if (rijsq <  rcutsq) then
288                     call do_pair(i, j, rijsq, d, do_pot, do_stress, &
289 <                        u_l, A, f, t,pot)
289 >                        u_l, A, f, t, pot_local)
290                  endif
291               endif
292            enddo inner
# Line 299 | Line 308 | contains
308  
309                  call get_interatomic_vector(q_Row(:,i), q_Col(:,j), d, rijsq)
310                  call do_pair(i, j, rijsq, d, do_pot, do_stress, &
311 <                     u_l, A, f, t,pot)
311 >                     u_l, A, f, t, pot_local)
312  
313               enddo
314            endif
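
On the MPI path, both do_pair calls above now receive the rank-local accumulator pot_local instead of the shared total pot: each processor sums only the pair energies it owns, and the totals are combined in one reduction afterwards. A minimal sketch of the accumulate-then-reduce pattern, using the kinds and MPI names that appear elsewhere in this file (pair_energy is a hypothetical stand-in for one pair's contribution; in this file the final reduction is actually deferred to the C layer):

    pot_local = 0.0_dp
    ! ... inside the pair loops owned by this rank:
    !     pot_local = pot_local + pair_energy
    call mpi_allreduce(pot_local, pot, 1, mpi_double_precision, &
         mpi_sum, mpi_comm_world, mpi_err)
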
# Line 312 | Line 321 | contains
321        
322         ! save current configuration, construct neighbor list,
323         ! and calculate forces
324 <       call saveNeighborList(q)
324 >       call saveNeighborList(natoms, q)
325        
326         neighborListSize = size(list)
327    
# Line 346 | Line 355 | contains
355                  
356                  if (rijsq <  rcutsq) then
357                     call do_pair(i, j, rijsq, d, do_pot, do_stress, &
358 <                        u_l, A, f, t,pot)
358 >                        u_l, A, f, t, pot)
359                  endif
360               endif
361            enddo inner
# Line 368 | Line 377 | contains
377  
378                  call get_interatomic_vector(q(:,i), q(:,j), d, rijsq)
379                  call do_pair(i, j, rijsq, d, do_pot, do_stress, &
380 <                     u_l, A, f, t,pot)
380 >                     u_l, A, f, t, pot)
381  
382               enddo
383            endif
# Line 381 | Line 390 | contains
390      
391   #ifdef IS_MPI
392      !!distribute forces
393 <    
394 <    call scatter(f_Row,f,plan_row3d)
393 >  
394 >    f_temp = 0.0_dp
395 >    call scatter(f_Row,f_temp,plan_row3d)
396 >    do i = 1,nlocal
397 >       f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
398 >    end do
399 >
400 >    f_temp = 0.0_dp
401      call scatter(f_Col,f_temp,plan_col3d)
402      do i = 1,nlocal
403         f(1:3,i) = f(1:3,i) + f_temp(1:3,i)
404      end do
405      
406      if (FF_UsesDirectionalAtoms() .and. SimUsesDirectionalAtoms()) then
407 <       call scatter(t_Row,t,plan_row3d)
407 >       t_temp = 0.0_dp
408 >       call scatter(t_Row,t_temp,plan_row3d)
409 >       do i = 1,nlocal
410 >          t(1:3,i) = t(1:3,i) + t_temp(1:3,i)
411 >       end do
412 >       t_temp = 0.0_dp
413         call scatter(t_Col,t_temp,plan_col3d)
414        
415         do i = 1,nlocal
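
The scatters above previously wrote the row-distributed forces and torques straight into f and t, so the column pass overwrote the row pass. The revision zeroes a scratch array, scatters one distributed piece into it, and accumulates into the local total before reusing the scratch for the next piece. Condensed with the file's own names (array syntax stands in for the explicit do loops; this is a sketch, not the file's code):

    f_temp = 0.0_dp
    call scatter(f_Row, f_temp, plan_row3d)      ! row piece
    f(1:3,1:nlocal) = f(1:3,1:nlocal) + f_temp(1:3,1:nlocal)

    f_temp = 0.0_dp
    call scatter(f_Col, f_temp, plan_col3d)      ! column piece
    f(1:3,1:nlocal) = f(1:3,1:nlocal) + f_temp(1:3,1:nlocal)
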
# Line 400 | Line 420 | contains
420      if (do_pot) then
421         ! scatter/gather pot_row into the members of my column
422         call scatter(pot_Row, pot_Temp, plan_row)
423 <      
423 >
424         ! scatter/gather pot_local into all other procs
425         ! add resultant to get total pot
426         do i = 1, nlocal
427            pot_local = pot_local + pot_Temp(i)
428         enddo
429 <
430 <       pot_Temp = 0.0_DP
429 >      
430 >       pot_Temp = 0.0_DP
431  
432         call scatter(pot_Col, pot_Temp, plan_col)
433         do i = 1, nlocal
434            pot_local = pot_local + pot_Temp(i)
435         enddo
436 <      
436 >
437      endif    
438   #endif
439  
# Line 461 | Line 481 | contains
481   #ifdef IS_MPI
482  
483      if (do_pot) then
484 <       pot = pot_local
484 >       pot = pot + pot_local
485         !! we assume the c code will do the allreduce to get the total potential
486         !! we could do it right here if we needed to...
487      endif
488  
489      if (do_stress) then
490 <       call mpi_allreduce(tau, tau_Temp,9,mpi_double_precision,mpi_sum, &
490 >      call mpi_allreduce(tau_Temp, tau, 9,mpi_double_precision,mpi_sum, &
491              mpi_comm_world,mpi_err)
492 <       call mpi_allreduce(virial, virial_Temp,1,mpi_double_precision,mpi_sum, &
492 >       call mpi_allreduce(virial_Temp, virial,1,mpi_double_precision,mpi_sum, &
493              mpi_comm_world,mpi_err)
494      endif
495  
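
Two fixes above are easy to miss. First, pot = pot + pot_local folds the rank-local sum into whatever pot already holds rather than overwriting it. Second, the mpi_allreduce buffers were reversed: MPI takes the per-rank send buffer first and the combined receive buffer second, so the scratch accumulators tau_Temp and virial_Temp must come first and the totals tau and virial second. For reference:

    ! mpi_allreduce(sendbuf, recvbuf, count, datatype, op, comm, ierror)
    call mpi_allreduce(tau_Temp, tau, 9, mpi_double_precision, &
         mpi_sum, mpi_comm_world, mpi_err)
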
# Line 484 | Line 504 | contains
504      
505    end subroutine do_force_loop
506  
507 <  subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t,pot)
507 >  subroutine do_pair(i, j, rijsq, d, do_pot, do_stress, u_l, A, f, t, pot)
508  
509      real( kind = dp ) :: pot
510 <    real( kind = dp ), dimension(:,:) :: u_l
511 <    real (kind=dp), dimension(:,:) :: A
512 <    real (kind=dp), dimension(:,:) :: f
513 <    real (kind=dp), dimension(:,:) :: t
510 >    real( kind = dp ), dimension(3,getNlocal()) :: u_l
511 >    real (kind=dp), dimension(9,getNlocal()) :: A
512 >    real (kind=dp), dimension(3,getNlocal()) :: f
513 >    real (kind=dp), dimension(3,getNlocal()) :: t
514  
515      logical, intent(inout) :: do_pot, do_stress
516      integer, intent(in) :: i, j
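
The dummy arrays of do_pair change from assumed shape, dimension(:,:), to explicit shape with extents taken from getNlocal(). Explicit-shape bounds are fixed on entry, which documents the expected layout (three components per atom for u_l, f, and t; a flattened 3x3 rotation matrix, nine per atom, for A) and lets the compiler check conformance at the call. A standalone illustration of an explicit-shape dummy (pair_kernel and n are hypothetical; n plays the role of getNlocal()):

    subroutine pair_kernel(n, f)
      integer, parameter :: dp = selected_real_kind(15)
      integer, intent(in) :: n
      ! explicit-shape dummy: bounds are known to be 3 x n on entry
      real( kind = dp ), dimension(3,n) :: f
      f(1:3,1:n) = 0.0_dp
    end subroutine pair_kernel
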
# Line 505 | Line 525 | contains
525  
526      r = sqrt(rijsq)
527  
528 +
529 +
530   #ifdef IS_MPI
531 +    if (tagRow(i) .eq. tagColumn(j)) then
532 +       write(0,*) 'do_pair is doing', i , j, tagRow(i), tagColumn(j)
533 +    endif
534  
535      me_i = atid_row(i)
536      me_j = atid_col(j)
# Line 574 | Line 599 | contains
599      real ( kind = dp ), intent(out) :: r_sq
600      real( kind = dp ) :: d(3)
601      real( kind = dp ) :: d_old(3)
602 <    d(1:3) = q_i(1:3) - q_j(1:3)
602 >    d(1:3) = q_j(1:3) - q_i(1:3)
603      d_old = d
604      ! Wrap back into periodic box if necessary
605      if ( SimUsesPBC() ) then
606 <
606 >      
607         d(1:3) = d(1:3) - box(1:3) * sign(1.0_dp,d(1:3)) * &
608              int(abs(d(1:3)/box(1:3)) + 0.5_dp)
609 <
609 >      
610      endif
611      r_sq = dot_product(d,d)
612          
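
Two changes in get_interatomic_vector: the separation vector now runs from atom i to atom j (d = q_j - q_i), and the wrap implements the minimum image convention. sign(1.0_dp,d) * int(abs(d/box) + 0.5_dp) is d/box rounded to the nearest whole number of box lengths, so each component of d is shifted to the closest periodic image. Worked through one component with box = 10 and d = 7: abs(7/10) + 0.5 = 1.2, int(...) = 1, and d becomes 7 - 10 = -3, the nearer image. The same wrap as a standalone function (minimum_image is a hypothetical name; dp mirrors the file's double-precision kind):

    function minimum_image(d, box) result(dw)
      integer, parameter :: dp = selected_real_kind(15)
      real( kind = dp ), intent(in) :: d(3), box(3)
      real( kind = dp ) :: dw(3)
      ! shift each component by the nearest whole number of box lengths
      dw = d - box * sign(1.0_dp, d) * int(abs(d/box) + 0.5_dp)
    end function minimum_image
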
# Line 643 | Line 668 | contains
668      rf = 0.0_dp
669      tau_Temp = 0.0_dp
670      virial_Temp = 0.0_dp
646    
671    end subroutine zero_work_arrays
672    
673    function skipThisPair(atom1, atom2) result(skip_it)
# Line 687 | Line 711 | contains
711   #else
712      unique_id_2 = atom2
713   #endif
714 <    
714 >
715   #ifdef IS_MPI
716      !! this situation should only arise in MPI simulations
717      if (unique_id_1 == unique_id_2) then
# Line 697 | Line 721 | contains
721      
722      !! this prevents us from doing the pair on multiple processors
723      if (unique_id_1 < unique_id_2) then
724 <       if (mod(unique_id_1 + unique_id_2,2) == 0) skip_it = .true.
725 <       return
724 >       if (mod(unique_id_1 + unique_id_2,2) == 0) then
725 >          skip_it = .true.
726 >          return
727 >       endif
728      else                
729 <       if (mod(unique_id_1 + unique_id_2,2) == 1) skip_it = .true.
730 <       return
729 >       if (mod(unique_id_1 + unique_id_2,2) == 1) then
730 >          skip_it = .true.
731 >          return
732 >       endif
733      endif
734   #endif
735 <
735 >
736      !! the rest of these situations can happen in all simulations:
737      do i = 1, nExcludes_global      
738         if ((excludesGlobal(i) == unique_id_1) .or. &
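
In an MPI run the same pair can be generated twice, once on the processor holding atom i's row block and once on the one holding its column block. The rewritten parity test above keeps exactly one copy: when the smaller global id comes first, the pair is skipped if id1 + id2 is even; in the opposite ordering it is skipped if the sum is odd, so even and odd sums split the work between the two orderings. The rewrite also fixes a real bug: in the old version the bare return executed unconditionally, so the exclusion checks below were never reached. The rule as a standalone predicate (keep_pair is a hypothetical name):

    function keep_pair(id1, id2) result(keep)
      integer, intent(in) :: id1, id2
      logical :: keep
      if (id1 < id2) then
         keep = mod(id1 + id2, 2) /= 0   ! this ordering keeps odd sums
      else
         keep = mod(id1 + id2, 2) /= 1   ! this ordering keeps even sums
      endif
    end function keep_pair
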
# Line 713 | Line 741 | contains
741            return
742         endif
743      enddo
744 <
744 >
745      do i = 1, nExcludes_local
746         if (excludesLocal(1,i) == unique_id_1) then
747            if (excludesLocal(2,i) == unique_id_2) then

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines