root/group/trunk/OOPSE-4/src/UseTheForce/DarkSide/simParallel.F90
Revision: 3135
Committed: Sat May 26 17:53:04 2007 UTC by chuckv
File size: 30826 byte(s)
Log Message:
Removed debug message from simParallel.

File Contents

# Content
1 !!
2 !! Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 !!
4 !! The University of Notre Dame grants you ("Licensee") a
5 !! non-exclusive, royalty free, license to use, modify and
6 !! redistribute this software in source and binary code form, provided
7 !! that the following conditions are met:
8 !!
9 !! 1. Acknowledgement of the program authors must be made in any
10 !! publication of scientific results based in part on use of the
11 !! program. An acceptable form of acknowledgement is citation of
12 !! the article in which the program was described (Matthew
13 !! A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
14 !! J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
15 !! Parallel Simulation Engine for Molecular Dynamics,"
16 !! J. Comput. Chem. 26, pp. 252-271 (2005))
17 !!
18 !! 2. Redistributions of source code must retain the above copyright
19 !! notice, this list of conditions and the following disclaimer.
20 !!
21 !! 3. Redistributions in binary form must reproduce the above copyright
22 !! notice, this list of conditions and the following disclaimer in the
23 !! documentation and/or other materials provided with the
24 !! distribution.
25 !!
26 !! This software is provided "AS IS," without a warranty of any
27 !! kind. All express or implied conditions, representations and
28 !! warranties, including any implied warranty of merchantability,
29 !! fitness for a particular purpose or non-infringement, are hereby
30 !! excluded. The University of Notre Dame and its licensors shall not
31 !! be liable for any damages suffered by licensee as a result of
32 !! using, modifying or distributing the software or its
33 !! derivatives. In no event will the University of Notre Dame or its
34 !! licensors be liable for any lost revenue, profit or data, or for
35 !! direct, indirect, special, consequential, incidental or punitive
36 !! damages, however caused and regardless of the theory of liability,
37 !! arising out of the use of or inability to use software, even if the
38 !! University of Notre Dame has been advised of the possibility of
39 !! such damages.
40 !!
41
42
43 !! MPI support for long range forces using force decomposition
44 !! on a square grid of processors.
45 !! Corresponds to mpiSimulation.cpp for C++.
46 !! This module also contains a private interface for MPI Fortran 90 routines.
47 !!
48 !! @author Charles F. Vardeman II
49 !! @author Matthew Meineke
50 !! @version $Id: simParallel.F90,v 1.10 2007-05-26 17:53:04 chuckv Exp $, $Date: 2007-05-26 17:53:04 $, $Name: not supported by cvs2svn $, $Revision: 1.10 $
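!!
!! Illustrative note (comments only; assuming 4 processors each holding
!! 1000 local atoms): in the force decomposition used below the processors
!! are arranged on an nRows x nColumns grid, and each one exchanges atom
!! data only along its row and column communicators.  A 2 x 2 grid then
!! gives every processor a row and a column of roughly 2000 atoms to work
!! with instead of all 4000.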
51
52 module mpiSimulation
53 use definitions
54 use status
55 #ifdef IS_MPI
56 use oopseMPI
57 implicit none
58 PRIVATE
59
60
61 !! PUBLIC Subroutines contained in this module
62 !! gather and scatter are a generic interface
63 !! to gather and scatter routines
64 public :: gather, scatter
65 public :: setupSimParallel
66 public :: replanSimParallel
67 public :: getNatomsInCol
68 public :: getNatomsInRow
69 public :: getNgroupsInCol
70 public :: getNgroupsInRow
71 public :: isMPISimSet
72 public :: printComponentPlan
73 public :: getMyNode
74
75 !! PUBLIC Subroutines contained in MPI module
76 public :: mpi_bcast
77 public :: mpi_allreduce
78 ! public :: mpi_reduce
79 public :: mpi_send
80 public :: mpi_recv
81 public :: mpi_get_processor_name
82 public :: mpi_finalize
83
84 !! PUBLIC mpi variables
85 public :: mpi_comm_world
86 public :: mpi_character
87 public :: mpi_integer
88 public :: mpi_lor
89 public :: mpi_logical
90 public :: mpi_real
91 public :: mpi_double_precision
92 public :: mpi_sum
93 public :: mpi_max
94 public :: mpi_status_size
95 public :: mpi_any_source
96
97
98
99 !! Safety logical to prevent access to ComponentPlan until
100 !! set by C++.
101 logical, save :: ComponentPlanSet = .false.
102
103
104 !! generic mpi error declaration.
105 integer, public :: mpi_err
106 character(len = statusMsgSize) :: errMsg
107
108 #ifdef PROFILE
109 public :: printCommTime
110 public :: getCommTime
111 real,save :: commTime = 0.0
112 real :: commTimeInitial,commTimeFinal
113 #endif
114
115 !! Include mpiComponentPlan type. mpiComponentPlan is a
116 !! dual header file for both C and Fortran.
117 #define __FORTRAN90
118 #include "UseTheForce/mpiComponentPlan.h"
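!! Illustrative sketch only (the real definitions live in
!! UseTheForce/mpiComponentPlan.h): a dual C/Fortran header of this kind
!! typically switches on the __FORTRAN90 macro, roughly:
!!
!!   #ifdef __FORTRAN90
!!     type, public :: mpiComponentPlan
!!       integer :: nProcessors, myNode, nAtomsLocal, nGroupsLocal, ...
!!     end type mpiComponentPlan
!!   #else
!!     typedef struct { int nProcessors; int myNode; /* ... */ } mpiComponentPlan;
!!   #endif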
119
120
121 !! Tags used during force loop for parallel simulation
122 integer, public, allocatable, dimension(:) :: AtomLocalToGlobal
123 integer, public, allocatable, dimension(:) :: AtomRowToGlobal
124 integer, public, allocatable, dimension(:) :: AtomColToGlobal
125 integer, public, allocatable, dimension(:) :: GroupLocalToGlobal
126 integer, public, allocatable, dimension(:) :: GroupRowToGlobal
127 integer, public, allocatable, dimension(:) :: GroupColToGlobal
128
129 !! Logical set true if mpiSimulation has been initialized
130 logical, save :: isSimSet = .false.
131
132
133 type (mpiComponentPlan), save :: mpiSim
134
135 !! gs_plan contains plans for gather and scatter routines
136 type, public :: gs_plan
137 private
138 type (mpiComponentPlan), pointer :: gsComponentPlan => NULL()
139 integer :: gsPlanSize !! size of this plan (nDim*nComponents)
140 integer :: globalPlanSize !! size of all components in plan
141 integer, dimension(:), pointer :: displs !! Displacements array for mpi indexed from 0.
142 integer, dimension(:), pointer :: counts !! Counts array for mpi indexed from 0.
143 integer :: myPlanComm !! My communicator for this plan
144 integer :: myPlanRank !! My rank in this plan
145 integer :: planNprocs !! Number of processors in this plan
146 end type gs_plan
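!! Usage note (illustrative): a gs_plan is built by plan_gather_scatter()
!! below (and rebuilt in replanSimParallel).  Its counts and displs arrays
!! are handed directly to mpi_allgatherv (gather) and mpi_reduce_scatter
!! (scatter), and gsPlanSize is nDim*nObjects on the local processor.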
147
148 ! plans for different decompositions
149 type (gs_plan), public, save :: plan_atom_row
150 type (gs_plan), public, save :: plan_atom_row_3d
151 type (gs_plan), public, save :: plan_atom_col
152 type (gs_plan), public, save :: plan_atom_col_3d
153 type (gs_plan), public, save :: plan_atom_row_Rotation
154 type (gs_plan), public, save :: plan_atom_col_Rotation
155 type (gs_plan), public, save :: plan_group_row
156 type (gs_plan), public, save :: plan_group_col
157 type (gs_plan), public, save :: plan_group_row_3d
158 type (gs_plan), public, save :: plan_group_col_3d
159
160 type (mpiComponentPlan), pointer :: simComponentPlan
161
162 ! interface for gather scatter routines
163 !! Generic interface for gather.
164 !! Gathers a local array into a row or column array.
165 !! Interfaces are provided for integer, double, and double
166 !! rank-2 arrays.
167 interface gather
168 module procedure gather_integer
169 module procedure gather_double
170 module procedure gather_double_2d
171 end interface
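!! Example call (illustrative; q and q_Row are hypothetical position
!! arrays of shape (nDim, nAtomsLocal) and (nDim, nAtomsInRow)):
!!   call gather(q, q_Row, plan_atom_row_3d)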
172
173 !! Generic interface for scatter.
174 !! Scatters a row or column array, adding components
175 !! and reducing them to a local nComponent array.
176 !! Interfaces are provided for double and double rank-2 arrays.
177
178 interface scatter
179 module procedure scatter_double
180 module procedure scatter_double_2d
181 end interface
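!! Example call (illustrative; f_Row and f_local are hypothetical force
!! arrays of shape (nDim, nAtomsInRow) and (nDim, nAtomsLocal)); the
!! row-decomposed forces are summed back onto the local atoms by
!!   call scatter(f_Row, f_local, plan_atom_row_3d)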
182
183
184
185 contains
186
187 !! Sets up mpiComponentPlan with structure passed from C++.
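!! Summary (for orientation): setup proceeds as make_Force_Grid ->
!! updateGridComponents -> the ten plan_gather_scatter calls for the
!! row/column, atom/group, scalar/3d/rotation plans declared above ->
!! setAtomTags / setGroupTags; repeat calls return immediately once
!! componentPlanSet is true.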
188 subroutine setupSimParallel(thisComponentPlan, nAtomTags, atomTags, &
189 nGroupTags, groupTags, status)
190 !! Passed Arguments
191 !! mpiComponentPlan struct from C
192 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
193 !! Number of tags passed
194 integer, intent(in) :: nAtomTags, nGroupTags
195 !! Result status, 0 = normal, -1 = error
196 integer, intent(out) :: status
197 integer :: localStatus
198 !! Global reference tag for local particles
199 integer, dimension(nAtomTags), intent(inout) :: atomTags
200 integer, dimension(nGroupTags), intent(inout) :: groupTags
201
202 !write(*,*) 'mpiSim_mod thinks node', thisComponentPlan%myNode, &
203 ! ' has atomTags(1) = ', atomTags(1)
204
205 status = 0
206 if (componentPlanSet) then
207 return
208 endif
209 componentPlanSet = .true.
210
211 !! Copy the C component plan to Fortran.
212 mpiSim = thisComponentPlan
213
214 call make_Force_Grid(mpiSim, localStatus)
215 if (localStatus /= 0) then
216 write(errMsg, *) 'An error in making the force grid has occurred'
217 call handleError("setupSimParallel", errMsg)
218 status = -1
219 return
220 endif
221
222 call updateGridComponents(mpiSim, localStatus)
223 if (localStatus /= 0) then
224 write(errMsg,*) "Error updating grid components"
225 call handleError("setupSimParallel", errMsg)
226 status = -1
227 return
228 endif
229
230 !! initialize gather and scatter plans used in this simulation
231 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
232 mpiSim, mpiSim%rowComm, plan_atom_row)
233 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
234 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
235 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
236 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
237 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
238 mpiSim, mpiSim%rowComm, plan_group_row)
239 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
240 mpiSim, mpiSim%rowComm, plan_group_row_3d)
241
242 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
243 mpiSim, mpiSim%columnComm, plan_atom_col)
244 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
245 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
246 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
247 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
248 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
249 mpiSim, mpiSim%columnComm, plan_group_col)
250 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
251 mpiSim, mpiSim%columnComm, plan_group_col_3d)
252
253 ! Initialize tags
254
255 call setAtomTags(atomTags,localStatus)
256 if (localStatus /= 0) then
257 write(errMsg, *) 'An error in setting Atom Tags has occurred'
258 call handleError("setupSimParallel", errMsg)
259 status = -1
260 return
261 endif
262
263
264 call setGroupTags(groupTags,localStatus)
265 if (localStatus /= 0) then
266 write(errMsg, *) 'An error in setting Group Tags has occurred'
267 call handleError("setupSimParallel", errMsg)
268 status = -1
269 return
270 endif
271
272 isSimSet = .true.
273
274 ! call printComponentPlan(mpiSim,0)
275 end subroutine setupSimParallel
276
277 subroutine replanSimParallel(thisComponentPlan,status)
278 ! Passed Arguments
279 !! mpiComponentPlan struct from C
280 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
281 integer, intent(out) :: status
282 integer :: localStatus
283 integer :: mpierror
284 status = 0
285
286 call updateGridComponents(thisComponentPlan,localStatus)
287 if (localStatus /= 0) then
288 status = -1
289 return
290 endif
291
292 !! Unplan Gather Scatter plans
293 call unplan_gather_scatter(plan_atom_row)
294 call unplan_gather_scatter(plan_atom_row_3d)
295 call unplan_gather_scatter(plan_atom_row_Rotation)
296 call unplan_gather_scatter(plan_group_row)
297 call unplan_gather_scatter(plan_group_row_3d)
298
299 call unplan_gather_scatter(plan_atom_col)
300 call unplan_gather_scatter(plan_atom_col_3d)
301 call unplan_gather_scatter(plan_atom_col_Rotation)
302 call unplan_gather_scatter(plan_group_col)
303 call unplan_gather_scatter(plan_group_col_3d)
304
305 !! initialize gather and scatter plans used in this simulation
306 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
307 mpiSim, mpiSim%rowComm, plan_atom_row)
308 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
309 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
310 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
311 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
312 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
313 mpiSim, mpiSim%rowComm, plan_group_row)
314 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
315 mpiSim, mpiSim%rowComm, plan_group_row_3d)
316
317 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
318 mpiSim, mpiSim%columnComm, plan_atom_col)
319 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
320 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
321 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
322 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
323 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
324 mpiSim, mpiSim%columnComm, plan_group_col)
325 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
326 mpiSim, mpiSim%columnComm, plan_group_col_3d)
327
328 end subroutine replanSimParallel
329
330 !! Updates number of row and column components for long range forces.
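!! In outline: each processor contributes its local atom and group counts,
!! and the mpi_allreduce(..., mpi_sum, ...) calls over the row and column
!! communicators produce totals such as
!!   nAtomsInRow = sum of nAtomsLocal over the processors in this row.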
331 subroutine updateGridComponents(thisComponentPlan, status)
332 type (mpiComponentPlan) :: thisComponentPlan !! mpiComponentPlan
333
334 !! Status return
335 !! - 0 Success
336 !! - -1 Failure
337 integer, intent(out) :: status
338 integer :: nAtomsLocal
339 integer :: nAtomsInRow = 0
340 integer :: nAtomsInColumn = 0
341 integer :: nGroupsLocal
342 integer :: nGroupsInRow = 0
343 integer :: nGroupsInColumn = 0
344 integer :: mpiErrors
345
346 status = 0
347 if (.not. componentPlanSet) return
348
349 if (thisComponentPlan%nAtomsLocal == 0) then
350 status = -1
351 return
352 endif
353 if (thisComponentPlan%nGroupsLocal == 0) then
354 write(*,*) 'tcp%ngl = ', thisComponentPlan%nGroupsLocal
355 status = -1
356 return
357 endif
358
359 nAtomsLocal = thisComponentPlan%nAtomsLocal
360 nGroupsLocal = thisComponentPlan%nGroupsLocal
361
362 call mpi_allreduce(nAtomsLocal, nAtomsInRow, 1, mpi_integer, &
363 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
364 if (mpiErrors /= 0) then
365 status = -1
366 return
367 endif
368
369 call mpi_allreduce(nAtomsLocal, nAtomsInColumn, 1, mpi_integer, &
370 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
371 if (mpiErrors /= 0) then
372 status = -1
373 return
374 endif
375
376 call mpi_allreduce(nGroupsLocal, nGroupsInRow, 1, mpi_integer, &
377 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
378 if (mpiErrors /= 0) then
379 status = -1
380 return
381 endif
382
383 call mpi_allreduce(nGroupsLocal, nGroupsInColumn, 1, mpi_integer, &
384 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
385 if (mpiErrors /= 0) then
386 status = -1
387 return
388 endif
389
390 thisComponentPlan%nAtomsInRow = nAtomsInRow
391 thisComponentPlan%nAtomsInColumn = nAtomsInColumn
392 thisComponentPlan%nGroupsInRow = nGroupsInRow
393 thisComponentPlan%nGroupsInColumn = nGroupsInColumn
394
395 end subroutine updateGridComponents
396
397
398 !! Creates a square force decomposition of processors into row and column
399 !! communicators.
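!! Worked example (illustrative): with 6 processors, nColumnsMax =
!! nint(sqrt(6.0)) = 2, the divisor loop below leaves nColumns = 2 and
!! nRows = 3; world rank 5 then gets rowIndex = 5/2 = 2 and
!! columnIndex = mod(5,2) = 1 before the communicators are split.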
400 subroutine make_Force_Grid(thisComponentPlan,status)
401 type (mpiComponentPlan) :: thisComponentPlan
402 integer, intent(out) :: status !! status returns -1 if error
403 integer :: nColumnsMax !! Maximum number of columns
404 integer :: nWorldProcessors !! Total number of processors in World comm.
405 integer :: rowIndex !! Row for this processor.
406 integer :: columnIndex !! Column for this processor.
407 integer :: nRows !! Total number of rows.
408 integer :: nColumns !! Total number of columns.
409 integer :: mpiErrors !! MPI errors.
410 integer :: rowCommunicator !! MPI row communicator.
411 integer :: columnCommunicator
412 integer :: myWorldRank
413 integer :: i
414
415
416 if (.not. ComponentPlanSet) return
417 status = 0
418
419 !! We make a dangerous assumption here that if nProcessors is
420 !! zero, then we need to get the information from MPI.
421 if (thisComponentPlan%nProcessors == 0 ) then
422 call mpi_comm_size( MPI_COMM_WORLD, nWorldProcessors,mpiErrors)
423 if ( mpiErrors /= 0 ) then
424 status = -1
425 return
426 endif
427 call mpi_comm_rank( MPI_COMM_WORLD,myWorldRank,mpiErrors)
428 if ( mpiErrors /= 0 ) then
429 status = -1
430 return
431 endif
432
433 else
434 nWorldProcessors = thisComponentPlan%nProcessors
435 myWorldRank = thisComponentPlan%myNode
436 endif
437
438 nColumnsMax = nint(sqrt(real(nWorldProcessors,kind=dp)))
439
440 do i = 1, nColumnsMax
441 if (mod(nWorldProcessors,i) == 0) nColumns = i
442 end do
443
444 nRows = nWorldProcessors/nColumns
445
446 rowIndex = myWorldRank/nColumns
447
448 call mpi_comm_split(mpi_comm_world,rowIndex,0,rowCommunicator,mpiErrors)
449 if ( mpiErrors /= 0 ) then
450 write(errMsg, *) 'An error ', mpiErrors, ' occurred in splitting the row communicator'
451 call handleError("makeForceGrid", errMsg)
452 status = -1
453 return
454 endif
455
456 columnIndex = mod(myWorldRank,nColumns)
457 call mpi_comm_split(mpi_comm_world,columnIndex,0,columnCommunicator,mpiErrors)
458 if ( mpiErrors /= 0 ) then
459 write(errMsg, *) "MPI comm split failed at columnCommunicator with error ", mpiErrors
460 call handleError("makeForceGrid", errMsg)
461 status = -1
462 return
463 endif
464
465 ! Set appropriate components of thisComponentPlan
466 thisComponentPlan%rowComm = rowCommunicator
467 thisComponentPlan%columnComm = columnCommunicator
468 thisComponentPlan%rowIndex = rowIndex
469 thisComponentPlan%columnIndex = columnIndex
470 thisComponentPlan%nRows = nRows
471 thisComponentPlan%nColumns = nColumns
472
473 end subroutine make_Force_Grid
474
475 !! Initializes a gather/scatter plan.
476 subroutine plan_gather_scatter( nDim, nObjects, thisComponentPlan, &
477 thisComm, this_plan, status)
478 integer, intent(in) :: nDim !! Number of dimensions for gather scatter plan
479 integer, intent(in) :: nObjects
480 type (mpiComponentPlan), intent(in), target :: thisComponentPlan
481 type (gs_plan), intent(out) :: this_plan !! MPI Component Plan
482 integer, intent(in) :: thisComm !! MPI communicator for this plan
483
484 integer :: arraySize !! size to allocate plan for
485 integer, intent(out), optional :: status
486 integer :: ierror
487 integer :: i,junk
488
489 if (present(status)) status = 0
490
491 !! Set gsComponentPlan pointer
492 !! to the component plan we want to use for this gather/scatter plan.
493 !! WARNING: this could be dangerous since thisComponentPlan was originally
494 !! allocated in C++ and there is a significant difference between C and
495 !! Fortran 95 pointers....
496 this_plan%gsComponentPlan => thisComponentPlan
497
498 ! Set this plan size for displs array.
499 this_plan%gsPlanSize = nDim * nObjects
500
501 ! Duplicate communicator for this plan
502 call mpi_comm_dup(thisComm, this_plan%myPlanComm, mpi_err)
503 if (mpi_err /= 0) then
504 if (present(status)) status = -1
505 return
506 end if
507 call mpi_comm_rank(this_plan%myPlanComm, this_plan%myPlanRank, mpi_err)
508 if (mpi_err /= 0) then
509 if (present(status)) status = -1
510 return
511 end if
512
513 call mpi_comm_size(this_plan%myPlanComm, this_plan%planNprocs, mpi_err)
514
515 if (mpi_err /= 0) then
516 if (present(status)) status = -1
517 return
518 end if
519
520 !! counts and displacements arrays are indexed from 0 to be compatible
521 !! with MPI arrays.
522 allocate (this_plan%counts(0:this_plan%planNprocs-1),STAT=ierror)
523 if (ierror /= 0) then
524 if (present(status)) status = -1
525 return
526 end if
527
528 allocate (this_plan%displs(0:this_plan%planNprocs-1),STAT=ierror)
529 if (ierror /= 0) then
530 if (present(status)) status = -1
531 return
532 end if
533
534 !! Gather all of the local plan sizes into an array of length planNprocs.
535 call mpi_allgather(this_plan%gsPlanSize,1,mpi_integer,this_plan%counts, &
536 1,mpi_integer,thisComm,mpi_err)
537
538 if (mpi_err /= 0) then
539 if (present(status)) status = -1
540 return
541 end if
542
543 !! figure out the total number of particles in this plan
544 this_plan%globalPlanSize = sum(this_plan%counts)
545
546 !! initialize plan displacements.
547 this_plan%displs(0) = 0
548 do i = 1, this_plan%planNprocs - 1,1
549 this_plan%displs(i) = this_plan%displs(i-1) + this_plan%counts(i-1)
550 end do
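!! Illustrative result: if the allgather above produced
!! counts = (/ 12, 8, 10 /), the prefix sum gives displs = (/ 0, 12, 20 /)
!! and globalPlanSize = 30.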
551 end subroutine plan_gather_scatter
552
553 subroutine unplan_gather_scatter(this_plan)
554 type (gs_plan), intent(inout) :: this_plan
555
556 this_plan%gsComponentPlan => null()
557 call mpi_comm_free(this_plan%myPlanComm,mpi_err)
558
559 deallocate(this_plan%counts)
560 deallocate(this_plan%displs)
561
562 end subroutine unplan_gather_scatter
563
564 subroutine gather_integer( sbuffer, rbuffer, this_plan, status)
565
566 type (gs_plan), intent(inout) :: this_plan
567 integer, dimension(:), intent(inout) :: sbuffer
568 integer, dimension(:), intent(inout) :: rbuffer
569 integer :: noffset
570 integer, intent(out), optional :: status
571 integer :: i
572
573 if (present(status)) status = 0
574 noffset = this_plan%displs(this_plan%myPlanRank)
575
576 ! if (getmyNode() == 1) then
577 ! write(*,*) "Node 0 printing allgatherv vars"
578 ! write(*,*) "Noffset: ", noffset
579 ! write(*,*) "PlanSize: ", this_plan%gsPlanSize
580 ! write(*,*) "PlanComm: ", this_plan%myPlanComm
581 ! end if
582
583 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_integer, &
584 rbuffer,this_plan%counts,this_plan%displs,mpi_integer, &
585 this_plan%myPlanComm, mpi_err)
586
587 if (mpi_err /= 0) then
588 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
589 call handleError("gather_integer", errMsg)
590 if (present(status)) status = -1
591 endif
592
593 end subroutine gather_integer
594
595 subroutine gather_double( sbuffer, rbuffer, this_plan, status)
596
597 type (gs_plan), intent(in) :: this_plan
598 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
599 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
600 integer :: noffset
601 integer, intent(out), optional :: status
602
603
604 if (present(status)) status = 0
605 noffset = this_plan%displs(this_plan%myPlanRank)
606 #ifdef PROFILE
607 call cpu_time(commTimeInitial)
608 #endif
609 #ifdef SINGLE_PRECISION
610 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_real, &
611 rbuffer,this_plan%counts,this_plan%displs,mpi_real, &
612 this_plan%myPlanComm, mpi_err)
613 #else
614 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
615 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
616 this_plan%myPlanComm, mpi_err)
617 #endif
618 #ifdef PROFILE
619 call cpu_time(commTimeFinal)
620 commTime = commTime + commTimeFinal - commTimeInitial
621 #endif
622
623 if (mpi_err /= 0) then
624 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
625 call handleError("gather_double", errMsg)
626 if (present(status)) status = -1
627 endif
628
629 end subroutine gather_double
630
631 subroutine gather_double_2d( sbuffer, rbuffer, this_plan, status)
632
633 type (gs_plan), intent(in) :: this_plan
634 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
635 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
636 integer :: noffset,i,ierror
637 integer, intent(out), optional :: status
638
639 external mpi_allgatherv
640
641 if (present(status)) status = 0
642
643 ! noffset = this_plan%displs(this_plan%me)
644 #ifdef PROFILE
645 call cpu_time(commTimeInitial)
646 #endif
647
648 #ifdef SINGLE_PRECISION
649 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_real, &
650 rbuffer,this_plan%counts,this_plan%displs,mpi_real, &
651 this_plan%myPlanComm, mpi_err)
652 #else
653 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
654 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
655 this_plan%myPlanComm, mpi_err)
656 #endif
657
658 #ifdef PROFILE
659 call cpu_time(commTimeFinal)
660 commTime = commTime + commTimeFinal - commTimeInitial
661 #endif
662
663 if (mpi_err /= 0) then
664 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
665 call handleError("gather_double_2d", errMsg)
666 if (present(status)) status = -1
667 endif
668
669 end subroutine gather_double_2d
670
671 subroutine scatter_double( sbuffer, rbuffer, this_plan, status)
672
673 type (gs_plan), intent(in) :: this_plan
674 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
675 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
676 integer, intent(out), optional :: status
677 external mpi_reduce_scatter
678
679 if (present(status)) status = 0
680
681 #ifdef PROFILE
682 call cpu_time(commTimeInitial)
683 #endif
684 #ifdef SINGLE_PRECISION
685 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
686 mpi_real, MPI_SUM, this_plan%myPlanComm, mpi_err)
687 #else
688 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
689 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
690 #endif
691 #ifdef PROFILE
692 call cpu_time(commTimeFinal)
693 commTime = commTime + commTimeFinal - commTimeInitial
694 #endif
695
696 if (mpi_err /= 0) then
697 write(errMsg, *) "mpi_reduce_scatter failed with error ", mpi_err
698 call handleError("scatter_double", errMsg)
699 if (present(status)) status = -1
700 endif
701
702 end subroutine scatter_double
703
704 subroutine scatter_double_2d( sbuffer, rbuffer, this_plan, status)
705
706 type (gs_plan), intent(in) :: this_plan
707 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
708 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
709 integer, intent(out), optional :: status
710 external mpi_reduce_scatter
711
712 if (present(status)) status = 0
713 #ifdef PROFILE
714 call cpu_time(commTimeInitial)
715 #endif
716 #ifdef SINGLE_PRECISION
717 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
718 mpi_real, MPI_SUM, this_plan%myPlanComm, mpi_err)
719 #else
720 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
721 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
722 #endif
723 #ifdef PROFILE
724 call cpu_time(commTimeFinal)
725 commTime = commTime + commTimeFinal - commTimeInitial
726 #endif
727
728 if (mpi_err /= 0) then
729 write(errMsg, *) "mpi_reduce_scatter failed with error ", mpi_err
730 call handleError("scatter_double_2d", errMsg)
731 if (present(status)) status = -1
732 endif
733
734 end subroutine scatter_double_2d
735
736 subroutine setAtomTags(tags, status)
737 integer, dimension(:) :: tags
738 integer :: status
739
740 integer :: alloc_stat
741
742 integer :: nAtomsInCol
743 integer :: nAtomsInRow
744
745 status = 0
746 ! allocate row arrays
747 nAtomsInRow = getNatomsInRow(plan_atom_row)
748 nAtomsInCol = getNatomsInCol(plan_atom_col)
749
750 if(.not. allocated(AtomLocalToGlobal)) then
751 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
752 if (alloc_stat /= 0 ) then
753 status = -1
754 return
755 endif
756 else
757 deallocate(AtomLocalToGlobal)
758 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
759 if (alloc_stat /= 0 ) then
760 status = -1
761 return
762 endif
763
764 endif
765
766 AtomLocalToGlobal = tags
767
768 if (.not. allocated(AtomRowToGlobal)) then
769 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
770 if (alloc_stat /= 0 ) then
771 status = -1
772 return
773 endif
774 else
775 deallocate(AtomRowToGlobal)
776 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
777 if (alloc_stat /= 0 ) then
778 status = -1
779 return
780 endif
781
782 endif
783 ! allocate column arrays
784 if (.not. allocated(AtomColToGlobal)) then
785 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
786 if (alloc_stat /= 0 ) then
787 status = -1
788 return
789 endif
790 else
791 deallocate(AtomColToGlobal)
792 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
793 if (alloc_stat /= 0 ) then
794 status = -1
795 return
796 endif
797 endif
798
799 call gather(tags, AtomRowToGlobal, plan_atom_row)
800 call gather(tags, AtomColToGlobal, plan_atom_col)
801
802 end subroutine setAtomTags
803
804 subroutine setGroupTags(tags, status)
805 integer, dimension(:) :: tags
806 integer :: status
807
808 integer :: alloc_stat
809
810 integer :: nGroupsInCol
811 integer :: nGroupsInRow
812
813 status = 0
814
815 nGroupsInRow = getNgroupsInRow(plan_group_row)
816 nGroupsInCol = getNgroupsInCol(plan_group_col)
817
818 if(allocated(GroupLocalToGlobal)) then
819 deallocate(GroupLocalToGlobal)
820 endif
821 allocate(GroupLocalToGlobal(size(tags)),STAT=alloc_stat)
822 if (alloc_stat /= 0 ) then
823 status = -1
824 return
825 endif
826
827 GroupLocalToGlobal = tags
828
829 if(allocated(GroupRowToGlobal)) then
830 deallocate(GroupRowToGlobal)
831 endif
832 allocate(GroupRowToGlobal(nGroupsInRow),STAT=alloc_stat)
833 if (alloc_stat /= 0 ) then
834 status = -1
835 return
836 endif
837
838 if(allocated(GroupColToGlobal)) then
839 deallocate(GroupColToGlobal)
840 endif
841 allocate(GroupColToGlobal(nGroupsInCol),STAT=alloc_stat)
842 if (alloc_stat /= 0 ) then
843 status = -1
844 return
845 endif
846
847 call gather(tags, GroupRowToGlobal, plan_group_row)
848 call gather(tags, GroupColToGlobal, plan_group_col)
849
850 end subroutine setGroupTags
851
852 function getNatomsInCol(thisplan) result(nInCol)
853 type (gs_plan), intent(in) :: thisplan
854 integer :: nInCol
855 nInCol = thisplan%gsComponentPlan%nAtomsInColumn
856 end function getNatomsInCol
857
858 function getNatomsInRow(thisplan) result(nInRow)
859 type (gs_plan), intent(in) :: thisplan
860 integer :: nInRow
861 nInRow = thisplan%gsComponentPlan%nAtomsInRow
862 end function getNatomsInRow
863
864 function getNgroupsInCol(thisplan) result(nInCol)
865 type (gs_plan), intent(in) :: thisplan
866 integer :: nInCol
867 nInCol = thisplan%gsComponentPlan%nGroupsInColumn
868 end function getNgroupsInCol
869
870 function getNgroupsInRow(thisplan) result(nInRow)
871 type (gs_plan), intent(in) :: thisplan
872 integer :: nInRow
873 nInRow = thisplan%gsComponentPlan%nGroupsInRow
874 end function getNgroupsInRow
875
876 function isMPISimSet() result(isthisSimSet)
877 logical :: isthisSimSet
878 if (isSimSet) then
879 isthisSimSet = .true.
880 else
881 isthisSimSet = .false.
882 endif
883 end function isMPISimSet
884
885 subroutine printComponentPlan(this_plan,printNode)
886
887 type (mpiComponentPlan), intent(in) :: this_plan
888 integer, optional :: printNode
889 logical :: print_me  !! set explicitly on every call; an initializer here would imply SAVE
890
891 if (present(printNode)) then
892 print_me = (printNode == mpiSim%myNode)
893 else
894 print_me = .true.
895 endif
896
897 if (print_me) then
898 write(default_error,*) "SetupSimParallel: writing component plan"
899
900 write(default_error,*) "nMolGlobal: ", mpiSim%nMolGlobal
901 write(default_error,*) "nAtomsGlobal: ", mpiSim%nAtomsGlobal
902 write(default_error,*) "nAtomsLocal: ", mpiSim%nAtomsLocal
903 write(default_error,*) "myNode: ", mpiSim%myNode
904 write(default_error,*) "nProcessors: ", mpiSim%nProcessors
905 write(default_error,*) "rowComm: ", mpiSim%rowComm
906 write(default_error,*) "columnComm: ", mpiSim%columnComm
907 write(default_error,*) "nRows: ", mpiSim%nRows
908 write(default_error,*) "nColumns: ", mpiSim%nColumns
909 write(default_error,*) "nAtomsInRow: ", mpiSim%nAtomsInRow
910 write(default_error,*) "nAtomsInColumn: ", mpiSim%nAtomsInColumn
911 write(default_error,*) "rowIndex: ", mpiSim%rowIndex
912 write(default_error,*) "columnIndex: ", mpiSim%columnIndex
913 endif
914 end subroutine printComponentPlan
915
916 function getMyNode() result(myNode)
917 integer :: myNode
918 myNode = mpiSim%myNode
919 end function getMyNode
920
921 #ifdef PROFILE
922 subroutine printCommTime()
923 write(*,*) "MPI communication time is: ", commTime
924 end subroutine printCommTime
925
926 function getCommTime() result(comm_time)
927 real :: comm_time
928 comm_time = commTime
929 end function getCommTime
930
931 #endif
932
933 #endif // is_mpi
934 end module mpiSimulation
935