root/group/trunk/OOPSE-4/src/UseTheForce/DarkSide/simParallel.F90
Revision: 3346
Committed: Thu Feb 14 21:37:05 2008 UTC (17 years, 2 months ago) by chuckv
File size: 30850 byte(s)
Log Message:
Changes to simParallel to remove MPI stuff.

File Contents

# Content
1 !!
2 !! Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 !!
4 !! The University of Notre Dame grants you ("Licensee") a
5 !! non-exclusive, royalty free, license to use, modify and
6 !! redistribute this software in source and binary code form, provided
7 !! that the following conditions are met:
8 !!
9 !! 1. Acknowledgement of the program authors must be made in any
10 !! publication of scientific results based in part on use of the
11 !! program. An acceptable form of acknowledgement is citation of
12 !! the article in which the program was described (Matthew
13 !! A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
14 !! J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
15 !! Parallel Simulation Engine for Molecular Dynamics,"
16 !! J. Comput. Chem. 26, pp. 252-271 (2005))
17 !!
18 !! 2. Redistributions of source code must retain the above copyright
19 !! notice, this list of conditions and the following disclaimer.
20 !!
21 !! 3. Redistributions in binary form must reproduce the above copyright
22 !! notice, this list of conditions and the following disclaimer in the
23 !! documentation and/or other materials provided with the
24 !! distribution.
25 !!
26 !! This software is provided "AS IS," without a warranty of any
27 !! kind. All express or implied conditions, representations and
28 !! warranties, including any implied warranty of merchantability,
29 !! fitness for a particular purpose or non-infringement, are hereby
30 !! excluded. The University of Notre Dame and its licensors shall not
31 !! be liable for any damages suffered by licensee as a result of
32 !! using, modifying or distributing the software or its
33 !! derivatives. In no event will the University of Notre Dame or its
34 !! licensors be liable for any lost revenue, profit or data, or for
35 !! direct, indirect, special, consequential, incidental or punitive
36 !! damages, however caused and regardless of the theory of liability,
37 !! arising out of the use of or inability to use software, even if the
38 !! University of Notre Dame has been advised of the possibility of
39 !! such damages.
40 !!
41
42
43 !! MPI support for long-range forces using force decomposition
44 !! on a square grid of processors.
45 !! Corresponds to mpiSimulation.cpp on the C++ side.
46 !! This module also contains a private interface for MPI F90 routines.
47 !!
48 !! @author Charles F. Vardeman II
49 !! @author Matthew Meineke
50 !! @version $Id: simParallel.F90,v 1.11 2008-02-14 21:37:05 chuckv Exp $, $Date: 2008-02-14 21:37:05 $, $Name: not supported by cvs2svn $, $Revision: 1.11 $
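!! Schematically: the processors are arranged on an nRows x nColumns grid
!! (see make_Force_Grid below). Each processor gathers the atoms owned by
!! the members of its row communicator and of its column communicator,
!! evaluates only the interactions between those two sets (so each pair of
!! atoms is handled by exactly one processor), and the partial forces are
!! summed back to the owning processors by the scatter routines.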
51
52 module mpiSimulation
53 use definitions
54 use status
55 #ifdef IS_MPI
56 use oopseMPI
57 implicit none
58 PRIVATE
59 #endif
60
61
62 !! Include the mpiComponentPlan type. mpiComponentPlan.h is a
63 !! dual header file shared by both C and Fortran.
64 #define __FORTRAN90
65 #include "UseTheForce/mpiComponentPlan.h"
66
67 #ifdef IS_MPI
68
69 !! PUBLIC Subroutines contained in this module
70 !! gather and scatter are a generic interface
71 !! to gather and scatter routines
72 public :: gather, scatter
73 public :: setupSimParallel
74 public :: replanSimParallel
75 public :: getNatomsInCol
76 public :: getNatomsInRow
77 public :: getNgroupsInCol
78 public :: getNgroupsInRow
79 public :: isMPISimSet
80 public :: printComponentPlan
81 public :: getMyNode
82
83 !! PUBLIC Subroutines contained in MPI module
84 public :: mpi_bcast
85 public :: mpi_allreduce
86 ! public :: mpi_reduce
87 public :: mpi_send
88 public :: mpi_recv
89 public :: mpi_get_processor_name
90 public :: mpi_finalize
91
92 !! PUBLIC mpi variables
93 public :: mpi_comm_world
94 public :: mpi_character
95 public :: mpi_integer
96 public :: mpi_lor
97 public :: mpi_logical
98 public :: mpi_real
99 public :: mpi_double_precision
100 public :: mpi_sum
101 public :: mpi_max
102 public :: mpi_status_size
103 public :: mpi_any_source
104
105
106
107 !! Safety logical to prevent access to the ComponentPlan until it is
108 !! set by C++.
109 logical, save :: ComponentPlanSet = .false.
110
111
112 !! generic mpi error declaration.
113 integer, public :: mpi_err
114 character(len = statusMsgSize) :: errMsg
115
116 #ifdef PROFILE
117 public :: printCommTime
118 public :: getCommTime
119 real,save :: commTime = 0.0
120 real :: commTimeInitial,commTimeFinal
121 #endif
122
123
124
125
126 !! Tags used during force loop for parallel simulation
127 integer, public, allocatable, dimension(:) :: AtomLocalToGlobal
128 integer, public, allocatable, dimension(:) :: AtomRowToGlobal
129 integer, public, allocatable, dimension(:) :: AtomColToGlobal
130 integer, public, allocatable, dimension(:) :: GroupLocalToGlobal
131 integer, public, allocatable, dimension(:) :: GroupRowToGlobal
132 integer, public, allocatable, dimension(:) :: GroupColToGlobal
133
134 !! Logical set true if mpiSimulation has been initialized
135 logical, save :: isSimSet = .false.
136
137
138 type (mpiComponentPlan), save :: mpiSim
139
140 !! gs_plan contains plans for gather and scatter routines
141 type, public :: gs_plan
142 private
143 type (mpiComponentPlan), pointer :: gsComponentPlan => NULL()
144 integer :: gsPlanSize !! size of this plan (nDim*nComponents)
145 integer :: globalPlanSize !! size of all components in plan
146 integer, dimension(:), pointer :: displs !! Displacements array for mpi indexed from 0.
147 integer, dimension(:), pointer :: counts !! Counts array for mpi indexed from 0.
148 integer :: myPlanComm !! My communicator for this plan
149 integer :: myPlanRank !! My rank in this plan
150 integer :: planNprocs !! Number of processors in this plan
151 end type gs_plan
152
153 ! plans for different decompositions
154 type (gs_plan), public, save :: plan_atom_row
155 type (gs_plan), public, save :: plan_atom_row_3d
156 type (gs_plan), public, save :: plan_atom_col
157 type (gs_plan), public, save :: plan_atom_col_3d
158 type (gs_plan), public, save :: plan_atom_row_Rotation
159 type (gs_plan), public, save :: plan_atom_col_Rotation
160 type (gs_plan), public, save :: plan_group_row
161 type (gs_plan), public, save :: plan_group_col
162 type (gs_plan), public, save :: plan_group_row_3d
163 type (gs_plan), public, save :: plan_group_col_3d
164
165 type (mpiComponentPlan), pointer :: simComponentPlan
166
167 ! interface for gather scatter routines
168 !! Generic interface for gather.
169 !! Gathers a local array into a row or column array.
170 !! Interface provided for integer, double, and double
171 !! rank-2 arrays.
172 interface gather
173 module procedure gather_integer
174 module procedure gather_double
175 module procedure gather_double_2d
176 end interface
177
178 !! Generic interface for scatter.
179 !! Scatters a row or column array, adding components
180 !! and reducing them to a local nComponent array.
181 !! Interface provided for double and double rank-2 arrays.
182
183 interface scatter
184 module procedure scatter_double
185 module procedure scatter_double_2d
186 end interface
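!! Usage sketch (the variable names q, q_Row, f_Col, f_temp here are
!! illustrative only; they do not appear in this file): positions are
!! gathered into row/column copies before the force loop and the
!! accumulated partial forces are scattered (summed) back afterwards:
!!
!!   call gather(q, q_Row, plan_atom_row_3d)
!!   call gather(q, q_Col, plan_atom_col_3d)
!!   ! ... evaluate row-column interactions ...
!!   call scatter(f_Row, f_temp, plan_atom_row_3d)
!!   f = f + f_temp
!!   call scatter(f_Col, f_temp, plan_atom_col_3d)
!!   f = f + f_temp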
187
188
189
190 contains
191
192 !! Sets up mpiComponentPlan with structure passed from C++.
193 subroutine setupSimParallel(thisComponentPlan, nAtomTags, atomTags, &
194 nGroupTags, groupTags, status)
195 !! Passed Arguments
196 !! mpiComponentPlan struct from C
197 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
198 !! Number of tags passed
199 integer, intent(in) :: nAtomTags, nGroupTags
200 !! Result status, 0 = normal, -1 = error
201 integer, intent(out) :: status
202 integer :: localStatus
203 !! Global reference tag for local particles
204 integer, dimension(nAtomTags), intent(inout) :: atomTags
205 integer, dimension(nGroupTags), intent(inout) :: groupTags
206
207 !write(*,*) 'mpiSim_mod thinks node', thisComponentPlan%myNode, &
208 ! ' has atomTags(1) = ', atomTags(1)
209
210 status = 0
211 if (componentPlanSet) then
212 return
213 endif
214 componentPlanSet = .true.
215
216 !! copy c component plan to fortran
217 mpiSim = thisComponentPlan
218
219 call make_Force_Grid(mpiSim, localStatus)
220 if (localStatus /= 0) then
221 write(errMsg, *) 'An error in making the force grid has occurred'
222 call handleError("setupSimParallel", errMsg)
223 status = -1
224 return
225 endif
226
227 call updateGridComponents(mpiSim, localStatus)
228 if (localStatus /= 0) then
229 write(errMsg,*) "Error updating grid components"
230 call handleError("setupSimParallel", errMsg)
231 status = -1
232 return
233 endif
234
235 !! initialize gather and scatter plans used in this simulation
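!! The 1-component plans move per-atom/per-group scalars (e.g. the global
!! index tags set below), the nDim plans move 3-vectors such as positions
!! and forces, and the 9-component plans are sized for 3x3 rotation
!! matrices.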
236 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
237 mpiSim, mpiSim%rowComm, plan_atom_row)
238 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
239 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
240 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
241 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
242 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
243 mpiSim, mpiSim%rowComm, plan_group_row)
244 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
245 mpiSim, mpiSim%rowComm, plan_group_row_3d)
246
247 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
248 mpiSim, mpiSim%columnComm, plan_atom_col)
249 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
250 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
251 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
252 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
253 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
254 mpiSim, mpiSim%columnComm, plan_group_col)
255 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
256 mpiSim, mpiSim%columnComm, plan_group_col_3d)
257
258 ! Initialize tags
259
260 call setAtomTags(atomTags,localStatus)
261 if (localStatus /= 0) then
262 write(errMsg, *) 'An error in setting Atom Tags has occurred'
263 call handleError("setupSimParallel", errMsg)
264 status = -1
265 return
266 endif
267
268
269 call setGroupTags(groupTags,localStatus)
270 if (localStatus /= 0) then
271 write(errMsg, *) 'An error in setting Group Tags has occurred'
272 call handleError("setupSimParallel", errMsg)
273 status = -1
274 return
275 endif
276
277 isSimSet = .true.
278
279 ! call printComponentPlan(mpiSim,0)
280 end subroutine setupSimParallel
281
282 subroutine replanSimParallel(thisComponentPlan,status)
283 ! Passed Arguments
284 !! mpiComponentPlan struct from C
285 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
286 integer, intent(out) :: status
287 integer :: localStatus
288 integer :: mpierror
289 status = 0
290
291 call updateGridComponents(thisComponentPlan,localStatus)
292 if (localStatus /= 0) then
293 status = -1
294 return
295 endif
296
297 !! Unplan Gather Scatter plans
298 call unplan_gather_scatter(plan_atom_row)
299 call unplan_gather_scatter(plan_atom_row_3d)
300 call unplan_gather_scatter(plan_atom_row_Rotation)
301 call unplan_gather_scatter(plan_group_row)
302 call unplan_gather_scatter(plan_group_row_3d)
303
304 call unplan_gather_scatter(plan_atom_col)
305 call unplan_gather_scatter(plan_atom_col_3d)
306 call unplan_gather_scatter(plan_atom_col_Rotation)
307 call unplan_gather_scatter(plan_group_col)
308 call unplan_gather_scatter(plan_group_col_3d)
309
310 !! initialize gather and scatter plans used in this simulation
311 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
312 mpiSim, mpiSim%rowComm, plan_atom_row)
313 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
314 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
315 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
316 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
317 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
318 mpiSim, mpiSim%rowComm, plan_group_row)
319 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
320 mpiSim, mpiSim%rowComm, plan_group_row_3d)
321
322 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
323 mpiSim, mpiSim%columnComm, plan_atom_col)
324 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
325 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
326 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
327 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
328 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
329 mpiSim, mpiSim%columnComm, plan_group_col)
330 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
331 mpiSim, mpiSim%columnComm, plan_group_col_3d)
332
333 end subroutine replanSimParallel
334
335 !! Updates the number of row and column components for long-range forces.
336 subroutine updateGridComponents(thisComponentPlan, status)
337 type (mpiComponentPlan) :: thisComponentPlan !! mpiComponentPlan
338
339 !! Status return
340 !! - 0 Success
341 !! - -1 Failure
342 integer, intent(out) :: status
343 integer :: nAtomsLocal
344 integer :: nAtomsInRow = 0
345 integer :: nAtomsInColumn = 0
346 integer :: nGroupsLocal
347 integer :: nGroupsInRow = 0
348 integer :: nGroupsInColumn = 0
349 integer :: mpiErrors
350
351 status = 0
352 if (.not. componentPlanSet) return
353
354 if (thisComponentPlan%nAtomsLocal == 0) then
355 status = -1
356 return
357 endif
358 if (thisComponentPlan%nGroupsLocal == 0) then
359 write(*,*) 'tcp%ngl = ', thisComponentPlan%nGroupsLocal
360 status = -1
361 return
362 endif
363
364 nAtomsLocal = thisComponentPlan%nAtomsLocal
365 nGroupsLocal = thisComponentPlan%nGroupsLocal
366
367 call mpi_allreduce(nAtomsLocal, nAtomsInRow, 1, mpi_integer, &
368 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
369 if (mpiErrors /= 0) then
370 status = -1
371 return
372 endif
373
374 call mpi_allreduce(nAtomsLocal, nAtomsInColumn, 1, mpi_integer, &
375 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
376 if (mpiErrors /= 0) then
377 status = -1
378 return
379 endif
380
381 call mpi_allreduce(nGroupsLocal, nGroupsInRow, 1, mpi_integer, &
382 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
383 if (mpiErrors /= 0) then
384 status = -1
385 return
386 endif
387
388 call mpi_allreduce(nGroupsLocal, nGroupsInColumn, 1, mpi_integer, &
389 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
390 if (mpiErrors /= 0) then
391 status = -1
392 return
393 endif
394
395 thisComponentPlan%nAtomsInRow = nAtomsInRow
396 thisComponentPlan%nAtomsInColumn = nAtomsInColumn
397 thisComponentPlan%nGroupsInRow = nGroupsInRow
398 thisComponentPlan%nGroupsInColumn = nGroupsInColumn
399
400 end subroutine updateGridComponents
401
402
403 !! Creates a square force decomposition of processors into row and column
404 !! communicators.
405 subroutine make_Force_Grid(thisComponentPlan,status)
406 type (mpiComponentPlan) :: thisComponentPlan
407 integer, intent(out) :: status !! status returns -1 if error
408 integer :: nColumnsMax !! Maximum number of columns
409 integer :: nWorldProcessors !! Total number of processors in World comm.
410 integer :: rowIndex !! Row for this processor.
411 integer :: columnIndex !! Column for this processor.
412 integer :: nRows !! Total number of rows.
413 integer :: nColumns !! Total number of columns.
414 integer :: mpiErrors !! MPI errors.
415 integer :: rowCommunicator !! MPI row communicator.
416 integer :: columnCommunicator
417 integer :: myWorldRank
418 integer :: i
419
420
421 if (.not. ComponentPlanSet) return
422 status = 0
423
424 !! We make a dangerous assumption here that if nProcessors is
425 !! zero, then we need to get the information from MPI.
426 if (thisComponentPlan%nProcessors == 0 ) then
427 call mpi_comm_size( MPI_COMM_WORLD, nWorldProcessors,mpiErrors)
428 if ( mpiErrors /= 0 ) then
429 status = -1
430 return
431 endif
432 call mpi_comm_rank( MPI_COMM_WORLD,myWorldRank,mpiErrors)
433 if ( mpiErrors /= 0 ) then
434 status = -1
435 return
436 endif
437
438 else
439 nWorldProcessors = thisComponentPlan%nProcessors
440 myWorldRank = thisComponentPlan%myNode
441 endif
442
443 nColumnsMax = nint(sqrt(real(nWorldProcessors,kind=dp)))
444
445 do i = 1, nColumnsMax
446 if (mod(nWorldProcessors,i) == 0) nColumns = i
447 end do
448
449 nRows = nWorldProcessors/nColumns
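!! Worked example: with 12 processors, nColumnsMax = nint(sqrt(12.0)) = 3;
!! the largest divisor of 12 not exceeding 3 is 3, so nColumns = 3 and
!! nRows = 12/3 = 4.  Rank r then falls in row r/nColumns and column
!! mod(r, nColumns) below (e.g. rank 7 -> row 2, column 1).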
450
451 rowIndex = myWorldRank/nColumns
452
453 call mpi_comm_split(mpi_comm_world,rowIndex,0,rowCommunicator,mpiErrors)
454 if ( mpiErrors /= 0 ) then
455 write(errMsg, *) 'An error ', mpiErrors, ' occurred in splitting communicators'
456 call handleError("makeForceGrid", errMsg)
457 status = -1
458 return
459 endif
460
461 columnIndex = mod(myWorldRank,nColumns)
462 call mpi_comm_split(mpi_comm_world,columnIndex,0,columnCommunicator,mpiErrors)
463 if ( mpiErrors /= 0 ) then
464 write(errMsg, *) "MPI comm split failed at columnCommunicator with error ", mpiErrors
465 call handleError("makeForceGrid", errMsg)
466 status = -1
467 return
468 endif
469
470 ! Set appropriate components of thisComponentPlan
471 thisComponentPlan%rowComm = rowCommunicator
472 thisComponentPlan%columnComm = columnCommunicator
473 thisComponentPlan%rowIndex = rowIndex
474 thisComponentPlan%columnIndex = columnIndex
475 thisComponentPlan%nRows = nRows
476 thisComponentPlan%nColumns = nColumns
477
478 end subroutine make_Force_Grid
479
480 !! Initializes a gather/scatter plan.
481 subroutine plan_gather_scatter( nDim, nObjects, thisComponentPlan, &
482 thisComm, this_plan, status)
483 integer, intent(in) :: nDim !! Number of dimensions for gather scatter plan
484 integer, intent(in) :: nObjects
485 type (mpiComponentPlan), intent(in), target :: thisComponentPlan
486 type (gs_plan), intent(out) :: this_plan !! gather/scatter plan being initialized
487 integer, intent(in) :: thisComm !! MPI communicator for this plan
488
489 integer :: arraySize !! size to allocate plan for
490 integer, intent(out), optional :: status
491 integer :: ierror
492 integer :: i,junk
493
494 if (present(status)) status = 0
495
496 !! Set the gsComponentPlan pointer
497 !! to the component plan we want to use for this gather/scatter plan.
498 !! WARNING: this could be dangerous since thisComponentPlan was originally
499 !! allocated in C++ and there is a significant difference between C and
500 !! F95 pointers....
501 this_plan%gsComponentPlan => thisComponentPlan
502
503 ! Set this plan's size (nDim * nObjects); used for the counts/displs arrays.
504 this_plan%gsPlanSize = nDim * nObjects
505
506 ! Duplicate communicator for this plan
507 call mpi_comm_dup(thisComm, this_plan%myPlanComm, mpi_err)
508 if (mpi_err /= 0) then
509 if (present(status)) status = -1
510 return
511 end if
512 call mpi_comm_rank(this_plan%myPlanComm, this_plan%myPlanRank, mpi_err)
513 if (mpi_err /= 0) then
514 if (present(status)) status = -1
515 return
516 end if
517
518 call mpi_comm_size(this_plan%myPlanComm, this_plan%planNprocs, mpi_err)
519
520 if (mpi_err /= 0) then
521 if (present(status)) status = -1
522 return
523 end if
524
525 !! The counts and displacements arrays are indexed from 0 to be compatible
526 !! with MPI arrays.
527 allocate (this_plan%counts(0:this_plan%planNprocs-1),STAT=ierror)
528 if (ierror /= 0) then
529 if (present(status)) status = -1
530 return
531 end if
532
533 allocate (this_plan%displs(0:this_plan%planNprocs-1),STAT=ierror)
534 if (ierror /= 0) then
535 if (present(status)) status = -1
536 return
537 end if
538
539 !! Gather all the local plan sizes into an array of length planNprocs.
540 call mpi_allgather(this_plan%gsPlanSize,1,mpi_integer,this_plan%counts, &
541 1,mpi_integer,thisComm,mpi_err)
542
543 if (mpi_err /= 0) then
544 if (present(status)) status = -1
545 return
546 end if
547
548 !! figure out the total number of components in this plan
549 this_plan%globalPlanSize = sum(this_plan%counts)
550
551 !! initialize plan displacements.
552 this_plan%displs(0) = 0
553 do i = 1, this_plan%planNprocs - 1,1
554 this_plan%displs(i) = this_plan%displs(i-1) + this_plan%counts(i-1)
555 end do
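!! e.g. for three ranks with gsPlanSize 12, 9 and 6: counts = (/12, 9, 6/),
!! globalPlanSize = 27, and displs = (/0, 12, 21/), so rank 1's block starts
!! at offset 12 of the gathered array.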
556 end subroutine plan_gather_scatter
557
558 subroutine unplan_gather_scatter(this_plan)
559 type (gs_plan), intent(inout) :: this_plan
560
561 this_plan%gsComponentPlan => null()
562 call mpi_comm_free(this_plan%myPlanComm,mpi_err)
563
564 deallocate(this_plan%counts)
565 deallocate(this_plan%displs)
566
567 end subroutine unplan_gather_scatter
568
569 subroutine gather_integer( sbuffer, rbuffer, this_plan, status)
570
571 type (gs_plan), intent(inout) :: this_plan
572 integer, dimension(:), intent(inout) :: sbuffer
573 integer, dimension(:), intent(inout) :: rbuffer
574 integer :: noffset
575 integer, intent(out), optional :: status
576 integer :: i
577
578 if (present(status)) status = 0
579 noffset = this_plan%displs(this_plan%myPlanRank)
580
581 ! if (getmyNode() == 1) then
582 ! write(*,*) "Node 0 printing allgatherv vars"
583 ! write(*,*) "Noffset: ", noffset
584 ! write(*,*) "PlanSize: ", this_plan%gsPlanSize
585 ! write(*,*) "PlanComm: ", this_plan%myPlanComm
586 ! end if
587
588 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_integer, &
589 rbuffer,this_plan%counts,this_plan%displs,mpi_integer, &
590 this_plan%myPlanComm, mpi_err)
591
592 if (mpi_err /= 0) then
593 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
594 call handleError("gather_integer", errMsg)
595 if (present(status)) status = -1
596 endif
597
598 end subroutine gather_integer
599
600 subroutine gather_double( sbuffer, rbuffer, this_plan, status)
601
602 type (gs_plan), intent(in) :: this_plan
603 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
604 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
605 integer :: noffset
606 integer, intent(out), optional :: status
607
608
609 if (present(status)) status = 0
610 noffset = this_plan%displs(this_plan%myPlanRank)
611 #ifdef PROFILE
612 call cpu_time(commTimeInitial)
613 #endif
614 #ifdef SINGLE_PRECISION
615 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_real, &
616 rbuffer,this_plan%counts,this_plan%displs,mpi_real, &
617 this_plan%myPlanComm, mpi_err)
618 #else
619 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
620 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
621 this_plan%myPlanComm, mpi_err)
622 #endif
623 #ifdef PROFILE
624 call cpu_time(commTimeFinal)
625 commTime = commTime + commTimeFinal - commTimeInitial
626 #endif
627
628 if (mpi_err /= 0) then
629 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
630 call handleError("gather_double", errMsg)
631 if (present(status)) status = -1
632 endif
633
634 end subroutine gather_double
635
636 subroutine gather_double_2d( sbuffer, rbuffer, this_plan, status)
637
638 type (gs_plan), intent(in) :: this_plan
639 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
640 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
641 integer :: noffset,i,ierror
642 integer, intent(out), optional :: status
643
644 external mpi_allgatherv
645
646 if (present(status)) status = 0
647
648 ! noffset = this_plan%displs(this_plan%me)
649 #ifdef PROFILE
650 call cpu_time(commTimeInitial)
651 #endif
652
653 #ifdef SINGLE_PRECISION
654 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_real, &
655 rbuffer,this_plan%counts,this_plan%displs,mpi_real, &
656 this_plan%myPlanComm, mpi_err)
657 #else
658 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
659 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
660 this_plan%myPlanComm, mpi_err)
661 #endif
662
663 #ifdef PROFILE
664 call cpu_time(commTimeFinal)
665 commTime = commTime + commTimeFinal - commTimeInitial
666 #endif
667
668 if (mpi_err /= 0) then
669 write(errMsg, *) "mpi_allgatherv failed with error ", mpi_err
670 call handleError("gather_double_2d", errMsg)
671 if (present(status)) status = -1
672 endif
673
674 end subroutine gather_double_2d
675
676 subroutine scatter_double( sbuffer, rbuffer, this_plan, status)
677
678 type (gs_plan), intent(in) :: this_plan
679 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
680 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
681 integer, intent(out), optional :: status
682 external mpi_reduce_scatter
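!! mpi_reduce_scatter sums sbuffer element-wise across all ranks of
!! myPlanComm and leaves each rank holding only its own counts(rank)-element
!! slice in rbuffer, i.e. the locally owned portion of the reduced data
!! (for example, partial forces summed over row or column partners).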
683
684 if (present(status)) status = 0
685
686 #ifdef PROFILE
687 call cpu_time(commTimeInitial)
688 #endif
689 #ifdef SINGLE_PRECISION
690 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
691 mpi_real, MPI_SUM, this_plan%myPlanComm, mpi_err)
692 #else
693 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
694 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
695 #endif
696 #ifdef PROFILE
697 call cpu_time(commTimeFinal)
698 commTime = commTime + commTimeFinal - commTimeInitial
699 #endif
700
701 if (mpi_err /= 0) then
702 write(errMsg, *) "mpi_reduce_scatter failed with error ", mpi_err
703 call handleError("scatter_double", errMsg)
704 if (present(status)) status = -1
705 endif
706
707 end subroutine scatter_double
708
709 subroutine scatter_double_2d( sbuffer, rbuffer, this_plan, status)
710
711 type (gs_plan), intent(in) :: this_plan
712 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
713 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
714 integer, intent(out), optional :: status
715 external mpi_reduce_scatter
716
717 if (present(status)) status = 0
718 #ifdef PROFILE
719 call cpu_time(commTimeInitial)
720 #endif
721 #ifdef SINGLE_PRECISION
722 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
723 mpi_real, MPI_SUM, this_plan%myPlanComm, mpi_err)
724 #else
725 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
726 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
727 #endif
728 #ifdef PROFILE
729 call cpu_time(commTimeFinal)
730 commTime = commTime + commTimeFinal - commTimeInitial
731 #endif
732
733 if (mpi_err /= 0) then
734 write(errMsg, *) "mpi_reduce_scatter failed with error ", mpi_err
735 call handleError("scatter_double_2d", errMsg)
736 if (present(status)) status = -1
737 endif
738
739 end subroutine scatter_double_2d
740
741 subroutine setAtomTags(tags, status)
742 integer, dimension(:) :: tags
743 integer :: status
744
745 integer :: alloc_stat
746
747 integer :: nAtomsInCol
748 integer :: nAtomsInRow
749
750 status = 0
751 ! allocate row arrays
752 nAtomsInRow = getNatomsInRow(plan_atom_row)
753 nAtomsInCol = getNatomsInCol(plan_atom_col)
754
755 if(.not. allocated(AtomLocalToGlobal)) then
756 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
757 if (alloc_stat /= 0 ) then
758 status = -1
759 return
760 endif
761 else
762 deallocate(AtomLocalToGlobal)
763 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
764 if (alloc_stat /= 0 ) then
765 status = -1
766 return
767 endif
768
769 endif
770
771 AtomLocalToGlobal = tags
772
773 if (.not. allocated(AtomRowToGlobal)) then
774 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
775 if (alloc_stat /= 0 ) then
776 status = -1
777 return
778 endif
779 else
780 deallocate(AtomRowToGlobal)
781 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
782 if (alloc_stat /= 0 ) then
783 status = -1
784 return
785 endif
786
787 endif
788 ! allocate column arrays
789 if (.not. allocated(AtomColToGlobal)) then
790 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
791 if (alloc_stat /= 0 ) then
792 status = -1
793 return
794 endif
795 else
796 deallocate(AtomColToGlobal)
797 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
798 if (alloc_stat /= 0 ) then
799 status = -1
800 return
801 endif
802 endif
803
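!! After these gathers, AtomRowToGlobal(i) and AtomColToGlobal(i) hold the
!! global index of the i-th atom in this processor's row and column blocks,
!! mapping row- and column-indexed data back to global atom identities
!! during the force loop.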
804 call gather(tags, AtomRowToGlobal, plan_atom_row)
805 call gather(tags, AtomColToGlobal, plan_atom_col)
806
807 end subroutine setAtomTags
808
809 subroutine setGroupTags(tags, status)
810 integer, dimension(:) :: tags
811 integer :: status
812
813 integer :: alloc_stat
814
815 integer :: nGroupsInCol
816 integer :: nGroupsInRow
817
818 status = 0
819
820 nGroupsInRow = getNgroupsInRow(plan_group_row)
821 nGroupsInCol = getNgroupsInCol(plan_group_col)
822
823 if(allocated(GroupLocalToGlobal)) then
824 deallocate(GroupLocalToGlobal)
825 endif
826 allocate(GroupLocalToGlobal(size(tags)),STAT=alloc_stat)
827 if (alloc_stat /= 0 ) then
828 status = -1
829 return
830 endif
831
832 GroupLocalToGlobal = tags
833
834 if(allocated(GroupRowToGlobal)) then
835 deallocate(GroupRowToGlobal)
836 endif
837 allocate(GroupRowToGlobal(nGroupsInRow),STAT=alloc_stat)
838 if (alloc_stat /= 0 ) then
839 status = -1
840 return
841 endif
842
843 if(allocated(GroupColToGlobal)) then
844 deallocate(GroupColToGlobal)
845 endif
846 allocate(GroupColToGlobal(nGroupsInCol),STAT=alloc_stat)
847 if (alloc_stat /= 0 ) then
848 status = -1
849 return
850 endif
851
852 call gather(tags, GroupRowToGlobal, plan_group_row)
853 call gather(tags, GroupColToGlobal, plan_group_col)
854
855 end subroutine setGroupTags
856
857 function getNatomsInCol(thisplan) result(nInCol)
858 type (gs_plan), intent(in) :: thisplan
859 integer :: nInCol
860 nInCol = thisplan%gsComponentPlan%nAtomsInColumn
861 end function getNatomsInCol
862
863 function getNatomsInRow(thisplan) result(nInRow)
864 type (gs_plan), intent(in) :: thisplan
865 integer :: nInRow
866 nInRow = thisplan%gsComponentPlan%nAtomsInRow
867 end function getNatomsInRow
868
869 function getNgroupsInCol(thisplan) result(nInCol)
870 type (gs_plan), intent(in) :: thisplan
871 integer :: nInCol
872 nInCol = thisplan%gsComponentPlan%nGroupsInColumn
873 end function getNgroupsInCol
874
875 function getNgroupsInRow(thisplan) result(nInRow)
876 type (gs_plan), intent(in) :: thisplan
877 integer :: nInRow
878 nInRow = thisplan%gsComponentPlan%nGroupsInRow
879 end function getNgroupsInRow
880
881 function isMPISimSet() result(isthisSimSet)
882 logical :: isthisSimSet
883 if (isSimSet) then
884 isthisSimSet = .true.
885 else
886 isthisSimSet = .false.
887 endif
888 end function isMPISimSet
889
890 subroutine printComponentPlan(this_plan,printNode)
891
892 type (mpiComponentPlan), intent(in) :: this_plan
893 integer, optional :: printNode
894 logical :: print_me
895 print_me = .false.  ! assigned at run time; a declaration initializer would imply SAVE and keep a stale value between calls
896 if (present(printNode)) then
897 if (printNode == mpiSim%myNode) print_me = .true.
898 else
899 print_me = .true.
900 endif
901
902 if (print_me) then
903 write(default_error,*) "SetupSimParallel: writing component plan"
904
905 write(default_error,*) "nMolGlobal: ", mpiSim%nMolGlobal
906 write(default_error,*) "nAtomsGlobal: ", mpiSim%nAtomsGlobal
907 write(default_error,*) "nAtomsLocal: ", mpiSim%nAtomsLocal
908 write(default_error,*) "myNode: ", mpiSim%myNode
909 write(default_error,*) "nProcessors: ", mpiSim%nProcessors
910 write(default_error,*) "rowComm: ", mpiSim%rowComm
911 write(default_error,*) "columnComm: ", mpiSim%columnComm
912 write(default_error,*) "nRows: ", mpiSim%nRows
913 write(default_error,*) "nColumns: ", mpiSim%nColumns
914 write(default_error,*) "nAtomsInRow: ", mpiSim%nAtomsInRow
915 write(default_error,*) "nAtomsInColumn: ", mpiSim%nAtomsInColumn
916 write(default_error,*) "rowIndex: ", mpiSim%rowIndex
917 write(default_error,*) "columnIndex: ", mpiSim%columnIndex
918 endif
919 end subroutine printComponentPlan
920
921 function getMyNode() result(myNode)
922 integer :: myNode
923 myNode = mpiSim%myNode
924 end function getMyNode
925
926 #ifdef PROFILE
927 subroutine printCommTime()
928 write(*,*) "MPI communication time is: ", commTime
929 end subroutine printCommTime
930
931 function getCommTime() result(comm_time)
932 real :: comm_time
933 comm_time = commTime
934 end function getCommTime
935
936 #endif
937
938 #endif // is_mpi
939 end module mpiSimulation
940