root/group/trunk/OOPSE-4/src/UseTheForce/DarkSide/simParallel.F90
Revision: 1930
Committed: Wed Jan 12 22:41:40 2005 UTC by gezelter
File size: 29719 byte(s)
Log Message:
merging new_design branch into OOPSE-2.0

File Contents

# Content
1
2 !!
3 !! Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
4 !!
5 !! The University of Notre Dame grants you ("Licensee") a
6 !! non-exclusive, royalty free, license to use, modify and
7 !! redistribute this software in source and binary code form, provided
8 !! that the following conditions are met:
9 !!
10 !! 1. Acknowledgement of the program authors must be made in any
11 !! publication of scientific results based in part on use of the
12 !! program. An acceptable form of acknowledgement is citation of
13 !! the article in which the program was described (Matthew
14 !! A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
15 !! J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
16 !! Parallel Simulation Engine for Molecular Dynamics,"
17 !! J. Comput. Chem. 26, pp. 252-271 (2005))
18 !!
19 !! 2. Redistributions of source code must retain the above copyright
20 !! notice, this list of conditions and the following disclaimer.
21 !!
22 !! 3. Redistributions in binary form must reproduce the above copyright
23 !! notice, this list of conditions and the following disclaimer in the
24 !! documentation and/or other materials provided with the
25 !! distribution.
26 !!
27 !! This software is provided "AS IS," without a warranty of any
28 !! kind. All express or implied conditions, representations and
29 !! warranties, including any implied warranty of merchantability,
30 !! fitness for a particular purpose or non-infringement, are hereby
31 !! excluded. The University of Notre Dame and its licensors shall not
32 !! be liable for any damages suffered by licensee as a result of
33 !! using, modifying or distributing the software or its
34 !! derivatives. In no event will the University of Notre Dame or its
35 !! licensors be liable for any lost revenue, profit or data, or for
36 !! direct, indirect, special, consequential, incidental or punitive
37 !! damages, however caused and regardless of the theory of liability,
38 !! arising out of the use of or inability to use software, even if the
39 !! University of Notre Dame has been advised of the possibility of
40 !! such damages.
41 !!
42
43
44 !! MPI support for long-range forces using force decomposition
45 !! on a square grid of processors.
46 !! Corresponds to mpiSimulation.cpp on the C++ side.
47 !! This module also contains a private interface for MPI F90 routines.
48 !!
49 !! @author Charles F. Vardeman II
50 !! @author Matthew Meineke
51 !! @version $Id: simParallel.F90,v 1.2 2005-01-12 22:40:45 gezelter Exp $
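!!
!! In this force decomposition, the MPI ranks are arranged on an
!! (approximately) square nRows x nColumns grid. Row and column
!! communicators are split out of MPI_COMM_WORLD, and the gather/scatter
!! plans below move atom and group data between the local, row, and
!! column distributions so that each processor can work on its own block
!! of pairwise interactions.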
52
53 module mpiSimulation
54 use definitions
55 #ifdef IS_MPI
56 use oopseMPI
57 implicit none
58 PRIVATE
59
60
61 !! PUBLIC Subroutines contained in this module
62 !! gather and scatter are generic interfaces
63 !! to the gather and scatter routines.
64 public :: gather, scatter
65 public :: setupSimParallel
66 public :: replanSimParallel
67 public :: getNatomsInCol
68 public :: getNatomsInRow
69 public :: getNgroupsInCol
70 public :: getNgroupsInRow
71 public :: isMPISimSet
72 public :: printComponentPlan
73 public :: getMyNode
74
75 !! PUBLIC Subroutines contained in MPI module
76 public :: mpi_bcast
77 public :: mpi_allreduce
78 ! public :: mpi_reduce
79 public :: mpi_send
80 public :: mpi_recv
81 public :: mpi_get_processor_name
82 public :: mpi_finalize
83
84 !! PUBLIC mpi variables
85 public :: mpi_comm_world
86 public :: mpi_character
87 public :: mpi_integer
88 public :: mpi_double_precision
89 public :: mpi_sum
90 public :: mpi_max
91 public :: mpi_status_size
92 public :: mpi_any_source
93
94
95
96 !! Safety logical to prevent access to the ComponentPlan until
97 !! it has been set by C++.
98 logical, save :: ComponentPlanSet = .false.
99
100
101 !! generic mpi error declaration.
102 integer, public :: mpi_err
103
104 #ifdef PROFILE
105 public :: printCommTime
106 public :: getCommTime
107 real,save :: commTime = 0.0
108 real :: commTimeInitial,commTimeFinal
109 #endif
110
111 !! Include the mpiComponentPlan type. mpiComponentPlan.h is a
112 !! dual header file shared by both C and Fortran.
113 #define __FORTRAN90
114 #include "UseTheForce/mpiComponentPlan.h"
115
116
117 !! Tags used during force loop for parallel simulation
118 integer, public, allocatable, dimension(:) :: AtomLocalToGlobal
119 integer, public, allocatable, dimension(:) :: AtomRowToGlobal
120 integer, public, allocatable, dimension(:) :: AtomColToGlobal
121 integer, public, allocatable, dimension(:) :: GroupLocalToGlobal
122 integer, public, allocatable, dimension(:) :: GroupRowToGlobal
123 integer, public, allocatable, dimension(:) :: GroupColToGlobal
124
125 !! Logical set true if mpiSimulation has been initialized
126 logical, save :: isSimSet = .false.
127
128
129 type (mpiComponentPlan), save :: mpiSim
130
131 !! gs_plan contains plans for gather and scatter routines
132 type, public :: gs_plan
133 private
134 type (mpiComponentPlan), pointer :: gsComponentPlan => NULL()
135 integer :: gsPlanSize !! size of this plan (nDim*nObjects)
136 integer :: globalPlanSize !! size of all components in plan
137 integer, dimension(:), pointer :: displs !! Displacements array for mpi indexed from 0.
138 integer, dimension(:), pointer :: counts !! Counts array for mpi indexed from 0.
139 integer :: myPlanComm !! My communicator for this plan
140 integer :: myPlanRank !! My rank in this plan
141 integer :: planNprocs !! Number of processors in this plan
142 end type gs_plan
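!! For a gs_plan, counts(i) holds the contribution (gsPlanSize) of rank i
!! in myPlanComm, and displs(i) holds the offset of that contribution in
!! the gathered array, so both can be passed directly to mpi_allgatherv
!! and mpi_reduce_scatter.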
143
144 ! plans for different decompositions
145 type (gs_plan), public, save :: plan_atom_row
146 type (gs_plan), public, save :: plan_atom_row_3d
147 type (gs_plan), public, save :: plan_atom_col
148 type (gs_plan), public, save :: plan_atom_col_3d
149 type (gs_plan), public, save :: plan_atom_row_Rotation
150 type (gs_plan), public, save :: plan_atom_col_Rotation
151 type (gs_plan), public, save :: plan_group_row
152 type (gs_plan), public, save :: plan_group_col
153 type (gs_plan), public, save :: plan_group_row_3d
154 type (gs_plan), public, save :: plan_group_col_3d
155
156 type (mpiComponentPlan), pointer :: simComponentPlan
157
158 ! interface for gather scatter routines
159 !! Generic interface for gather.
160 !! Gathers a local array into a row or column array.
161 !! Interface provided for integer, double, and double
162 !! rank-2 arrays.
163 interface gather
164 module procedure gather_integer
165 module procedure gather_double
166 module procedure gather_double_2d
167 end interface
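!! A minimal usage sketch (hypothetical array names, assuming the 3d atom
!! plans built in setupSimParallel):
!!   call gather(q, q_Row, plan_atom_row_3d) ! (3,nAtomsLocal) -> (3,nAtomsInRow)
!!   call gather(q, q_Col, plan_atom_col_3d) ! (3,nAtomsLocal) -> (3,nAtomsInColumn)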
168
169 !! Generic interface for scatter.
170 !! Scatters a row or column array, adding components
171 !! and reducing them to a local nComponent array.
172 !! Interface provided for double and double rank-2 arrays.
173
174 interface scatter
175 module procedure scatter_double
176 module procedure scatter_double_2d
177 end interface
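!! A minimal usage sketch (hypothetical array names): after the force
!! loop, the row and column contributions are summed back onto the
!! locally owned atoms:
!!   call scatter(f_Row, f_temp, plan_atom_row_3d)
!!   f = f + f_temp
!!   call scatter(f_Col, f_temp, plan_atom_col_3d)
!!   f = f + f_temp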
178
179
180
181 contains
182
183 !! Sets up mpiComponentPlan with structure passed from C++.
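!! The setup sequence is: copy the plan passed in from C++, build the
!! row/column force grid, total the atom and group counts over each row
!! and column, build the gather/scatter plans, and finally store the
!! global atom and group tags.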
184 subroutine setupSimParallel(thisComponentPlan, nAtomTags, atomTags, &
185 nGroupTags, groupTags, status)
186 !! Passed Arguments
187 !! mpiComponentPlan struct from C
188 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
189 !! Number of tags passed
190 integer, intent(in) :: nAtomTags, nGroupTags
191 !! Result status, 0 = normal, -1 = error
192 integer, intent(out) :: status
193 integer :: localStatus
194 !! Global reference tag for local particles
195 integer, dimension(nAtomTags), intent(inout) :: atomTags
196 integer, dimension(nGroupTags), intent(inout) :: groupTags
197
198 !write(*,*) 'mpiSim_mod thinks node', thisComponentPlan%myNode, &
199 ! ' has atomTags(1) = ', atomTags(1)
200
201 status = 0
202 if (componentPlanSet) then
203 return
204 endif
205 componentPlanSet = .true.
206
207 !! Copy the C component plan to Fortran.
208 mpiSim = thisComponentPlan
209 !write(*,*) "Setting up simParallel"
210
211 call make_Force_Grid(mpiSim, localStatus)
212 if (localStatus /= 0) then
213 write(default_error,*) "Error creating force grid"
214 status = -1
215 return
216 endif
217
218 call updateGridComponents(mpiSim, localStatus)
219 if (localStatus /= 0) then
220 write(default_error,*) "Error updating grid components"
221 status = -1
222 return
223 endif
224
225 !! initialize gather and scatter plans used in this simulation
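!! Each plan carries 1 element per object (scalars), nDim elements
!! (position-like vectors), or 9 elements (3x3 rotation matrices), for
!! both the row and the column communicators.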
226 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
227 mpiSim, mpiSim%rowComm, plan_atom_row)
228 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
229 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
230 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
231 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
232 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
233 mpiSim, mpiSim%rowComm, plan_group_row)
234 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
235 mpiSim, mpiSim%rowComm, plan_group_row_3d)
236
237 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
238 mpiSim, mpiSim%columnComm, plan_atom_col)
239 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
240 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
241 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
242 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
243 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
244 mpiSim, mpiSim%columnComm, plan_group_col)
245 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
246 mpiSim, mpiSim%columnComm, plan_group_col_3d)
247
248 ! Initialize tags
249
250 call setAtomTags(atomTags,localStatus)
251 if (localStatus /= 0) then
252 status = -1
253 return
254 endif
255
256
257 call setGroupTags(groupTags,localStatus)
258 if (localStatus /= 0) then
259 status = -1
260 return
261 endif
262
263 isSimSet = .true.
264
265 ! call printComponentPlan(mpiSim,0)
266 end subroutine setupSimParallel
267
268 subroutine replanSimParallel(thisComponentPlan,status)
269 ! Passed Arguments
270 !! mpiComponentPlan struct from C
271 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
272 integer, intent(out) :: status
273 integer :: localStatus
274 integer :: mpierror
275 status = 0
276
277 call updateGridComponents(thisComponentPlan,localStatus)
278 if (localStatus /= 0) then
279 status = -1
280 return
281 endif
282
283 !! Unplan Gather Scatter plans
284 call unplan_gather_scatter(plan_atom_row)
285 call unplan_gather_scatter(plan_atom_row_3d)
286 call unplan_gather_scatter(plan_atom_row_Rotation)
287 call unplan_gather_scatter(plan_group_row)
288 call unplan_gather_scatter(plan_group_row_3d)
289
290 call unplan_gather_scatter(plan_atom_col)
291 call unplan_gather_scatter(plan_atom_col_3d)
292 call unplan_gather_scatter(plan_atom_col_Rotation)
293 call unplan_gather_scatter(plan_group_col)
294 call unplan_gather_scatter(plan_group_col_3d)
295
296 !! initialize gather and scatter plans used in this simulation
297 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
298 mpiSim, mpiSim%rowComm, plan_atom_row)
299 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
300 mpiSim, mpiSim%rowComm, plan_atom_row_3d)
301 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
302 mpiSim, mpiSim%rowComm, plan_atom_row_Rotation)
303 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
304 mpiSim, mpiSim%rowComm, plan_group_row)
305 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
306 mpiSim, mpiSim%rowComm, plan_group_row_3d)
307
308 call plan_gather_scatter(1, mpiSim%nAtomsLocal, &
309 mpiSim, mpiSim%columnComm, plan_atom_col)
310 call plan_gather_scatter(nDim, mpiSim%nAtomsLocal, &
311 mpiSim, mpiSim%columnComm, plan_atom_col_3d)
312 call plan_gather_scatter(9, mpiSim%nAtomsLocal, &
313 mpiSim, mpiSim%columnComm, plan_atom_col_Rotation)
314 call plan_gather_scatter(1, mpiSim%nGroupsLocal, &
315 mpiSim, mpiSim%columnComm, plan_group_col)
316 call plan_gather_scatter(nDim, mpiSim%nGroupsLocal, &
317 mpiSim, mpiSim%columnComm, plan_group_col_3d)
318
319 end subroutine replanSimParallel
320
321 !! Updates number of row and column components for long range forces.
322 subroutine updateGridComponents(thisComponentPlan, status)
323 type (mpiComponentPlan) :: thisComponentPlan !! mpiComponentPlan
324
325 !! Status return
326 !! - 0 Success
327 !! - -1 Failure
328 integer, intent(out) :: status
329 integer :: nAtomsLocal
330 integer :: nAtomsInRow = 0
331 integer :: nAtomsInColumn = 0
332 integer :: nGroupsLocal
333 integer :: nGroupsInRow = 0
334 integer :: nGroupsInColumn = 0
335 integer :: mpiErrors
336
337 status = 0
338 if (.not. componentPlanSet) return
339
340 if (thisComponentPlan%nAtomsLocal == 0) then
341 status = -1
342 return
343 endif
344 if (thisComponentPlan%nGroupsLocal == 0) then
345 write(*,*) 'tcp%ngl = ', thisComponentPlan%nGroupsLocal
346 status = -1
347 return
348 endif
349
350 nAtomsLocal = thisComponentPlan%nAtomsLocal
351 nGroupsLocal = thisComponentPlan%nGroupsLocal
352
353 call mpi_allreduce(nAtomsLocal, nAtomsInRow, 1, mpi_integer, &
354 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
355 if (mpiErrors /= 0) then
356 status = -1
357 return
358 endif
359
360 call mpi_allreduce(nAtomsLocal, nAtomsInColumn, 1, mpi_integer, &
361 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
362 if (mpiErrors /= 0) then
363 status = -1
364 return
365 endif
366
367 call mpi_allreduce(nGroupsLocal, nGroupsInRow, 1, mpi_integer, &
368 mpi_sum, thisComponentPlan%rowComm, mpiErrors)
369 if (mpiErrors /= 0) then
370 status = -1
371 return
372 endif
373
374 call mpi_allreduce(nGroupsLocal, nGroupsInColumn, 1, mpi_integer, &
375 mpi_sum, thisComponentPlan%columnComm, mpiErrors)
376 if (mpiErrors /= 0) then
377 status = -1
378 return
379 endif
380
381 thisComponentPlan%nAtomsInRow = nAtomsInRow
382 thisComponentPlan%nAtomsInColumn = nAtomsInColumn
383 thisComponentPlan%nGroupsInRow = nGroupsInRow
384 thisComponentPlan%nGroupsInColumn = nGroupsInColumn
385
386 end subroutine updateGridComponents
387
388
389 !! Creates a square force decomposition of processors into row and column
390 !! communicators.
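!! nColumns is chosen as the largest divisor of the processor count that
!! does not exceed nint(sqrt(nProcessors)), and nRows = nProcessors/nColumns.
!! For example, 12 processors give a 4 x 3 grid; world rank 7 lands in
!! row 7/3 = 2 and column mod(7,3) = 1.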
391 subroutine make_Force_Grid(thisComponentPlan,status)
392 type (mpiComponentPlan) :: thisComponentPlan
393 integer, intent(out) :: status !! status returns -1 if error
394 integer :: nColumnsMax !! Maximum number of columns
395 integer :: nWorldProcessors !! Total number of processors in World comm.
396 integer :: rowIndex !! Row for this processor.
397 integer :: columnIndex !! Column for this processor.
398 integer :: nRows !! Total number of rows.
399 integer :: nColumns !! Total number of columns.
400 integer :: mpiErrors !! MPI errors.
401 integer :: rowCommunicator !! MPI row communicator.
402 integer :: columnCommunicator
403 integer :: myWorldRank
404 integer :: i
405
406
407 status = 0
408 if (.not. ComponentPlanSet) return
409
410 !! We make a dangerous assumption here that if nProcessors is
411 !! zero, then we need to get the information from MPI.
412 if (thisComponentPlan%nProcessors == 0 ) then
413 call mpi_comm_size( MPI_COMM_WORLD, nWorldProcessors,mpiErrors)
414 if ( mpiErrors /= 0 ) then
415 status = -1
416 return
417 endif
418 call mpi_comm_rank( MPI_COMM_WORLD,myWorldRank,mpiErrors)
419 if ( mpiErrors /= 0 ) then
420 status = -1
421 return
422 endif
423
424 else
425 nWorldProcessors = thisComponentPlan%nProcessors
426 myWorldRank = thisComponentPlan%myNode
427 endif
428
429 nColumnsMax = nint(sqrt(real(nWorldProcessors,kind=dp)))
430
431 do i = 1, nColumnsMax
432 if (mod(nWorldProcessors,i) == 0) nColumns = i
433 end do
434
435 nRows = nWorldProcessors/nColumns
436
437 rowIndex = myWorldRank/nColumns
438
439 call mpi_comm_split(mpi_comm_world,rowIndex,0,rowCommunicator,mpiErrors)
440 if ( mpiErrors /= 0 ) then
441 write(default_error,*) "MPI comm split failed at row communicator"
442 status = -1
443 return
444 endif
445
446 columnIndex = mod(myWorldRank,nColumns)
447 call mpi_comm_split(mpi_comm_world,columnIndex,0,columnCommunicator,mpiErrors)
448 if ( mpiErrors /= 0 ) then
449 write(default_error,*) "MPI comm split failed at columnCommunicator"
450 status = -1
451 return
452 endif
453
454 ! Set appropriate components of thisComponentPlan
455 thisComponentPlan%rowComm = rowCommunicator
456 thisComponentPlan%columnComm = columnCommunicator
457 thisComponentPlan%rowIndex = rowIndex
458 thisComponentPlan%columnIndex = columnIndex
459 thisComponentPlan%nRows = nRows
460 thisComponentPlan%nColumns = nColumns
461
462 end subroutine make_Force_Grid
463
464 !! Initializes a gather/scatter plan.
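!! The plan duplicates the supplied communicator, records this rank's
!! contribution (nDim * nObjects) as gsPlanSize, gathers every rank's
!! contribution into counts, and stores the running offsets in displs.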
465 subroutine plan_gather_scatter( nDim, nObjects, thisComponentPlan, &
466 thisComm, this_plan, status)
467 integer, intent(in) :: nDim !! Number of dimensions for gather scatter plan
468 integer, intent(in) :: nObjects
469 type (mpiComponentPlan), intent(in), target :: thisComponentPlan
470 type (gs_plan), intent(out) :: this_plan !! MPI Component Plan
471 integer, intent(in) :: thisComm !! MPI communicator for this plan
472
473 integer :: arraySize !! size to allocate plan for
474 integer, intent(out), optional :: status
475 integer :: ierror
476 integer :: i,junk
477
478 if (present(status)) status = 0
479
480 !! Set gsComponentPlan pointer
481 !! to the component plan we want to use for this gather/scatter plan.
482 !! WARNING: this could be dangerous since thisComponentPlan was originally
483 !! allocated in C++ and there is a significant difference between C and
484 !! F95 pointers....
485 this_plan%gsComponentPlan => thisComponentPlan
486
487 ! Set this plan size for displs array.
488 this_plan%gsPlanSize = nDim * nObjects
489
490 ! Duplicate communicator for this plan
491 call mpi_comm_dup(thisComm, this_plan%myPlanComm, mpi_err)
492 if (mpi_err /= 0) then
493 if (present(status)) status = -1
494 return
495 end if
496 call mpi_comm_rank(this_plan%myPlanComm, this_plan%myPlanRank, mpi_err)
497 if (mpi_err /= 0) then
498 if (present(status)) status = -1
499 return
500 end if
501
502 call mpi_comm_size(this_plan%myPlanComm, this_plan%planNprocs, mpi_err)
503
504 if (mpi_err /= 0) then
505 if (present(status)) status = -1
506 return
507 end if
508
509 !! counts and displacements arrays are indexed from 0 to be compatible
510 !! with MPI arrays.
511 allocate (this_plan%counts(0:this_plan%planNprocs-1),STAT=ierror)
512 if (ierror /= 0) then
513 if (present(status)) status = -1
514 return
515 end if
516
517 allocate (this_plan%displs(0:this_plan%planNprocs-1),STAT=ierror)
518 if (ierror /= 0) then
519 if (present(status)) status = -1
520 return
521 end if
522
523 !! Gather all the local plan sizes into an array of length planNprocs.
524 call mpi_allgather(this_plan%gsPlanSize,1,mpi_integer,this_plan%counts, &
525 1,mpi_integer,thisComm,mpi_err)
526
527 if (mpi_err /= 0) then
528 if (present(status)) status = -1
529 return
530 end if
531
532 !! figure out the total number of components in this plan
533 this_plan%globalPlanSize = sum(this_plan%counts)
534
535 !! initialize plan displacements.
536 this_plan%displs(0) = 0
537 do i = 1, this_plan%planNprocs - 1,1
538 this_plan%displs(i) = this_plan%displs(i-1) + this_plan%counts(i-1)
539 end do
540 end subroutine plan_gather_scatter
541
542 subroutine unplan_gather_scatter(this_plan)
543 type (gs_plan), intent(inout) :: this_plan
544
545 this_plan%gsComponentPlan => null()
546 call mpi_comm_free(this_plan%myPlanComm,mpi_err)
547
548 deallocate(this_plan%counts)
549 deallocate(this_plan%displs)
550
551 end subroutine unplan_gather_scatter
552
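!! The gather routines wrap mpi_allgatherv: every rank in the plan
!! communicator receives the concatenation of all local send buffers,
!! ordered by rank according to counts and displs.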
553 subroutine gather_integer( sbuffer, rbuffer, this_plan, status)
554
555 type (gs_plan), intent(inout) :: this_plan
556 integer, dimension(:), intent(inout) :: sbuffer
557 integer, dimension(:), intent(inout) :: rbuffer
558 integer :: noffset
559 integer, intent(out), optional :: status
560 integer :: i
561
562 if (present(status)) status = 0
563 noffset = this_plan%displs(this_plan%myPlanRank)
564
565 ! if (getmyNode() == 1) then
566 ! write(*,*) "Node 0 printing allgatherv vars"
567 ! write(*,*) "Noffset: ", noffset
568 ! write(*,*) "PlanSize: ", this_plan%gsPlanSize
569 ! write(*,*) "PlanComm: ", this_plan%myPlanComm
570 ! end if
571
572 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_integer, &
573 rbuffer,this_plan%counts,this_plan%displs,mpi_integer, &
574 this_plan%myPlanComm, mpi_err)
575
576 if (mpi_err /= 0) then
577 if (present(status)) status = -1
578 endif
579
580 end subroutine gather_integer
581
582 subroutine gather_double( sbuffer, rbuffer, this_plan, status)
583
584 type (gs_plan), intent(in) :: this_plan
585 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
586 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
587 integer :: noffset
588 integer, intent(out), optional :: status
589
590
591 if (present(status)) status = 0
592 noffset = this_plan%displs(this_plan%myPlanRank)
593 #ifdef PROFILE
594 call cpu_time(commTimeInitial)
595 #endif
596 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
597 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
598 this_plan%myPlanComm, mpi_err)
599 #ifdef PROFILE
600 call cpu_time(commTimeFinal)
601 commTime = commTime + commTimeFinal - commTimeInitial
602 #endif
603
604 if (mpi_err /= 0) then
605 if (present(status)) status = -1
606 endif
607
608 end subroutine gather_double
609
610 subroutine gather_double_2d( sbuffer, rbuffer, this_plan, status)
611
612 type (gs_plan), intent(in) :: this_plan
613 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
614 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
615 integer :: noffset,i,ierror
616 integer, intent(out), optional :: status
617
618 external mpi_allgatherv
619
620 if (present(status)) status = 0
621
622 ! noffset = this_plan%displs(this_plan%me)
623 #ifdef PROFILE
624 call cpu_time(commTimeInitial)
625 #endif
626
627 call mpi_allgatherv(sbuffer,this_plan%gsPlanSize, mpi_double_precision, &
628 rbuffer,this_plan%counts,this_plan%displs,mpi_double_precision, &
629 this_plan%myPlanComm, mpi_err)
630
631 #ifdef PROFILE
632 call cpu_time(commTimeFinal)
633 commTime = commTime + commTimeFinal - commTimeInitial
634 #endif
635
636 if (mpi_err /= 0) then
637 if (present(status)) status = -1
638 endif
639
640 end subroutine gather_double_2d
641
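!! The scatter routines wrap mpi_reduce_scatter: contributions from all
!! ranks are summed element-wise, and each rank receives only its own
!! block, whose length is given by its entry in counts.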
642 subroutine scatter_double( sbuffer, rbuffer, this_plan, status)
643
644 type (gs_plan), intent(in) :: this_plan
645 real( kind = DP ), dimension(:), intent(inout) :: sbuffer
646 real( kind = DP ), dimension(:), intent(inout) :: rbuffer
647 integer, intent(out), optional :: status
648 external mpi_reduce_scatter
649
650 if (present(status)) status = 0
651
652 #ifdef PROFILE
653 call cpu_time(commTimeInitial)
654 #endif
655 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
656 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
657 #ifdef PROFILE
658 call cpu_time(commTimeFinal)
659 commTime = commTime + commTimeFinal - commTimeInitial
660 #endif
661
662 if (mpi_err /= 0) then
663 if (present(status)) status = -1
664 endif
665
666 end subroutine scatter_double
667
668 subroutine scatter_double_2d( sbuffer, rbuffer, this_plan, status)
669
670 type (gs_plan), intent(in) :: this_plan
671 real( kind = DP ), dimension(:,:), intent(inout) :: sbuffer
672 real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
673 integer, intent(out), optional :: status
674 external mpi_reduce_scatter
675
676 if (present(status)) status = 0
677 #ifdef PROFILE
678 call cpu_time(commTimeInitial)
679 #endif
680
681 call mpi_reduce_scatter(sbuffer,rbuffer, this_plan%counts, &
682 mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)
683 #ifdef PROFILE
684 call cpu_time(commTimeFinal)
685 commTime = commTime + commTimeFinal - commTimeInitial
686 #endif
687
688 if (mpi_err /= 0) then
689 if (present(status)) status = -1
690 endif
691
692 end subroutine scatter_double_2d
693
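!! Stores the local-to-global atom tags and rebuilds the row and column
!! tag maps by gathering the local tags along each communicator.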
694 subroutine setAtomTags(tags, status)
695 integer, dimension(:) :: tags
696 integer :: status
697
698 integer :: alloc_stat
699
700 integer :: nAtomsInCol
701 integer :: nAtomsInRow
702
703 status = 0
704 ! allocate row arrays
705 nAtomsInRow = getNatomsInRow(plan_atom_row)
706 nAtomsInCol = getNatomsInCol(plan_atom_col)
707
708 if(.not. allocated(AtomLocalToGlobal)) then
709 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
710 if (alloc_stat /= 0 ) then
711 status = -1
712 return
713 endif
714 else
715 deallocate(AtomLocalToGlobal)
716 allocate(AtomLocalToGlobal(size(tags)),STAT=alloc_stat)
717 if (alloc_stat /= 0 ) then
718 status = -1
719 return
720 endif
721
722 endif
723
724 AtomLocalToGlobal = tags
725
726 if (.not. allocated(AtomRowToGlobal)) then
727 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
728 if (alloc_stat /= 0 ) then
729 status = -1
730 return
731 endif
732 else
733 deallocate(AtomRowToGlobal)
734 allocate(AtomRowToGlobal(nAtomsInRow),STAT=alloc_stat)
735 if (alloc_stat /= 0 ) then
736 status = -1
737 return
738 endif
739
740 endif
741 ! allocate column arrays
742 if (.not. allocated(AtomColToGlobal)) then
743 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
744 if (alloc_stat /= 0 ) then
745 status = -1
746 return
747 endif
748 else
749 deallocate(AtomColToGlobal)
750 allocate(AtomColToGlobal(nAtomsInCol),STAT=alloc_stat)
751 if (alloc_stat /= 0 ) then
752 status = -1
753 return
754 endif
755 endif
756
757 call gather(tags, AtomRowToGlobal, plan_atom_row)
758 call gather(tags, AtomColToGlobal, plan_atom_col)
759
760 end subroutine setAtomTags
761
762 subroutine setGroupTags(tags, status)
763 integer, dimension(:) :: tags
764 integer :: status
765
766 integer :: alloc_stat
767
768 integer :: nGroupsInCol
769 integer :: nGroupsInRow
770
771 status = 0
772
773 nGroupsInRow = getNgroupsInRow(plan_group_row)
774 nGroupsInCol = getNgroupsInCol(plan_group_col)
775
776 if(allocated(GroupLocalToGlobal)) then
777 deallocate(GroupLocalToGlobal)
778 endif
779 allocate(GroupLocalToGlobal(size(tags)),STAT=alloc_stat)
780 if (alloc_stat /= 0 ) then
781 status = -1
782 return
783 endif
784
785 GroupLocalToGlobal = tags
786
787 if(allocated(GroupRowToGlobal)) then
788 deallocate(GroupRowToGlobal)
789 endif
790 allocate(GroupRowToGlobal(nGroupsInRow),STAT=alloc_stat)
791 if (alloc_stat /= 0 ) then
792 status = -1
793 return
794 endif
795
796 if(allocated(GroupColToGlobal)) then
797 deallocate(GroupColToGlobal)
798 endif
799 allocate(GroupColToGlobal(nGroupsInCol),STAT=alloc_stat)
800 if (alloc_stat /= 0 ) then
801 status = -1
802 return
803 endif
804
805 call gather(tags, GroupRowToGlobal, plan_group_row)
806 call gather(tags, GroupColToGlobal, plan_group_col)
807
808 end subroutine setGroupTags
809
810 function getNatomsInCol(thisplan) result(nInCol)
811 type (gs_plan), intent(in) :: thisplan
812 integer :: nInCol
813 nInCol = thisplan%gsComponentPlan%nAtomsInColumn
814 end function getNatomsInCol
815
816 function getNatomsInRow(thisplan) result(nInRow)
817 type (gs_plan), intent(in) :: thisplan
818 integer :: nInRow
819 nInRow = thisplan%gsComponentPlan%nAtomsInRow
820 end function getNatomsInRow
821
822 function getNgroupsInCol(thisplan) result(nInCol)
823 type (gs_plan), intent(in) :: thisplan
824 integer :: nInCol
825 nInCol = thisplan%gsComponentPlan%nGroupsInColumn
826 end function getNgroupsInCol
827
828 function getNgroupsInRow(thisplan) result(nInRow)
829 type (gs_plan), intent(in) :: thisplan
830 integer :: nInRow
831 nInRow = thisplan%gsComponentPlan%nGroupsInRow
832 end function getNgroupsInRow
833
834 function isMPISimSet() result(isthisSimSet)
835 logical :: isthisSimSet
836 if (isSimSet) then
837 isthisSimSet = .true.
838 else
839 isthisSimSet = .false.
840 endif
841 end function isMPISimSet
842
843 subroutine printComponentPlan(this_plan,printNode)
844
845 type (mpiComponentPlan), intent(in) :: this_plan
846 integer, optional :: printNode
847 logical :: print_me
848 print_me = .false.
849 if (present(printNode)) then
850 if (printNode == mpiSim%myNode) print_me = .true.
851 else
852 print_me = .true.
853 endif
854
855 if (print_me) then
856 write(default_error,*) "SetupSimParallel: writing component plan"
857
858 write(default_error,*) "nMolGlobal: ", mpiSim%nMolGlobal
859 write(default_error,*) "nAtomsGlobal: ", mpiSim%nAtomsGlobal
860 write(default_error,*) "nAtomsLocal: ", mpiSim%nAtomsLocal
861 write(default_error,*) "myNode: ", mpiSim%myNode
862 write(default_error,*) "nProcessors: ", mpiSim%nProcessors
863 write(default_error,*) "rowComm: ", mpiSim%rowComm
864 write(default_error,*) "columnComm: ", mpiSim%columnComm
865 write(default_error,*) "nRows: ", mpiSim%nRows
866 write(default_error,*) "nColumns: ", mpiSim%nColumns
867 write(default_error,*) "nAtomsInRow: ", mpiSim%nAtomsInRow
868 write(default_error,*) "nAtomsInColumn: ", mpiSim%nAtomsInColumn
869 write(default_error,*) "rowIndex: ", mpiSim%rowIndex
870 write(default_error,*) "columnIndex: ", mpiSim%columnIndex
871 endif
872 end subroutine printComponentPlan
873
874 function getMyNode() result(myNode)
875 integer :: myNode
876 myNode = mpiSim%myNode
877 end function getMyNode
878
879 #ifdef PROFILE
880 subroutine printCommTime()
881 write(*,*) "MPI communication time is: ", commTime
882 end subroutine printCommTime
883
884 function getCommTime() result(comm_time)
885 real :: comm_time
886 comm_time = commTime
887 end function getCommTime
888
889 #endif
890
891 #endif // is_mpi
892 end module mpiSimulation
893
894 #ifdef IS_MPI
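!! External (non-module) wrapper that simply forwards to setupSimParallel,
!! presumably so the C++ side can call it without knowing the Fortran
!! module name-mangling.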
895 subroutine setFsimParallel(thisComponentPlan, nAtomTags, atomTags, &
896 nGroupTags, groupTags, status)
897 use mpiSimulation
898
899 !! Passed Arguments
900 !! mpiComponentPlan struct from C
901 type (mpiComponentPlan), intent(inout) :: thisComponentPlan
902 !! Number of tags passed
903 integer, intent(in) :: nAtomTags, nGroupTags
904 !! Result status, 0 = normal, -1 = error
905 integer, intent(out) :: status
906 integer :: localStatus
907 !! Global reference tag for local particles
908 integer, dimension(nAtomTags), intent(inout) :: atomTags
909 integer, dimension(nGroupTags), intent(inout) :: groupTags
910
911 call setupSimParallel(thisComponentPlan, nAtomTags, atomTags, &
912 nGroupTags, groupTags, status)
913
914
915 end subroutine
916
917 #endif
918
919