#ifdef IS_MPI

!! MPI support for long range forces using force decomposition
!! on a square grid of processors.
!! Corresponds to mpiSimulation.cpp for C++.
!! mpi_module also contains a private interface for MPI F90 routines.
!!
!! @author Charles F. Vardeman II
!! @author Matthew Meineke
!! @version $Id: mpiSimulation_module.F90,v 1.1.1.1 2003-03-21 17:42:12 mmeineke Exp $, $Date: 2003-03-21 17:42:12 $, $Name: not supported by cvs2svn $, $Revision: 1.1.1.1 $

module mpiSimulation
  use definitions
  use mpi
  implicit none
  PRIVATE

!! PUBLIC subroutines contained in this module.
!! gather and scatter are generic interfaces to the
!! type-specific gather and scatter routines below.
  public :: gather, scatter
  public :: setupSimParallel
  public :: replanSimParallel
  public :: getNcol
  public :: getNrow
  public :: isMPISimSet
  public :: printComponentPlan
  public :: getMyNode

!! PUBLIC subroutines contained in the MPI module
  public :: mpi_bcast
  public :: mpi_allreduce
  public :: mpi_reduce
  public :: mpi_send
  public :: mpi_recv
  public :: mpi_get_processor_name
  public :: mpi_finalize

!! PUBLIC MPI variables
  public :: mpi_comm_world
  public :: mpi_character
  public :: mpi_integer
  public :: mpi_double_precision
  public :: mpi_sum
  public :: mpi_max
  public :: mpi_status_size
  public :: mpi_any_source

!! Safety logical to prevent access to the ComponentPlan until
!! it is set by C++.
  logical :: ComponentPlanSet = .false.

!! Generic MPI error variable.
  integer, public :: mpi_err

!! Include the mpiComponentPlan type. mpiComponentPlan is a
!! dual header file for both C and Fortran.
#define __FORTRAN90
#include "mpiComponentPlan.h"

!! Tags used during the force loop for parallel simulation.
  integer, allocatable, dimension(:) :: tagLocal
  integer, public, allocatable, dimension(:) :: tagRow
  integer, public, allocatable, dimension(:) :: tagColumn

!! Logical set true if mpiSimulation has been initialized.
  logical :: isSimSet = .false.

  type (mpiComponentPlan) :: mpiSim

!! gs_plan contains plans for gather and scatter routines |
79 |
type, public :: gs_plan |
80 |
private |
81 |
type (mpiComponentPlan), pointer :: gsComponentPlan => NULL() |
82 |
integer :: gsPlanSize !! size of this plan (nDim*nComponents) |
83 |
integer :: globalPlanSize !! size of all components in plan |
84 |
integer, dimension(:), pointer :: displs !! Displacements array for mpi indexed from 0. |
85 |
integer, dimension(:), pointer :: counts !! Counts array for mpi indexed from 0. |
86 |
integer :: myPlanComm !! My communicator for this plan |
87 |
integer :: myPlanRank !! My rank in this plan |
88 |
integer :: planNprocs !! Number of processors in this plan |
89 |
end type gs_plan |
! Plans for the different decompositions.
  type (gs_plan), public :: plan_row
  type (gs_plan), public :: plan_row3d
  type (gs_plan), public :: plan_col
  type (gs_plan), public :: plan_col3d
  type (gs_plan), public :: plan_row_Rotation
  type (gs_plan), public :: plan_col_Rotation
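
!! The plan names encode the per-particle block size passed to
!! plan_gather_scatter in setupSimParallel: the plain plans move 1 value
!! per particle, the 3d plans move nDim values (e.g. positions or
!! forces), and the Rotation plans move 9 values (a flattened 3x3
!! rotation matrix).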

  type (mpiComponentPlan), pointer :: simComponentPlan

! Interfaces for the gather/scatter routines.
!! Generic interface for gather.
!! Gathers a local array into a row or column array.
!! Interfaces are provided for integer, double, and
!! rank-2 double arrays.
  interface gather
     module procedure gather_integer
     module procedure gather_double
     module procedure gather_double_2d
  end interface

!! Generic interface for scatter.
!! Scatters a row or column array, adding components
!! and reducing them to a local nComponent array.
!! Interfaces are provided for double and rank-2 double arrays.
  interface scatter
     module procedure scatter_double
     module procedure scatter_double_2d
  end interface

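!! A minimal usage sketch for these interfaces (the array names q, qRow,
!! fRow, and fLocal are hypothetical; sizes assume the plans built in
!! setupSimParallel):
!!
!!   real(kind=dp), dimension(nDim, nLocal)            :: q, fLocal
!!   real(kind=dp), dimension(nDim, getNrow(plan_row)) :: qRow, fRow
!!
!!   call gather(q, qRow, plan_row3d)        ! replicate positions along the row
!!   ! ... accumulate partial forces into fRow ...
!!   call scatter(fRow, fLocal, plan_row3d)  ! sum forces back to their owners
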
contains

!! Sets up mpiComponentPlan with the structure passed from C++.
  subroutine setupSimParallel(thisComponentPlan, ntags, tags, status)
    ! Passed arguments
    ! integer, intent(inout) :: nDim !! Number of dimensions
    !! mpiComponentPlan struct from C
    type (mpiComponentPlan), intent(inout) :: thisComponentPlan
    !! Number of tags passed, nlocal
    integer, intent(in) :: ntags
    !! Result status: 0 = normal, -1 = error
    integer, intent(out) :: status
    integer :: localStatus
    !! Global reference tags for the local particles
    integer, dimension(ntags), intent(inout) :: tags

    status = 0
    if (componentPlanSet) then
       return
    endif
    componentPlanSet = .true.

    !! Copy the C component plan to Fortran.
    mpiSim = thisComponentPlan
    write(*,*) "Setting up simParallel"

    call make_Force_Grid(mpiSim, localStatus)
    if (localStatus /= 0) then
       write(default_error,*) "Error creating force grid"
       status = -1
       return
    endif

    call updateGridComponents(mpiSim, localStatus)
    if (localStatus /= 0) then
       write(default_error,*) "Error updating grid components"
       status = -1
       return
    endif

    !! Initialize the gather and scatter plans used in this simulation.
    call plan_gather_scatter(1, mpiSim%myNlocal, &
         mpiSim, mpiSim%rowComm, plan_row)
    call plan_gather_scatter(nDim, mpiSim%myNlocal, &
         mpiSim, mpiSim%rowComm, plan_row3d)
    call plan_gather_scatter(9, mpiSim%myNlocal, &
         mpiSim, mpiSim%rowComm, plan_row_Rotation)
    call plan_gather_scatter(1, mpiSim%myNlocal, &
         mpiSim, mpiSim%columnComm, plan_col)
    call plan_gather_scatter(nDim, mpiSim%myNlocal, &
         mpiSim, mpiSim%columnComm, plan_col3d)
    call plan_gather_scatter(9, mpiSim%myNlocal, &
         mpiSim, mpiSim%columnComm, plan_col_Rotation)

    ! Initialize the tag arrays.
    call setTags(tags, localStatus)
    if (localStatus /= 0) then
       status = -1
       return
    endif
    isSimSet = .true.

    ! call printComponentPlan(mpiSim,0)
  end subroutine setupSimParallel

  subroutine replanSimParallel(thisComponentPlan, status)
    ! Passed arguments
    !! mpiComponentPlan struct from C
    type (mpiComponentPlan), intent(inout) :: thisComponentPlan
    integer, intent(out) :: status
    integer :: localStatus
    integer :: mpierror

    status = 0

    call updateGridComponents(thisComponentPlan, localStatus)
    if (localStatus /= 0) then
       status = -1
       return
    endif

    !! Release the old gather/scatter plans.
    call unplan_gather_scatter(plan_row)
    call unplan_gather_scatter(plan_row3d)
    call unplan_gather_scatter(plan_row_Rotation)
    call unplan_gather_scatter(plan_col)
    call unplan_gather_scatter(plan_col3d)
    call unplan_gather_scatter(plan_col_Rotation)

    !! Rebuild the gather and scatter plans used in this simulation.
    call plan_gather_scatter(1, thisComponentPlan%myNlocal, &
         thisComponentPlan, thisComponentPlan%rowComm, plan_row)
    call plan_gather_scatter(nDim, thisComponentPlan%myNlocal, &
         thisComponentPlan, thisComponentPlan%rowComm, plan_row3d)
    call plan_gather_scatter(9, thisComponentPlan%myNlocal, &
         thisComponentPlan, thisComponentPlan%rowComm, plan_row_Rotation)
    call plan_gather_scatter(1, thisComponentPlan%myNlocal, &
         thisComponentPlan, thisComponentPlan%columnComm, plan_col)
    call plan_gather_scatter(nDim, thisComponentPlan%myNlocal, &
         thisComponentPlan, thisComponentPlan%columnComm, plan_col3d)
    call plan_gather_scatter(9, thisComponentPlan%myNlocal, &
         thisComponentPlan, thisComponentPlan%columnComm, plan_col_Rotation)

  end subroutine replanSimParallel

!! Updates the number of row and column components for the long range forces.
  subroutine updateGridComponents(thisComponentPlan, status)
    type (mpiComponentPlan) :: thisComponentPlan !! mpiComponentPlan

    !! Status return:
    !! -  0 success
    !! - -1 failure
    integer, intent(out) :: status
    integer :: nComponentsLocal
    integer :: nComponentsRow = 0
    integer :: nComponentsColumn = 0
    integer :: mpiErrors

    status = 0
    if (.not. componentPlanSet) return

    if (thisComponentPlan%myNlocal == 0) then
       status = -1
       return
    endif

    nComponentsLocal = thisComponentPlan%myNlocal

    call mpi_allreduce(nComponentsLocal, nComponentsRow, 1, mpi_integer, &
         mpi_sum, thisComponentPlan%rowComm, mpiErrors)
    if (mpiErrors /= 0) then
       status = -1
       return
    endif

    call mpi_allreduce(nComponentsLocal, nComponentsColumn, 1, mpi_integer, &
         mpi_sum, thisComponentPlan%columnComm, mpiErrors)
    if (mpiErrors /= 0) then
       status = -1
       return
    endif

    thisComponentPlan%nComponentsRow = nComponentsRow
    thisComponentPlan%nComponentsColumn = nComponentsColumn

  end subroutine updateGridComponents
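
!! For example, on a 2x2 grid where each processor holds myNlocal = 10
!! particles, each all-reduce above sums two local counts, so both
!! nComponentsRow and nComponentsColumn come out to 20.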

!! Creates a square force decomposition of the processors into row and
!! column communicators.
  subroutine make_Force_Grid(thisComponentPlan, status)
    type (mpiComponentPlan) :: thisComponentPlan
    integer, intent(out) :: status  !! status returns -1 on error
    integer :: nColumnsMax  !! Maximum number of columns
    integer :: nWorldProcessors  !! Total number of processors in the world comm.
    integer :: rowIndex     !! Row for this processor.
    integer :: columnIndex  !! Column for this processor.
    integer :: nRows        !! Total number of rows.
    integer :: nColumns     !! Total number of columns.
    integer :: mpiErrors    !! MPI errors.
    integer :: rowCommunicator  !! MPI row communicator.
    integer :: columnCommunicator
    integer :: myWorldRank
    integer :: i

    status = 0
    if (.not. ComponentPlanSet) return

    !! We make a dangerous assumption here: if numberProcessors is
    !! zero, we need to get the information from MPI.
    if (thisComponentPlan%numberProcessors == 0) then
       call mpi_comm_size(MPI_COMM_WORLD, nWorldProcessors, mpiErrors)
       if (mpiErrors /= 0) then
          status = -1
          return
       endif
       call mpi_comm_rank(MPI_COMM_WORLD, myWorldRank, mpiErrors)
       if (mpiErrors /= 0) then
          status = -1
          return
       endif
    else
       nWorldProcessors = thisComponentPlan%numberProcessors
       myWorldRank = thisComponentPlan%myNode
    endif

    !! Pick the largest divisor of nWorldProcessors that does not exceed
    !! sqrt(nWorldProcessors), so the grid is as close to square as possible.
    nColumnsMax = nint(sqrt(real(nWorldProcessors, kind=dp)))

    do i = 1, nColumnsMax
       if (mod(nWorldProcessors, i) == 0) nColumns = i
    end do

    nRows = nWorldProcessors/nColumns

    rowIndex = myWorldRank/nColumns

    call mpi_comm_split(mpi_comm_world, rowIndex, 0, rowCommunicator, mpiErrors)
    if (mpiErrors /= 0) then
       write(default_error,*) "MPI comm split failed at row communicator"
       status = -1
       return
    endif

    columnIndex = mod(myWorldRank, nColumns)
    call mpi_comm_split(mpi_comm_world, columnIndex, 0, columnCommunicator, mpiErrors)
    if (mpiErrors /= 0) then
       write(default_error,*) "MPI comm split failed at column communicator"
       status = -1
       return
    endif

    ! Set the appropriate components of thisComponentPlan.
    thisComponentPlan%rowComm = rowCommunicator
    thisComponentPlan%columnComm = columnCommunicator
    thisComponentPlan%rowIndex = rowIndex
    thisComponentPlan%columnIndex = columnIndex
    thisComponentPlan%numberRows = nRows
    thisComponentPlan%numberColumns = nColumns

  end subroutine make_Force_Grid
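
!! Worked example: with nWorldProcessors = 12, nColumnsMax =
!! nint(sqrt(12.0)) = 3, and the divisor loop leaves nColumns = 3, so
!! nRows = 4. World rank 7 then lands at rowIndex = 7/3 = 2 and
!! columnIndex = mod(7,3) = 1.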

!! Initializes a gather/scatter plan.
  subroutine plan_gather_scatter(nDim, nComponents, thisComponentPlan, &
       thisComm, this_plan, status)
    integer, intent(in) :: nDim  !! Number of dimensions for the gather/scatter plan
    integer, intent(in) :: nComponents
    type (mpiComponentPlan), intent(in), target :: thisComponentPlan
    type (gs_plan), intent(out) :: this_plan  !! The resulting gather/scatter plan
    integer, intent(in) :: thisComm  !! MPI communicator for this plan

    integer :: arraySize  !! size to allocate the plan for
    integer, intent(out), optional :: status
    integer :: ierror
    integer :: i, junk

    if (present(status)) status = 0

    !! Set the gsComponentPlan pointer
    !! to the component plan we want to use for this gather/scatter plan.
    !! WARNING: this could be dangerous, since thisComponentPlan was originally
    !! allocated in C++ and there is a significant difference between C and
    !! F95 pointers....
    this_plan%gsComponentPlan => thisComponentPlan

    ! Set this plan's size for the displs array.
    this_plan%gsPlanSize = nDim * nComponents

    ! Duplicate the communicator for this plan.
    call mpi_comm_dup(thisComm, this_plan%myPlanComm, mpi_err)
    if (mpi_err /= 0) then
       if (present(status)) status = -1
       return
    end if
    call mpi_comm_rank(this_plan%myPlanComm, this_plan%myPlanRank, mpi_err)
    if (mpi_err /= 0) then
       if (present(status)) status = -1
       return
    end if

    call mpi_comm_size(this_plan%myPlanComm, this_plan%planNprocs, mpi_err)
    if (mpi_err /= 0) then
       if (present(status)) status = -1
       return
    end if

    !! The counts and displacements arrays are indexed from 0 to be
    !! compatible with MPI arrays.
    allocate (this_plan%counts(0:this_plan%planNprocs-1), STAT=ierror)
    if (ierror /= 0) then
       if (present(status)) status = -1
       return
    end if

    allocate (this_plan%displs(0:this_plan%planNprocs-1), STAT=ierror)
    if (ierror /= 0) then
       if (present(status)) status = -1
       return
    end if

    !! Gather all the local sizes into an array sized by the number of processors.
    call mpi_allgather(this_plan%gsPlanSize, 1, mpi_integer, this_plan%counts, &
         1, mpi_integer, thisComm, mpi_err)
    if (mpi_err /= 0) then
       if (present(status)) status = -1
       return
    end if

    !! Figure out the total number of particles in this plan.
    this_plan%globalPlanSize = sum(this_plan%counts)

    !! Initialize the plan displacements as a running sum of the counts.
    this_plan%displs(0) = 0
    do i = 1, this_plan%planNprocs - 1, 1
       this_plan%displs(i) = this_plan%displs(i-1) + this_plan%counts(i-1)
    end do

  end subroutine plan_gather_scatter
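
!! For example, if the three ranks in a plan report gsPlanSize values of
!! 3, 3, and 6, the allgather above yields counts = (/3,3,6/), the
!! running sum gives displs = (/0,3,6/), and globalPlanSize = 12.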

  subroutine unplan_gather_scatter(this_plan)
    type (gs_plan), intent(inout) :: this_plan

    this_plan%gsComponentPlan => null()
    call mpi_comm_free(this_plan%myPlanComm, mpi_err)

    deallocate(this_plan%counts)
    deallocate(this_plan%displs)

  end subroutine unplan_gather_scatter

  subroutine gather_integer(sbuffer, rbuffer, this_plan, status)

    type (gs_plan), intent(in) :: this_plan
    integer, dimension(:), intent(in) :: sbuffer
    integer, dimension(:), intent(inout) :: rbuffer
    integer :: noffset
    integer, intent(out), optional :: status
    integer :: i

    if (present(status)) status = 0
    noffset = this_plan%displs(this_plan%myPlanRank)

!    if (getmyNode() == 1) then
!       write(*,*) "Node 0 printing allgatherv vars"
!       write(*,*) "Noffset: ", noffset
!       write(*,*) "PlanSize: ", this_plan%gsPlanSize
!       write(*,*) "PlanComm: ", this_plan%myPlanComm
!    end if

    call mpi_allgatherv(sbuffer, this_plan%gsPlanSize, mpi_integer, &
         rbuffer, this_plan%counts, this_plan%displs, mpi_integer, &
         this_plan%myPlanComm, mpi_err)

    if (mpi_err /= 0) then
       if (present(status)) status = -1
    endif

  end subroutine gather_integer

  subroutine gather_double(sbuffer, rbuffer, this_plan, status)

    type (gs_plan), intent(in) :: this_plan
    real( kind = DP ), dimension(:), intent(in) :: sbuffer
    real( kind = DP ), dimension(:), intent(inout) :: rbuffer
    integer :: noffset
    integer, intent(out), optional :: status

    if (present(status)) status = 0
    noffset = this_plan%displs(this_plan%myPlanRank)

    call mpi_allgatherv(sbuffer, this_plan%gsPlanSize, mpi_double_precision, &
         rbuffer, this_plan%counts, this_plan%displs, mpi_double_precision, &
         this_plan%myPlanComm, mpi_err)

    if (mpi_err /= 0) then
       if (present(status)) status = -1
    endif

  end subroutine gather_double

  subroutine gather_double_2d(sbuffer, rbuffer, this_plan, status)

    type (gs_plan), intent(in) :: this_plan
    real( kind = DP ), dimension(:,:), intent(in) :: sbuffer
    real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
    integer :: noffset, i, ierror
    integer, intent(out), optional :: status

    if (present(status)) status = 0

!    noffset = this_plan%displs(this_plan%me)

    call mpi_allgatherv(sbuffer, this_plan%gsPlanSize, mpi_double_precision, &
         rbuffer, this_plan%counts, this_plan%displs, mpi_double_precision, &
         this_plan%myPlanComm, mpi_err)

    if (mpi_err /= 0) then
       if (present(status)) status = -1
    endif

  end subroutine gather_double_2d

  subroutine scatter_double(sbuffer, rbuffer, this_plan, status)

    type (gs_plan), intent(in) :: this_plan
    real( kind = DP ), dimension(:), intent(in) :: sbuffer
    real( kind = DP ), dimension(:), intent(inout) :: rbuffer
    integer, intent(out), optional :: status

    if (present(status)) status = 0

    call mpi_reduce_scatter(sbuffer, rbuffer, this_plan%counts, &
         mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)

    if (mpi_err /= 0) then
       if (present(status)) status = -1
    endif

  end subroutine scatter_double

  subroutine scatter_double_2d(sbuffer, rbuffer, this_plan, status)

    type (gs_plan), intent(in) :: this_plan
    real( kind = DP ), dimension(:,:), intent(in) :: sbuffer
    real( kind = DP ), dimension(:,:), intent(inout) :: rbuffer
    integer, intent(out), optional :: status

    if (present(status)) status = 0

    call mpi_reduce_scatter(sbuffer, rbuffer, this_plan%counts, &
         mpi_double_precision, MPI_SUM, this_plan%myPlanComm, mpi_err)

    if (mpi_err /= 0) then
       if (present(status)) status = -1
    endif

  end subroutine scatter_double_2d
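
!! Note on the scatter routines: mpi_reduce_scatter sums the full row or
!! column buffer elementwise across the plan's communicator, then hands
!! each rank only its own slice of length counts(myPlanRank). This is
!! the force accumulation step: every processor contributes partial
!! forces for all particles in its row or column, but keeps only the
!! totals for the particles it owns.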

  subroutine setTags(tags, status)
    integer, dimension(:) :: tags
    integer :: status

    integer :: alloc_stat

    integer :: ncol
    integer :: nrow

    status = 0

    ! Allocate the row and column tag arrays, replacing any old ones.
    nrow = getNrow(plan_row)
    ncol = getNcol(plan_col)

    if (allocated(tagRow)) deallocate(tagRow)
    allocate(tagRow(nrow), STAT=alloc_stat)
    if (alloc_stat /= 0) then
       status = -1
       return
    endif

    if (allocated(tagColumn)) deallocate(tagColumn)
    allocate(tagColumn(ncol), STAT=alloc_stat)
    if (alloc_stat /= 0) then
       status = -1
       return
    endif

    call gather(tags, tagRow, plan_row)
    call gather(tags, tagColumn, plan_col)

  end subroutine setTags
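
!! After setTags, tagRow and tagColumn hold the global tags of every
!! particle in this processor's row and column respectively, gathered
!! from the local tag lists with the scalar (size-1) plans.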

  pure function getNcol(thisplan) result(ncol)
    type (gs_plan), intent(in) :: thisplan
    integer :: ncol
    ncol = thisplan%gsComponentPlan%nComponentsColumn
  end function getNcol

  pure function getNrow(thisplan) result(nrow)
    type (gs_plan), intent(in) :: thisplan
    integer :: nrow
    nrow = thisplan%gsComponentPlan%nComponentsRow
  end function getNrow

  function isMPISimSet() result(isthisSimSet)
    logical :: isthisSimSet
    isthisSimSet = isSimSet
  end function isMPISimSet

  subroutine printComponentPlan(this_plan, printNode)

    type (mpiComponentPlan), intent(in) :: this_plan
    integer, optional :: printNode
    logical :: print_me

    print_me = .false.
    if (present(printNode)) then
       if (printNode == mpiSim%myNode) print_me = .true.
    else
       print_me = .true.
    endif

    if (print_me) then
       write(default_error,*) "SetupSimParallel: writing component plan"

       write(default_error,*) "nMolGlobal: ", mpiSim%nMolGlobal
       write(default_error,*) "nAtomsGlobal: ", mpiSim%nAtomsGlobal
       write(default_error,*) "nBondsGlobal: ", mpiSim%nBondsGlobal
       write(default_error,*) "nTorsionsGlobal: ", mpiSim%nTorsionsGlobal
       write(default_error,*) "nSRIGlobal: ", mpiSim%nSRIGlobal
       write(default_error,*) "myMolStart: ", mpiSim%myMolStart
       write(default_error,*) "myMolEnd: ", mpiSim%myMolEnd
       write(default_error,*) "myMol: ", mpiSim%myMol
       write(default_error,*) "myNlocal: ", mpiSim%myNlocal
       write(default_error,*) "myNode: ", mpiSim%myNode
       write(default_error,*) "numberProcessors: ", mpiSim%numberProcessors
       write(default_error,*) "rowComm: ", mpiSim%rowComm
       write(default_error,*) "columnComm: ", mpiSim%columnComm
       write(default_error,*) "numberRows: ", mpiSim%numberRows
       write(default_error,*) "numberColumns: ", mpiSim%numberColumns
       write(default_error,*) "nComponentsRow: ", mpiSim%nComponentsRow
       write(default_error,*) "nComponentsColumn: ", mpiSim%nComponentsColumn
       write(default_error,*) "rowIndex: ", mpiSim%rowIndex
       write(default_error,*) "columnIndex: ", mpiSim%columnIndex
    endif
  end subroutine printComponentPlan

  function getMyNode() result(myNode)
    integer :: myNode
    myNode = mpiSim%myNode
  end function getMyNode

end module mpiSimulation

#endif // is_mpi