!>
!! \brief This module contains routines distributing the work over a number
!! of (MPI) slaves.
!! The module supplies one routine to the outside (do_grid),
!! but internally has two
!! methods to distribute the work: one which distributes
!! the work statically over the available MPI processes (do_grid_static)
!! and one which works with a master-slave setup (do_grid_master_slave).
!! In the latter setup, rank 0 is the master and distributes the work over
!! the slaves dynamically.
!! Which of the two is used depends on the number of MPI processes:
!! above min_numproc_master_slave (module parameter) the master-slave setup
!! is used.
!!
!! The actual work done is contained in the (external) routine do_source.
!!
!! Module for Capreole / C2-Ray (f90)
!!
!! \b Author: Garrelt Mellema
!!
!! \b Date: 2013-09-05
!!
!! \b Version:
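!!
!! \b Usage: all MPI processes are expected to call do_grid(dt,niter)
!! together (typically once per time step); do_grid itself chooses between
!! the static and the master-slave distribution, so the caller does not
!! have to distinguish between ranks.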
module master_slave_processing

  use precision, only: dp
  use my_mpi ! supplies all the MPI and OpenMP definitions and variables
  use file_admin, only: logf,timefile,iterdump, results_dir, dump_dir
  use clocks, only: timestamp_wallclock
  use sourceprops, only: NumSrc, srcpos
  use evolve_source, only: do_source

  implicit none

  private

  public :: do_grid

  !> Minimum number of MPI processes for using the master-slave setup
  integer, parameter :: min_numproc_master_slave=10

  !> Report on the sending of every check_progress-th source
  !! (e.g. every 1000th source if check_progress is 1000)
  integer, parameter :: check_progress = 1000000

contains

  ! ===========================================================================

  !> Does the ray-tracing over the sources by distributing
  !! the sources over the available MPI processes, either statically
  !! or through the master-slave setup.
  subroutine do_grid (dt,niter)

    real(kind=dp),intent(in) :: dt !< time step, passed on to evolve0D
    integer,intent(in) :: niter !< iteration counter, passed on to evolve0D

    ! Ray trace the whole grid for all sources.
    ! We can do this in two ways, depending on
    ! the number of processors. For many processors
    ! the master-slave setup should be more efficient.
    if (npr > min_numproc_master_slave) then
      call do_grid_master_slave (dt,niter)
    else
      call do_grid_static (dt,niter)
    endif

  end subroutine do_grid

  ! ===========================================================================

  !> Does the ray-tracing over the sources by distributing
  !! the sources evenly over the available MPI processes.
  subroutine do_grid_static (dt,niter)

    ! Does the ray-tracing over the sources by distributing
    ! the sources evenly over the available MPI processes.

    real(kind=dp),intent(in) :: dt !< time step, passed on to evolve0D
    integer,intent(in) :: niter !< iteration counter, passed on to evolve0D

    integer :: ns1

    ! Source Loop - distributed for the MPI nodes
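    ! With a stride of npr the distribution is round-robin: rank 0 handles
    ! sources 1, 1+npr, 1+2*npr, ..., rank 1 handles sources 2, 2+npr, ...,
    ! and so on. No communication between the processes is needed here.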
    do ns1=1+rank,NumSrc,npr

#ifdef MPILOG
      ! Report
      write(logf,*) 'Processor ',rank,' received: ',ns1
      write(logf,*) ' that is source ',ns1 !SrcSeries(ns1)
      write(logf,*) ' at:',srcpos(:,ns1)
      flush(logf)
#endif

      call do_source(dt,ns1,niter)

    enddo

  end subroutine do_grid_static

  ! ===========================================================================

  !> Ray tracing the entire grid for all the sources using the
  !! master-slave model for distributing the sources over the
  !! MPI processes.
  subroutine do_grid_master_slave (dt,niter)

    ! Ray tracing the entire grid for all the sources using the
    ! master-slave model for distributing the sources over the
    ! MPI processes.

    real(kind=dp),intent(in) :: dt !< time step, passed on to evolve0D
    integer,intent(in) :: niter !< iteration counter, passed on to evolve0D

    if (rank == 0) then
      call do_grid_master ()
    else
      call do_grid_slave (dt,niter)
    endif

  end subroutine do_grid_master_slave

  ! ===========================================================================

  !> The master task in the master-slave setup for distributing
  !! the ray-tracing over the sources among the MPI processes.
  subroutine do_grid_master ()

    ! The master task in the master-slave setup for distributing
    ! the ray-tracing over the sources among the MPI processes.

    integer :: ns1
    integer :: sources_done,whomax,who,answer

    ! counter for master-slave process
    integer,dimension(:),allocatable :: counts

#ifdef MPI
    integer :: mympierror
#endif

#ifdef MPI
    ! Source Loop - Master Slave with rank=0 as Master
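    ! The message tags used between master and slaves are:
    !   tag 1: work messages (the master sends a source number; a slave
    !          replies with its running count when a source is finished)
    !   tag 2: stop signal from the master to a slave
    !   tag 7: final per-slave source count, reported back to the master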
    sources_done = 0
    ns1 = 0

    ! Allocate counter for master-slave process
    if (.not.(allocated(counts))) allocate(counts(0:npr-1))

    ! send tasks to slaves
    whomax = min(NumSrc,npr-1)
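    ! whomax limits this first round of sends to min(NumSrc,npr-1), so
    ! that no more slaves are given work than there are sources.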
    do who=1,whomax
      if (ns1 <= NumSrc) then
        ns1=ns1+1
        call MPI_Send (ns1, 1, MPI_INTEGER, who, 1, &
             MPI_COMM_NEW, mympierror)
      endif
    enddo

    do while (sources_done < NumSrc)

      ! wait for an answer from a slave.
      call MPI_Recv (answer,         & ! address of receive buffer
           1,              & ! number of items to receive
           MPI_INTEGER,    & ! type of data
           MPI_ANY_SOURCE, & ! can receive from any other
           1,              & ! tag
           MPI_COMM_NEW,   & ! communicator
           mympi_status,   & ! status
           mympierror)

      who = mympi_status(MPI_SOURCE) ! find out who sent us the answer
      sources_done=sources_done+1    ! and update the number of sources done

      ! put the slave to work again,
      ! but only if not all tasks have been sent.
      ! we use the value of ns1 to detect this.
      if (ns1 < NumSrc) then
        ns1=ns1+1
        ! Report on the sending of the nth source
        if (mod(ns1,check_progress) == 0) then
          write(logf,"(A,I12,A,I12,A,I6,A,I12)") &
               "Sending source ",ns1," of ",NumSrc," to processor ",who, &
               " sources done ", sources_done
          flush(logf)
        endif
        call MPI_Send (ns1, 1, MPI_INTEGER, &
             who,          &
             1,            &
             MPI_COMM_NEW, &
             mympierror)
      endif

    enddo
    ! Now master sends a message to the slaves to signify that they
    ! should end the calculations. We use a special tag for that:
    do who = 1,npr-1
      call MPI_Send (0, 1, MPI_INTEGER, &
           who,          &
           2,            & ! tag
           MPI_COMM_NEW, &
           mympierror)

      ! the slave will send to master the number of calculations
      ! that have been performed.
      ! We put this number in the counts array.
      call MPI_Recv (counts(who),  & ! address of receive buffer
           1,            & ! number of items to receive
           MPI_INTEGER,  & ! type of data
           who,          & ! receive from process who
           7,            & ! tag
           MPI_COMM_NEW, & ! communicator
           mympi_status, & ! status
           mympierror)
    enddo

    ! Reporting to the log file
    write(logf,*) 'Mean number of sources per processor: ', &
         real(NumSrc)/real(npr-1)
    write(logf,*) 'Counted mean number of sources per processor: ', &
         real(sum(counts(1:npr-1)))/real(npr-1)
    write(logf,*) 'Minimum and maximum number of sources ', &
         'per processor: ', &
         minval(counts(1:npr-1)),maxval(counts(1:npr-1))
    flush(logf)
#endif

  end subroutine do_grid_master

  ! ===========================================================================

  !> The slave task in the master-slave setup for distributing
  !! the ray-tracing over the sources among the MPI processes.
  subroutine do_grid_slave(dt,niter)

    ! The slave task in the master-slave setup for distributing
    ! the ray-tracing over the sources among the MPI processes.

    real(kind=dp),intent(in) :: dt !< time step, passed on to evolve0D
    integer,intent(in) :: niter !< iteration counter, passed on to evolve0D

    integer :: local_count
    integer :: ns1

#ifdef MPI
    integer :: mympierror
#endif

#ifdef MPI
    local_count=0
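    ! A slave blocks in MPI_Recv until the master sends it a message:
    ! a source number with tag 1 means "process this source and report
    ! back"; tag 2 means "stop". After the stop signal the slave sends
    ! its local source count back to the master with tag 7.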
    call MPI_Recv (ns1,          & ! address of receive buffer
         1,            & ! number of items to receive
         MPI_INTEGER,  & ! type of data
         0,            & ! can receive from master only
         MPI_ANY_TAG,  & ! can expect two values, so
                         ! we use the wildcard MPI_ANY_TAG
                         ! here
         MPI_COMM_NEW, & ! communicator
         mympi_status, & ! status
         mympierror)

    ! if tag equals 2, then skip the calculations
    if (mympi_status(MPI_TAG) /= 2) then

      do

#ifdef MPILOG
        ! Report
        write(logf,*) 'Processor ',rank,' received: ',ns1
        write(logf,*) ' that is source ',ns1 !SrcSeries(ns1)
        write(logf,*) ' at:',srcpos(:,ns1)
        flush(logf)
#endif

        ! Do the source at hand
        call do_source(dt,ns1,niter)

        ! Update local counter
        local_count=local_count+1

        ! Send 'answer'
        call MPI_Send (local_count, 1, & ! sending one int
             MPI_INTEGER, 0,  & ! to master
             1,               & ! tag
             MPI_COMM_NEW,    & ! communicator
             mympierror)

        ! Receive new source number
        call MPI_Recv (ns1,          & ! address of receive buffer
             1,            & ! number of items to receive
             MPI_INTEGER,  & ! type of data
             0,            & ! can receive from master only
             MPI_ANY_TAG,  & ! can expect two values, so
                             ! we use the wildcard MPI_ANY_TAG
                             ! here
             MPI_COMM_NEW, & ! communicator
             mympi_status, & ! status
             mympierror)

        ! leave this loop if tag equals 2
        if (mympi_status(MPI_TAG) == 2) then
#ifdef MPILOG
          write(logf,*) 'Stop received'
          flush(logf)
#endif
          exit
        endif

      enddo

    endif

    ! this is the point that is reached when a task is received with
    ! tag = 2:
    ! send the number of calculations to master and return

#ifdef MPILOG
    ! Report
    write(logf,*) 'Processor ',rank,' did ',local_count,' sources'
    flush(logf)
#endif

    call MPI_Send (local_count, &
         1,            &
         MPI_INTEGER,  & ! sending one int
         0,            & ! to master
         7,            & ! tag
         MPI_COMM_NEW, & ! communicator
         mympierror)
#endif

  end subroutine do_grid_slave

end module master_slave_processing
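
! ===========================================================================
! Illustrative driver sketch (not part of the module above). It is only
! meant to show the intended call pattern of do_grid, under the assumption
! that the my_mpi and sourceprops modules have set up the parallel
! environment and the source list beforehand. The guard macro
! EXAMPLE_DRIVER, the routines mpi_setup and mpi_end, and the chosen time
! step and iteration count are all hypothetical/arbitrary stand-ins for
! whatever the real driver of this code provides.
#ifdef EXAMPLE_DRIVER
program example_raytrace_driver

  use precision, only: dp
  use my_mpi ! assumed to provide rank, npr and MPI_COMM_NEW after setup
  use master_slave_processing, only: do_grid

  implicit none

  real(kind=dp) :: dt
  integer :: niter

  call mpi_setup () ! hypothetical MPI initialisation routine

  dt = 1.0e13_dp ! arbitrary time step for illustration
  do niter=1,3   ! arbitrary number of iterations
    ! Every rank calls do_grid; when the master-slave setup is selected,
    ! rank 0 acts as the master and the other ranks act as slaves.
    call do_grid (dt,niter)
  enddo

  call mpi_end () ! hypothetical MPI finalisation routine

end program example_raytrace_driver
#endif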