mpi.F90
!>
!! \brief This module contains data and routines for MPI parallelization
!!
!! Module for C2Ray / Capreole (3D)
!!
!! \b Author: Garrelt Mellema
!!
!! \b Date: 2008-06-01
!!
!! \b Version: True MPI (no dummy). Also reports on OpenMP parallelization.
!! Log files for ranks 1 and higher are called 'log.n'; for rank 0 it is
!! 'C2Ray.log'.
!! This module is also accepted by the F compiler (Dec 9, 2003)\n
module my_mpi
! Module for Capreole (3D)
! Author: Garrelt Mellema
! Date: 2003-06-01
! This module contains the routines for using the MPI library
! mpi_setup : set up MPI
! mpi_basic : basic MPI initialization
! mpi_topology: domain decomposition
! mpi_end: close down the MPI interface
! fnd3dnbrs: find neighbours in 3D domain decomposition
! rank 0 has a log file called C2Ray.log associated with it. If
! the log unit is equal to 6, no file is opened and all log output
! is sent to standard output.
! If the code is compiled with the option -DMPILOG the other processors get
! their own log files, called log.1, log.2, etc.
! All these files are opened in mpi_setup and closed in mpi_end.
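! A minimal usage sketch (guarded by a preprocessor macro and not compiled
! by default) can be found after 'end module my_mpi' at the bottom of this file.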
! This is the system module:
!!include '/beosoft/mpich/include/mpif.h' ! necessary for MPI
!use mpi
use file_admin, only: logf, results_dir
#ifdef XLF
USE XLFUTILITY, only: hostnm => hostnm_ , flush => flush_
#endif
#ifdef IFORT
USE IFPORT, only: hostnm
#endif
#ifdef MY_OPENMP
USE OMP_LIB, only: omp_get_num_threads, omp_get_thread_num
#endif
#ifdef PGI
#ifdef MY_OPENMP
USE OMP_LIB, only: omp_get_num_threads, omp_get_thread_num
#endif
#endif
implicit none
include 'mpif.h'
integer,parameter,public :: NPDIM=3 ! dimension of problem
integer,public :: rank ! rank of the processor
integer,public :: npr ! number of processors
integer,public :: nthreads=1 ! number of threads (per processor)
integer,public :: MPI_COMM_NEW ! the (new) communicator
integer,public,dimension(MPI_STATUS_SIZE) :: mympi_status ! status array
logical,parameter :: reorder=.false. !< reorder the mpi structure (for hydro)
integer,dimension(NPDIM),public :: dims        ! number of processors in each dimension
integer,dimension(NPDIM),public :: grid_struct ! coordinates of the processors in the grid

integer,public :: nbrleft,nbrright  ! left and right neighbours
integer,public :: nbrdown,nbrup     ! up and down neighbours
integer,public :: nbrabove,nbrbelow ! above and below neighbours
contains
!----------------------------------------------------------------------------
subroutine mpi_setup ( )

  character(len=512) :: filename  ! name of the log file
  character(len=4) :: number
  integer :: ierror
  integer :: tn
  character(len=100) :: hostname

  call mpi_basic ()

  if (rank == 0) then
     if (logf /= 6) then
        filename=trim(adjustl(trim(adjustl(results_dir))//"C2Ray.log"))
        open(unit=logf,file=filename,status="unknown",action="write",&
             position="append")
     endif
     write(unit=logf,fmt="(A)") "Log file for C2-Ray run"
     write(unit=logf,fmt=*) " Number of MPI ranks used: ",npr
  endif
  ! Find the number of OpenMP threads (needed to establish the OpenMP
  ! character of the run; see evolve)
  !$omp parallel default(shared)
#ifdef MY_OPENMP
  nthreads=omp_get_num_threads()
#endif
  !$omp end parallel

  ! Report OpenMP usage
#ifdef MY_OPENMP
  if (rank == 0) then
     write(logf,*) " Running in OpenMP mode"
     write(logf,*) ' Number of OpenMP threads on MPI rank 0 is ',nthreads
  endif
#endif

#ifdef MPILOG
  ! Open processor dependent log file
  write(unit=number,fmt="(I4)") rank
  filename=trim(adjustl("log."//trim(adjustl(number))))
  if (rank /= 0) open(unit=logf,file=filename,status="unknown",action="write")
  write(unit=logf,fmt=*) "Log file for rank ",rank," of a total of ",npr

  ! Figure out hostname
  ! NOTE: compiler dependent!!!
  ierror=hostnm(hostname)
  if (ierror == 0) then
     write(logf,*) "Running on processor named ",trim(adjustl(hostname))
  else
     write(logf,*) "Error establishing identity of processor."
  endif

  ! Report number of OpenMP threads
#ifdef MY_OPENMP
  write(logf,*) ' Number of OpenMP threads is ',nthreads
#endif

  ! Let OpenMP threads report
  !$omp parallel default(shared) private(tn)
#ifdef MY_OPENMP
  tn=omp_get_thread_num()+1
  write(logf,*) 'Thread number ',tn,' reporting'
#endif
  !$omp end parallel

  write(logf,*) "almost end of mpi setup"
  flush(logf)
#endif

  if (reorder) then
     call mpi_topology ()
  else
     MPI_COMM_NEW=MPI_COMM_WORLD
  endif

#ifdef MPILOG
  write(logf,*) "end of mpi setup"
  flush(logf)
#endif

end subroutine mpi_setup
!----------------------------------------------------------------------------
subroutine mpi_basic

  integer :: ierror ! control variable for MPI

  call MPI_INIT (ierror)                         ! Initialize MPI
  call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierror) ! Find processor rank
  ! Find total number of processors (npr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD,npr,ierror)

end subroutine mpi_basic
!----------------------------------------------------------------------------
subroutine mpi_topology

  logical,dimension(NPDIM) :: periods ! for periodic grid
  logical :: reorder ! reorder MPI_COMM_WORLD (local; shadows the module parameter of the same name)
  integer :: ierror=0

  ! Make a new topology
  dims(:)=0
  call MPI_Dims_create(npr,NPDIM,dims,ierror)
#ifdef MPILOG
  if (ierror /= 0 ) write(logf,*) "error in MPI_Dims_create"
  write(logf,*) "MPI_Dims_create"
  flush(logf)
#endif

  periods(:)=.FALSE. ! non-periodic boundaries
  reorder=.TRUE.

  ! Makes MPI_COMM_NEW
  ! Warning: openmpi + gfortran seems to have problems here
  ! GM 091102
  call MPI_Cart_create(MPI_COMM_WORLD,NPDIM,dims,periods,reorder, &
       MPI_COMM_NEW,ierror)
#ifdef MPILOG
  if (ierror /= 0 ) write(logf,*) "error in MPI_Cart_create"
  write(logf,*) "MPI_Cart_create"
  flush(logf)
#endif

  ! Makes grid_struct
  call MPI_Cart_get(MPI_COMM_NEW,NPDIM,dims, &
       periods,grid_struct,ierror)
#ifdef MPILOG
  if (ierror /= 0 ) write(logf,*) "error in MPI_Cart_get"
  write(logf,*) "MPI_Cart_get"
  flush(logf)
#endif

  ! Find the neighbours.
  ! My neighbours are the ranks +/- 1 from mine along each dimension.
  ! The boundary cases are handled by MPI_PROC_NULL.
  call fnd3dnbrs ()

end subroutine mpi_topology
!----------------------------------------------------------------------------
subroutine mpi_end

  integer :: ierror=0
  logical :: openlog

  ! Find out if log file is open
  inquire(unit=logf,opened=openlog)

  ! Close log file
  if (openlog) close(logf)

  ! Close MPI
  call MPI_FINALIZE(ierror)

end subroutine mpi_end
!----------------------------------------------------------------------------
subroutine fnd3dnbrs

  ! This routine determines the neighbours in a 3-d decomposition of
  ! the grid. This assumes that MPI_Cart_create has already been called.

  integer :: ierror=0

  call MPI_Cart_shift( MPI_COMM_NEW, 0, 1, nbrleft,  nbrright, ierror )
#ifdef MPILOG
  if (ierror /= 0 ) write(logf,*) "error in MPI_Cart_shift",0
  write(logf,*) "MPI_Cart_shift",0
  flush(logf)
#endif

  call MPI_Cart_shift( MPI_COMM_NEW, 1, 1, nbrdown,  nbrup, ierror )
#ifdef MPILOG
  if (ierror /= 0 ) write(logf,*) "error in MPI_Cart_shift",1
  write(logf,*) "MPI_Cart_shift",1
  flush(logf)
#endif

  call MPI_Cart_shift( MPI_COMM_NEW, 2, 1, nbrbelow, nbrabove, ierror )
#ifdef MPILOG
  if (ierror /= 0 ) write(logf,*) "error in MPI_Cart_shift",2
  write(logf,*) "MPI_Cart_shift",2
  flush(logf)
#endif

end subroutine fnd3dnbrs
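#ifdef MY_MPI_EXAMPLE
!----------------------------------------------------------------------------
! Hedged sketch, not part of the original module: it illustrates how the
! neighbour ranks set by fnd3dnbrs might be used in a simple halo exchange
! along the first decomposition direction. The guard macro MY_MPI_EXAMPLE
! and the routine and argument names are hypothetical; this code is not
! compiled unless that macro is defined.
subroutine example_halo_exchange (field, nx)

  integer,intent(in) :: nx                      ! number of interior cells
  real,dimension(0:nx+1),intent(inout) :: field ! interior cells plus two ghost cells
  integer :: ierror=0

  ! Send the rightmost interior cell to nbrright and receive the left ghost
  ! cell from nbrleft; MPI_PROC_NULL neighbours make this a no-op at the
  ! domain boundaries.
  call MPI_Sendrecv(field(nx),1,MPI_REAL,nbrright,1, &
       field(0),1,MPI_REAL,nbrleft,1, &
       MPI_COMM_NEW,mympi_status,ierror)

  ! Send the leftmost interior cell to nbrleft and receive the right ghost
  ! cell from nbrright.
  call MPI_Sendrecv(field(1),1,MPI_REAL,nbrleft,2, &
       field(nx+1),1,MPI_REAL,nbrright,2, &
       MPI_COMM_NEW,mympi_status,ierror)

end subroutine example_halo_exchange
#endif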
end module my_mpi
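
#ifdef MY_MPI_EXAMPLE
! Hedged usage sketch, not part of the original code: a minimal driver
! showing the call sequence described in the module header (mpi_setup to
! initialize, mpi_end to close down). The guard macro MY_MPI_EXAMPLE and
! the program name are hypothetical, so this program is not compiled by
! default.
program my_mpi_example

  use my_mpi, only: mpi_setup, mpi_end, rank, npr

  implicit none

  call mpi_setup ()  ! initialize MPI (and OpenMP reporting), open log files
  if (rank == 0) write(*,*) "Running on ",npr," MPI rank(s)"
  ! ... the actual computation would go here ...
  call mpi_end ()    ! close log files and finalize MPI

end program my_mpi_example
#endif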