xref: /phasta/phSolver/common/turbsa.f (revision 3de22adcea938dcbea5f2b4ef2369694350775bd)
c-----------------------------------------------------------------------
c
c     Spalart-Allmaras turbulence model constants
c
c-----------------------------------------------------------------------
      module turbSA

      real*8, allocatable ::  d2wall(:)
      real*8, allocatable ::  wnrm(:,:)
      integer, allocatable :: otwn(:)
      real*8  saCb1, saCb2, saCw1, saCw2, saCw3, saCv1, saSigma,
     &        saKappa, saKappaP2Inv, saCv1P3, saCw3P6, saSigmaInv
      integer, allocatable :: sidmapg(:) ! list of all surfID's, low to high
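c
c.... the precomputed (derived) values in the parameter list below satisfy
c       saCw1        = saCb1/saKappa**2 + (1 + saCb2)/saSigma
c       saKappaP2Inv = 1/saKappa**2
c       saCv1P3      = saCv1**3
c       saCw3P6      = saCw3**6
c       saSigmaInv   = 1/saSigma
c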

      parameter (
     &     saCb1        = 0.1355d0,
     &     saCb2        = 0.622d0,
     &     saCw1        = 3.239067817d0,
     &     saCw2        = 0.3d0,
     &     saCw3        = 2.0d0,
     &     saKappa      = 0.41d0,
     &     saSigma      = 0.666666666666666667d0,
     &     saCv1        = 7.1d0,
     &     saKappaP2Inv = 5.94883997620464d0,
     &     saCv1P3      = 3.579109999999999d+02,
     &     saCw3P6      = 64.0d0,
     &     saSigmaInv   = 1.50d0
     &     )


      end module

c-----------------------------------------------------------------------
c
c     Initialize: compute the distance to the nearest wall for
c     each vertex in the mesh.
c
c     Michael Yaworski (fall 1998)
c
c-----------------------------------------------------------------------
      subroutine initTurb( x )

      use     pointer_data
      use     turbSA
      include "common.h"
      include "mpif.h"

      character*20 fname1,  fmt1
      real*8   x(numnp,nsd)
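c     nwall(j) will hold the number of wall faces found on rank j;
c     idisp holds the offsets used to stack the per-rank lists in the
c     MPI_ALLGATHERV call below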
      integer  nwall(numpe),      idisp(numpe)
      character*5  cname
      real*8, allocatable :: xwi(:,:,:), xw(:,:,:)

!MR CHANGE
      integer :: ierr
      integer :: numparts, nfile, itmp2, id2wall, itwo, ifoundd2wall
      integer iarray(10)
      character*255 :: fnamer, fname2, temp2
      character*64 :: temp1
!MR CHANGE END

      allocate ( d2wall(numnp) )

!MR CHANGE
      if(myrank.eq.master) then
        write (*,*) 'entering initTurb'
      endif
      call MPI_BARRIER(MPI_COMM_WORLD,ierr)

      ! First we try to read the d2wall field from either the restart files or the d2wall files
      call read_d2wall(myrank,numnp,d2wall,ifoundd2wall)
!MR CHANGE END

      if(ifoundd2wall.eq.0) then   ! d2wall was not found, so compute the distance
        if(myrank.eq.master) then
          write (*,*) 'Computing the d2wall field'
        endif
c
c   hard code trip point until we are sure it is worth doing
c

c
c.... Count the welts (wall elements: bdry. faces flagged as walls)
c
         nwalli=0
         do iblk = 1, nelblb    ! loop over boundary elt blocks
            npro = lcblkb(1,iblk+1) - lcblkb(1,iblk)
            do j = 1, npro
               if(btest(miBCB(iblk)%p(j,1),4)) nwalli=nwalli+1
            enddo
         enddo
c
c.... Create the wall-node coordinate list for welts on this processor
c
         if (nwalli.eq.0) nwalli=1 !  patch for mpi's lack of imagination
         allocate (xwi(nwalli,nenb+1,nsd))
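c xwi(welt,node,1:nsd) holds the vertex coordinates of each wall face
c on this rank; node slot nenb+1 receives the face centroid, and
c unused node slots keep the huge initial value so they never win the
c minimum-distance search below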
         xwi = 1.0d32
         xwi(:,nenb+1,:)=zero
         nwalli = 0
         do iblk = 1, nelblb    ! loop over boundary elt blocks
c
            iel    = lcblkb(1,iblk)
            nenbl  = lcblkb(6,iblk) ! no. of vertices per bdry. face
            npro   = lcblkb(1,iblk+1) - iel
c
            do j = 1, npro      ! loop over belts in this blk
               if(btest(miBCB(iblk)%p(j,1),4)) then
                  nwalli=nwalli+1
c assemble local coord list
                  do node = 1, nenbl
                     xwi(nwalli,node,1:3)=x(mienb(iblk)%p(j,node),:)
                  enddo
c put the centroid coordinates in the last slot
                  do node = 1, nenbl
                  xwi(nwalli,nenb+1,:)=xwi(nwalli,nenb+1,:)
     &                    +xwi(nwalli,node,:)
                  enddo
                  xwi(nwalli,nenb+1,:)=xwi(nwalli,nenb+1,:)/nenbl
c
               endif
            enddo               ! loop over belts in this blk
c
         enddo                  ! loop over boundary elt blocks
c
         if (nwalli.eq.0) xwi=1.0e32 ! fix for mpi's lack of imagination
         if (nwalli.eq.0) nwalli=1 !  patch for mpi's lack of imagination
c
c  Pool "number of welts" info from all processors
c
cMPI_ALLGATHER(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm)
c[ IN sendbuf] starting address of send buffer (choice)
c[ IN sendcount] number of elements in send buffer (integer)
c[ IN sendtype] data type of send buffer elements (handle)
c[ OUT recvbuf] address of receive buffer (choice)
c[ IN recvcount] number of elements received from any process (integer)
c[ IN recvtype] data type of receive buffer elements (handle)
c[ IN comm] communicator (handle)
         if (numpe.gt.1) then   ! multiple processors
c write the number of wall elts on the jth processor to slot j of nwall
            call MPI_ALLGATHER(nwalli,1,MPI_INTEGER,nwall,1,
     &          MPI_INTEGER,MPI_COMM_WORLD,ierr)
c count up the total number of wall elts among all processes
            nwallt=0
            do j=1,numpe
               nwallt=nwallt+nwall(j)
            enddo
         else                   ! single processor
c the local information is the global information on one processor
            nwall=nwalli
            nwallt=nwalli
         endif                  ! if-else for multiple processors
c
c  Make all-processor wallnode-coord collage
c
         allocate (xw(nwallt,nenb+1,nsd))
         if (numpe.gt.1) then   ! multiple processors
c we will gather coordinates from local on-proc sets to a global set
c we will stack each processor's coordinate list atop that of the
c previous processor.  If the coordinate list for processor i is
c called xwi, then our global coordinate list xw will look like this:
c ---------------------------------------------------------------
c | xw1            | xw2                | xw3        |   ...    |
c ---------------------------------------------------------------
c  <---nwall(1)---> <-----nwall(2)-----> <-nwall(3)->
c  <------------------------nwallt-----------------------...---->
c To accomplish this with MPI, we use MPI_ALLGATHERV, summarized as:
cMPI_ALLGATHERV(sendbuf,sendcount,sendtype,recvbuf,recvcount,disp,recvtype,comm)
c[ IN sendbuf] starting address of send buffer (choice)
c[ IN sendcount] number of elements in send buffer (integer)
c[ IN sendtype] data type of send buffer elements (handle)
c[ OUT recvbuf] address of receive buffer (choice)
c[ IN recvcount] number of elements received from any process (integer)
c[ IN disp] displacement array
c[ IN recvtype] data type of receive buffer elements (handle)
c[ IN comm] communicator (handle)
c The displacement array disp is an array of integers in which the jth
c entry indicates which slot of xw marks the beginning of xwj
c So, first we will build this displacement array
            idisp(:)=0 ! displacements are zero-based (C-style indexing)
            do j=2,numpe
               idisp(j)=idisp(j-1)+nwall(j-1) ! see diagram above
            enddo
c Now, we gather the data one slice at a time (1:nwalli)
            do j=1,nenb+1
               do k=1,nsd
                  call MPI_ALLGATHERV(xwi(:,j,k),nwalli,
     &                 MPI_DOUBLE_PRECISION,xw(:,j,k),nwall,idisp,
     &                 MPI_DOUBLE_PRECISION,MPI_COMM_WORLD,ierr)
               enddo
            enddo
         else                   ! single-processor
c global data is local data in the single-processor case
            xw=xwi
         endif

c
c  For each mesh point, loop over all wall nodes and face centroids,
c  compute the squared distance, and keep the minimum in d2wall;
c  the square root is taken once after the search
c
         d2wall=1.0e32
         do i=1,numnp
            do j=1, nwallt
               do k=1,nenb+1
                  distance =  ( x(i,1) - xw(j,k,1) )**2
     &                 +( x(i,2) - xw(j,k,2) )**2
     &                 +( x(i,3) - xw(j,k,3) )**2
                  if ( d2wall(i).gt.distance ) d2wall(i) = distance
               enddo
            enddo
         enddo
         d2wall=sqrt(d2wall)
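c at this point d2wall holds, for every vertex, the distance to the
c nearest wall-face vertex or centroid (a discrete approximation of
c the true wall distance)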
c
         deallocate(xwi)
         deallocate(xw)
c
c.... write d2wall to a file so we don't have to do this again
c

!MR CHANGE #Fix this with SyncIO!!!!!
!         write (fmt1,"('(''d2wall.'',i',i1,',1x)')") 1
!         write (fname1,fmt1) 0
!         fname1 = trim(fname1) // cname(myrank+1)
!         if(myrank.eq.master) write (*,*) 'Writing dist file : ', fname1
!         open (unit=72, file=fname1, status='unknown',
!     &                                    form='unformatted', err=996)
!
!         write (72) d2wall
!         close (72)

!MR CHANGE END

c         write(*,*) "make sure to: echo 0 > distcalc.dat"
c         call MPI_BARRIER (MPI_COMM_WORLD,ierr)
c         call error ('distcalc','complete',0)
      endif

!MR CHANGE
      call MPI_BARRIER(MPI_COMM_WORLD,ierr)
      if(myrank.eq.master) then
        write (*,*) 'leaving initTurb'
      endif
!MR CHANGE END

      return
995     call error ('d2wall  ','opening ', 72)
996     call error ('d2wall  ','opening ', 72)

      end subroutine

