1 subroutine itrdrv (y, ac, 2 & uold, x, 3 & iBC, BC, 4 & iper, ilwork, shp, 5 & shgl, shpb, shglb, 6 & ifath, velbar, nsons ) 7c 8c---------------------------------------------------------------------- 9c 10c This iterative driver is the semi-discrete, predictor multi-corrector 11c algorithm. It contains the Hulbert Generalized Alpha method which 12c is 2nd order accurate for Rho_inf from 0 to 1. The method can be 13c made first-order accurate by setting Rho_inf=-1. It uses CGP and 14c GMRES iterative solvers. 15c 16c working arrays: 17c y (nshg,ndof) : Y variables 18c x (nshg,nsd) : node coordinates 19c iBC (nshg) : BC codes 20c BC (nshg,ndofBC) : BC constraint parameters 21c iper (nshg) : periodicity table 22c 23c 24c Zdenek Johan, Winter 1991. (Fortran 90) 25c Alberto Figueroa, Winter 2004. CMM-FSI 26c Irene Vignon, Fall 2004. Impedance BC 27c---------------------------------------------------------------------- 28c 29 use pvsQbi !gives us splag (the spmass at the end of this run 30 use specialBC !gives us itvn 31 use timedata !allows collection of time series 32 use convolImpFlow !for Imp bc 33 use convolRCRFlow !for RCR bc 34 use turbsa ! 
used to access d2wall 35 use wallData 36 use fncorpmod 37 use solvedata 38 use iso_c_binding 39 40c use readarrays !reads in uold and acold 41 42 include "common.h" 43 include "mpif.h" 44 include "auxmpi.h" 45#ifdef HAVE_SVLS 46 include "svLS.h" 47#endif 48#if !defined(HAVE_SVLS) && !defined(HAVE_LESLIB) 49#error "You must enable a linear solver during cmake setup" 50#endif 51 52c 53 54 55 real*8 y(nshg,ndof), ac(nshg,ndof), 56 & yold(nshg,ndof), acold(nshg,ndof), 57 & u(nshg,nsd), uold(nshg,nsd), 58 & x(numnp,nsd), solinc(nshg,ndof), 59 & BC(nshg,ndofBC), tf(nshg,ndof), 60 & GradV(nshg,nsdsq) 61 62c 63 real*8 res(nshg,ndof) 64c 65 real*8 shp(MAXTOP,maxsh,MAXQPT), 66 & shgl(MAXTOP,nsd,maxsh,MAXQPT), 67 & shpb(MAXTOP,maxsh,MAXQPT), 68 & shglb(MAXTOP,nsd,maxsh,MAXQPT) 69c 70 integer rowp(nshg,nnz), colm(nshg+1), 71 & iBC(nshg), 72 & ilwork(nlwork), 73 & iper(nshg), ifuncs(6) 74 75 real*8 vbc_prof(nshg,3) 76 77 integer stopjob 78 character*10 cname2 79 character*5 cname 80c 81c stuff for dynamic model s.w.avg and wall model 82c 83 dimension ifath(numnp), velbar(nfath,ndof), nsons(nfath) 84 85 dimension wallubar(2),walltot(2) 86c 87 real*8 almit, alfit, gamit 88c 89 character*20 fname1,fmt1 90 character*20 fname2,fmt2 91 character*60 fnamepold, fvarts 92 character*4 fname4c ! 4 characters 93 integer iarray(50) ! integers for headers 94 integer isgn(ndof), isgng(ndof) 95 96 real*8, allocatable, dimension(:,:) :: rerr 97 real*8, allocatable, dimension(:,:) :: ybar, strain, vorticity 98 real*8, allocatable, dimension(:,:) :: wallssVec, wallssVecbar 99 100 real*8 tcorecp(2), tcorecpscal(2) 101 102 real*8, allocatable, dimension(:,:,:) :: yphbar 103 real*8 CFLworst(numel) 104 105 integer :: iv_rankpernode, iv_totnodes, iv_totcores 106 integer :: iv_node, iv_core, iv_thread 107!-------------------------------------------------------------------- 108! 
Setting up svLS 109#ifdef HAVE_SVLS 110 INTEGER svLS_nFaces, gnNo, nNo, faIn, facenNo 111 INTEGER, ALLOCATABLE :: gNodes(:) 112 REAL*8, ALLOCATABLE :: sV(:,:) 113 114 TYPE(svLS_lhsType) svLS_lhs 115 TYPE(svLS_lsType) svLS_ls 116! repeat for scalar solves (up to 4 at this time which is consistent with rest of PHASTA) 117 TYPE(svLS_lhsType) svLS_lhs_S(4) 118 TYPE(svLS_lsType) svLS_ls_S(4) 119#endif 120 call initmpistat() ! see bottom of code to see just how easy it is 121 122 call initmemstat() 123 124!-------------------------------------------------------------------- 125! Setting up svLS Moved down for better org 126 127c 128c only master should be verbose 129c 130 131 if(numpe.gt.0 .and. myrank.ne.master)iverbose=0 132c 133 134 lskeep=lstep 135 136 call initTimeSeries() 137c 138c.... open history and aerodynamic forces files 139c 140 if (myrank .eq. master) then 141 open (unit=ihist, file=fhist, status='unknown') 142 open (unit=iforce, file=fforce, status='unknown') 143 open (unit=76, file="fort.76", status='unknown') 144 if(numImpSrfs.gt.0 .or. numRCRSrfs.gt.0) then 145 fnamepold = 'pold' 146 fnamepold = trim(fnamepold)//trim(cname2(lstep)) 147 fnamepold = trim(fnamepold)//'.dat' 148 fnamepold = trim(fnamepold) 149 open (unit=8177, file=fnamepold, status='unknown') 150 endif 151 endif 152c 153c.... initialize 154c 155 ifuncs(:) = 0 ! func. evaluation counter 156 istep = 0 157 yold = y 158 acold = ac 159 160!!!!!!!!!!!!!!!!!!! 161!Init output fields 162!!!!!!!!!!!!!!!!!! 163 numerr=10+isurf 164 allocate(rerr(nshg,numerr)) 165 rerr = zero 166 167 if(ierrcalc.eq.1 .or. ioybar.eq.1) then ! we need ybar for error too 168 if (ivort == 1) then 169 irank2ybar=17 170 allocate(ybar(nshg,irank2ybar)) ! more space for vorticity if requested 171 else 172 irank2ybar=13 173 allocate(ybar(nshg,irank2ybar)) 174 endif 175 ybar = zero ! 
Initialize ybar to zero, which is essential 176 endif 177 178 if(ivort == 1) then 179 allocate(strain(nshg,6)) 180 allocate(vorticity(nshg,5)) 181 endif 182 183 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 184 allocate(wallssVec(nshg,3)) 185 if (ioybar .eq. 1) then 186 allocate(wallssVecbar(nshg,3)) 187 wallssVecbar = zero ! Initialization important if mean wss computed 188 endif 189 endif 190 191! both nstepsincycle and nphasesincycle needs to be set 192 if(nstepsincycle.eq.0) nphasesincycle = 0 193 if(nphasesincycle.ne.0) then 194! & allocate(yphbar(nshg,5,nphasesincycle)) 195 if (ivort == 1) then 196 irank2yphbar=15 197 allocate(yphbar(nshg,irank2yphbar,nphasesincycle)) ! more space for vorticity 198 else 199 irank2yphbar=11 200 allocate(yphbar(nshg,irank2yphbar,nphasesincycle)) 201 endif 202 yphbar = zero 203 endif 204 205!!!!!!!!!!!!!!!!!!! 206!END Init output fields 207!!!!!!!!!!!!!!!!!! 208 209 vbc_prof(:,1:3) = BC(:,3:5) 210 if(iramp.eq.1) then 211 call BCprofileInit(vbc_prof,x) 212 endif 213 214c 215c.... ---------------> initialize Equation Solver <--------------- 216c 217 call initEQS(iBC, rowp, colm) 218c 219c... prepare lumped mass if needed 220c 221 if((flmpr.ne.0).or.(flmpl.ne.0)) call genlmass(x, shp,shgl) 222c 223c.... -----------------> End of initialization <----------------- 224c 225c.....open the necessary files to gather time series 226c 227 lstep0 = lstep+1 228 nsteprcr = nstep(1)+lstep 229c 230c.... loop through the time sequences 231c 232 233 234 do 3000 itsq = 1, ntseq 235 itseq = itsq 236 237c 238c.... 
set up the time integration parameters 239c 240 nstp = nstep(itseq) 241 nitr = niter(itseq) 242 LCtime = loctim(itseq) 243 dtol(:)= deltol(itseq,:) 244 245 call itrSetup ( y, acold ) 246 247c 248c...initialize the coefficients for the impedance convolution, 249c which are functions of alphaf so need to do it after itrSetup 250 if(numImpSrfs.gt.zero) then 251 call calcImpConvCoef (numImpSrfs, ntimeptpT) 252 endif 253c 254c...initialize the initial condition P(0)-RQ(0)-Pd(0) for RCR BC 255c need ndsurf so should be after initNABI 256 if(numRCRSrfs.gt.zero) then 257 call calcRCRic(y,nsrflistRCR,numRCRSrfs) 258 endif 259c 260c find the last solve of the flow in the step sequence so that we will 261c know when we are at/near end of step 262c 263c ilast=0 264 nitr=0 ! count number of flow solves in a step (# of iterations) 265 do i=1,seqsize 266 if(stepseq(i).eq.0) nitr=nitr+1 267 enddo 268 269 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 270 tcorecp(:) = zero ! used in solfar.f (solflow) 271 tcorecpscal(:) = zero ! used in solfar.f (solflow) 272 if(myrank.eq.0) then 273 tcorecp1 = TMRC() 274 endif 275 276c 277c.... loop through the time steps 278c 279 istop=0 280 rmub=datmat(1,2,1) 281 if(rmutarget.gt.0) then 282 rmue=rmutarget 283 else 284 rmue=datmat(1,2,1) ! keep constant 285 endif 286 287 if(iramp.eq.1) then 288 call BCprofileScale(vbc_prof,BC,yold) ! fix the yold values to the reset BC 289 isclr=1 ! fix scalar 290 do isclr=1,nsclr 291 call itrBCSclr(yold,ac,iBC,BC,iper,ilwork) 292 enddo 293 endif 294 295 do 2000 istp = 1, nstp 296 if(iramp.eq.1) 297 & call BCprofileScale(vbc_prof,BC,yold) 298 299 call rerun_check(stopjob) 300 if(myrank.eq.master) write(*,*) 301 & 'stopjob,lstep,istep', stopjob,lstep,istep 302 if(stopjob.eq.lstep) then 303 stopjob=-2 ! this is the code to finish 304 if ((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) then 305 if(myrank.eq.master) write(*,*) 306 & 'line 473 says last step written so exit' 307 goto 2002 ! 
the step was written last step so just exit 308 else 309 if(myrank.eq.master) 310 & write(*,*) 'line 473 says last step not written' 311 istep=nstp ! have to do this so that solution will be written 312 goto 2001 313 endif 314 endif 315 316c.... if we have time varying boundary conditions update the values of BC. 317c these will be for time step n+1 so use lstep+1 318c 319 if(itvn.gt.0) call BCint((lstep+1)*Delt(1), shp, shgl, 320 & shpb, shglb, x, BC, iBC) 321 322c 323c ... calculate the pressure contribution that depends on the history for the Imp. BC 324c 325 if(numImpSrfs.gt.0) then 326 call pHist(poldImp,QHistImp,ImpConvCoef, 327 & ntimeptpT,numImpSrfs) 328 endif 329c 330c ... calc the pressure contribution that depends on the history for the RCR BC 331c 332 if(numRCRSrfs.gt.0) then 333 call CalcHopRCR (Delt(itseq), lstep, numRCRSrfs) 334 call CalcRCRConvCoef(lstep,numRCRSrfs) 335 call pHist(poldRCR,QHistRCR,RCRConvCoef,nsteprcr, 336 & numRCRSrfs) 337 endif 338 339 if(iLES.gt.0) then !complicated stuff has moved to 340 !routine below 341 call lesmodels(yold, acold, shgl, shp, 342 & iper, ilwork, rowp, colm, 343 & nsons, ifath, x, 344 & iBC, BC) 345 346 347 endif 348 349c.... set traction BCs for modeled walls 350c 351 if (itwmod.ne.0) then 352 call asbwmod(yold, acold, x, BC, iBC, 353 & iper, ilwork, ifath, velbar) 354 endif 355 356c 357c.... Determine whether the vorticity field needs to be computed for this time step or not 358c 359 call seticomputevort 360c 361c.... -----------------------> predictor phase <----------------------- 362c 363 call itrPredict(yold, y, acold, ac , uold, u, iBC) 364 call itrBC (y, ac, iBC, BC, iper,ilwork) 365 366 if(nsolt.eq.1) then 367 isclr=0 368 call itrBCSclr (y, ac, iBC, BC, iper, ilwork) 369 endif 370 do isclr=1,nsclr 371 call itrBCSclr (y, ac, iBC, BC, iper, ilwork) 372 enddo 373 iter=0 374 ilss=0 ! 
this is a switch thrown on first solve of LS redistance 375 do istepc=1,seqsize 376 icode=stepseq(istepc) 377 if(mod(icode,5).eq.0) then ! this is a solve 378 isolve=icode/10 379 if(icode.eq.0) then ! flow solve (encoded as 0) 380c 381 iter = iter+1 382 ifuncs(1) = ifuncs(1) + 1 383c 384 Force(1) = zero 385 Force(2) = zero 386 Force(3) = zero 387 HFlux = zero 388 lhs = 1 - min(1,mod(ifuncs(1)-1,LHSupd(1))) 389 390 call SolFlow(y, ac, u, 391 & yold, acold, uold, 392 & x, iBC, 393 & BC, res, 394 & nPermDims, nTmpDims, aperm, 395 & atemp, iper, 396 & ilwork, shp, shgl, 397 & shpb, shglb, rowp, 398 & colm, lhsK, lhsP, 399 & solinc, rerr, tcorecp, 400 & GradV, sumtime 401#ifdef HAVE_SVLS 402 & ,svLS_lhs, svLS_ls, svLS_nFaces) 403#else 404 & ) 405#endif 406 407 else ! scalar type solve 408 if (icode.eq.5) then ! Solve for Temperature 409 ! (encoded as (nsclr+1)*10) 410 isclr=0 411 ifuncs(2) = ifuncs(2) + 1 412 j=1 413 else ! solve a scalar (encoded at isclr*10) 414 isclr=isolve 415 ifuncs(isclr+2) = ifuncs(isclr+2) + 1 416 j=isclr+nsolt 417 if((iLSet.eq.2).and.(ilss.eq.0) 418 & .and.(isclr.eq.2)) then 419 ilss=1 ! throw switch (once per step) 420 y(:,7)=y(:,6) ! redistance field initialized 421 ac(:,7) = zero 422 call itrBCSclr ( y, ac, iBC, BC, iper, 423 & ilwork) 424c 425c....store the flow alpha, gamma parameter values and assigm them the 426c....Backward Euler parameters to solve the second levelset scalar 427c 428 alfit=alfi 429 gamit=gami 430 almit=almi 431 Deltt=Delt(1) 432 Dtglt=Dtgl 433 alfi = 1 434 gami = 1 435 almi = 1 436c Delt(1)= Deltt ! Give a pseudo time step 437 Dtgl = one / Delt(1) 438 endif ! level set eq. 2 439 endif ! 
deciding between temperature and scalar 440 441 lhs = 1 - min(1,mod(ifuncs(isclr+2)-1, 442 & LHSupd(isclr+2))) 443 444 call SolSclr(y, ac, u, 445 & yold, acold, uold, 446 & x, iBC, 447 & BC, nPermDimsS,nTmpDimsS, 448 & apermS(1,1,j), atempS, iper, 449 & ilwork, shp, shgl, 450 & shpb, shglb, rowp, 451 & colm, lhsS(1,j), 452 & solinc(1,isclr+5), tcorecpscal 453#ifdef HAVE_SVLS 454 & ,svLS_lhs_S(isclr), svLS_ls_S(isclr), svls_nfaces) 455#else 456 & ) 457#endif 458 459 460 endif ! end of scalar type solve 461 462 else ! this is an update (mod did not equal zero) 463 iupdate=icode/10 ! what to update 464 if(icode.eq.1) then !update flow 465 call itrCorrect ( y, ac, u, solinc, iBC) 466 call itrBC (y, ac, iBC, BC, iper, ilwork) 467 else ! update scalar 468 isclr=iupdate !unless 469 if(icode.eq.6) isclr=0 470 if(iRANS.lt.-100)then ! RANS 471 call itrCorrectSclrPos(y,ac,solinc(1,isclr+5)) 472 else 473 call itrCorrectSclr (y, ac, solinc(1,isclr+5)) 474 endif 475 if (ilset.eq.2 .and. isclr.eq.2) then 476 if (ivconstraint .eq. 1) then 477 call itrBCSclr ( y, ac, iBC, BC, iper, 478 & ilwork) 479c 480c ... applying the volume constraint on second level set scalar 481c 482 call solvecon (y, x, iBC, BC, 483 & iper, ilwork, shp, shgl) 484c 485 endif ! end of volume constraint calculations 486 endif ! end of redistance calculations 487c 488 call itrBCSclr ( y, ac, iBC, BC, iper, 489 & ilwork) 490 endif ! end of flow or scalar update 491 endif ! end of switch between solve or update 492 enddo ! loop over sequence in step 493c 494c 495c.... obtain the time average statistics 496c 497 if (ioform .eq. 
2) then 498 499 call stsGetStats( y, yold, ac, acold, 500 & u, uold, x, 501 & shp, shgl, shpb, shglb, 502 & iBC, BC, iper, ilwork, 503 & rowp, colm, lhsK, lhsP ) 504 endif 505 506c 507c Find the solution at the end of the timestep and move it to old 508c 509c 510c ...First to reassign the parameters for the original time integrator scheme 511c 512 if((iLSet.eq.2).and.(ilss.eq.1)) then 513 alfi =alfit 514 gami =gamit 515 almi =almit 516 Delt(1)=Deltt 517 Dtgl =Dtglt 518 endif 519 call itrUpdate( yold, acold, uold, y, ac, u) 520 call itrBC (yold, acold, iBC, BC, iper,ilwork) 521 522 istep = istep + 1 523 lstep = lstep + 1 524c 525c .. Print memory consumption on BGQ 526c 527 call printmeminfo("itrdrv"//char(0)) 528 529c 530c .. Compute vorticity 531c 532 if ( icomputevort == 1) 533 & call computeVort( vorticity, GradV,strain) 534c 535c.... update and the aerodynamic forces 536c 537 call forces ( yold, ilwork ) 538 539c 540c .. write out the instantaneous solution 541c 5422001 continue ! we could get here by 2001 label if user requested stop 543 if (((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) .or. 544 & istep.eq.nstep(itseq)) then 545 546!so that we can see progress in force file close it so that it flushes 547!and then reopen in append mode 548 549 close(iforce) 550 open (unit=iforce, file=fforce, position='append') 551 552! Call to restar() will open restart file in write mode (and not append mode) 553! that is needed as other fields are written in append mode 554 555 call restar ('out ', yold ,ac) 556 557 if(ivort == 1) then 558 call write_field(myrank,'a','vorticity',9,vorticity, 559 & 'd',nshg,5,lstep) 560 endif 561 562 call printmeminfo("itrdrv after checkpoint"//char(0)) 563 else if(stopjob.eq.-2) then 564 if(myrank.eq.master) then 565 write(*,*) 'line 755 says no write before stopping' 566 write(*,*) 'istep,nstep,irs',istep,nstep(itseq),irs 567 endif 568 endif !just the instantaneous stuff for videos 569c 570c.... 
compute the consistent boundary flux 571c 572 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 573 call Bflux ( yold, acold, uold, x, 574 & shp, shgl, shpb, 575 & shglb, ilwork, iBC, 576 & BC, iper, wallssVec) 577 endif 578 579 if(stopjob.eq.-2) goto 2003 580 581 582c 583c ... update the flow history for the impedance convolution, filter it and write it out 584c 585 if(numImpSrfs.gt.zero) then 586 call UpdHistConv(y,nsrflistImp,numImpSrfs) !uses Delt(1) 587 endif 588 589c 590c ... update the flow history for the RCR convolution 591c 592 if(numRCRSrfs.gt.zero) then 593 call UpdHistConv(y,nsrflistRCR,numRCRSrfs) !uses lstep 594 endif 595 596 597c... dump TIME SERIES 598 599 if (exts) then 600 ! Note: freq is only defined if exts is true, 601 ! i.e. if xyzts.dat is present in the #-procs_case 602 if ( mod(lstep-1,freq).eq.0) call dumpTimeSeries() 603 endif 604 605 if((irscale.ge.0).or.(itwmod.gt.0)) 606 & call getvel (yold, ilwork, iBC, 607 & nsons, ifath, velbar) 608 609 if((irscale.ge.0).and.(myrank.eq.master)) then 610 call genscale(yold, x, iper, 611 & iBC, ifath, velbar, 612 & nsons) 613 endif 614c 615c.... print out results. 616c 617 ntoutv=max(ntout,100) ! velb is not needed so often 618 if ((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) then 619 if( (mod(lstep, ntoutv) .eq. 0) .and. 620 & ((irscale.ge.0).or.(itwmod.gt.0) .or. 621 & ((nsonmax.eq.1).and.(iLES.gt.0)))) 622 & call rwvelb ('out ', velbar ,ifail) 623 endif 624c 625c.... end of the NSTEP and NTSEQ loops 626c 627 628 629c 630c.... -------------------> error calculation <----------------- 631c 632 if(ierrcalc.eq.1 .or. ioybar.eq.1) 633 & call collectErrorYbar(ybar,yold,wallssVec,wallssVecBar, 634 & vorticity,yphbar,rerr,irank2ybar,irank2yphbar) 635 2003 continue ! we get here if stopjob equals lstep and this jumped over 636! the statistics computation because we have no new data to average in 637! rather we are just trying to output the last state that was not already 638! written 639c 640c.... 
----------------------> Complete Restart Processing <---------------------- 641c 642! for now it is the same frequency but need to change this 643! soon.... but don't forget to change the field counter in 644! new_interface.cc 645! 646 if (((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) .or. 647 & istep.eq.nstep(itseq)) then 648 649 lesId = numeqns(1) 650 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 651 if(myrank.eq.0) then 652 tcormr1 = TMRC() 653 endif 654 if((nsolflow.eq.1).and.(ipresPrjFlag.eq.1)) then 655#ifdef HAVE_LESLIB 656 call saveLesRestart( lesId, aperm , nshg, myrank, lstep, 657 & nPermDims ) 658 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 659 if(myrank.eq.0) then 660 tcormr2 = TMRC() 661 write(6,*) 'call saveLesRestart for projection and'// 662 & 'pressure projection vectors', tcormr2-tcormr1 663 endif 664#endif 665 endif 666 667 if(ierrcalc.eq.1) then 668c 669c.....smooth the error indicators 670c 671 do i=1,ierrsmooth 672 call errsmooth( rerr, x, iper, ilwork, shp, shgl, iBC ) 673 end do 674 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 675 if(myrank.eq.0) then 676 tcormr1 = TMRC() 677 endif 678 call write_error(myrank, lstep, nshg, 10, rerr ) 679 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 680 if(myrank.eq.0) then 681 tcormr2 = TMRC() 682 write(6,*) 'Time to write the error fields to the disks', 683 & tcormr2-tcormr1 684 endif 685 endif ! ierrcalc 686 687 if(ioybar.eq.1) then 688 if(ivort == 1) then 689 call write_field(myrank,'a','ybar',4, 690 & ybar,'d',nshg,17,lstep) 691 else 692 call write_field(myrank,'a','ybar',4, 693 & ybar,'d',nshg,13,lstep) 694 endif 695 696 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 697 call write_field(myrank,'a','wssbar',6, 698 & wallssVecBar,'d',nshg,3,lstep) 699 endif 700 701 if(nphasesincycle .gt. 
0) then 702 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 703 if(myrank.eq.0) then 704 tcormr1 = TMRC() 705 endif 706 do iphase=1,nphasesincycle 707 if(ivort == 1) then 708 call write_phavg2(myrank,'a','phase_average',13,iphase, 709 & nphasesincycle,yphbar(:,:,iphase),'d',nshg,15,lstep) 710 else 711 call write_phavg2(myrank,'a','phase_average',13,iphase, 712 & nphasesincycle,yphbar(:,:,iphase),'d',nshg,11,lstep) 713 endif 714 end do 715 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 716 if(myrank.eq.0) then 717 tcormr2 = TMRC() 718 write(6,*) 'write all phase avg to the disks = ', 719 & tcormr2-tcormr1 720 endif 721 endif !nphasesincyle 722 endif !ioybar 723 724 if(iRANS.lt.0) then 725 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 726 if(myrank.eq.0) then 727 tcormr1 = TMRC() 728 endif 729 call write_field(myrank,'a','dwal',4,d2wall,'d', 730 & nshg,1,lstep) 731 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 732 if(myrank.eq.0) then 733 tcormr2 = TMRC() 734 write(6,*) 'Time to write dwal to the disks = ', 735 & tcormr2-tcormr1 736 endif 737 endif !iRANS 738 739 endif ! write out complete restart state 740 !next 2 lines are two ways to end early 741 if(stopjob.eq.-2) goto 2002 742 if(istop.eq.1000) goto 2002 ! stop when delta small (see rstatic) 743 2000 continue 744 2002 continue 745 746! done with time stepping so deallocate fields already written 747! 748 749 if(ioybar.eq.1) then 750 deallocate(ybar) 751 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 752 deallocate(wallssVecbar) 753 endif 754 if(nphasesincycle .gt. 0) then 755 deallocate(yphbar) 756 endif !nphasesincyle 757 endif !ioybar 758 if(ivort == 1) then 759 deallocate(strain,vorticity) 760 endif 761 if(abs(itwmod).ne.1 .and. 
iowflux.eq.1) then 762 deallocate(wallssVec) 763 endif 764 if(iRANS.lt.0) then 765 deallocate(d2wall) 766 endif 767 768 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 769 if(myrank.eq.0) then 770 tcorecp2 = TMRC() 771 write(6,*) 'T(core) cpu = ',tcorecp2-tcorecp1 772 write(6,*) '(Elm. form.',tcorecp(1), 773 & ',Lin. alg. sol.',tcorecp(2),')' 774 write(6,*) '(Elm. form. Scal.',tcorecpscal(1), 775 & ',Lin. alg. sol. Scal.',tcorecpscal(2),')' 776 write(6,*) '' 777 778 endif 779 780 call print_system_stats(tcorecp, tcorecpscal) 781 call print_mesh_stats() 782 call print_mpi_stats() 783 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 784! return 785c call MPI_Finalize() 786c call MPI_ABORT(MPI_COMM_WORLD, ierr) 787 788 call destroyWallData 789 call destroyfncorp 790 791 3000 continue 792 793 794c 795c.... close history and aerodynamic forces files 796c 797 if (myrank .eq. master) then 798! close (ihist) 799 close (iforce) 800 close(76) 801 if(numImpSrfs.gt.0 .or. numRCRSrfs.gt.0) then 802 close (8177) 803 endif 804 endif 805c 806c.... close varts file for probes 807c 808 call finalizeTimeSeries() 809 810 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 811 if(myrank.eq.0) then 812 write(*,*) 'itrdrv - done with aerodynamic forces' 813 endif 814 815 do isrf = 0,MAXSURF 816 if ( nsrflist(isrf).ne.0 .and. 817 & myrank.eq.irankfilesforce(isrf)) then 818 iunit=60+isrf 819 close(iunit) 820 endif 821 enddo 822 823 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 824 if(myrank.eq.0) then 825 write(*,*) 'itrdrv - done with MAXSURF' 826 endif 827 828 829 5 format(1X,F15.10,3X,F15.10,3X,F15.10,3X,F15.10) 830 444 format(6(2x,e14.7)) 831c 832c.... end 833c 834 if(nsolflow.eq.1) then 835 deallocate (lhsK) 836 deallocate (lhsP) 837 IF (svLSFlag .NE. 
1) THEN 838 deallocate (aperm) 839 deallocate (atemp) 840 ENDIF 841 endif 842 if((nsclr+nsolt).gt.0) then 843 deallocate (lhsS) 844 deallocate (apermS) 845 deallocate (atempS) 846 endif 847 848 if(iabc==1) deallocate(acs) 849 850 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 851 if(myrank.eq.0) then 852 write(*,*) 'itrdrv - done - BACK TO process.f' 853 endif 854 855 return 856 end 857 858 subroutine lesmodels(y, ac, shgl, shp, 859 & iper, ilwork, rowp, colm, 860 & nsons, ifath, x, 861 & iBC, BC) 862 863 include "common.h" 864 865 real*8 y(nshg,ndof), ac(nshg,ndof), 866 & x(numnp,nsd), 867 & BC(nshg,ndofBC) 868 real*8 shp(MAXTOP,maxsh,MAXQPT), 869 & shgl(MAXTOP,nsd,maxsh,MAXQPT) 870 871c 872 integer rowp(nshg,nnz), colm(nshg+1), 873 & iBC(nshg), 874 & ilwork(nlwork), 875 & iper(nshg) 876 dimension ifath(numnp), nsons(nfath) 877 878 real*8, allocatable, dimension(:) :: fwr2,fwr3,fwr4 879 real*8, allocatable, dimension(:) :: stabdis,cdelsq1 880 real*8, allocatable, dimension(:,:) :: xavegt, xavegt2,xavegt3 881 882 if( (iLES.gt.1) ) then ! Allocate Stuff for advanced LES models 883 allocate (fwr2(nshg)) 884 allocate (fwr3(nshg)) 885 allocate (fwr4(nshg)) 886 allocate (xavegt(nfath,12)) 887 allocate (xavegt2(nfath,12)) 888 allocate (xavegt3(nfath,12)) 889 allocate (stabdis(nfath)) 890 endif 891 892c.... get dynamic model coefficient 893c 894 ilesmod=iLES/10 895c 896c digit bit set filter rule, 10 bit set model 897c 898 if (ilesmod.eq.0) then ! 0 < iLES< 10 => dyn. model calculated 899 ! at nodes based on discrete filtering 900 901 902 if(isubmod.eq.2) then 903 call SUPGdis(y, ac, shgl, shp, 904 & iper, ilwork, 905 & nsons, ifath, x, 906 & iBC, BC, stabdis, xavegt3) 907 endif 908 909 if( ((isubmod.eq.0).or.(isubmod.eq.2)))then ! If no 910 ! sub-model 911 ! or SUPG 912 ! model wanted 913 914 if(i2filt.eq.0)then ! If simple filter 915 916 if(modlstats .eq. 0) then ! 
If no model stats wanted 917 call getdmc (y, shgl, shp, 918 & iper, ilwork, nsons, 919 & ifath, x) 920 else ! else get model stats 921 call stdfdmc (y, shgl, shp, 922 & iper, ilwork, nsons, 923 & ifath, x) 924 endif ! end of stats if statement 925 926 else ! else if twice filtering 927 928 call widefdmc(y, shgl, shp, 929 & iper, ilwork, nsons, 930 & ifath, x) 931 932 933 endif ! end of simple filter if statement 934 935 endif ! end of SUPG or no sub-model if statement 936 937 938 if( (isubmod.eq.1) ) then ! If DFWR sub-model wanted 939 call cdelBHsq (y, shgl, shp, 940 & iper, ilwork, nsons, 941 & ifath, x, cdelsq1) 942 call FiltRat (y, shgl, shp, 943 & iper, ilwork, nsons, 944 & ifath, x, cdelsq1, 945 & fwr4, fwr3) 946 947 948 if (i2filt.eq.0) then ! If simple filter wanted 949 call DFWRsfdmc(y, shgl, shp, 950 & iper, ilwork, nsons, 951 & ifath, x, fwr2, fwr3) 952 else ! else if twice filtering wanted 953 call DFWRwfdmc(y, shgl, shp, 954 & iper, ilwork, nsons, 955 & ifath, x, fwr4, fwr4) 956 endif ! end of simple filter if statement 957 958 endif ! end of DFWR sub-model if statement 959 960 if( (isubmod.eq.2) )then ! If SUPG sub-model wanted 961 call dmcSUPG (y, ac, shgl, 962 & shp, iper, ilwork, 963 & nsons, ifath, x, 964 & iBC, BC, rowp, colm, 965 & xavegt2, stabdis) 966 endif 967 968 if(idis.eq.1)then ! If SUPG/Model dissipation wanted 969 call ediss (y, ac, shgl, 970 & shp, iper, ilwork, 971 & nsons, ifath, x, 972 & iBC, BC, xavegt) 973 endif 974 975 endif ! end of ilesmod 976 977 if (ilesmod .eq. 1) then ! 10 < iLES < 20 => dynamic-mixed 978 ! at nodes based on discrete filtering 979 call bardmc (y, shgl, shp, 980 & iper, ilwork, 981 & nsons, ifath, x) 982 endif 983 984 if (ilesmod .eq. 2) then ! 20 < iLES < 30 => dynamic at quad 985 ! pts based on lumped projection filt. 
986 987 if(isubmod.eq.0)then 988 call projdmc (y, shgl, shp, 989 & iper, ilwork, x) 990 else 991 call cpjdmcnoi (y, shgl, shp, 992 & iper, ilwork, x, 993 & rowp, colm, 994 & iBC, BC) 995 endif 996 997 endif 998 999 if( (iLES.gt.1) ) then ! Deallocate Stuff for advanced LES models 1000 deallocate (fwr2) 1001 deallocate (fwr3) 1002 deallocate (fwr4) 1003 deallocate (xavegt) 1004 deallocate (xavegt2) 1005 deallocate (xavegt3) 1006 deallocate (stabdis) 1007 endif 1008 return 1009 end 1010 1011c 1012c...initialize the coefficients for the impedance convolution 1013c 1014 subroutine CalcImpConvCoef (numISrfs, numTpoints) 1015 1016 use convolImpFlow !uses flow history and impedance for convolution 1017 1018 include "common.h" !for alfi 1019 1020 integer numISrfs, numTpoints 1021 1022 allocate (ConvCoef(numTpoints+2,3)) !same time discret. for all imp. BC 1023 do j=1,numTpoints+2 1024 ConvCoef(j,:)=0.5/numTpoints !dt/2 divided by period T=N*dt 1025 ConvCoef(j,1)=ConvCoef(j,1)*(1.0-alfi)*(1.0-alfi) 1026 ConvCoef(j,2)=ConvCoef(j,2)*(1.0+2*alfi*(1.0-alfi)) 1027 ConvCoef(j,3)=ConvCoef(j,3)*alfi*alfi 1028 enddo 1029 ConvCoef(1,2)=zero 1030 ConvCoef(1,3)=zero 1031 ConvCoef(2,3)=zero 1032 ConvCoef(numTpoints+1,1)=zero 1033 ConvCoef(numTpoints+2,2)=zero 1034 ConvCoef(numTpoints+2,1)=zero 1035c 1036c...calculate the coefficients for the impedance convolution 1037c 1038 allocate (ImpConvCoef(numTpoints+2,numISrfs)) 1039 1040c..coefficients below assume Q linear in time step, Z constant 1041c do j=3,numTpoints 1042c ImpConvCoef(j,:) = ValueListImp(j-1,:)*ConvCoef(j,3) 1043c & + ValueListImp(j,:)*ConvCoef(j,2) 1044c & + ValueListImp(j+1,:)*ConvCoef(j,1) 1045c enddo 1046c ImpConvCoef(1,:) = ValueListImp(2,:)*ConvCoef(1,1) 1047c ImpConvCoef(2,:) = ValueListImp(2,:)*ConvCoef(2,2) 1048c & + ValueListImp(3,:)*ConvCoef(2,1) 1049c ImpConvCoef(numTpoints+1,:) = 1050c & ValueListImp(numTpoints,:)*ConvCoef(numTpoints+1,3) 1051c & + ValueListImp(numTpoints+1,:)*ConvCoef(numTpoints+1,2) 1052c 
ImpConvCoef(numTpoints+2,:) = 1053c & ValueListImp(numTpoints+1,:)*ConvCoef(numTpoints+2,3) 1054 1055c..try easiest convolution Q and Z constant per time step 1056 do j=3,numTpoints+1 1057 ImpConvCoef(j,:) = ValueListImp(j-1,:)/numTpoints 1058 enddo 1059 ImpConvCoef(1,:) =zero 1060 ImpConvCoef(2,:) =zero 1061 ImpConvCoef(numTpoints+2,:) = 1062 & ValueListImp(numTpoints+1,:)/numTpoints 1063c compensate for yalpha passed not y in Elmgmr() 1064 ImpConvCoef(numTpoints+1,:)= ImpConvCoef(numTpoints+1,:) 1065 & - ImpConvCoef(numTpoints+2,:)*(1.0-alfi)/alfi 1066 ImpConvCoef(numTpoints+2,:)= ImpConvCoef(numTpoints+2,:)/alfi 1067 return 1068 end 1069 1070c 1071c ... update the flow rate history for the impedance convolution, filter it and write it out 1072c 1073 subroutine UpdHistConv(y,nsrfIdList,numSrfs) 1074 1075 use convolImpFlow !brings ntimeptpT, QHistImp, QHistTry, QHistTryF, numImpSrfs 1076 use convolRCRFlow !brings QHistRCR, numRCRSrfs 1077 1078 include "common.h" 1079 1080 integer nsrfIdList(0:MAXSURF), numSrfs 1081 character*20 fname1 1082 character*10 cname2 1083 character*5 cname 1084 real*8 y(nshg,3) !velocity at time n+1 1085 real*8 NewQ(0:MAXSURF) !temporary unknown for the flow rate 1086 !that needs to be added to the flow history 1087 1088 call GetFlowQ(NewQ,y,nsrfIdList,numSrfs) !new flow at time n+1 1089c 1090c... for imp BC: shift QHist, add new contribution, filter and write out 1091c 1092 if(numImpSrfs.gt.zero) then 1093 do j=1, ntimeptpT 1094 QHistImp(j,1:numSrfs)=QHistImp(j+1,1:numSrfs) 1095 enddo 1096 QHistImp(ntimeptpT+1,1:numSrfs) = NewQ(1:numSrfs) 1097 1098c 1099c....filter the flow rate history 1100c 1101 cutfreq = 10 !hardcoded cutting frequency of the filter 1102 do j=1, ntimeptpT 1103 QHistTry(j,:)=QHistImp(j+1,:) 1104 enddo 1105 call Filter(QHistTryF,QHistTry,ntimeptpT,Delt(1),cutfreq) 1106c....
if no filter applied then uncomment next three lines 1107c do j=1, ntimeptpT 1108c QHistTryF(j,:)=QHistTry(j,:) 1109c enddo 1110 1111c QHistImp(1,:)=zero ! why do we do this? for beta(1,:) = zero it does not really matter 1112 do j=1, ntimeptpT 1113 QHistImp(j+1,:)=QHistTryF(j,:) 1114 enddo 1115c 1116c.... write out the new history of flow rates to Qhistor.dat 1117c 1118 if (((irs .ge. 1) .and. ((mod(lstep, ntout) .eq. 0) .or. 1119 & (istep .eq. nstep(1)))) .and. 1120 & (myrank .eq. master)) then 1121 open(unit=816, file='Qhistor.dat',status='replace') 1122 write(816,*) ntimeptpT 1123 do j=1,ntimeptpT+1 1124 write(816,*) (QHistImp(j,n),n=1, numSrfs) 1125 enddo 1126 close(816) 1127c... write out a copy with step number to be able to restart 1128 fname1 = 'Qhistor' 1129 fname1 = trim(fname1)//trim(cname2(lstep))//'.dat' 1130 open(unit=8166,file=trim(fname1),status='unknown') 1131 write(8166,*) ntimeptpT 1132 do j=1,ntimeptpT+1 1133 write(8166,*) (QHistImp(j,n),n=1, numSrfs) 1134 enddo 1135 close(8166) 1136 endif 1137 endif 1138 1139c 1140c... for RCR bc just add the new contribution 1141c 1142 if(numRCRSrfs.gt.zero) then 1143 QHistRCR(lstep+1,1:numSrfs) = NewQ(1:numSrfs) 1144c 1145c.... write out the new history of flow rates to Qhistor.dat 1146c 1147 if ((irs .ge. 1) .and. (myrank .eq. master)) then 1148 if(istep.eq.1) then 1149 open(unit=816,file='Qhistor.dat',status='unknown') 1150 else 1151 open(unit=816,file='Qhistor.dat',position='append') 1152 endif 1153 if(istep.eq.1) then 1154 do j=1,lstep 1155 write(816,*) j, (QHistRCR(j,n),n=1,numSrfs) ! read from file of previous run 1156 enddo 1157 endif 1158 write(816,*) lstep+1, (QHistRCR(lstep+1,n),n=1, numSrfs) 1159 close(816) 1160c... write out a copy with step number to be able to restart 1161 if (((irs .ge. 1) .and. ((mod(lstep, ntout) .eq. 0) .or. 1162 & (istep .eq. nstep(1)))) .and. 1163 & (myrank .eq.
master)) then 1164 fname1 = 'Qhistor' 1165 fname1 = trim(fname1)//trim(cname2(lstep))//'.dat' 1166 open(unit=8166,file=trim(fname1),status='unknown') 1167 write(8166,*) lstep+1 1168 do j=1,lstep+1 1169 write(8166,*) (QHistRCR(j,n),n=1, numSrfs) 1170 enddo 1171 close(8166) 1172 endif 1173 endif 1174 endif 1175 1176 return 1177 end 1178 1179c 1180c...calculate the time varying coefficients for the RCR convolution 1181c 1182 subroutine CalcRCRConvCoef (stepn, numSrfs) 1183 1184 use convolRCRFlow !brings in ValueListRCR, dtRCR 1185 1186 include "common.h" !brings alfi 1187 1188 integer numSrfs, stepn 1189 1190 RCRConvCoef = zero 1191 if (stepn .eq. 0) then 1192 RCRConvCoef(1,:) = ValueListRCR(1,:)*(1.0-alfi) + 1193 & ValueListRCR(3,:)*(-alfi + 1.0 + 1/dtRCR(:) 1194 & - exp(-alfi*dtRCR(:))*(1 + 1/dtRCR(:))) 1195 RCRConvCoef(2,:) = ValueListRCR(1,:)*alfi 1196 & + ValueListRCR(3,:) 1197 & *(alfi - 1/dtRCR(:) + exp(-alfi*dtRCR(:))/dtRCR(:)) 1198 endif 1199 if (stepn .ge. 1) then 1200 RCRConvCoef(1,:) =-ValueListRCR(3,:)*exp(-dtRCR(:)*(stepn+alfi)) 1201 & *(1 + (1 - exp(dtRCR(:)))/dtRCR(:)) 1202 RCRConvCoef(stepn+1,:) = ValueListRCR(1,:)*(1-alfi) 1203 & - ValueListRCR(3,:)*(alfi - 1 - 1/dtRCR(:) 1204 & + exp(-alfi*dtRCR(:))/dtRCR(:)*(2 - exp(-dtRCR(:)))) 1205 RCRConvCoef(stepn+2,:) = ValueListRCR(1,:)*alfi 1206 & + ValueListRCR(3,:) 1207 & *(alfi - 1/dtRCR(:) + exp(-alfi*dtRCR(:))/dtRCR(:)) 1208 endif 1209 if (stepn .ge.
2) then 1210 do j=2,stepn 1211 RCRConvCoef(j,:) = ValueListRCR(3,:)/dtRCR(:)* 1212 & exp(-dtRCR(:)*(stepn + alfi + 2 - j))* 1213 & (1 - exp(dtRCR(:)))**2 1214 enddo 1215 endif 1216 1217c compensate for yalpha passed not y in Elmgmr() 1218 RCRConvCoef(stepn+1,:)= RCRConvCoef(stepn+1,:) 1219 & - RCRConvCoef(stepn+2,:)*(1.0-alfi)/alfi 1220 RCRConvCoef(stepn+2,:)= RCRConvCoef(stepn+2,:)/alfi 1221 1222 return 1223 end 1224 1225c 1226c...calculate the time dependent H operator for the RCR convolution 1227c 1228 subroutine CalcHopRCR (timestepRCR, stepn, numSrfs) 1229 1230 use convolRCRFlow !brings in HopRCR, dtRCR 1231 1232 include "common.h" 1233 1234 integer numSrfs, stepn 1235 real*8 PdistCur(0:MAXSURF), timestepRCR 1236 1237 HopRCR=zero 1238 call RCRint(timestepRCR*(stepn + alfi),PdistCur) 1239 HopRCR(1:numSrfs) = RCRic(1:numSrfs) 1240 & *exp(-dtRCR(1:numSrfs)*(stepn + alfi)) + PdistCur(1:numSrfs) 1241 return 1242 end 1243c 1244c ... initialize the influence of the initial conditions for the RCR BC 1245c 1246 subroutine calcRCRic(y,srfIdList,numSrfs) 1247 1248 use convolRCRFlow !brings RCRic, ValueListRCR, ValuePdist 1249 1250 include "common.h" 1251 1252 integer srfIdList(0:MAXSURF), numSrfs, irankCoupled 1253 real*8 y(nshg,4) !need velocity and pressure 1254 real*8 Qini(0:MAXSURF) !initial flow rate 1255 real*8 PdistIni(0:MAXSURF) !initial distal pressure 1256 real*8 Pini(0:MAXSURF),CoupleArea(0:MAXSURF) ! initial pressure 1257 real*8 VelOnly(nshg,3), POnly(nshg) 1258 1259 allocate (RCRic(0:MAXSURF)) 1260 1261 if(lstep.eq.0) then 1262 VelOnly(:,1:3)=y(:,1:3) 1263 call GetFlowQ(Qini,VelOnly,srfIdList,numSrfs) !get initial flow 1264 QHistRCR(1,1:numSrfs)=Qini(1:numSrfs) !initialize QHistRCR 1265 POnly(:)=y(:,4) ! pressure 1266 call integrScalar(Pini,POnly,srfIdList,numSrfs) !get initial pressure integral 1267 POnly(:)=one !
one to get area 1268 call integrScalar(CoupleArea,POnly,srfIdList,numSrfs) !get surf area 1269 Pini(1:numSrfs) = Pini(1:numSrfs)/CoupleArea(1:numSrfs) 1270 else 1271 Qini(1:numSrfs)=QHistRCR(1,1:numSrfs) 1272 Pini(1:numSrfs)=zero ! hack: initial pressure is not recovered on restart 1273 endif 1274 call RCRint(istep,PdistIni) !get initial distal P (use istep) - NOTE(review): integer istep passed here while CalcHopRCR passes a real*8 time; confirm RCRint dummy type 1275 RCRic(1:numSrfs) = Pini(1:numSrfs) 1276 & - ValueListRCR(1,:)*Qini(1:numSrfs)-PdistIni(1:numSrfs) 1277 return 1278 end 1279 1280c.........function that integrates a scalar over a boundary 1281 subroutine integrScalar(scalInt,scal,srfIdList,numSrfs) 1282 1283 use pvsQbi !brings ndsurf, NASC 1284 1285 include "common.h" 1286 include "mpif.h" 1287 1288 integer srfIdList(0:MAXSURF), numSrfs, irankCoupled, i, k 1289 real*8 scal(nshg), scalInt(0:MAXSURF), scalIntProc(0:MAXSURF) 1290 1291 scalIntProc = zero 1292 do i = 1,nshg 1293 if(numSrfs.gt.zero) then 1294 do k = 1,numSrfs 1295 irankCoupled = 0 1296 if (srfIdList(k).eq.ndsurf(i)) then 1297 irankCoupled=k 1298 scalIntProc(irankCoupled) = scalIntProc(irankCoupled) 1299 & + NASC(i)*scal(i) 1300 exit 1301 endif 1302 enddo 1303 endif 1304 enddo 1305c 1306c at this point, each scalint has its "nodes" contributions to the scalar 1307c accumulated into scalIntProc. Note, because NASC is on processor this 1308c will NOT be the scalar for the surface yet 1309c 1310c....
reduce integrated scalar for each surface, push on scalInt 1311c 1312 npars=MAXSURF+1 1313 call MPI_ALLREDUCE (scalIntProc, scalInt(:), npars, 1314 & MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr) 1315 1316 return 1317 end 1318 1319 subroutine writeTimingMessage(key,iomode,timing) 1320 use iso_c_binding 1321 use phstr 1322 implicit none 1323 1324 character(len=*) :: key 1325 integer :: iomode 1326 real*8 :: timing 1327 character(len=1024) :: timing_msg 1328 character(len=*), parameter :: 1329 & streamModeString = c_char_"stream"//c_null_char, 1330 & fileModeString = c_char_"disk"//c_null_char 1331 1332 timing_msg = c_char_"Time to write "//c_null_char 1333 call phstr_appendStr(timing_msg,key) 1334 if ( iomode .eq. -1 ) then 1335 call phstr_appendStr(timing_msg, streamModeString) 1336 else 1337 call phstr_appendStr(timing_msg, fileModeString) 1338 endif 1339 call phstr_appendStr(timing_msg, c_char_' = '//c_null_char) 1340 call phstr_appendDbl(timing_msg, timing) 1341 write(6,*) trim(timing_msg) 1342 return 1343 end subroutine 1344 1345 subroutine initmpistat() 1346 include "common.h" 1347 1348 impistat = 0 1349 impistat2 = 0 1350 iISend = 0 1351 iISendScal = 0 1352 iIRecv = 0 1353 iIRecvScal = 0 1354 iWaitAll = 0 1355 iWaitAllScal = 0 1356 iAllR = 0 1357 iAllRScal = 0 1358 rISend = zero 1359 rISendScal = zero 1360 rIRecv = zero 1361 rIRecvScal = zero 1362 rWaitAll = zero 1363 rWaitAllScal = zero 1364 rAllR = zero 1365 rAllRScal = zero 1366 rCommu = zero 1367 rCommuScal = zero 1368 return 1369 end subroutine 1370 1371 subroutine initTimeSeries() 1372 use timedata !allows collection of time series 1373 include "common.h" 1374 character*60 fvarts 1375 character*10 cname2 1376 1377 inquire(file='xyzts.dat',exist=exts) 1378 if(exts) then 1379 open(unit=626,file='xyzts.dat',status='old') 1380 read(626,*) ntspts, freq, tolpt, iterat, varcod 1381 call sTD ! sets time-series data structures 1382 1383 do jj=1,ntspts !
read coordinate data where solution desired 1384 read(626,*) ptts(jj,1),ptts(jj,2),ptts(jj,3) 1385 enddo 1386 close(626) 1387 1388 statptts(:,:) = 0 1389 parptts(:,:) = zero 1390 varts(:,:) = zero 1391 1392 1393 iv_rankpernode = iv_rankpercore*iv_corepernode 1394 iv_totnodes = numpe/iv_rankpernode 1395 iv_totcores = iv_corepernode*iv_totnodes 1396 if (myrank .eq. 0) then 1397 write(*,*) 'Info for probes:' 1398 write(*,*) ' Ranks per core:',iv_rankpercore 1399 write(*,*) ' Cores per node:',iv_corepernode 1400 write(*,*) ' Ranks per node:',iv_rankpernode 1401 write(*,*) ' Total number of nodes:',iv_totnodes 1402 write(*,*) ' Total number of cores',iv_totcores 1403 endif 1404 1405! if (myrank .eq. numpe-1) then 1406 do jj=1,ntspts 1407 1408 ! Compute the rank that will handle probe jj 1409 jjm1 = jj-1 1410 iv_node = (iv_totnodes-1)-mod(jjm1,iv_totnodes) 1411 iv_core = (iv_corepernode-1) - mod((jjm1 - 1412 & mod(jjm1,iv_totnodes))/iv_totnodes,iv_corepernode) 1413 iv_thread = (iv_rankpercore-1) - mod((jjm1- 1414 & (mod(jjm1,iv_totcores)))/iv_totcores,iv_rankpercore) 1415 iv_rank(jj) = iv_node*iv_rankpernode 1416 & + iv_core*iv_rankpercore 1417 & + iv_thread 1418 1419 if(myrank == 0) then 1420 write(*,*) ' Probe', jj, 'handled by rank', 1421 & iv_rank(jj), ' on node', iv_node 1422 endif 1423 1424 ! Sanity check: clamp out-of-range ranks to numpe-1 1425 if(iv_rank(jj) .lt.0 .or. iv_rank(jj) .ge. numpe) then 1426 write(*,*) 'WARNING: iv_rank(',jj,') is ', iv_rank(jj), 1427 & ' and reset to numpe-1' 1428 iv_rank(jj) = numpe-1 1429 endif 1430 1431 ! Open the varts files 1432 if(myrank == iv_rank(jj)) then 1433 fvarts='varts/varts' 1434 fvarts=trim(fvarts)//trim(cname2(jj)) 1435 fvarts=trim(fvarts)//trim(cname2(lstep)) 1436 fvarts=trim(fvarts)//'.dat' 1437 fvarts=trim(fvarts) 1438 open(unit=1000+jj, file=fvarts, status='unknown') 1439 endif 1440 enddo 1441!
endif 1442 1443 endif 1444c 1445 return 1446 end subroutine 1447 1448 subroutine finalizeTimeSeries() 1449 use timedata !allows collection of time series 1450 include "common.h" 1451 if(exts) then 1452 do jj=1,ntspts 1453 if (myrank == iv_rank(jj)) then 1454 close(1000+jj) 1455 endif 1456 enddo 1457 call dTD ! deallocates time series arrays 1458 endif 1459 return 1460 end subroutine 1461 1462 1463 1464 subroutine initEQS(iBC, rowp, colm) 1465 1466 use solvedata 1467 include "common.h" 1468#ifdef HAVE_SVLS 1469 include "svLS.h" 1470 1471 TYPE(svLS_lhsType) svLS_lhs 1472 TYPE(svLS_lsType) svLS_ls 1473 TYPE(svLS_commuType) communicator 1474 TYPE(svLS_lsType) svLS_ls_S(4) 1475 TYPE(svLS_lhsType) svLS_lhs_S(4) 1476 TYPE(svLS_commuType) communicator_S(4) 1477 INTEGER svLS_nFaces, gnNo, nNo, faIn, facenNo 1478#endif 1479 integer, allocatable :: gNodes(:) 1480 real*8, allocatable :: sV(:,:) 1481 character*1024 servername 1482 integer rowp(nshg,nnz), colm(nshg+1), 1483 & iBC(nshg) 1484#ifdef HAVE_LESLIB 1485 integer eqnType 1486! IF (svLSFlag .EQ. 0) THEN !When we get a PETSc option it also could block this or a positive leslib 1487 call SolverLicenseServer(servername) 1488! ENDIF 1489#endif 1490c 1491c.... For linear solver Library 1492c 1493c 1494c.... assign parameter values 1495c 1496 do i = 1, 100 1497 numeqns(i) = i 1498 enddo 1499c 1500c.... determine how many scalar equations we are going to need to solve 1501c 1502 nsolt=mod(impl(1),2) ! 1 if solving temperature 1503 nsclrsol=nsolt+nsclr ! total number of scalars solved At 1504 ! some point we probably want to create 1505 ! a map, considering stepseq(), to find 1506 ! what is actually solved and only 1507 ! dimension lhs to the appropriate 1508 ! size. (see 1.6.1 and earlier for a 1509 ! "failed" attempt at this). 1510 1511 1512 nsolflow=mod(impl(1),100)/10 ! 1 if solving flow 1513 1514c 1515c.... Now, call lesNew routine to initialize 1516c memory space 1517c 1518 call genadj(colm, rowp, icnt ) !
preprocess the adjacency list 1519 1520 nnz_tot=icnt ! this is exactly the number of non-zero blocks on 1521 ! this proc 1522 1523 if (nsolflow.eq.1) then ! start of setup for the flow 1524 lesId = numeqns(1) 1525 eqnType = 1 1526 nDofs = 4 1527 1528!-------------------------------------------------------------------- 1529! Rest of configuration of svLS is added here, where we have LHS 1530! pointers 1531 1532 nPermDims = 1 1533 nTmpDims = 1 1534 1535 1536 allocate (lhsP(4,nnz_tot)) 1537 allocate (lhsK(9,nnz_tot)) 1538 1539! Setting up svLS or leslib for flow 1540 1541 IF (svLSFlag .EQ. 1) THEN 1542#ifdef HAVE_SVLS 1543 IF(nPrjs.eq.0) THEN 1544 svLSType=2 !GMRES if borrowed ACUSIM projection vectors variable set to zero 1545 ELSE 1546 svLSType=3 !NS solver 1547 ENDIF 1548! reltol for the NSSOLVE is the stop criterion on the outer loop 1549! reltolIn is (eps_GM, eps_CG) from the CompMech paper 1550! for now we are using 1551! Tolerance on ACUSIM Pressure Projection for CG and 1552! Tolerance on Momentum Equations for GMRES 1553! also using Kspace and maxIters from setup for ACUSIM 1554! 1555 eps_outer=40.0*epstol(1) !following the paper's suggestion for now 1556 CALL svLS_LS_CREATE(svLS_ls, svLSType, dimKry=Kspace, 1557 2 relTol=eps_outer, relTolIn=(/epstol(1),prestol/), 1558 3 maxItr=maxIters, 1559 4 maxItrIn=(/maxIters,maxIters/)) 1560 1561 CALL svLS_COMMU_CREATE(communicator, MPI_COMM_WORLD) 1562 nNo=nshg 1563 gnNo=nshgt 1564 IF (ipvsq .GE.
2) THEN 1565 1566#if((VER_CORONARY == 1)&&(VER_CLOSEDLOOP == 1)) 1567 svLS_nFaces = 1 + numResistSrfs + numNeumannSrfs 1568 2 + numImpSrfs + numRCRSrfs + numCORSrfs 1569#elif((VER_CORONARY == 1)&&(VER_CLOSEDLOOP == 0)) 1570 svLS_nFaces = 1 + numResistSrfs 1571 2 + numImpSrfs + numRCRSrfs + numCORSrfs 1572#elif((VER_CORONARY == 0)&&(VER_CLOSEDLOOP == 1)) 1573 svLS_nFaces = 1 + numResistSrfs + numNeumannSrfs 1574 2 + numImpSrfs + numRCRSrfs 1575#else 1576 svLS_nFaces = 1 + numResistSrfs 1577 2 + numImpSrfs + numRCRSrfs 1578#endif 1579 1580 ELSE 1581 svLS_nFaces = 1 !not sure about this...looks like 1 means 0 for array size issues 1582 END IF 1583 1584 faIn = 1 1585 facenNo = 0 1586 DO i=1, nshg 1587 IF (IBITS(iBC(i),3,3) .NE. 0) facenNo = facenNo + 1 1588 END DO 1589 ALLOCATE(gNodes(facenNo), sV(nsd,facenNo)) 1590 sV = 0D0 1591 j = 0 1592 DO i=1, nshg 1593 IF (IBITS(iBC(i),3,3) .NE. 0) THEN 1594 j = j + 1 1595 gNodes(j) = i 1596 IF (.NOT.BTEST(iBC(i),3)) sV(1,j) = 1D0 1597 IF (.NOT.BTEST(iBC(i),4)) sV(2,j) = 1D0 1598 IF (.NOT.BTEST(iBC(i),5)) sV(3,j) = 1D0 1599 END IF 1600 END DO 1601 CALL svLS_LHS_CREATE(svLS_lhs, communicator, gnNo, nNo, 1602 2 nnz_tot, gNodes, colm, rowp, svLS_nFaces) 1603 CALL svLS_BC_CREATE(svLS_lhs, faIn, facenNo, 1604 2 nsd, BC_TYPE_Dir, gNodes, sV) 1605 DEALLOCATE(gNodes) 1606 DEALLOCATE(sV) 1607#else 1608 if(myrank.eq.0) write(*,*) 'your input requests svLS but your cmake did not build for it' 1609 call error('itrdrv ','nosVLS',svLSFlag) 1610#endif 1611 ENDIF 1612 1613 if(leslib.eq.1) then 1614#ifdef HAVE_LESLIB 1615!-------------------------------------------------------------------- 1616 call myfLesNew( lesId, 41994, 1617 & eqnType, 1618 & nDofs, minIters, maxIters, 1619 & Kspace, iprjFlag, nPrjs, 1620 & ipresPrjFlag, nPresPrjs, epstol(1), 1621 & prestol, iverbose, statsflow, 1622 & nPermDims, nTmpDims, servername ) 1623 1624 allocate (aperm(nshg,nPermDims)) 1625 allocate (atemp(nshg,nTmpDims)) 1626 call readLesRestart( lesId, aperm, nshg,
myrank, lstep, 1627 & nPermDims ) 1628#else 1629 if(myrank.eq.0) write(*,*) 'your input requests leslib but your cmake did not build for it' 1630 call error('itrdrv ','nolslb',leslib) 1631#endif 1632 endif !leslib=1 1633 1634 else ! not solving flow just scalar 1635 nPermDims = 0 1636 nTmpDims = 0 1637 endif 1638 1639 1640 if(nsclrsol.gt.0) then 1641 do isolsc=1,nsclrsol 1642 lesId = numeqns(isolsc+1) 1643 eqnType = 2 1644 nDofs = 1 1645 isclpresPrjflag = 0 1646 nPresPrjs = 0 1647 isclprjFlag = 1 1648 indx=isolsc+2-nsolt ! complicated to keep epstol(2) for 1649 ! temperature followed by scalars 1650! Setting up svLS or leslib for scalar 1651#ifdef HAVE_SVLS 1652 IF (svLSFlag .EQ. 1) THEN 1653 svLSType=2 !only option for scalars 1654! reltol for the GMRES is the stop criterion 1655! also using Kspace and maxIters from setup for ACUSIM 1656! 1657 CALL svLS_LS_CREATE(svLS_ls_S(isolsc), svLSType, dimKry=Kspace, 1658 2 relTol=epstol(indx), 1659 3 maxItr=maxIters 1660 4 ) 1661 1662 CALL svLS_COMMU_CREATE(communicator_S(isolsc), MPI_COMM_WORLD) 1663 1664 faIn = 1 1665 facenNo = 0 1666 ib=5+isolsc 1667 DO i=1, nshg 1668 IF (btest(iBC(i),ib)) facenNo = facenNo + 1 1669 END DO 1670 ALLOCATE(gNodes(facenNo), sV(1,facenNo)) 1671 sV = 0D0 1672 j = 0 1673 DO i=1, nshg 1674 IF (btest(iBC(i),ib)) THEN 1675 j = j + 1 1676 gNodes(j) = i 1677 END IF 1678 END DO 1679 1680 svLS_nFaces = 1 !not sure about this...should try it with zero 1681 CALL svLS_LHS_CREATE(svLS_lhs_S(isolsc), communicator_S(isolsc), gnNo, nNo, 1682 2 nnz_tot, gNodes, colm, rowp, svLS_nFaces) 1683 1684 CALL svLS_BC_CREATE(svLS_lhs_S(isolsc), faIn, facenNo, 1685 2 1, BC_TYPE_Dir, gNodes, sV(1,:)) 1686 DEALLOCATE(gNodes) 1687 DEALLOCATE(sV) 1688 1689 if( isolsc.eq.1) then !
if multiple scalars make sure done once 1690 allocate (apermS(1,1,1)) 1691 allocate (atempS(1,1)) !they can all share this 1692 endif 1693 ENDIF !svLS handling scalar solve 1694#endif 1695 1696 1697#ifdef HAVE_LESLIB 1698 if (leslib.eq.1) then 1699 call myfLesNew( lesId, 41994, 1700 & eqnType, 1701 & nDofs, minIters, maxIters, 1702 & Kspace, isclprjFlag, nPrjs, 1703 & isclpresPrjFlag, nPresPrjs, epstol(indx), 1704 & prestol, iverbose, statssclr, 1705 & nPermDimsS, nTmpDimsS, servername ) 1706 endif 1707#endif 1708 enddo !loop over scalars to solve (not yet worked out for multiple svLS solves) 1709 allocate (lhsS(nnz_tot,nsclrsol)) 1710#ifdef HAVE_LESLIB 1711 if(leslib.eq.1) then ! we just prepped scalar solves for leslib so allocate arrays 1712c 1713c Assume all scalars have the same size needs 1714c 1715 allocate (apermS(nshg,nPermDimsS,nsclrsol)) 1716 allocate (atempS(nshg,nTmpDimsS)) !they can all share this 1717 endif 1718#endif 1719c 1720c actually they could even share with atemp but leave that for later 1721c 1722 else !no scalar solves at all so zero dims not used 1723 nPermDimsS = 0 1724 nTmpDimsS = 0 1725 endif 1726 return 1727 end subroutine 1728 1729 1730 subroutine seticomputevort 1731 include "common.h" 1732 icomputevort = 0 1733 if (ivort == 1) then ! Print vorticity = True in solver.inp 1734 ! We then compute the vorticity only if we 1735 ! 1) we write an intermediate checkpoint 1736 ! 2) we reach the last time step and write the last checkpoint 1737 ! 3) we accumulate statistics in ybar for every time step 1738 ! BEWARE: we need here lstep+1 and istep+1 because the lstep and 1739 ! istep gets incremented after the flowsolve, further below 1740 if (((irs .ge. 1) .and. (mod(lstep+1, ntout) .eq. 0)) .or. 1741 & istep+1.eq.nstep(itseq) .or. ioybar == 1) then 1742 icomputevort = 1 1743 endif 1744 endif 1745 1746! write(*,*) 'icomputevort: ',icomputevort, ' - istep: ', 1747! & istep,' - nstep(itseq):',nstep(itseq),'- lstep:', 1748!
& lstep, '- ntout:', ntout 1749 return 1750 end subroutine 1751 1752 subroutine computeVort( vorticity, GradV,strain) 1753 include "common.h" 1754 1755 real*8 gradV(nshg,nsdsq), strain(nshg,6), vorticity(nshg,5) 1756 1757 ! vorticity components and magnitude 1758 vorticity(:,1) = GradV(:,8)-GradV(:,6) !omega_x 1759 vorticity(:,2) = GradV(:,3)-GradV(:,7) !omega_y 1760 vorticity(:,3) = GradV(:,4)-GradV(:,2) !omega_z 1761 vorticity(:,4) = sqrt( vorticity(:,1)*vorticity(:,1) 1762 & + vorticity(:,2)*vorticity(:,2) 1763 & + vorticity(:,3)*vorticity(:,3) ) 1764 ! Q 1765 strain(:,1) = GradV(:,1) !S11 1766 strain(:,2) = 0.5*(GradV(:,2)+GradV(:,4)) !S12 1767 strain(:,3) = 0.5*(GradV(:,3)+GradV(:,7)) !S13 1768 strain(:,4) = GradV(:,5) !S22 1769 strain(:,5) = 0.5*(GradV(:,6)+GradV(:,8)) !S23 1770 strain(:,6) = GradV(:,9) !S33 1771 1772 vorticity(:,5) = 0.25*( vorticity(:,4)*vorticity(:,4) !Q 1773 & - 2.0*( strain(:,1)*strain(:,1) 1774 & + 2* strain(:,2)*strain(:,2) 1775 & + 2* strain(:,3)*strain(:,3) 1776 & + strain(:,4)*strain(:,4) 1777 & + 2* strain(:,5)*strain(:,5) 1778 & + strain(:,6)*strain(:,6))) 1779 1780 return 1781 end subroutine 1782 1783 subroutine dumpTimeSeries() 1784 use timedata !allows collection of time series 1785 include "common.h" 1786 include "mpif.h" 1787 character*60 fvarts 1788 character*10 cname2 1789 1790 1791 if (numpe > 1) then 1792 do jj = 1, ntspts 1793 vartssoln((jj-1)*ndof+1:jj*ndof)=varts(jj,:) 1794 ivarts=zero 1795 enddo 1796 do k=1,ndof*ntspts 1797 if(vartssoln(k).ne.zero) ivarts(k)=1 1798 enddo 1799 1800! call MPI_REDUCE(vartssoln, vartssolng, ndof*ntspts, 1801! & MPI_DOUBLE_PRECISION, MPI_SUM, master, 1802! & MPI_COMM_WORLD, ierr) 1803 1804 call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1805 call MPI_ALLREDUCE(vartssoln, vartssolng, 1806 & ndof*ntspts, 1807 & MPI_DOUBLE_PRECISION, MPI_SUM, 1808 & MPI_COMM_WORLD, ierr) 1809 1810! call MPI_REDUCE(ivarts, ivartsg, ndof*ntspts, 1811! & MPI_INTEGER, MPI_SUM, master, 1812!
& MPI_COMM_WORLD, ierr) 1813 1814 call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1815 call MPI_ALLREDUCE(ivarts, ivartsg, ndof*ntspts, 1816 & MPI_INTEGER, MPI_SUM, 1817 & MPI_COMM_WORLD, ierr) 1818 1819! if (myrank.eq.zero) then 1820 do jj = 1, ntspts 1821 1822 if(myrank .eq. iv_rank(jj)) then 1823 ! No need to update all varts components, only the one treated by the expected rank 1824 ! Note: keep varts as a vector, as multiple probes could be treated by one rank 1825 indxvarts = (jj-1)*ndof 1826 do k=1,ndof 1827 if(ivartsg(indxvarts+k).ne.0) then ! at least one rank contributed a nonzero part 1828 varts(jj,k)=vartssolng(indxvarts+k)/ 1829 & ivartsg(indxvarts+k) 1830 endif 1831 enddo 1832 endif !only if myrank eq iv_rank(jj) 1833 enddo 1834! endif !only on master 1835 endif !only if numpe > 1 1836 1837! if (myrank.eq.zero) then 1838 do jj = 1, ntspts 1839 if(myrank .eq. iv_rank(jj)) then 1840 ifile = 1000+jj 1841 write(ifile,555) lstep, (varts(jj,k),k=1,ndof) !Beware of format 555 - check ndof 1842c call flush(ifile) 1843 if (((irs .ge. 1) .and. 1844 & (mod(lstep, ntout) .eq. 0))) then 1845 close(ifile) 1846 fvarts='varts/varts' 1847 fvarts=trim(fvarts)//trim(cname2(jj)) 1848 fvarts=trim(fvarts)//trim(cname2(lskeep)) 1849 fvarts=trim(fvarts)//'.dat' 1850 fvarts=trim(fvarts) 1851 open(unit=ifile, file=fvarts, 1852 & position='append') 1853 endif !only when dumping restart 1854 endif 1855 enddo 1856! endif !only on master 1857 1858 varts(:,:) = zero !
reset the array for next step 1859 1860 1861!555 format(i6,5(2x,E12.5e2)) 1862555 format(i6,6(2x,E20.12e2)) !assuming ndof = 6 here 1863 1864 return 1865 end subroutine 1866 1867 subroutine collectErrorYbar(ybar,yold,wallssVec,wallssVecBar, 1868 & vorticity,yphbar,rerr,irank2ybar,irank2yphbar) 1869 include "common.h" 1870 real*8 ybar(nshg,irank2ybar),yold(nshg,ndof),vorticity(nshg,5) 1871 real*8 yphbar(nshg,irank2yphbar,nphasesincycle) 1872 real*8 wallssvec(nshg,3),wallssVecBar(nshg,3), rerr(nshg,numerr) 1873c$$$c 1874c$$$c compute average 1875c$$$c 1876c$$$ tfact=one/istep 1877c$$$ ybar =tfact*yold + (one-tfact)*ybar 1878 1879c compute average 1880c ybar(:,1:3) are average velocity components 1881c ybar(:,4) is average pressure 1882c ybar(:,5) is average speed 1883c ybar(:,6:8) is average of sq. of each vel. component 1884c ybar(:,9) is average of sq. of pressure 1885c ybar(:,10:12) is average of cross vel. components : uv, uw and vw 1886c averaging procedure justified only for identical time step sizes 1887c ybar(:,13) is average of eddy viscosity 1888c ybar(:,14:16) is average vorticity components 1889c ybar(:,17) is average vorticity magnitude 1890c istep is number of time step 1891c 1892 icollectybar = 0 1893 if(nphasesincycle.eq.0 .or. 1894 & istep.gt.ncycles_startphaseavg*nstepsincycle) then 1895 icollectybar = 1 1896 if((istep-1).eq.ncycles_startphaseavg*nstepsincycle) 1897 & istepsinybar = 0 ! init. to zero in first cycle in avg. 1898 endif 1899 1900 if(icollectybar.eq.1) then 1901 istepsinybar = istepsinybar+1 1902 tfact=one/istepsinybar 1903 1904! if(myrank.eq.master .and. nphasesincycle.ne.0 .and. 1905! & mod((istep-1),nstepsincycle).eq.0) 1906! & write(*,*)'nsamples in phase average:',istepsinybar 1907 1908c ybar to contain the averaged ((u,v,w),p)-fields 1909c and speed average, i.e., sqrt(u^2+v^2+w^2) 1910c and avg. of sq.
terms including 1911c u^2, v^2, w^2, p^2 and cross terms of uv, uw and vw 1912 1913 ybar(:,1) = tfact*yold(:,1) + (one-tfact)*ybar(:,1) 1914 ybar(:,2) = tfact*yold(:,2) + (one-tfact)*ybar(:,2) 1915 ybar(:,3) = tfact*yold(:,3) + (one-tfact)*ybar(:,3) 1916 ybar(:,4) = tfact*yold(:,4) + (one-tfact)*ybar(:,4) 1917 ybar(:,5) = tfact*sqrt(yold(:,1)**2+yold(:,2)**2+ 1918 & yold(:,3)**2) + (one-tfact)*ybar(:,5) 1919 ybar(:,6) = tfact*yold(:,1)**2 + 1920 & (one-tfact)*ybar(:,6) 1921 ybar(:,7) = tfact*yold(:,2)**2 + 1922 & (one-tfact)*ybar(:,7) 1923 ybar(:,8) = tfact*yold(:,3)**2 + 1924 & (one-tfact)*ybar(:,8) 1925 ybar(:,9) = tfact*yold(:,4)**2 + 1926 & (one-tfact)*ybar(:,9) 1927 ybar(:,10) = tfact*yold(:,1)*yold(:,2) + !uv 1928 & (one-tfact)*ybar(:,10) 1929 ybar(:,11) = tfact*yold(:,1)*yold(:,3) + !uw 1930 & (one-tfact)*ybar(:,11) 1931 ybar(:,12) = tfact*yold(:,2)*yold(:,3) + !vw 1932 & (one-tfact)*ybar(:,12) 1933 if(nsclr.gt.0) !nut (eddy viscosity) 1934 & ybar(:,13) = tfact*yold(:,6) + (one-tfact)*ybar(:,13) 1935 1936 if(ivort == 1) then !vorticity 1937 ybar(:,14) = tfact*vorticity(:,1) + 1938 & (one-tfact)*ybar(:,14) 1939 ybar(:,15) = tfact*vorticity(:,2) + 1940 & (one-tfact)*ybar(:,15) 1941 ybar(:,16) = tfact*vorticity(:,3) + 1942 & (one-tfact)*ybar(:,16) 1943 ybar(:,17) = tfact*vorticity(:,4) + 1944 & (one-tfact)*ybar(:,17) 1945 endif 1946 1947 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 1948 wallssVecBar(:,1) = tfact*wallssVec(:,1) 1949 & +(one-tfact)*wallssVecBar(:,1) 1950 wallssVecBar(:,2) = tfact*wallssVec(:,2) 1951 & +(one-tfact)*wallssVecBar(:,2) 1952 wallssVecBar(:,3) = tfact*wallssVec(:,3) 1953 & +(one-tfact)*wallssVecBar(:,3) 1954 endif 1955 endif !icollectybar.eq.1 1956c 1957c compute phase average 1958c 1959 if(nphasesincycle.ne.0 .and. 1960 & istep.gt.ncycles_startphaseavg*nstepsincycle) then 1961 1962c beginning of cycle is considered as ncycles_startphaseavg*nstepsincycle+1 1963 if((istep-1).eq.ncycles_startphaseavg*nstepsincycle) 1964 & icyclesinavg = 0 ! init.
to zero in first cycle in avg. 1965 1966 ! find number of steps between phases 1967 nstepsbtwphase = nstepsincycle/nphasesincycle ! integer division 1968 if(mod(istep-1,nstepsincycle).eq.0) then 1969 iphase = 1 ! init. to one in beginning of every cycle 1970 icyclesinavg = icyclesinavg + 1 1971 endif 1972 1973 icollectphase = 0 1974 istepincycle = mod(istep,nstepsincycle) 1975 if(istepincycle.eq.0) istepincycle=nstepsincycle 1976 if(istepincycle.eq.iphase*nstepsbtwphase) then 1977 icollectphase = 1 1978 iphase = iphase+1 ! use 'iphase-1' below 1979 endif 1980 1981 if(icollectphase.eq.1) then 1982 tfactphase = one/icyclesinavg 1983 1984 if(myrank.eq.master) then 1985 write(*,*) 'nsamples in phase ',iphase-1,': ', 1986 & icyclesinavg 1987 endif 1988 1989 yphbar(:,1,iphase-1) = tfactphase*yold(:,1) + 1990 & (one-tfactphase)*yphbar(:,1,iphase-1) 1991 yphbar(:,2,iphase-1) = tfactphase*yold(:,2) + 1992 & (one-tfactphase)*yphbar(:,2,iphase-1) 1993 yphbar(:,3,iphase-1) = tfactphase*yold(:,3) + 1994 & (one-tfactphase)*yphbar(:,3,iphase-1) 1995 yphbar(:,4,iphase-1) = tfactphase*yold(:,4) + 1996 & (one-tfactphase)*yphbar(:,4,iphase-1) 1997 yphbar(:,5,iphase-1) = tfactphase*sqrt(yold(:,1)**2 1998 & +yold(:,2)**2+yold(:,3)**2) + 1999 & (one-tfactphase)*yphbar(:,5,iphase-1) 2000 yphbar(:,6,iphase-1) = 2001 & tfactphase*yold(:,1)*yold(:,1) 2002 & +(one-tfactphase)*yphbar(:,6,iphase-1) 2003 2004 yphbar(:,7,iphase-1) = 2005 & tfactphase*yold(:,1)*yold(:,2) 2006 & +(one-tfactphase)*yphbar(:,7,iphase-1) 2007 2008 yphbar(:,8,iphase-1) = 2009 & tfactphase*yold(:,1)*yold(:,3) 2010 & +(one-tfactphase)*yphbar(:,8,iphase-1) 2011 2012 yphbar(:,9,iphase-1) = 2013 & tfactphase*yold(:,2)*yold(:,2) 2014 & +(one-tfactphase)*yphbar(:,9,iphase-1) 2015 2016 yphbar(:,10,iphase-1) = 2017 & tfactphase*yold(:,2)*yold(:,3) 2018 & +(one-tfactphase)*yphbar(:,10,iphase-1) 2019 2020 yphbar(:,11,iphase-1) = 2021 & tfactphase*yold(:,3)*yold(:,3) 2022 & +(one-tfactphase)*yphbar(:,11,iphase-1) 2023 2024 if(ivort
== 1) then 2025 yphbar(:,12,iphase-1) = 2026 & tfactphase*vorticity(:,1) 2027 & +(one-tfactphase)*yphbar(:,12,iphase-1) 2028 yphbar(:,13,iphase-1) = 2029 & tfactphase*vorticity(:,2) 2030 & +(one-tfactphase)*yphbar(:,13,iphase-1) 2031 yphbar(:,14,iphase-1) = 2032 & tfactphase*vorticity(:,3) 2033 & +(one-tfactphase)*yphbar(:,14,iphase-1) 2034 yphbar(:,15,iphase-1) = 2035 & tfactphase*vorticity(:,4) 2036 & +(one-tfactphase)*yphbar(:,15,iphase-1) 2037 endif 2038 endif !compute phase average 2039 endif !if(nphasesincycle.eq.0 .or. istep.gt.ncycles_startphaseavg*nstepsincycle) 2040c 2041c compute rms 2042c 2043 if(icollectybar.eq.1) then 2044 rerr(:, 7)=rerr(:, 7)+(yold(:,1)-ybar(:,1))**2 2045 rerr(:, 8)=rerr(:, 8)+(yold(:,2)-ybar(:,2))**2 2046 rerr(:, 9)=rerr(:, 9)+(yold(:,3)-ybar(:,3))**2 2047 rerr(:,10)=rerr(:,10)+(yold(:,4)-ybar(:,4))**2 2048 endif 2049 return 2050 end subroutine 2051 2052