1 subroutine itrdrv (y, ac, 2 & uold, x, 3 & iBC, BC, 4 & iper, ilwork, shp, 5 & shgl, shpb, shglb, 6 & ifath, velbar, nsons ) 7c 8c---------------------------------------------------------------------- 9c 10c This iterative driver is the semi-discrete, predictor multi-corrector 11c algorithm. It contains the Hulbert Generalized Alpha method which 12c is 2nd order accurate for Rho_inf from 0 to 1. The method can be 13c made first-order accurate by setting Rho_inf=-1. It uses CGP and 14c GMRES iterative solvers. 15c 16c working arrays: 17c y (nshg,ndof) : Y variables 18c x (nshg,nsd) : node coordinates 19c iBC (nshg) : BC codes 20c BC (nshg,ndofBC) : BC constraint parameters 21c iper (nshg) : periodicity table 22c 23c 24c Zdenek Johan, Winter 1991. (Fortran 90) 25c Alberto Figueroa, Winter 2004. CMM-FSI 26c Irene Vignon, Fall 2004. Impedance BC 27c---------------------------------------------------------------------- 28c 29 use pvsQbi !gives us splag (the spmass at the end of this run 30 use specialBC !gives us itvn 31 use timedata !allows collection of time series 32 use convolImpFlow !for Imp bc 33 use convolRCRFlow !for RCR bc 34!MR CHANGE 35 use turbsa ! 
used to access d2wall 36!MR CHANGE END 37 use fncorpmod 38 use iso_c_binding 39 40c use readarrays !reads in uold and acold 41 42 include "common.h" 43 include "mpif.h" 44 include "auxmpi.h" 45#ifdef HAVE_SVLS 46 include "svLS.h" 47#endif 48#if !defined(HAVE_SVLS) && !defined(HAVE_LESLIB) 49#error "You must enable a linear solver during cmake setup" 50#endif 51 52c 53 54 55 real*8 y(nshg,ndof), ac(nshg,ndof), 56 & yold(nshg,ndof), acold(nshg,ndof), 57 & u(nshg,nsd), uold(nshg,nsd), 58 & x(numnp,nsd), solinc(nshg,ndof), 59 & BC(nshg,ndofBC), tf(nshg,ndof), 60 & GradV(nshg,nsdsq) 61 62c 63 real*8 res(nshg,ndof) 64c 65 real*8 shp(MAXTOP,maxsh,MAXQPT), 66 & shgl(MAXTOP,nsd,maxsh,MAXQPT), 67 & shpb(MAXTOP,maxsh,MAXQPT), 68 & shglb(MAXTOP,nsd,maxsh,MAXQPT) 69c 70 integer rowp(nshg,nnz), colm(nshg+1), 71 & iBC(nshg), 72 & ilwork(nlwork), 73 & iper(nshg), ifuncs(6) 74 75 real*8 vbc_prof(nshg,3) 76 77 integer stopjob 78 character*10 cname2 79 character*5 cname 80c 81c stuff for dynamic model s.w.avg and wall model 82c 83 dimension ifath(numnp), velbar(nfath,ndof), nsons(nfath) 84 85 dimension wallubar(2),walltot(2) 86c 87c.... For linear solver Library 88c 89 integer eqnType, prjFlag, presPrjFlag, verbose 90c 91 real*8, allocatable, dimension(:,:) :: aperm, atemp, atempS 92 real*8, allocatable, dimension(:,:,:) :: apermS 93 94 real*8, allocatable, dimension(:,:) :: lhsP, lhsK, lhsS 95 real*8 almit, alfit, gamit 96c 97 character*1024 servername 98 character*20 fname1,fmt1 99 character*20 fname2,fmt2 100 character*60 fnamepold, fvarts 101 character*4 fname4c ! 4 characters 102 integer iarray(50) ! integers for headers 103 integer isgn(ndof), isgng(ndof) 104 105!MR CHANGE 106! real*8 rerr(nshg,10), ybar(nshg,13) ! including 7 sq. terms with 3 cross terms of uv, uw and vw 107! real*8 rerr(nshg,10), ybar(nshg,12) ! including 7 sq. 
terms with 3 cross terms of uv, uw and vw 108 real*8 rerr(nshg,10) 109 real*8, allocatable, dimension(:,:) :: ybar, strain, vorticity 110 real*8, allocatable, dimension(:,:) :: wallssVec, wallssVecbar 111!MR CHANGE 112 113 real*8 tcorecp(2), tcorecpscal(2) 114 115 integer, allocatable, dimension(:) :: ivarts 116 integer, allocatable, dimension(:) :: ivartsg 117 integer, allocatable, dimension(:) :: iv_rank 118 real*8, allocatable, dimension(:) :: vartssoln 119 real*8, allocatable, dimension(:) :: vartssolng 120 real*8, allocatable, dimension(:,:,:) :: yphbar 121 real*8 CFLworst(numel) 122 123!MR CHANGE 124 integer :: iv_rankpernode, iv_totnodes, iv_totcores 125 integer :: iv_node, iv_core, iv_thread 126!MR CHANGE 127!-------------------------------------------------------------------- 128! Setting up svLS 129#ifdef HAVE_SVLS 130 INTEGER svLS_nFaces, gnNo, nNo, faIn, facenNo 131 INTEGER, ALLOCATABLE :: gNodes(:) 132 REAL*8, ALLOCATABLE :: sV(:,:) 133 134 TYPE(svLS_commuType) communicator 135 TYPE(svLS_lhsType) svLS_lhs 136 TYPE(svLS_lsType) svLS_ls 137! repeat for scalar solves (up to 4 at this time which is consistent with rest of PHASTA) 138 TYPE(svLS_commuType) communicator_S(4) 139 TYPE(svLS_lhsType) svLS_lhs_S(4) 140 TYPE(svLS_lsType) svLS_ls_S(4) 141#endif 142 143 impistat = 0 144 impistat2 = 0 145 iISend = 0 146 iISendScal = 0 147 iIRecv = 0 148 iIRecvScal = 0 149 iWaitAll = 0 150 iWaitAllScal = 0 151 iAllR = 0 152 iAllRScal = 0 153 rISend = zero 154 rISendScal = zero 155 rIRecv = zero 156 rIRecvScal = zero 157 rWaitAll = zero 158 rWaitAllScal = zero 159 rAllR = zero 160 rAllRScal = zero 161 rCommu = zero 162 rCommuScal = zero 163 164 call initmemstat() 165 166c 167c hack SA variable 168c 169cHack BC(:,7)=BC(:,7)*0.001 170cHack if(lstep.eq.0) y(:,6)=y(:,6)*0.001 171!-------------------------------------------------------------------- 172! Setting up svLS Moved down for better org 173 174#ifdef HAVE_LESLIB 175! IF (svLSFlag .EQ. 
0) THEN !When we get a PETSc option it also could block this or a positive leslib 176 call SolverLicenseServer(servername) 177! ENDIF 178#endif 179c 180c only master should be verbose 181c 182 183 if(numpe.gt.0 .and. myrank.ne.master)iverbose=0 184c 185 186 lskeep=lstep 187 188 inquire(file='xyzts.dat',exist=exts) 189 if(exts) then 190 191 open(unit=626,file='xyzts.dat',status='old') 192 read(626,*) ntspts, freq, tolpt, iterat, varcod 193 call sTD ! sets data structures 194 195 do jj=1,ntspts ! read coordinate data where solution desired 196 read(626,*) ptts(jj,1),ptts(jj,2),ptts(jj,3) 197 enddo 198 close(626) 199 200 statptts(:,:) = 0 201 parptts(:,:) = zero 202 varts(:,:) = zero 203 204 allocate (ivarts(ntspts*ndof)) 205 allocate (ivartsg(ntspts*ndof)) 206 allocate (iv_rank(ntspts)) 207 allocate (vartssoln(ntspts*ndof)) 208 allocate (vartssolng(ntspts*ndof)) 209 210 iv_rankpernode = iv_rankpercore*iv_corepernode 211 iv_totnodes = numpe/iv_rankpernode 212 iv_totcores = iv_corepernode*iv_totnodes 213 if (myrank .eq. 0) then 214 write(*,*) 'Info for probes:' 215 write(*,*) ' Ranks per core:',iv_rankpercore 216 write(*,*) ' Cores per node:',iv_corepernode 217 write(*,*) ' Ranks per node:',iv_rankpernode 218 write(*,*) ' Total number of nodes:',iv_totnodes 219 write(*,*) ' Total number of cores',iv_totcores 220 endif 221 222! if (myrank .eq. numpe-1) then 223 do jj=1,ntspts 224 225 ! Compute the adequate rank which will take care of probe jj 226 jjm1 = jj-1 227 iv_node = (iv_totnodes-1)-mod(jjm1,iv_totnodes) 228 iv_core = (iv_corepernode-1) - mod((jjm1 - 229 & mod(jjm1,iv_totnodes))/iv_totnodes,iv_corepernode) 230 iv_thread = (iv_rankpercore-1) - mod((jjm1- 231 & (mod(jjm1,iv_totcores)))/iv_totcores,iv_rankpercore) 232 iv_rank(jj) = iv_node*iv_rankpernode 233 & + iv_core*iv_rankpercore 234 & + iv_thread 235 236 if(myrank == 0) then 237 write(*,*) ' Probe', jj, 'handled by rank', 238 & iv_rank(jj), ' on node', iv_node 239 endif 240 241 ! 
Verification just in case 242 if(iv_rank(jj) .lt.0 .or. iv_rank(jj) .ge. numpe) then 243 write(*,*) 'WARNING: iv_rank(',jj,') is ', iv_rank(jj), 244 & ' and reset to numpe-1' 245 iv_rank(jj) = numpe-1 246 endif 247 248 ! Open the varts files 249 if(myrank == iv_rank(jj)) then 250 fvarts='varts/varts' 251 fvarts=trim(fvarts)//trim(cname2(jj)) 252 fvarts=trim(fvarts)//trim(cname2(lstep)) 253 fvarts=trim(fvarts)//'.dat' 254 fvarts=trim(fvarts) 255 open(unit=1000+jj, file=fvarts, status='unknown') 256 endif 257 enddo 258! endif 259 260 endif 261c 262c.... open history and aerodynamic forces files 263c 264 if (myrank .eq. master) then 265 open (unit=ihist, file=fhist, status='unknown') 266 open (unit=iforce, file=fforce, status='unknown') 267 open (unit=76, file="fort.76", status='unknown') 268 if(numImpSrfs.gt.0 .or. numRCRSrfs.gt.0) then 269 fnamepold = 'pold' 270 fnamepold = trim(fnamepold)//trim(cname2(lstep)) 271 fnamepold = trim(fnamepold)//'.dat' 272 fnamepold = trim(fnamepold) 273 open (unit=8177, file=fnamepold, status='unknown') 274 endif 275 endif 276c 277c.... initialize 278c 279 ifuncs(:) = 0 ! func. evaluation counter 280 istep = 0 281 yold = y 282 acold = ac 283 284 rerr = zero 285 286 if(ierrcalc.eq.1 .or. ioybar.eq.1) then ! we need ybar for error too 287 if (ivort == 1) then 288 allocate(ybar(nshg,17)) ! more space for vorticity if requested 289 else 290 allocate(ybar(nshg,13)) 291 endif 292 ybar = zero ! Initialize ybar to zero, which is essential 293 endif 294 295 if(ivort == 1) then 296 allocate(strain(nshg,6)) 297 allocate(vorticity(nshg,5)) 298 endif 299 300 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 301 allocate(wallssVec(nshg,3)) 302 if (ioybar .eq. 1) then 303 allocate(wallssVecbar(nshg,3)) 304 wallssVecbar = zero ! Initialization important if mean wss computed 305 endif 306 endif 307 308! both nstepsincycle and nphasesincycle needs to be set 309 if(nstepsincycle.eq.0) nphasesincycle = 0 310 if(nphasesincycle.ne.0) then 311! 
& allocate(yphbar(nshg,5,nphasesincycle)) 312 if (ivort == 1) then 313 allocate(yphbar(nshg,15,nphasesincycle)) ! more space for vorticity 314 else 315 allocate(yphbar(nshg,11,nphasesincycle)) 316 endif 317 yphbar = zero 318 endif 319 320!MR CHANGE END 321 322 vbc_prof(:,1:3) = BC(:,3:5) 323 if(iramp.eq.1) then 324 call BCprofileInit(vbc_prof,x) 325 endif 326 327c 328c.... ---------------> initialize LesLib Library <--------------- 329c 330c.... assign parameter values 331c 332 do i = 1, 100 333 numeqns(i) = i 334 enddo 335 nKvecs = Kspace 336 prjFlag = iprjFlag 337 presPrjFlag = ipresPrjFlag 338 verbose = iverbose 339c 340c.... determine how many scalar equations we are going to need to solve 341c 342 nsolt=mod(impl(1),2) ! 1 if solving temperature 343 nsclrsol=nsolt+nsclr ! total number of scalars solved At 344 ! some point we probably want to create 345 ! a map, considering stepseq(), to find 346 ! what is actually solved and only 347 ! dimension lhs to the appropriate 348 ! size. (see 1.6.1 and earlier for a 349 ! "failed" attempt at this). 350 351 352 nsolflow=mod(impl(1),100)/10 ! 1 if solving flow 353 354c 355c.... Now, call lesNew routine to initialize 356c memory space 357c 358 call genadj(colm, rowp, icnt ) ! preprocess the adjacency list 359 360 nnz_tot=icnt ! this is exactly the number of non-zero blocks on 361 ! this proc 362 363 if (nsolflow.eq.1) then ! start of setup for the flow 364 lesId = numeqns(1) 365 eqnType = 1 366 nDofs = 4 367 368!-------------------------------------------------------------------- 369! Rest of configuration of svLS is added here, where we have LHS 370! pointers 371 372 nPermDims = 1 373 nTmpDims = 1 374 375 allocate (lhsP(4,nnz_tot)) 376 allocate (lhsK(9,nnz_tot)) 377 378! Setting up svLS or leslib for flow 379 380 IF (svLSFlag .EQ. 1) THEN 381#ifdef HAVE_SVLS 382 IF(nPrjs.eq.0) THEN 383 svLSType=2 !GMRES if borrowed ACUSIM projection vectors variable set to zero 384 ELSE 385 svLSType=3 !NS solver 386 ENDIF 387! 
reltol for the NSSOLVE is the stop criterion on the outer loop 388! reltolIn is (eps_GM, eps_CG) from the CompMech paper 389! for now we are using 390! Tolerance on ACUSIM Pressure Projection for CG and 391! Tolerance on Momentum Equations for GMRES 392! also using Kspaceand maxIters from setup for ACUSIM 393! 394 eps_outer=40.0*epstol(1) !following papers soggestion for now 395 CALL svLS_LS_CREATE(svLS_ls, svLSType, dimKry=Kspace, 396 2 relTol=eps_outer, relTolIn=(/epstol(1),prestol/), 397 3 maxItr=maxIters, 398 4 maxItrIn=(/maxIters,maxIters/)) 399 400 CALL svLS_COMMU_CREATE(communicator, MPI_COMM_WORLD) 401 nNo=nshg 402 gnNo=nshgt 403 IF (ipvsq .GE. 2) THEN 404 405#if((VER_CORONARY == 1)&&(VER_CLOSEDLOOP == 1)) 406 svLS_nFaces = 1 + numResistSrfs + numNeumannSrfs 407 2 + numImpSrfs + numRCRSrfs + numCORSrfs 408#elif((VER_CORONARY == 1)&&(VER_CLOSEDLOOP == 0)) 409 svLS_nFaces = 1 + numResistSrfs 410 2 + numImpSrfs + numRCRSrfs + numCORSrfs 411#elif((VER_CORONARY == 0)&&(VER_CLOSEDLOOP == 1)) 412 svLS_nFaces = 1 + numResistSrfs + numNeumannSrfs 413 2 + numImpSrfs + numRCRSrfs 414#else 415 svLS_nFaces = 1 + numResistSrfs 416 2 + numImpSrfs + numRCRSrfs 417#endif 418 419 ELSE 420 svLS_nFaces = 1 !not sure about this...looks like 1 means 0 for array size issues 421 END IF 422 423 CALL svLS_LHS_CREATE(svLS_lhs, communicator, gnNo, nNo, 424 2 nnz_tot, ltg, colm, rowp, svLS_nFaces) 425 426 faIn = 1 427 facenNo = 0 428 DO i=1, nshg 429 IF (IBITS(iBC(i),3,3) .NE. 0) facenNo = facenNo + 1 430 END DO 431 ALLOCATE(gNodes(facenNo), sV(nsd,facenNo)) 432 sV = 0D0 433 j = 0 434 DO i=1, nshg 435 IF (IBITS(iBC(i),3,3) .NE. 
0) THEN 436 j = j + 1 437 gNodes(j) = i 438 IF (.NOT.BTEST(iBC(i),3)) sV(1,j) = 1D0 439 IF (.NOT.BTEST(iBC(i),4)) sV(2,j) = 1D0 440 IF (.NOT.BTEST(iBC(i),5)) sV(3,j) = 1D0 441 END IF 442 END DO 443 CALL svLS_BC_CREATE(svLS_lhs, faIn, facenNo, 444 2 nsd, BC_TYPE_Dir, gNodes, sV) 445 DEALLOCATE(gNodes) 446 DEALLOCATE(sV) 447#else 448 if(myrank.eq.0) write(*,*) 'your input requests svLS but your cmake did not build for it' 449 call error('itrdrv ','nosVLS',svLSFlag) 450#endif 451 ENDIF 452 453 if(leslib.eq.1) then 454#ifdef HAVE_LESLIB 455!-------------------------------------------------------------------- 456 call myfLesNew( lesId, 41994, 457 & eqnType, 458 & nDofs, minIters, maxIters, 459 & nKvecs, prjFlag, nPrjs, 460 & presPrjFlag, nPresPrjs, epstol(1), 461 & prestol, verbose, statsflow, 462 & nPermDims, nTmpDims, servername ) 463 464 allocate (aperm(nshg,nPermDims)) 465 allocate (atemp(nshg,nTmpDims)) 466 call readLesRestart( lesId, aperm, nshg, myrank, lstep, 467 & nPermDims ) 468#else 469 if(myrank.eq.0) write(*,*) 'your input requests leslib but your cmake did not build for it' 470 call error('itrdrv ','nolslb',leslib) 471#endif 472 endif !leslib=1 473 474 else ! not solving flow just scalar 475 nPermDims = 0 476 nTempDims = 0 477 endif 478 479 480 if(nsclrsol.gt.0) then 481 do isolsc=1,nsclrsol 482 lesId = numeqns(isolsc+1) 483 eqnType = 2 484 nDofs = 1 485 presPrjflag = 0 486 nPresPrjs = 0 487 prjFlag = 1 488 indx=isolsc+2-nsolt ! complicated to keep epstol(2) for 489 ! temperature followed by scalars 490! Setting up svLS or leslib for scalar 491#ifdef HAVE_SVLS 492 IF (svLSFlag .EQ. 1) THEN 493 svLSType=2 !only option for scalars 494! reltol for the GMRES is the stop criterion 495! also using Kspaceand maxIters from setup for ACUSIM 496! 
497 CALL svLS_LS_CREATE(svLS_ls_S(isolsc), svLSType, dimKry=Kspace, 498 2 relTol=epstol(indx), 499 3 maxItr=maxIters 500 4 ) 501 502 CALL svLS_COMMU_CREATE(communicator_S(isolsc), MPI_COMM_WORLD) 503 504 svLS_nFaces = 1 !not sure about this...should try it with zero 505 506 CALL svLS_LHS_CREATE(svLS_lhs_S(isolsc), communicator_S(isolsc), gnNo, nNo, 507 2 nnz_tot, ltg, colm, rowp, svLS_nFaces) 508 509 faIn = 1 510 facenNo = 0 511 ib=5+isolsc 512 DO i=1, nshg 513 IF (btest(iBC(i),ib)) facenNo = facenNo + 1 514 END DO 515 ALLOCATE(gNodes(facenNo), sV(1,facenNo)) 516 sV = 0D0 517 j = 0 518 DO i=1, nshg 519 IF (btest(iBC(i),ib)) THEN 520 j = j + 1 521 gNodes(j) = i 522 END IF 523 END DO 524 525 CALL svLS_BC_CREATE(svLS_lhs_S(isolsc), faIn, facenNo, 526 2 1, BC_TYPE_Dir, gNodes, sV(1,:)) 527 DEALLOCATE(gNodes) 528 DEALLOCATE(sV) 529 530 if( isolsc.eq.1) then ! if multiple scalars make sure done once 531 allocate (apermS(1,1,1)) 532 allocate (atempS(1,1)) !they can all share this 533 endif 534 ENDIF !svLS handing scalar solve 535#endif 536 537 538#ifdef HAVE_LESLIB 539 if (leslib.eq.1) then 540 call myfLesNew( lesId, 41994, 541 & eqnType, 542 & nDofs, minIters, maxIters, 543 & nKvecs, prjFlag, nPrjs, 544 & presPrjFlag, nPresPrjs, epstol(indx), 545 & prestol, verbose, statssclr, 546 & nPermDimsS, nTmpDimsS, servername ) 547 endif 548#endif 549 enddo !loop over scalars to solve (not yet worked out for multiple svLS solves 550 allocate (lhsS(nnz_tot,nsclrsol)) 551#ifdef HAVE_LESLIB 552 if(leslib.eq.1) then ! we just prepped scalar solves for leslib so allocate arrays 553c 554c Assume all scalars have the same size needs 555c 556 allocate (apermS(nshg,nPermDimsS,nsclrsol)) 557 allocate (atempS(nshg,nTmpDimsS)) !they can all share this 558 endif 559#endif 560c 561c actually they could even share with atemp but leave that for later 562c 563 else !no scalar solves at all so zero dims not used 564 nPermDimsS = 0 565 nTmpDimsS = 0 566 endif 567c 568c... 
prepare lumped mass if needed 569c 570 if((flmpr.ne.0).or.(flmpl.ne.0)) call genlmass(x, shp,shgl) 571c 572c.... -----------------> End of initialization <----------------- 573c 574c.....open the necessary files to gather time series 575c 576 lstep0 = lstep+1 577 nsteprcr = nstep(1)+lstep 578c 579c.... loop through the time sequences 580c 581 582 583 do 3000 itsq = 1, ntseq 584 itseq = itsq 585 586CAD tcorecp1 = second(0) 587CAD tcorewc1 = second(-1) 588c 589c.... set up the time integration parameters 590c 591 nstp = nstep(itseq) 592 nitr = niter(itseq) 593 LCtime = loctim(itseq) 594 dtol(:)= deltol(itseq,:) 595 596 call itrSetup ( y, acold ) 597c 598c...initialize the coefficients for the impedance convolution, 599c which are functions of alphaf so need to do it after itrSetup 600 if(numImpSrfs.gt.zero) then 601 call calcImpConvCoef (numImpSrfs, ntimeptpT) 602 endif 603c 604c...initialize the initial condition P(0)-RQ(0)-Pd(0) for RCR BC 605c need ndsurf so should be after initNABI 606 if(numRCRSrfs.gt.zero) then 607 call calcRCRic(y,nsrflistRCR,numRCRSrfs) 608 endif 609c 610c find the last solve of the flow in the step sequence so that we will 611c know when we are at/near end of step 612c 613c ilast=0 614 nitr=0 ! count number of flow solves in a step (# of iterations) 615 do i=1,seqsize 616 if(stepseq(i).eq.0) nitr=nitr+1 617 enddo 618 619 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 620 tcorecp(:) = zero ! used in solfar.f (solflow) 621 tcorecpscal(:) = zero ! used in solfar.f (solflow) 622 if(myrank.eq.0) then 623 tcorecp1 = TMRC() 624 endif 625 626c 627c.... loop through the time steps 628c 629 istop=0 630 rmub=datmat(1,2,1) 631 if(rmutarget.gt.0) then 632 rmue=rmutarget 633 else 634 rmue=datmat(1,2,1) ! keep constant 635 endif 636 637 if(iramp.eq.1) then 638 call BCprofileScale(vbc_prof,BC,yold) ! fix the yold values to the reset BC 639 isclr=1 ! 
fix scalar 640 do isclr=1,nsclr 641 call itrBCSclr(yold,ac,iBC,BC,iper,ilwork) 642 enddo 643 endif 644 645 do 2000 istp = 1, nstp 646 if(iramp.eq.1) 647 & call BCprofileScale(vbc_prof,BC,yold) 648 649 call rerun_check(stopjob) 650 if(myrank.eq.master) write(*,*) 651 & 'stopjob,lstep,istep', stopjob,lstep,istep 652 if(stopjob.eq.lstep) then 653 stopjob=-2 ! this is the code to finish 654 if ((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) then 655 if(myrank.eq.master) write(*,*) 656 & 'line 473 says last step written so exit' 657 goto 2002 ! the step was written last step so just exit 658 else 659 if(myrank.eq.master) 660 & write(*,*) 'line 473 says last step not written' 661 istep=nstp ! have to do this so that solution will be written 662 goto 2001 663 endif 664 endif 665 666 xi=istp*1.0/nstp 667 datmat(1,2,1)=rmub*(1.0-xi)+xi*rmue 668c write(*,*) "current mol. visc = ", datmat(1,2,1) 669c.... if we have time varying boundary conditions update the values of BC. 670c these will be for time step n+1 so use lstep+1 671c 672 if(itvn.gt.0) call BCint((lstep+1)*Delt(1), shp, shgl, 673 & shpb, shglb, x, BC, iBC) 674 675c 676c ... calculate the pressure contribution that depends on the history for the Imp. BC 677c 678 if(numImpSrfs.gt.0) then 679 call pHist(poldImp,QHistImp,ImpConvCoef, 680 & ntimeptpT,numImpSrfs) 681 if (myrank.eq.master) 682 & write(8177,*) (poldImp(n), n=1,numImpSrfs) 683 endif 684c 685c ... calc the pressure contribution that depends on the history for the RCR BC 686c 687 if(numRCRSrfs.gt.0) then 688 call CalcHopRCR (Delt(itseq), lstep, numRCRSrfs) 689 call CalcRCRConvCoef(lstep,numRCRSrfs) 690 call pHist(poldRCR,QHistRCR,RCRConvCoef,nsteprcr, 691 & numRCRSrfs) 692 if (myrank.eq.master) 693 & write(8177,*) (poldRCR(n), n=1,numRCRSrfs) 694 endif 695c 696c Decay of scalars 697c 698 if(nsclr.gt.0 .and. 
tdecay.ne.1) then 699 yold(:,6:ndof)=y(:,6:ndof)*tdecay 700 BC(:,7:6+nsclr)= BC(:,7:6+nsclr)*tdecay 701 endif 702 703 if(nosource.eq.1) BC(:,7:6+nsclr)= BC(:,7:6+nsclr)*0.8 704 705 706 if(iLES.gt.0) then !complicated stuff has moved to 707 !routine below 708 call lesmodels(yold, acold, shgl, shp, 709 & iper, ilwork, rowp, colm, 710 & nsons, ifath, x, 711 & iBC, BC) 712 713 714 endif 715 716c.... set traction BCs for modeled walls 717c 718 if (itwmod.ne.0) then 719 call asbwmod(yold, acold, x, BC, iBC, 720 & iper, ilwork, ifath, velbar) 721 endif 722 723!MR CHANGE 724c 725c.... Determine whether the vorticity field needs to be computed for this time step or not 726c 727 icomputevort = 0 728 if (ivort == 1) then ! Print vorticity = True in solver.inp 729 ! We then compute the vorticity only if we 730 ! 1) we write an intermediate checkpoint 731 ! 2) we reach the last time step and write the last checkpoint 732 ! 3) we accumulate statistics in ybar for every time step 733 ! BEWARE: we need here lstep+1 and istep+1 because the lstep and 734 ! istep gets incremened after the flowsolve, further below 735 if (((irs .ge. 1) .and. (mod(lstep+1, ntout) .eq. 0)) .or. 736 & istep+1.eq.nstep(itseq) .or. ioybar == 1) then 737 icomputevort = 1 738 endif 739 endif 740 741! write(*,*) 'icomputevort: ',icomputevort, ' - istep: ', 742! & istep,' - nstep(itseq):',nstep(itseq),'- lstep:', 743! & lstep, '- ntout:', ntout 744!MR CHANGE 745 746c 747c.... -----------------------> predictor phase <----------------------- 748c 749 call itrPredict(yold, y, acold, ac , uold, u, iBC) 750 call itrBC (y, ac, iBC, BC, iper,ilwork) 751 752 if(nsolt.eq.1) then 753 isclr=0 754 call itrBCSclr (y, ac, iBC, BC, iper, ilwork) 755 endif 756 do isclr=1,nsclr 757 call itrBCSclr (y, ac, iBC, BC, iper, ilwork) 758 enddo 759 iter=0 760 ilss=0 ! this is a switch thrown on first solve of LS redistance 761 do istepc=1,seqsize 762 icode=stepseq(istepc) 763 if(mod(icode,5).eq.0) then ! 
this is a solve 764 isolve=icode/10 765 if(icode.eq.0) then ! flow solve (encoded as 0) 766c 767 iter = iter+1 768 ifuncs(1) = ifuncs(1) + 1 769c 770 Force(1) = zero 771 Force(2) = zero 772 Force(3) = zero 773 HFlux = zero 774 lhs = 1 - min(1,mod(ifuncs(1)-1,LHSupd(1))) 775 776 call SolFlow(y, ac, u, 777 & yold, acold, uold, 778 & x, iBC, 779 & BC, res, 780 & nPermDims, nTmpDims, aperm, 781 & atemp, iper, 782 & ilwork, shp, shgl, 783 & shpb, shglb, rowp, 784 & colm, lhsK, lhsP, 785 & solinc, rerr, tcorecp, 786 & GradV, sumtime 787#ifdef HAVE_SVLS 788 & ,svLS_lhs, svLS_ls, svLS_nFaces) 789#else 790 & ) 791#endif 792 793 else ! scalar type solve 794 if (icode.eq.5) then ! Solve for Temperature 795 ! (encoded as (nsclr+1)*10) 796 isclr=0 797 ifuncs(2) = ifuncs(2) + 1 798 j=1 799 else ! solve a scalar (encoded at isclr*10) 800 isclr=isolve 801 ifuncs(isclr+2) = ifuncs(isclr+2) + 1 802 j=isclr+nsolt 803 if((iLSet.eq.2).and.(ilss.eq.0) 804 & .and.(isclr.eq.2)) then 805 ilss=1 ! throw switch (once per step) 806 y(:,7)=y(:,6) ! redistance field initialized 807 ac(:,7) = zero 808 call itrBCSclr ( y, ac, iBC, BC, iper, 809 & ilwork) 810c 811c....store the flow alpha, gamma parameter values and assigm them the 812c....Backward Euler parameters to solve the second levelset scalar 813c 814 alfit=alfi 815 gamit=gami 816 almit=almi 817 Deltt=Delt(1) 818 Dtglt=Dtgl 819 alfi = 1 820 gami = 1 821 almi = 1 822c Delt(1)= Deltt ! Give a pseudo time step 823 Dtgl = one / Delt(1) 824 endif ! level set eq. 2 825 endif ! 
deciding between temperature and scalar 826 827 lhs = 1 - min(1,mod(ifuncs(isclr+2)-1, 828 & LHSupd(isclr+2))) 829 830 call SolSclr(y, ac, u, 831 & yold, acold, uold, 832 & x, iBC, 833 & BC, nPermDimsS,nTmpDimsS, 834 & apermS(1,1,j), atempS, iper, 835 & ilwork, shp, shgl, 836 & shpb, shglb, rowp, 837 & colm, lhsS(1,j), 838 & solinc(1,isclr+5), tcorecpscal 839#ifdef HAVE_SVLS 840 & ,svLS_lhs_S(isclr), svLS_ls_S(isclr), svls_nfaces) 841#else 842 & ) 843#endif 844 845 846 endif ! end of scalar type solve 847 848 else ! this is an update (mod did not equal zero) 849 iupdate=icode/10 ! what to update 850 if(icode.eq.1) then !update flow 851 call itrCorrect ( y, ac, u, solinc, iBC) 852 call itrBC (y, ac, iBC, BC, iper, ilwork) 853 else ! update scalar 854 isclr=iupdate !unless 855 if(icode.eq.6) isclr=0 856 if(iRANS.lt.-100)then ! RANS 857 call itrCorrectSclrPos(y,ac,solinc(1,isclr+5)) 858 else 859 call itrCorrectSclr (y, ac, solinc(1,isclr+5)) 860 endif 861 if (ilset.eq.2 .and. isclr.eq.2) then 862 if (ivconstraint .eq. 1) then 863 call itrBCSclr ( y, ac, iBC, BC, iper, 864 & ilwork) 865c 866c ... applying the volume constraint on second level set scalar 867c 868 call solvecon (y, x, iBC, BC, 869 & iper, ilwork, shp, shgl) 870c 871 endif ! end of volume constraint calculations 872 endif ! end of redistance calculations 873c 874 call itrBCSclr ( y, ac, iBC, BC, iper, 875 & ilwork) 876 endif ! end of flow or scalar update 877 endif ! end of switch between solve or update 878 enddo ! loop over sequence in step 879c 880c 881c.... obtain the time average statistics 882c 883 if (ioform .eq. 
2) then 884 885 call stsGetStats( y, yold, ac, acold, 886 & u, uold, x, 887 & shp, shgl, shpb, shglb, 888 & iBC, BC, iper, ilwork, 889 & rowp, colm, lhsK, lhsP ) 890 endif 891 892c 893c Find the solution at the end of the timestep and move it to old 894c 895c 896c ...First to reassign the parameters for the original time integrator scheme 897c 898 if((iLSet.eq.2).and.(ilss.eq.1)) then 899 alfi =alfit 900 gami =gamit 901 almi =almit 902 Delt(1)=Deltt 903 Dtgl =Dtglt 904 endif 905 call itrUpdate( yold, acold, uold, y, ac, u) 906 call itrBC (yold, acold, iBC, BC, iper,ilwork) 907 908 istep = istep + 1 909 lstep = lstep + 1 910c 911c .. Print memory consumption on BGQ 912c 913 call printmeminfo("itrdrv"//char(0)) 914 915c 916c .. Compute vorticity 917c 918 if ( icomputevort == 1) then 919 920 ! vorticity components and magnitude 921 vorticity(:,1) = GradV(:,8)-GradV(:,6) !omega_x 922 vorticity(:,2) = GradV(:,3)-GradV(:,7) !omega_y 923 vorticity(:,3) = GradV(:,4)-GradV(:,2) !omega_z 924 vorticity(:,4) = sqrt( vorticity(:,1)*vorticity(:,1) 925 & + vorticity(:,2)*vorticity(:,2) 926 & + vorticity(:,3)*vorticity(:,3) ) 927 ! Q 928 strain(:,1) = GradV(:,1) !S11 929 strain(:,2) = 0.5*(GradV(:,2)+GradV(:,4)) !S12 930 strain(:,3) = 0.5*(GradV(:,3)+GradV(:,7)) !S13 931 strain(:,4) = GradV(:,5) !S22 932 strain(:,5) = 0.5*(GradV(:,6)+GradV(:,8)) !S23 933 strain(:,6) = GradV(:,9) !S33 934 935 vorticity(:,5) = 0.25*( vorticity(:,4)*vorticity(:,4) !Q 936 & - 2.0*( strain(:,1)*strain(:,1) 937 & + 2* strain(:,2)*strain(:,2) 938 & + 2* strain(:,3)*strain(:,3) 939 & + strain(:,4)*strain(:,4) 940 & + 2* strain(:,5)*strain(:,5) 941 & + strain(:,6)*strain(:,6))) 942 943 endif 944c 945c.... update and the aerodynamic forces 946c 947 call forces ( yold, ilwork ) 948 949c 950c .. write out the instantaneous solution 951c 9522001 continue ! we could get here by 2001 label if user requested stop 953 if (((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) .or. 
954 & istep.eq.nstep(itseq)) then 955 956!so that we can see progress in force file close it so that it flushes 957!and then reopen in append mode 958 959 close(iforce) 960 open (unit=iforce, file=fforce, position='append') 961 962! Call to restar() will open restart file in write mode (and not append mode) 963! that is needed as other fields are written in append mode 964 965 call restar ('out ', yold ,ac) 966 if(ideformwall == 1) then 967! call write_displ(myrank, lstep, nshg, 3, uold ) 968 if(myrank.eq.master) then 969 write(*,*) 'ideformwall is 1 and is a dead code path... exiting' 970 endif 971 stop 972 endif 973 974 if(ivort == 1) then 975 call write_field(myrank,'a','vorticity',9,vorticity, 976 & 'd',nshg,5,lstep) 977 endif 978 979 call printmeminfo("itrdrv after checkpoint"//char(0)) 980 else if(stopjob.eq.-2) then 981 if(myrank.eq.master) then 982 write(*,*) 'line 755 says no write before stopping' 983 write(*,*) 'istep,nstep,irs',istep,nstep(itseq),irs 984 endif 985 endif !just the instantaneous stuff for videos 986c 987c.... compute the consistent boundary flux 988c 989 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 990 call Bflux ( yold, acold, uold, x, 991 & shp, shgl, shpb, 992 & shglb, ilwork, iBC, 993 & BC, iper, wallssVec) 994 endif 995 996 if(stopjob.eq.-2) goto 2003 997 998 999c 1000c ... update the flow history for the impedance convolution, filter it and write it out 1001c 1002 if(numImpSrfs.gt.zero) then 1003 call UpdHistConv(y,nsrflistImp,numImpSrfs) !uses Delt(1) 1004 endif 1005 1006c 1007c ... update the flow history for the RCR convolution 1008c 1009 if(numRCRSrfs.gt.zero) then 1010 call UpdHistConv(y,nsrflistRCR,numRCRSrfs) !uses lstep 1011 endif 1012 1013 1014c... 
dump TIME SERIES 1015 1016 if (exts) then 1017 if (mod(lstep-1,freq).eq.0) then 1018 1019 if (numpe > 1) then 1020 do jj = 1, ntspts 1021 vartssoln((jj-1)*ndof+1:jj*ndof)=varts(jj,:) 1022 ivarts=zero 1023 enddo 1024 do k=1,ndof*ntspts 1025 if(vartssoln(k).ne.zero) ivarts(k)=1 1026 enddo 1027 1028! call MPI_REDUCE(vartssoln, vartssolng, ndof*ntspts, 1029! & MPI_DOUBLE_PRECISION, MPI_SUM, master, 1030! & MPI_COMM_WORLD, ierr) 1031 1032 call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1033 call MPI_ALLREDUCE(vartssoln, vartssolng, 1034 & ndof*ntspts, 1035 & MPI_DOUBLE_PRECISION, MPI_SUM, 1036 & MPI_COMM_WORLD, ierr) 1037 1038! call MPI_REDUCE(ivarts, ivartsg, ndof*ntspts, 1039! & MPI_INTEGER, MPI_SUM, master, 1040! & MPI_COMM_WORLD, ierr) 1041 1042 call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1043 call MPI_ALLREDUCE(ivarts, ivartsg, ndof*ntspts, 1044 & MPI_INTEGER, MPI_SUM, 1045 & MPI_COMM_WORLD, ierr) 1046 1047! if (myrank.eq.zero) then 1048 do jj = 1, ntspts 1049 1050 if(myrank .eq. iv_rank(jj)) then 1051 ! No need to update all varts components, only the one treated by the expected rank 1052 ! Note: keep varts as a vector, as multiple probes could be treated by one rank 1053 indxvarts = (jj-1)*ndof 1054 do k=1,ndof 1055 if(ivartsg(indxvarts+k).ne.0) then ! none of the vartssoln(parts) were non zero 1056 varts(jj,k)=vartssolng(indxvarts+k)/ 1057 & ivartsg(indxvarts+k) 1058 endif 1059 enddo 1060 endif !only if myrank eq iv_rank(jj) 1061 enddo 1062! endif !only on master 1063 endif !only if numpe > 1 1064 1065! if (myrank.eq.zero) then 1066 do jj = 1, ntspts 1067 if(myrank .eq. iv_rank(jj)) then 1068 ifile = 1000+jj 1069 write(ifile,555) lstep, (varts(jj,k),k=1,ndof) !Beware of format 555 - check ndof 1070c call flush(ifile) 1071 if (((irs .ge. 1) .and. 1072 & (mod(lstep, ntout) .eq. 
0))) then 1073 close(ifile) 1074 fvarts='varts/varts' 1075 fvarts=trim(fvarts)//trim(cname2(jj)) 1076 fvarts=trim(fvarts)//trim(cname2(lskeep)) 1077 fvarts=trim(fvarts)//'.dat' 1078 fvarts=trim(fvarts) 1079 open(unit=ifile, file=fvarts, 1080 & position='append') 1081 endif !only when dumping restart 1082 endif 1083 enddo 1084! endif !only on master 1085 1086 varts(:,:) = zero ! reset the array for next step 1087 1088 1089!555 format(i6,5(2x,E12.5e2)) 1090555 format(i6,6(2x,E20.12e2)) !assuming ndof = 6 here 1091 1092 endif 1093 endif 1094 1095 if((irscale.ge.0).or.(itwmod.gt.0)) 1096 & call getvel (yold, ilwork, iBC, 1097 & nsons, ifath, velbar) 1098 1099 if((irscale.ge.0).and.(myrank.eq.master)) then 1100 call genscale(yold, x, iper, 1101 & iBC, ifath, velbar, 1102 & nsons) 1103 endif 1104c 1105c.... print out results. 1106c 1107 ntoutv=max(ntout,100) ! velb is not needed so often 1108 if ((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) then 1109 if( (mod(lstep, ntoutv) .eq. 0) .and. 1110 & ((irscale.ge.0).or.(itwmod.gt.0) .or. 1111 & ((nsonmax.eq.1).and.(iLES.gt.0)))) 1112 & call rwvelb ('out ', velbar ,ifail) 1113 endif 1114c 1115c.... end of the NSTEP and NTSEQ loops 1116c 1117 1118 1119c 1120c.... -------------------> error calculation <----------------- 1121c 1122 if(ierrcalc.eq.1 .or. ioybar.eq.1) then 1123c$$$c 1124c$$$c compute average 1125c$$$c 1126c$$$ tfact=one/istep 1127c$$$ ybar =tfact*yold + (one-tfact)*ybar 1128 1129c compute average 1130c ybar(:,1:3) are average velocity components 1131c ybar(:,4) is average pressure 1132c ybar(:,5) is average speed 1133c ybar(:,6:8) is average of sq. of each vel. component 1134c ybar(:,9) is average of sq. of pressure 1135c ybar(:,10:12) is average of cross vel. 
components : uv, uw and vw 1136c averaging procedure justified only for identical time step sizes 1137c ybar(:,13) is average of eddy viscosity 1138c ybar(:,14:16) is average vorticity components 1139c ybar(:,17) is average vorticity magnitude 1140c istep is number of time step 1141c 1142 icollectybar = 0 1143 if(nphasesincycle.eq.0 .or. 1144 & istep.gt.ncycles_startphaseavg*nstepsincycle) then 1145 icollectybar = 1 1146 if((istep-1).eq.ncycles_startphaseavg*nstepsincycle) 1147 & istepsinybar = 0 ! init. to zero in first cycle in avg. 1148 endif 1149 1150 if(icollectybar.eq.1) then 1151 istepsinybar = istepsinybar+1 1152 tfact=one/istepsinybar 1153 1154 if(myrank.eq.master .and. nphasesincycle.ne.0 .and. 1155 & mod((istep-1),nstepsincycle).eq.0) 1156 & write(*,*)'nsamples in phase average:',istepsinybar 1157 1158c ybar to contain the averaged ((u,v,w),p)-fields 1159c and speed average, i.e., sqrt(u^2+v^2+w^2) 1160c and avg. of sq. terms including 1161c u^2, v^2, w^2, p^2 and cross terms of uv, uw and vw 1162 1163 ybar(:,1) = tfact*yold(:,1) + (one-tfact)*ybar(:,1) 1164 ybar(:,2) = tfact*yold(:,2) + (one-tfact)*ybar(:,2) 1165 ybar(:,3) = tfact*yold(:,3) + (one-tfact)*ybar(:,3) 1166 ybar(:,4) = tfact*yold(:,4) + (one-tfact)*ybar(:,4) 1167 ybar(:,5) = tfact*sqrt(yold(:,1)**2+yold(:,2)**2+ 1168 & yold(:,3)**2) + (one-tfact)*ybar(:,5) 1169 ybar(:,6) = tfact*yold(:,1)**2 + 1170 & (one-tfact)*ybar(:,6) 1171 ybar(:,7) = tfact*yold(:,2)**2 + 1172 & (one-tfact)*ybar(:,7) 1173 ybar(:,8) = tfact*yold(:,3)**2 + 1174 & (one-tfact)*ybar(:,8) 1175 ybar(:,9) = tfact*yold(:,4)**2 + 1176 & (one-tfact)*ybar(:,9) 1177 ybar(:,10) = tfact*yold(:,1)*yold(:,2) + !uv 1178 & (one-tfact)*ybar(:,10) 1179 ybar(:,11) = tfact*yold(:,1)*yold(:,3) + !uw 1180 & (one-tfact)*ybar(:,11) 1181 ybar(:,12) = tfact*yold(:,2)*yold(:,3) + !vw 1182 & (one-tfact)*ybar(:,12) 1183!MR CHANGE 1184 if(nsclr.gt.0) !nut 1185 & ybar(:,13) = tfact*yold(:,6) + (one-tfact)*ybar(:,13) 1186 1187 if(ivort == 1) then 
!vorticity 1188 ybar(:,14) = tfact*vorticity(:,1) + 1189 & (one-tfact)*ybar(:,14) 1190 ybar(:,15) = tfact*vorticity(:,2) + 1191 & (one-tfact)*ybar(:,15) 1192 ybar(:,16) = tfact*vorticity(:,3) + 1193 & (one-tfact)*ybar(:,16) 1194 ybar(:,17) = tfact*vorticity(:,4) + 1195 & (one-tfact)*ybar(:,17) 1196 endif 1197 1198 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 1199 wallssVecBar(:,1) = tfact*wallssVec(:,1) 1200 & +(one-tfact)*wallssVecBar(:,1) 1201 wallssVecBar(:,2) = tfact*wallssVec(:,2) 1202 & +(one-tfact)*wallssVecBar(:,2) 1203 wallssVecBar(:,3) = tfact*wallssVec(:,3) 1204 & +(one-tfact)*wallssVecBar(:,3) 1205 endif 1206!MR CHANGE END 1207 endif 1208c 1209c compute phase average 1210c 1211 if(nphasesincycle.ne.0 .and. 1212 & istep.gt.ncycles_startphaseavg*nstepsincycle) then 1213 1214c beginning of cycle is considered as ncycles_startphaseavg*nstepsincycle+1 1215 if((istep-1).eq.ncycles_startphaseavg*nstepsincycle) 1216 & icyclesinavg = 0 ! init. to zero in first cycle in avg. 1217 1218 ! find number of steps between phases 1219 nstepsbtwphase = nstepsincycle/nphasesincycle ! integer value 1220 if(mod(istep-1,nstepsincycle).eq.0) then 1221 iphase = 1 ! init. to one in beginning of every cycle 1222 icyclesinavg = icyclesinavg + 1 1223 endif 1224 1225 icollectphase = 0 1226 istepincycle = mod(istep,nstepsincycle) 1227 if(istepincycle.eq.0) istepincycle=nstepsincycle 1228 if(istepincycle.eq.iphase*nstepsbtwphase) then 1229 icollectphase = 1 1230 iphase = iphase+1 ! 
use 'iphase-1' below 1231 endif 1232 1233 if(icollectphase.eq.1) then 1234 tfactphase = one/icyclesinavg 1235 1236 if(myrank.eq.master) then 1237 write(*,*) 'nsamples in phase ',iphase-1,': ', 1238 & icyclesinavg 1239 endif 1240 1241 yphbar(:,1,iphase-1) = tfactphase*yold(:,1) + 1242 & (one-tfactphase)*yphbar(:,1,iphase-1) 1243 yphbar(:,2,iphase-1) = tfactphase*yold(:,2) + 1244 & (one-tfactphase)*yphbar(:,2,iphase-1) 1245 yphbar(:,3,iphase-1) = tfactphase*yold(:,3) + 1246 & (one-tfactphase)*yphbar(:,3,iphase-1) 1247 yphbar(:,4,iphase-1) = tfactphase*yold(:,4) + 1248 & (one-tfactphase)*yphbar(:,4,iphase-1) 1249 yphbar(:,5,iphase-1) = tfactphase*sqrt(yold(:,1)**2 1250 & +yold(:,2)**2+yold(:,3)**2) + 1251 & (one-tfactphase)*yphbar(:,5,iphase-1) 1252!MR CHANGE 1253 yphbar(:,6,iphase-1) = 1254 & tfactphase*yold(:,1)*yold(:,1) 1255 & +(one-tfactphase)*yphbar(:,6,iphase-1) 1256 1257 yphbar(:,7,iphase-1) = 1258 & tfactphase*yold(:,1)*yold(:,2) 1259 & +(one-tfactphase)*yphbar(:,7,iphase-1) 1260 1261 yphbar(:,8,iphase-1) = 1262 & tfactphase*yold(:,1)*yold(:,3) 1263 & +(one-tfactphase)*yphbar(:,8,iphase-1) 1264 1265 yphbar(:,9,iphase-1) = 1266 & tfactphase*yold(:,2)*yold(:,2) 1267 & +(one-tfactphase)*yphbar(:,9,iphase-1) 1268 1269 yphbar(:,10,iphase-1) = 1270 & tfactphase*yold(:,2)*yold(:,3) 1271 & +(one-tfactphase)*yphbar(:,10,iphase-1) 1272 1273 yphbar(:,11,iphase-1) = 1274 & tfactphase*yold(:,3)*yold(:,3) 1275 & +(one-tfactphase)*yphbar(:,11,iphase-1) 1276 1277 if(ivort == 1) then 1278 yphbar(:,12,iphase-1) = 1279 & tfactphase*vorticity(:,1) 1280 & +(one-tfactphase)*yphbar(:,12,iphase-1) 1281 yphbar(:,13,iphase-1) = 1282 & tfactphase*vorticity(:,2) 1283 & +(one-tfactphase)*yphbar(:,13,iphase-1) 1284 yphbar(:,14,iphase-1) = 1285 & tfactphase*vorticity(:,3) 1286 & +(one-tfactphase)*yphbar(:,14,iphase-1) 1287 yphbar(:,15,iphase-1) = 1288 & tfactphase*vorticity(:,4) 1289 & +(one-tfactphase)*yphbar(:,15,iphase-1) 1290 endif 1291!MR CHANGE END 1292 endif 1293 endif 1294c 1295c 
compute rms 1296c 1297 if(icollectybar.eq.1) then 1298 rerr(:, 7)=rerr(:, 7)+(yold(:,1)-ybar(:,1))**2 1299 rerr(:, 8)=rerr(:, 8)+(yold(:,2)-ybar(:,2))**2 1300 rerr(:, 9)=rerr(:, 9)+(yold(:,3)-ybar(:,3))**2 1301 rerr(:,10)=rerr(:,10)+(yold(:,4)-ybar(:,4))**2 1302 endif 1303 endif 1304 2003 continue ! we get here if stopjob equals lstep and this jumped over 1305! the statistics computation because we have no new data to average in 1306! rather we are just trying to output the last state that was not already 1307! written 1308c 1309c.... ----------------------> Complete Restart Processing <---------------------- 1310c 1311! for now it is the same frequency but need to change this 1312! soon.... but don't forget to change the field counter in 1313! new_interface.cc 1314! 1315 if (((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) .or. 1316 & istep.eq.nstep(itseq)) then 1317 if ((irs .ge. 1) .and. ((mod(lstep, ntout) .ne. 0) .or. 1318 & (nstp .eq. 0))) then 1319 if( 1320 & ((irscale.ge.0).or.(itwmod.gt.0) .or. 
1321 & ((nsonmax.eq.1).and.iLES.gt.0))) 1322 & call rwvelb ('out ', velbar ,ifail) 1323 endif 1324 1325 lesId = numeqns(1) 1326 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1327 if(myrank.eq.0) then 1328 tcormr1 = TMRC() 1329 endif 1330 if((nsolflow.eq.1).and.(ipresPrjFlag.eq.1)) then 1331#ifdef HAVE_LESLIB 1332 call saveLesRestart( lesId, aperm , nshg, myrank, lstep, 1333 & nPermDims ) 1334 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1335 if(myrank.eq.0) then 1336 tcormr2 = TMRC() 1337 write(6,*) 'call saveLesRestart for projection and'// 1338 & 'pressure projection vectors', tcormr2-tcormr1 1339 endif 1340#endif 1341 endif 1342 1343 if(ierrcalc.eq.1) then 1344c 1345c.....smooth the error indicators 1346c 1347 do i=1,ierrsmooth 1348 call errsmooth( rerr, x, iper, ilwork, shp, shgl, iBC ) 1349 end do 1350 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1351 if(myrank.eq.0) then 1352 tcormr1 = TMRC() 1353 endif 1354 call write_error(myrank, lstep, nshg, 10, rerr ) 1355 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1356 if(myrank.eq.0) then 1357 tcormr2 = TMRC() 1358 write(6,*) 'Time to write the error fields to the disks', 1359 & tcormr2-tcormr1 1360 endif 1361 endif ! ierrcalc 1362 1363 if(ioybar.eq.1) then 1364 if(ivort == 1) then 1365 call write_field(myrank,'a','ybar',4, 1366 & ybar,'d',nshg,17,lstep) 1367 else 1368 call write_field(myrank,'a','ybar',4, 1369 & ybar,'d',nshg,13,lstep) 1370 endif 1371 1372 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 1373 call write_field(myrank,'a','wssbar',6, 1374 & wallssVecBar,'d',nshg,3,lstep) 1375 endif 1376 1377 if(nphasesincycle .gt. 
0) then 1378 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1379 if(myrank.eq.0) then 1380 tcormr1 = TMRC() 1381 endif 1382 do iphase=1,nphasesincycle 1383 if(ivort == 1) then 1384 call write_phavg2(myrank,'a','phase_average',13,iphase, 1385 & nphasesincycle,yphbar(:,:,iphase),'d',nshg,15,lstep) 1386 else 1387 call write_phavg2(myrank,'a','phase_average',13,iphase, 1388 & nphasesincycle,yphbar(:,:,iphase),'d',nshg,11,lstep) 1389 endif 1390 end do 1391 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1392 if(myrank.eq.0) then 1393 tcormr2 = TMRC() 1394 write(6,*) 'write all phase avg to the disks = ', 1395 & tcormr2-tcormr1 1396 endif 1397 endif !nphasesincyle 1398 endif !ioybar 1399 1400 if ( ( ihessian .eq. 1 ) .and. ( numpe < 2 ) )then 1401 uhess = zero 1402 gradu = zero 1403 tf = zero 1404 1405 do ku=1,nshg 1406 tf(ku,1) = x(ku,1)**3 1407 end do 1408 call hessian( yold, x, shp, shgl, iBC, 1409 & shpb, shglb, iper, ilwork, uhess, gradu ) 1410 1411 call write_hessian( uhess, gradu, nshg ) 1412 endif 1413 1414 if(iRANS.lt.0) then 1415 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1416 if(myrank.eq.0) then 1417 tcormr1 = TMRC() 1418 endif 1419 call write_field(myrank,'a','dwal',4,d2wall,'d', 1420 & nshg,1,lstep) 1421 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1422 if(myrank.eq.0) then 1423 tcormr2 = TMRC() 1424 write(6,*) 'Time to write dwal to the disks = ', 1425 & tcormr2-tcormr1 1426 endif 1427 endif !iRANS 1428 1429 endif ! write out complete restart state 1430 !next 2 lines are two ways to end early 1431 if(stopjob.eq.-2) goto 2002 1432 if(istop.eq.1000) goto 2002 ! stop when delta small (see rstatic) 1433 2000 continue 1434 2002 continue 1435 1436! done with time stepping so deallocate fields already written 1437! 1438 if(ioybar.eq.1) then 1439 deallocate(ybar) 1440 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 1441 deallocate(wallssVecbar) 1442 endif 1443 if(nphasesincycle .gt. 
0) then 1444 deallocate(yphbar) 1445 endif !nphasesincyle 1446 endif !ioybar 1447 if(ivort == 1) then 1448 deallocate(strain,vorticity) 1449 endif 1450 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 1451 deallocate(wallssVec) 1452 endif 1453 if(iRANS.lt.0) then 1454 deallocate(d2wall) 1455 endif 1456 1457 1458CAD tcorecp2 = second(0) 1459CAD tcorewc2 = second(-1) 1460 1461CAD write(6,*) 'T(core) cpu-wallclock = ',tcorecp2-tcorecp1, 1462CAD & tcorewc2-tcorewc1 1463 1464 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1465 if(myrank.eq.0) then 1466 tcorecp2 = TMRC() 1467 write(6,*) 'T(core) cpu = ',tcorecp2-tcorecp1 1468 write(6,*) '(Elm. form.',tcorecp(1), 1469 & ',Lin. alg. sol.',tcorecp(2),')' 1470 write(6,*) '(Elm. form. Scal.',tcorecpscal(1), 1471 & ',Lin. alg. sol. Scal.',tcorecpscal(2),')' 1472 write(6,*) '' 1473 1474 endif 1475 1476 call print_system_stats(tcorecp, tcorecpscal) 1477 call print_mesh_stats() 1478 call print_mpi_stats() 1479 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1480! return 1481c call MPI_Finalize() 1482c call MPI_ABORT(MPI_COMM_WORLD, ierr) 1483 1484 3000 continue 1485 1486 1487c 1488c.... close history and aerodynamic forces files 1489c 1490 if (myrank .eq. master) then 1491! close (ihist) 1492 close (iforce) 1493 close(76) 1494 if(numImpSrfs.gt.0 .or. numRCRSrfs.gt.0) then 1495 close (8177) 1496 endif 1497 endif 1498c 1499c.... close varts file for probes 1500c 1501 if(exts) then 1502 do jj=1,ntspts 1503 if (myrank == iv_rank(jj)) then 1504 close(1000+jj) 1505 endif 1506 enddo 1507 deallocate (ivarts) 1508 deallocate (ivartsg) 1509 deallocate (iv_rank) 1510 deallocate (vartssoln) 1511 deallocate (vartssolng) 1512 endif 1513 1514!MR CHANGE 1515 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 1516 if(myrank.eq.0) then 1517 write(*,*) 'itrdrv - done with aerodynamic forces' 1518 endif 1519!MR CHANGE 1520 1521 do isrf = 0,MAXSURF 1522! if ( nsrflist(isrf).ne.0 ) then 1523 if ( nsrflist(isrf).ne.0 .and. 
     &          myrank.eq.irankfilesforce(isrf)) then
c           close the per-surface force file owned by this rank
            iunit=60+isrf
            close(iunit)
         endif
      enddo

!MR CHANGE
      if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr)
      if(myrank.eq.0) then
         write(*,*) 'itrdrv - done with MAXSURF'
      endif
!MR CHANGE


    5 format(1X,F15.10,3X,F15.10,3X,F15.10,3X,F15.10)
  444 format(6(2x,e14.7))
c
c.... end
c
c.... release the linear-solver work arrays for the flow solve
      if(nsolflow.eq.1) then
         deallocate (lhsK)
         deallocate (lhsP)
c        aperm/atemp are only allocated on the leslib path, not svLS
         IF (svLSFlag .NE. 1) THEN
            deallocate (aperm)
            deallocate (atemp)
         ENDIF
      endif
c.... release the linear-solver work arrays for the scalar solves
      if(nsclrsol.gt.0) then
         deallocate (lhsS)
         deallocate (apermS)
         deallocate (atempS)
      endif

      if(iabc==1) deallocate(acs)

!MR CHANGE
      if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr)
      if(myrank.eq.0) then
         write(*,*) 'itrdrv - done - BACK TO process.f'
      endif
!MR CHANGE



      return
      end

c----------------------------------------------------------------------
c
c     lesmodels: driver for the dynamic LES model computations.
c
c     Dispatches on iLES (decoded as ilesmod=iLES/10) and the sub-model
c     flags (isubmod, i2filt, modlstats, idis) to the appropriate
c     dynamic-model coefficient routine.  Work arrays for the advanced
c     models (iLES>1) are allocated on entry and freed on exit.
c
c     y      (nshg,ndof)  : solution variables
c     ac     (nshg,ndof)  : acceleration variables
c     shp/shgl            : element shape functions / local gradients
c     iper, ilwork        : periodicity table and communication tasks
c     rowp, colm          : sparse-matrix structure
c     nsons, ifath        : homogeneous-direction averaging maps
c     x      (numnp,nsd)  : node coordinates
c     iBC, BC             : boundary-condition codes and values
c
c----------------------------------------------------------------------
      subroutine lesmodels(y,     ac,        shgl,      shp,
     &                     iper,  ilwork,    rowp,      colm,
     &                     nsons, ifath,     x,
     &                     iBC,   BC)

      include "common.h"

      real*8    y(nshg,ndof),              ac(nshg,ndof),
     &          x(numnp,nsd),
     &          BC(nshg,ndofBC)
      real*8    shp(MAXTOP,maxsh,MAXQPT),
     &          shgl(MAXTOP,nsd,maxsh,MAXQPT)
c
      integer   rowp(nshg,nnz),            colm(nshg+1),
     &          iBC(nshg),
     &          ilwork(nlwork),
     &          iper(nshg)
      dimension ifath(numnp),    nsons(nfath)

      real*8, allocatable, dimension(:) :: fwr2,fwr3,fwr4
      real*8, allocatable, dimension(:) :: stabdis,cdelsq1
      real*8, allocatable, dimension(:,:) :: xavegt, xavegt2,xavegt3

      if( (iLES.gt.1) ) then    ! Allocate Stuff for advanced LES models
         allocate (fwr2(nshg))
         allocate (fwr3(nshg))
         allocate (fwr4(nshg))
         allocate (xavegt(nfath,12))
         allocate (xavegt2(nfath,12))
         allocate (xavegt3(nfath,12))
         allocate (stabdis(nfath))
      endif

c.... get dynamic model coefficient
c
      ilesmod=iLES/10
c
c digit bit set filter rule, 10 bit set model
c
      if (ilesmod.eq.0) then    ! 0 < iLES< 10 => dyn. model calculated
                                ! at nodes based on discrete filtering


         if(isubmod.eq.2) then
            call SUPGdis(y,      ac,        shgl,      shp,
     &                   iper,   ilwork,
     &                   nsons,  ifath,     x,
     &                   iBC,    BC,  stabdis,  xavegt3)
         endif

         if( ((isubmod.eq.0).or.(isubmod.eq.2)))then ! If no
                                                     ! sub-model
                                                     ! or SUPG
                                                     ! model wanted

            if(i2filt.eq.0)then ! If simple filter

               if(modlstats .eq. 0) then ! If no model stats wanted
                  call getdmc (y,          shgl,      shp,
     &                         iper,       ilwork,    nsons,
     &                         ifath,      x)
               else             ! else get model stats
                  call stdfdmc (y,          shgl,      shp,
     &                          iper,       ilwork,    nsons,
     &                          ifath,      x)
               endif            ! end of stats if statement

            else                ! else if twice filtering

               call widefdmc(y,          shgl,      shp,
     &                       iper,       ilwork,    nsons,
     &                       ifath,      x)


            endif               ! end of simple filter if statement

         endif                  ! end of SUPG or no sub-model if statement


         if( (isubmod.eq.1) ) then ! If DFWR sub-model wanted
            call cdelBHsq (y,          shgl,      shp,
     &                     iper,       ilwork,    nsons,
     &                     ifath,      x,         cdelsq1)
            call FiltRat (y,          shgl,      shp,
     &                    iper,       ilwork,    nsons,
     &                    ifath,      x,         cdelsq1,
     &                    fwr4,       fwr3)


            if (i2filt.eq.0) then ! If simple filter wanted
               call DFWRsfdmc(y,          shgl,      shp,
     &                        iper,       ilwork,    nsons,
     &                        ifath,      x,   fwr2,   fwr3)
            else                ! else if twice filtering wanted
               call DFWRwfdmc(y,          shgl,      shp,
     &                        iper,       ilwork,    nsons,
     &                        ifath,      x,   fwr4,   fwr4)
            endif               ! end of simple filter if statement

         endif                  ! end of DFWR sub-model if statement

         if( (isubmod.eq.2) )then ! If SUPG sub-model wanted
            call dmcSUPG (y,            ac,         shgl,
     &                    shp,          iper,       ilwork,
     &                    nsons,        ifath,      x,
     &                    iBC,    BC,   rowp,       colm,
     &                    xavegt2,  stabdis)
         endif

         if(idis.eq.1)then      ! If SUPG/Model dissipation wanted
            call ediss (y,            ac,        shgl,
     &                  shp,          iper,      ilwork,
     &                  nsons,        ifath,     x,
     &                  iBC,    BC,   xavegt)
         endif

      endif                     ! end of ilesmod

      if (ilesmod .eq. 1) then  ! 10 < iLES < 20 => dynamic-mixed
                                ! at nodes based on discrete filtering
         call bardmc (y,       shgl,      shp,
     &                iper,    ilwork,
     &                nsons,   ifath,     x)
      endif

      if (ilesmod .eq. 2) then  ! 20 < iLES < 30 => dynamic at quad
                                ! pts based on lumped projection filt.

         if(isubmod.eq.0)then
            call projdmc (y,       shgl,      shp,
     &                    iper,    ilwork,    x)
         else
            call cpjdmcnoi (y,      shgl,     shp,
     &                      iper,   ilwork,   x,
     &                      rowp,   colm,
     &                      iBC,    BC)
         endif

      endif

      if( (iLES.gt.1) ) then    ! Deallocate Stuff for advanced LES models
         deallocate (fwr2)
         deallocate (fwr3)
         deallocate (fwr4)
         deallocate (xavegt)
         deallocate (xavegt2)
         deallocate (xavegt3)
         deallocate (stabdis)
      endif
      return
      end

c
c...initialize the coefficients for the impedance convolution
c
c     Builds the generalized-alpha time-integration weights (ConvCoef)
c     and the per-surface impedance convolution coefficients
c     (ImpConvCoef) used by the impedance outflow boundary condition.
c
c     numISrfs   : number of impedance surfaces
c     numTpoints : number of time points per period (T = N*dt)
c
      subroutine CalcImpConvCoef (numISrfs, numTpoints)

      use convolImpFlow !uses flow history and impedance for convolution

      include "common.h" !for alfi

      integer numISrfs, numTpoints

      allocate (ConvCoef(numTpoints+2,3)) !same time discret. for all imp.
c     (continues CalcImpConvCoef: fill the generalized-alpha weights)
c     each weight starts as dt/2 divided by the period T = N*dt and is
c     then distributed over the three alpha levels via alfi
      do j=1,numTpoints+2
         ConvCoef(j,:)=0.5/numTpoints !dt/2 divided by period T=N*dt
         ConvCoef(j,1)=ConvCoef(j,1)*(1.0-alfi)*(1.0-alfi)
         ConvCoef(j,2)=ConvCoef(j,2)*(1.0+2*alfi*(1.0-alfi))
         ConvCoef(j,3)=ConvCoef(j,3)*alfi*alfi
      enddo
c     zero the weights that fall outside the convolution window
      ConvCoef(1,2)=zero
      ConvCoef(1,3)=zero
      ConvCoef(2,3)=zero
      ConvCoef(numTpoints+1,1)=zero
      ConvCoef(numTpoints+2,2)=zero
      ConvCoef(numTpoints+2,1)=zero
c
c...calculate the coefficients for the impedance convolution
c
      allocate (ImpConvCoef(numTpoints+2,numISrfs))

c..coefficients below assume Q linear in time step, Z constant
c      do j=3,numTpoints
c         ImpConvCoef(j,:) = ValueListImp(j-1,:)*ConvCoef(j,3)
c     &                    + ValueListImp(j,:)*ConvCoef(j,2)
c     &                    + ValueListImp(j+1,:)*ConvCoef(j,1)
c      enddo
c      ImpConvCoef(1,:) = ValueListImp(2,:)*ConvCoef(1,1)
c      ImpConvCoef(2,:) = ValueListImp(2,:)*ConvCoef(2,2)
c     &                 + ValueListImp(3,:)*ConvCoef(2,1)
c      ImpConvCoef(numTpoints+1,:) =
c     &      ValueListImp(numTpoints,:)*ConvCoef(numTpoints+1,3)
c     &    + ValueListImp(numTpoints+1,:)*ConvCoef(numTpoints+1,2)
c      ImpConvCoef(numTpoints+2,:) =
c     &      ValueListImp(numTpoints+1,:)*ConvCoef(numTpoints+2,3)

c..try easiest convolution Q and Z constant per time step
      do j=3,numTpoints+1
         ImpConvCoef(j,:) = ValueListImp(j-1,:)/numTpoints
      enddo
      ImpConvCoef(1,:) =zero
      ImpConvCoef(2,:) =zero
      ImpConvCoef(numTpoints+2,:) =
     &            ValueListImp(numTpoints+1,:)/numTpoints
c compensate for yalpha passed not y in Elmgmr()
      ImpConvCoef(numTpoints+1,:)= ImpConvCoef(numTpoints+1,:)
     &                  - ImpConvCoef(numTpoints+2,:)*(1.0-alfi)/alfi
      ImpConvCoef(numTpoints+2,:)= ImpConvCoef(numTpoints+2,:)/alfi
      return
      end

c
c ... update the flow rate history for the impedance convolution,
c     filter it and write it out
c
c     y          (nshg,3)    : velocity field at time n+1
c     nsrfIdList (0:MAXSURF) : surface ids to integrate the flow over
c     numSrfs                : number of surfaces in the list
c
c     For impedance BCs the history QHistImp is shifted by one step,
c     the new flow rate is appended, the history is low-pass filtered,
c     and (at output steps, on the master rank) written to Qhistor.dat.
c     For RCR BCs the new flow rate is simply appended to QHistRCR.
c
      subroutine UpdHistConv(y,nsrfIdList,numSrfs)

      use convolImpFlow !brings ntimeptpT, QHistImp, QHistTry, QHistTryF, numImpSrfs
      use convolRCRFlow !brings QHistRCR, numRCRSrfs

      include "common.h"

      integer nsrfIdList(0:MAXSURF), numSrfs
      character*20 fname1
      character*10 cname2
      character*5 cname
      real*8 y(nshg,3) !velocity at time n+1
      real*8 NewQ(0:MAXSURF) !temporary unknown for the flow rate
                             !that needs to be added to the flow history

      call GetFlowQ(NewQ,y,nsrfIdList,numSrfs) !new flow at time n+1
c
c... for imp BC: shift QHist, add new contribution, filter and write out
c
c     NOTE(review): numImpSrfs is an integer compared against the real
c     constant `zero`; works via implicit conversion but is sloppy
      if(numImpSrfs.gt.zero) then
         do j=1, ntimeptpT
            QHistImp(j,1:numSrfs)=QHistImp(j+1,1:numSrfs)
         enddo
         QHistImp(ntimeptpT+1,1:numSrfs) = NewQ(1:numSrfs)

c
c....filter the flow rate history
c
         cutfreq = 10           !hardcoded cutting frequency of the filter
         do j=1, ntimeptpT
            QHistTry(j,:)=QHistImp(j+1,:)
         enddo
         call Filter(QHistTryF,QHistTry,ntimeptpT,Delt(1),cutfreq)
c.... if no filter applied then uncomment next three lines
c         do j=1, ntimeptpT
c            QHistTryF(j,:)=QHistTry(j,:)
c         enddo

c         QHistImp(1,:)=zero ! why do we do this? for beta(1,:) = zero it does not really matters
         do j=1, ntimeptpT
            QHistImp(j+1,:)=QHistTryF(j,:)
         enddo
c
c.... write out the new history of flow rates to Qhistor.dat
c
         if (((irs .ge. 1) .and. ((mod(lstep, ntout) .eq. 0) .or.
     &        (istep .eq. nstep(1)))) .and.
     &        (myrank .eq. master)) then
            open(unit=816, file='Qhistor.dat',status='replace')
            write(816,*) ntimeptpT
            do j=1,ntimeptpT+1
               write(816,*) (QHistImp(j,n),n=1, numSrfs)
            enddo
            close(816)
c... write out a copy with step number to be able to restart
            fname1 = 'Qhistor'
            fname1 = trim(fname1)//trim(cname2(lstep))//'.dat'
            open(unit=8166,file=trim(fname1),status='unknown')
            write(8166,*) ntimeptpT
            do j=1,ntimeptpT+1
               write(8166,*) (QHistImp(j,n),n=1, numSrfs)
            enddo
            close(8166)
         endif
      endif

c
c... for RCR bc just add the new contribution
c
      if(numRCRSrfs.gt.zero) then
         QHistRCR(lstep+1,1:numSrfs) = NewQ(1:numSrfs)
c
c.... write out the new history of flow rates to Qhistor.dat
c
         if ((irs .ge. 1) .and. (myrank .eq. master)) then
            if(istep.eq.1) then
               open(unit=816,file='Qhistor.dat',status='unknown')
            else
               open(unit=816,file='Qhistor.dat',position='append')
            endif
            if(istep.eq.1) then
               do j=1,lstep
                  write(816,*) j, (QHistRCR(j,n),n=1,numSrfs) ! read from file of previous run
               enddo
            endif
            write(816,*) lstep+1, (QHistRCR(lstep+1,n),n=1, numSrfs)
            close(816)
c... write out a copy with step number to be able to restart
            if (((irs .ge. 1) .and. ((mod(lstep, ntout) .eq. 0) .or.
     &           (istep .eq. nstep(1)))) .and.
     &           (myrank .eq. master)) then
               fname1 = 'Qhistor'
               fname1 = trim(fname1)//trim(cname2(lstep))//'.dat'
               open(unit=8166,file=trim(fname1),status='unknown')
               write(8166,*) lstep+1
               do j=1,lstep+1
                  write(8166,*) (QHistRCR(j,n),n=1, numSrfs)
               enddo
               close(8166)
            endif
         endif
      endif

      return
      end

c
c...calculate the time varying coefficients for the RCR convolution
c
c     stepn   : current step index within the convolution history
c     numSrfs : number of RCR surfaces
c
c     Fills RCRConvCoef from the proximal resistance ValueListRCR(1,:)
c     and the exponential-decay terms built from ValueListRCR(3,:) and
c     dtRCR, then rescales the last two coefficients because Elmgmr()
c     passes yalpha rather than y.
c
      subroutine CalcRCRConvCoef (stepn, numSrfs)

      use convolRCRFlow !brings in ValueListRCR, dtRCR

      include "common.h" !brings alfi

      integer numSrfs, stepn

      RCRConvCoef = zero
      if (stepn .eq.
0) then 1905 RCRConvCoef(1,:) = ValueListRCR(1,:)*(1.0-alfi) + 1906 & ValueListRCR(3,:)*(-alfi + 1.0 + 1/dtRCR(:) 1907 & - exp(-alfi*dtRCR(:))*(1 + 1/dtRCR(:))) 1908 RCRConvCoef(2,:) = ValueListRCR(1,:)*alfi 1909 & + ValueListRCR(3,:) 1910 & *(alfi - 1/dtRCR(:) + exp(-alfi*dtRCR(:))/dtRCR(:)) 1911 endif 1912 if (stepn .ge. 1) then 1913 RCRConvCoef(1,:) =-ValueListRCR(3,:)*exp(-dtRCR(:)*(stepn+alfi)) 1914 & *(1 + (1 - exp(dtRCR(:)))/dtRCR(:)) 1915 RCRConvCoef(stepn+1,:) = ValueListRCR(1,:)*(1-alfi) 1916 & - ValueListRCR(3,:)*(alfi - 1 - 1/dtRCR(:) 1917 & + exp(-alfi*dtRCR(:))/dtRCR(:)*(2 - exp(-dtRCR(:)))) 1918 RCRConvCoef(stepn+2,:) = ValueListRCR(1,:)*alfi 1919 & + ValueListRCR(3,:) 1920 & *(alfi - 1/dtRCR(:) + exp(-alfi*dtRCR(:))/dtRCR(:)) 1921 endif 1922 if (stepn .ge. 2) then 1923 do j=2,stepn 1924 RCRConvCoef(j,:) = ValueListRCR(3,:)/dtRCR(:)* 1925 & exp(-dtRCR(:)*(stepn + alfi + 2 - j))* 1926 & (1 - exp(dtRCR(:)))**2 1927 enddo 1928 endif 1929 1930c compensate for yalpha passed not y in Elmgmr() 1931 RCRConvCoef(stepn+1,:)= RCRConvCoef(stepn+1,:) 1932 & - RCRConvCoef(stepn+2,:)*(1.0-alfi)/alfi 1933 RCRConvCoef(stepn+2,:)= RCRConvCoef(stepn+2,:)/alfi 1934 1935 return 1936 end 1937 1938c 1939c...calculate the time dependent H operator for the RCR convolution 1940c 1941 subroutine CalcHopRCR (timestepRCR, stepn, numSrfs) 1942 1943 use convolRCRFlow !brings in HopRCR, dtRCR 1944 1945 include "common.h" 1946 1947 integer numSrfs, stepn 1948 real*8 PdistCur(0:MAXSURF), timestepRCR 1949 1950 HopRCR=zero 1951 call RCRint(timestepRCR*(stepn + alfi),PdistCur) 1952 HopRCR(1:numSrfs) = RCRic(1:numSrfs) 1953 & *exp(-dtRCR(1:numSrfs)*(stepn + alfi)) + PdistCur(1:numSrfs) 1954 return 1955 end 1956c 1957c ... 
c ... initialize the influence of the initial conditions for the RCR BC
c
c     y       (nshg,4)    : velocity (1:3) and pressure (4) fields
c     srfIdList(0:MAXSURF): RCR surface ids
c     numSrfs             : number of RCR surfaces
c
c     On a fresh run (lstep==0) the initial surface flow and mean
c     pressure are integrated from y; on a restart the flow comes from
c     QHistRCR and the pressure term is zeroed (see "hack" note below).
c     The result RCRic feeds the decaying term in CalcHopRCR.
c
      subroutine calcRCRic(y,srfIdList,numSrfs)

      use convolRCRFlow !brings RCRic, ValueListRCR, ValuePdist

      include "common.h"

      integer srfIdList(0:MAXSURF), numSrfs, irankCoupled
      real*8 y(nshg,4) !need velocity and pressure
      real*8 Qini(0:MAXSURF) !initial flow rate
      real*8 PdistIni(0:MAXSURF) !initial distal pressure
      real*8 Pini(0:MAXSURF),CoupleArea(0:MAXSURF) ! initial pressure
      real*8 VelOnly(nshg,3), POnly(nshg)

      allocate (RCRic(0:MAXSURF))

      if(lstep.eq.0) then
         VelOnly(:,1:3)=y(:,1:3)
         call GetFlowQ(Qini,VelOnly,srfIdList,numSrfs) !get initial flow
         QHistRCR(1,1:numSrfs)=Qini(1:numSrfs) !initialize QHistRCR
         POnly(:)=y(:,4) ! pressure
         call integrScalar(Pini,POnly,srfIdList,numSrfs) !get initial pressure integral
         POnly(:)=one ! one to get area
         call integrScalar(CoupleArea,POnly,srfIdList,numSrfs) !get surf area
c        area-averaged initial pressure on each surface
         Pini(1:numSrfs) = Pini(1:numSrfs)/CoupleArea(1:numSrfs)
      else
         Qini(1:numSrfs)=QHistRCR(1,1:numSrfs)
c        NOTE(review): restart path discards the true initial pressure
         Pini(1:numSrfs)=zero ! hack
      endif
      call RCRint(istep,PdistIni) !get initial distal P (use istep)
      RCRic(1:numSrfs) = Pini(1:numSrfs)
     &     - ValueListRCR(1,:)*Qini(1:numSrfs)-PdistIni(1:numSrfs)
      return
      end

c.........function that integrates a scalar over a boundary
c
c     scalInt (0:MAXSURF) : out, global surface integral per surface id
c     scal    (nshg)      : nodal scalar to integrate
c     srfIdList(0:MAXSURF): surface ids to integrate over
c     numSrfs             : number of surfaces in the list
c
c     Accumulates NASC-weighted nodal contributions per surface on this
c     rank, then MPI_ALLREDUCE sums across ranks into scalInt.
c
      subroutine integrScalar(scalInt,scal,srfIdList,numSrfs)

      use pvsQbi !brings ndsurf, NASC

      include "common.h"
      include "mpif.h"

      integer srfIdList(0:MAXSURF), numSrfs, irankCoupled, i, k
      real*8 scal(nshg), scalInt(0:MAXSURF), scalIntProc(0:MAXSURF)

      scalIntProc = zero
      do i = 1,nshg
         if(numSrfs.gt.zero) then
            do k = 1,numSrfs
               irankCoupled = 0
c              a node contributes to at most one surface in the list
               if (srfIdList(k).eq.ndsurf(i)) then
                  irankCoupled=k
                  scalIntProc(irankCoupled) = scalIntProc(irankCoupled)
     &                 + NASC(i)*scal(i)
                  exit
               endif
            enddo
         endif
      enddo
c
c     at this point, each scalint has its "nodes" contributions to the scalar
c     accumulated into scalIntProc. Note, because NASC is on processor this
c     will NOT be the scalar for the surface yet
c
c.... reduce integrated scalar for each surface, push on scalInt
c
      npars=MAXSURF+1
      call MPI_ALLREDUCE (scalIntProc, scalInt(:), npars,
     &     MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)

      return
      end

c
c     writeTimingMessage: print an I/O timing line to stdout.
c
c     key    : name of the field that was written (C-style string)
c     iomode : -1 selects the "stream" label, anything else "disk"
c     timing : elapsed time in seconds to report
c
      subroutine writeTimingMessage(key,iomode,timing)
      use iso_c_binding
      use phstr
      implicit none

      character(len=*) :: key
      integer :: iomode
      real*8 :: timing
      character(len=1024) :: timing_msg
      character(len=*), parameter ::
     &     streamModeString = c_char_"stream"//c_null_char,
     &     fileModeString = c_char_"disk"//c_null_char

      timing_msg = c_char_"Time to write "//c_null_char
      call phstr_appendStr(timing_msg,key)
      if ( iomode .eq.
-1 ) then 2048 call phstr_appendStr(timing_msg, streamModeString) 2049 else 2050 call phstr_appendStr(timing_msg, fileModeString) 2051 endif 2052 call phstr_appendStr(timing_msg, c_char_' = '//c_null_char) 2053 call phstr_appendDbl(timing_msg, timing) 2054 write(6,*) trim(timing_msg) 2055 return 2056 end subroutine 2057 2058