#define PETSCMAT_DLL

/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include "../src/mat/impls/sbaij/mpi/mpisbaij.h"
#include "petscbt.h"

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N,bs=C->rmap->bs;
  IS             *is_new;

  PetscFunctionBegin;
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,bs,is_max,is_new,is);CHKERRQ(ierr);
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef enum {MINE,OTHER} WhoseOwner;
/*
   data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + sum(size of is[k], k=0,...,i-1)] = data(is[i])
        ...
   data2 is packed in the format (for creating the output is[]):
       data[0]          = is_max, number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
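/*
   Example of the communication format (illustration only, values arbitrary):
   with is_max = 2, is[0] = {3,7,9} and is[1] = {2,5}, a message packs as

       data[0]    = 2        is_max
       data[1]    = 3        size of is[0]
       data[2]    = 2        size of is[1]
       data[3..5] = 3,7,9    indices of is[0]
       data[6..7] = 2,5      indices of is[1]

   The output (data2) format instead reserves a fixed stride of Mbs entries per
   index set, so data[1 + is_max + Mbs*i] is always the first index of is[i],
   independent of the sizes of the preceding sets.
*/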
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
                 Mbs,i,j,k,*odata1,*odata2,
                 proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  PetscInt       proc_end=0,*iwork,len_unused,nodata2;
  PetscInt       ois_max; /* max number of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable;  /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn=c->B->cmap->n,Bnbs=Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len  = PetscMax(is_max,size);
  ierr = PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);CHKERRQ(ierr);
  for (i=0; i<len; i++) {
    table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc(is_max*sizeof(PetscInt),&n);CHKERRQ(ierr);
  len  = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc4(size,PetscInt,&len_s,size,PetscInt,&btable,size,PetscInt,&iwork,size+1,PetscInt,&Bowners);CHKERRQ(ierr);

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local,&garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];

  if (is_max) {
    /* lookup table ctable mapping each block row to the process that owns it */
    ierr = PetscMalloc(Mbs*sizeof(PetscInt),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) {
        ctable[j] = proc_id;
      }
    }

    /* bit tables marking c->garray of each process */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++) {
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j=Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  } /* if (is_max) */
  ierr = ISDestroy(garray_gl);CHKERRQ(ierr);
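  /*
     Example (illustration only): with 2 processes and Mbs = 6, where process 0
     owns block rows 0..2 and process 1 owns block rows 3..5,

         ctable = {0,0,0,1,1,1}

     so ctable[idx] gives the owner of block row idx.  Likewise table[i] has a
     bit set for every block column that appears in the off-diagonal part
     (c->garray) of process i; it is used below to decide whether an index must
     be sent to that process at all.
  */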

  /* evaluate communication - message to whom, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++) {
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++;  /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++) { /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need to send idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                 = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++) {
    data1_start[i] = data1 + i*len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add the header length */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  ierr = PetscFree(ctable);CHKERRQ(ierr);

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);CHKERRQ(ierr);
  k    = 0;
  for (proc_id=0; proc_id<size; proc_id++) { /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive other processors' is[], process them, then send the results back */
  /*------------------------------------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt*),&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1); /* max space needed to store all is[] for one receive */
  len_est = 2*len_max;       /* estimated space needed to store the is[] for all receives */
  ierr    = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
  nodata2 = 0;               /* nodata2+1: number of chunks allocated and tracked in odata2_ptr[] */
  odata2_ptr[nodata2] = odata2;
  len_unused = len_est;      /* unused space in the chunk odata2_ptr[nodata2] -- must stay >= len_max */

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr    = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr    = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr    = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate a new chunk for odata2 */
        ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
        odata2_ptr[++nodata2] = odata2;
        len_unused = len_est;
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len  = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++) {
        len += odata2[1 + i];
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      k++;
      odata2        += len;
      len_unused    -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this process */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2 (=data) array */
  len_max = is_max*(Mbs+1); /* max space needed to store all is[] of this processor */
  if (len_unused < len_max) { /* allocate a new chunk for odata2 */
    ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
    odata2_ptr[++nodata2] = odata2;
    len_unused = len_est;
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 4. Receive the work done on other processors, then merge */
  /*-----------------------------------------------------------*/
  /* get the maximum length of the messages this processor will receive */
  ierr = MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr = PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);CHKERRQ(ierr);
  ierr = PetscFree4(len_s,btable,iwork,Bowners);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr    = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr    = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr    = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      if (len > 1+is_max) { /* add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree2(table,t_p);CHKERRQ(ierr);

  /* wait until the phase 1 sends are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* wait until the phase 2 sends are complete */
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);}
  ierr = PetscFree2(s_waits1,s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr   = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++) {
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once()
   to do the work on the local processor.

   Inputs:
     C     - MAT_MPISBAIJ
     data  - holds is[]; see MatIncreaseOverlap_MPISBAIJ_Once() for the format
     whose - whose is[] is to be processed:
             MINE:  this processor's is[]
             OTHER: another processor's is[]
   Output:
     nidx  - whose = MINE:
                 holds the input and the newly found indices in the same format as data
             whose = OTHER:
                 holds only the newly found indices
     table - table[i] marks the indices of is[i], i=0,...,is_max-1; used only when whose=MINE
*/
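/*
   Example of the output layout (illustration only): with is_max = 2 and Mbs = 100,

     whose = MINE:  is[0] is stored at nidx[3], ..., nidx[3+nidx[1]-1] and
                    is[1] at nidx[103], ..., nidx[103+nidx[2]-1] (fixed stride Mbs);
     whose = OTHER: is[0] starts at nidx[3] and is[1] follows it immediately,
                    i.e. the segments are packed back to back.
*/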
/* Would the computation be reduced by swapping the loops 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscErrorCode ierr;
  PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT        table0;  /* mark the indices of the input is[] for lookup */
  PetscBT        table_i; /* points to the i-th table; when whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs    = c->Mbs; mbs = a->mbs;
  ai     = a->i; aj = a->j;
  bi     = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to the input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to the output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of the input is[i] */

    /* initialize and set table_i (marks idx and nidx) and table0 (marks only idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {             /* process another processor's is[] - use a single temporary table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
      nidx[1+i] = 0; /* size of the new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++) {
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) {nidx_i[isz0] = col;}
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) {isz = isz0;}
    k = 0; /* number of indices from the input is[i] that have been examined */
    for (row=0; row<mbs; row++) {
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)) { /* row is in the input is[i]:
                                                 do a row search - collect all cols in this row */
        for (l=a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        for (l=b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
      } else { /* row is not in the input is[i]:
                  do a column search - add row to nidx_i if one of its cols is in the input is[i] */
        for (l=a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* for (l=a_start; l<a_end; l++) */
          }
        }
        for (l=b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* for (l=b_start; l<b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i  += n;   /* ptr to the input is[i+1] array */
      nidx_i += isz; /* ptr to the output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of the new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(table0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
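
/*
   Typical call path (sketch only; consult the manual pages for the exact calling
   sequences): a user increases the overlap of a set of index sets and then
   extracts the corresponding sequential submatrices, e.g.

     IS  is[2];
     Mat *submat;
     ...                                                  create is[0], is[1]
     ierr = MatIncreaseOverlap(C,2,is,1);CHKERRQ(ierr);   dispatches here for MATMPISBAIJ
     ierr = MatGetSubMatrices(C,2,is,is,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
*/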