/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to find submatrices that were shared across processors.
*/
#include <../src/mat/impls/baij/mpi/mpibaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat, PetscInt, char **, PetscInt *, PetscInt **);
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat, PetscInt, PetscInt **, PetscInt **, PetscInt *);
extern PetscErrorCode MatGetRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);
extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);

PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C, PetscInt imax, IS is[], PetscInt ov)
{
  PetscInt i, N = C->cmap->N, bs = C->rmap->bs, n;
  const PetscInt *idx;
  IS *is_new;

  PetscFunctionBegin;
  PetscCall(PetscMalloc1(imax, &is_new));
  /* Convert the indices into block format */
  PetscCall(ISCompressIndicesGeneral(N, C->rmap->n, bs, imax, is, is_new));
  PetscCheck(ov >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Negative overlap specified");
  for (i = 0; i < ov; ++i) PetscCall(MatIncreaseOverlap_MPIBAIJ_Once(C, imax, is_new));
  for (i = 0; i < imax; i++) {
    PetscCall(ISDestroy(&is[i]));
    PetscCall(ISGetLocalSize(is_new[i], &n));
    PetscCall(ISGetIndices(is_new[i], &idx));
    PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)is_new[i]), bs, n, idx, PETSC_COPY_VALUES, &is[i]));
    PetscCall(ISDestroy(&is_new[i]));
  }
  PetscCall(PetscFree(is_new));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  Sample message format:
  If processor A wants processor B to process some elements corresponding
  to index sets is[1] and is[5]
  mesg [0] = 2          (no of index sets in the mesg)
  -----------
  mesg [1] = 1          => is[1]
  mesg [2] = sizeof(is[1]);
  -----------
  mesg [3] = 5          => is[5]
  mesg [4] = sizeof(is[5]);
  -----------
  mesg [5]
  ...
  mesg [n]              data(is[1])
  -----------
  mesg [n+1]
  ...
  mesg [m]              data(is[5])
  -----------

  Notes:
  nrqs - no of requests sent (or to be sent out)
  nrqr - no of requests received (which have to be or which have been processed)
*/
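
/*
   A concrete (hypothetical) instance of the format above, consistent with how
   the headers are assembled in MatIncreaseOverlap_MPIBAIJ_Once() below: if
   is[1] = {7, 9, 12} and is[5] = {3, 8} are destined for processor B, the
   message sent to B is

     mesg = [2,  1, 3,  5, 2,  7, 9, 12,  3, 8]
             ^   ^  ^   ^  ^   ^          ^
             |   |  |   |  |   data(is[1])  data(is[5])
             |   |  |   |  size of is[5]
             |   |  |   index set 5
             |   |  size of is[1]
             |   index set 1
             number of index sets

   i.e. a header of 1 + 2*(no of index sets) entries followed by the row data.
*/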
PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C, PetscInt imax, IS is[])
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
  const PetscInt **idx, *idx_i;
  PetscInt *n, *w3, *w4, **data, len;
  PetscMPIInt size, rank, tag1, tag2, *w2, *w1, nrqr;
  PetscInt Mbs, i, j, k, **rbuf, row, nrqs, msz, **outdat, **ptr;
  PetscInt *ctr, *pa, *tmp, *isz, *isz1, **xdata, **rbuf2, *d_p;
  PetscMPIInt *onodes1, *olengths1, *onodes2, *olengths2, proc = -1;
  PetscBT *table;
  MPI_Comm comm, *iscomms;
  MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2;
  char *t_p;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  size = c->size;
  rank = c->rank;
  Mbs = c->Mbs;

  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));

  PetscCall(PetscMalloc2(imax, (PetscInt ***)&idx, imax, &n));

  for (i = 0; i < imax; i++) {
    PetscCall(ISGetIndices(is[i], &idx[i]));
    PetscCall(ISGetLocalSize(is[i], &n[i]));
  }

  /* evaluate communication - mesg to whom, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  PetscCall(PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4));
  for (i = 0; i < imax; i++) {
    PetscCall(PetscArrayzero(w4, size)); /* initialise work vector */
    idx_i = idx[i];
    len = n[i];
    for (j = 0; j < len; j++) {
      row = idx_i[j];
      PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Index set cannot have negative entries");
      PetscCall(PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc));
      w4[proc]++;
    }
    for (j = 0; j < size; j++) {
      if (w4[j]) {
        w1[j] += w4[j];
        w3[j]++;
      }
    }
  }

  nrqs = 0;     /* no of outgoing messages */
  msz = 0;      /* total mesg length (for all procs) */
  w1[rank] = 0; /* no mesg sent to itself */
  w3[rank] = 0;
  for (i = 0; i < size; i++) {
    if (w1[i]) {
      w2[i] = 1;
      nrqs++;
    } /* there exists a message to proc i */
  }
  /* pa - is list of processors to communicate with */
  PetscCall(PetscMalloc1(nrqs, &pa));
  for (i = 0, j = 0; i < size; i++) {
    if (w1[i]) {
      pa[j] = i;
      j++;
    }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    w1[j] += w2[j] + 2 * w3[j];
    msz += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and from-ids */
  PetscCall(PetscGatherNumberOfMessages(comm, w2, w1, &nrqr));
  PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1));

  /* Now post the Irecvs corresponding to these messages */
  PetscCall(PetscPostIrecvInt(comm, tag1, nrqr, onodes1, olengths1, &rbuf, &r_waits1));

  /* Allocate Memory for outgoing messages */
  PetscCall(PetscMalloc4(size, &outdat, size, &ptr, msz, &tmp, size, &ctr));
  PetscCall(PetscArrayzero(outdat, size));
  PetscCall(PetscArrayzero(ptr, size));
  {
    PetscInt *iptr = tmp, ict = 0;
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      iptr += ict;
      outdat[j] = iptr;
      ict = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* plug in the headers */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    outdat[j][0] = 0;
    PetscCall(PetscArrayzero(outdat[j] + 1, 2 * w3[j]));
    ptr[j] = outdat[j] + 2 * w3[j] + 1;
  }

  /* Memory for doing local proc's work */
  {
    PetscCall(PetscCalloc5(imax, &table, imax, &data, imax, &isz, Mbs * imax, &d_p, (Mbs / PETSC_BITS_PER_BYTE + 1) * imax, &t_p));

    for (i = 0; i < imax; i++) {
      table[i] = t_p + (Mbs / PETSC_BITS_PER_BYTE + 1) * i;
      data[i] = d_p + (Mbs)*i;
    }
  }

  /* Parse the IS and update local tables and the outgoing buf with the data */
  {
    PetscInt n_i, *data_i, isz_i, *outdat_j, ctr_j;
    PetscBT table_i;

    for (i = 0; i < imax; i++) {
      PetscCall(PetscArrayzero(ctr, size));
      n_i = n[i];
      table_i = table[i];
      idx_i = idx[i];
      data_i = data[i];
      isz_i = isz[i];
      for (j = 0; j < n_i; j++) { /* parse the indices of each IS */
        row = idx_i[j];
        PetscCall(PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc));
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else { /* Update the local table */
          if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
        }
      }
      /* Update the headers for the current IS */
      for (j = 0; j < size; j++) { /* Can Optimise this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j = outdat[j];
          k = ++outdat_j[0];
          outdat_j[2 * k] = ctr_j;
          outdat_j[2 * k - 1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqs, &s_waits1));
  for (i = 0; i < nrqs; ++i) {
    j = pa[i];
    PetscCallMPI(MPI_Isend(outdat[j], w1[j], MPIU_INT, j, tag1, comm, s_waits1 + i));
  }

  /* No longer need the original indices */
  for (i = 0; i < imax; ++i) PetscCall(ISRestoreIndices(is[i], idx + i));
  PetscCall(PetscFree2(*(PetscInt ***)&idx, n));

  PetscCall(PetscMalloc1(imax, &iscomms));
  for (i = 0; i < imax; ++i) {
    PetscCall(PetscCommDuplicate(PetscObjectComm((PetscObject)is[i]), &iscomms[i], NULL));
    PetscCall(ISDestroy(&is[i]));
  }

  /* Do Local work */
  PetscCall(MatIncreaseOverlap_MPIBAIJ_Local(C, imax, table, isz, data));

  /* Receive messages */
  PetscCallMPI(MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE));
  PetscCallMPI(MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE));

  /* Phase 1 sends are complete - deallocate buffers */
  PetscCall(PetscFree4(outdat, ptr, tmp, ctr));
  PetscCall(PetscFree4(w1, w2, w3, w4));

  PetscCall(PetscMalloc1(nrqr, &xdata));
  PetscCall(PetscMalloc1(nrqr, &isz1));
  PetscCall(MatIncreaseOverlap_MPIBAIJ_Receive(C, nrqr, rbuf, xdata, isz1));
  if (rbuf) {
    PetscCall(PetscFree(rbuf[0]));
    PetscCall(PetscFree(rbuf));
  }

  /* Send the data back */
  /* Do a global reduction to know the buffer space required for incoming messages */
  {
    PetscMPIInt *rw1;

    PetscCall(PetscCalloc1(size, &rw1));

    for (i = 0; i < nrqr; ++i) {
      proc = onodes1[i];
      rw1[proc] = isz1[i];
    }

    /* Determine the number of messages to expect, their lengths, and from-ids */
    PetscCall(PetscGatherMessageLengths(comm, nrqr, nrqs, rw1, &onodes2, &olengths2));
    PetscCall(PetscFree(rw1));
  }
  /* Now post the Irecvs corresponding to these messages */
  PetscCall(PetscPostIrecvInt(comm, tag2, nrqs, onodes2, olengths2, &rbuf2, &r_waits2));

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqr, &s_waits2));
  for (i = 0; i < nrqr; ++i) {
    j = onodes1[i];
    PetscCallMPI(MPI_Isend(xdata[i], isz1[i], MPIU_INT, j, tag2, comm, s_waits2 + i));
  }

  PetscCall(PetscFree(onodes1));
  PetscCall(PetscFree(olengths1));

  /* receive work done on other processors */
  {
    PetscMPIInt idex;
    PetscInt is_no, ct1, max, *rbuf2_i, isz_i, *data_i, jmax;
    PetscBT table_i;

    for (i = 0; i < nrqs; ++i) {
      PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &idex, MPI_STATUS_IGNORE));
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1 = 2 * rbuf2_i[0] + 1;
      jmax = rbuf2[idex][0];
      for (j = 1; j <= jmax; j++) {
        max = rbuf2_i[2 * j];
        is_no = rbuf2_i[2 * j - 1];
        isz_i = isz[is_no];
        data_i = data[is_no];
        table_i = table[is_no];
        for (k = 0; k < max; k++, ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
        }
        isz[is_no] = isz_i;
      }
    }
    PetscCallMPI(MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE));
  }

  for (i = 0; i < imax; ++i) {
    PetscCall(ISCreateGeneral(iscomms[i], isz[i], data[i], PETSC_COPY_VALUES, is + i));
    PetscCall(PetscCommDestroy(&iscomms[i]));
  }

  PetscCall(PetscFree(iscomms));
  PetscCall(PetscFree(onodes2));
  PetscCall(PetscFree(olengths2));

  PetscCall(PetscFree(pa));
  if (rbuf2) {
    PetscCall(PetscFree(rbuf2[0]));
    PetscCall(PetscFree(rbuf2));
  }
  PetscCall(PetscFree(s_waits1));
  PetscCall(PetscFree(r_waits1));
  PetscCall(PetscFree(s_waits2));
  PetscCall(PetscFree(r_waits2));
  PetscCall(PetscFree5(table, data, isz, d_p, t_p));
  if (xdata) {
    PetscCall(PetscFree(xdata[0]));
    PetscCall(PetscFree(xdata));
  }
  PetscCall(PetscFree(isz1));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

   Inputs:
     C     - MAT_MPIBAIJ;
     imax  - total no of index sets processed at a time;
     table - an array of char - size = Mbs bits.

   Output:
     isz  - array containing the count of the solution elements corresponding
            to each index set;
     data - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C, PetscInt imax, PetscBT *table, PetscInt *isz, PetscInt **data)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
  Mat A = c->A, B = c->B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
  PetscInt start, end, val, max, rstart, cstart, *ai, *aj;
  PetscInt *bi, *bj, *garray, i, j, k, row, *data_i, isz_i;
  PetscBT table_i;

  PetscFunctionBegin;
  rstart = c->rstartbs;
  cstart = c->cstartbs;
  ai = a->i;
  aj = a->j;
  bi = b->i;
  bj = b->j;
  garray = c->garray;

  for (i = 0; i < imax; i++) {
    data_i = data[i];
    table_i = table[i];
    isz_i = isz[i];
    for (j = 0, max = isz[i]; j < max; j++) {
      row = data_i[j] - rstart;
      start = ai[row];
      end = ai[row + 1];
      for (k = start; k < end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
      }
      start = bi[row];
      end = bi[row + 1];
      for (k = start; k < end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
      }
    }
    isz[i] = isz_i;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
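
/*
   The expansion above relies on a PetscBT bit table so that each block column
   is appended at most once.  A minimal sketch of that pattern in isolation
   (the names `seen`, `cols`, `uniq` and `nuniq` are illustrative and are not
   variables of this file):

     PetscBT  seen;
     PetscInt cols[] = {5, 9, 5, 12}, uniq[4], nuniq = 0;
     PetscCall(PetscBTCreate(Mbs, &seen));
     for (PetscInt t = 0; t < 4; t++)
       if (!PetscBTLookupSet(seen, cols[t])) uniq[nuniq++] = cols[t]; // keeps 5, 9, 12
     PetscCall(PetscBTDestroy(&seen));

   PetscBTLookupSet() returns the previous state of the bit and sets it, which
   is exactly the "insert if not yet present" test used throughout this file.
*/
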
/*
   MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
   and return the output

   Input:
     C    - the matrix
     nrqr - no of messages being processed.
     rbuf - an array of pointers to the received requests

   Output:
     xdata - array of messages to be sent back
     isz1  - size of each message

   For better efficiency perhaps we should malloc separately each xdata[i],
   then if a realloc is required we need only copy the data for that one row
   rather than all previous rows as it is now where a single large chunk of
   memory is used.
*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C, PetscInt nrqr, PetscInt **rbuf, PetscInt **xdata, PetscInt *isz1)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
  Mat A = c->A, B = c->B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
  PetscInt rstart, cstart, *ai, *aj, *bi, *bj, *garray, i, j, k;
  PetscInt row, total_sz, ct, ct1, ct2, ct3, mem_estimate, oct2, l, start, end;
  PetscInt val, max1, max2, Mbs, no_malloc = 0, *tmp, new_estimate, ctr;
  PetscInt *rbuf_i, kmax, rbuf_0;
  PetscBT xtable;

  PetscFunctionBegin;
  Mbs = c->Mbs;
  rstart = c->rstartbs;
  cstart = c->cstartbs;
  ai = a->i;
  aj = a->j;
  bi = b->i;
  bj = b->j;
  garray = c->garray;

  for (i = 0, ct = 0, total_sz = 0; i < nrqr; ++i) {
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct += rbuf_0;
    for (j = 1; j <= rbuf_0; j++) total_sz += rbuf_i[2 * j];
  }

  if (c->Mbs) max1 = ct * (a->nz + b->nz) / c->Mbs;
  else max1 = 1;
  mem_estimate = 3 * ((total_sz > max1 ? total_sz : max1) + 1);
  if (nrqr) {
    PetscCall(PetscMalloc1(mem_estimate, &xdata[0]));
    ++no_malloc;
  }
  PetscCall(PetscBTCreate(Mbs, &xtable));
  PetscCall(PetscArrayzero(isz1, nrqr));

  ct3 = 0;
  for (i = 0; i < nrqr; i++) { /* for each mesg from proc i */
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct1 = 2 * rbuf_0 + 1;
    ct2 = ct1;
    ct3 += ct1;
    for (j = 1; j <= rbuf_0; j++) { /* for each IS from proc i */
      PetscCall(PetscBTMemzero(Mbs, xtable));
      oct2 = ct2;
      kmax = rbuf_i[2 * j];
      for (k = 0; k < kmax; k++, ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable, row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
            PetscCall(PetscMalloc1(new_estimate, &tmp));
            PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
            PetscCall(PetscFree(xdata[0]));
            xdata[0] = tmp;
            mem_estimate = new_estimate;
            ++no_malloc;
            for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k = oct2, max2 = ct2; k < max2; k++) {
        row = xdata[i][k] - rstart;
        start = ai[row];
        end = ai[row + 1];
        for (l = start; l < end; l++) {
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable, val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
              PetscCall(PetscMalloc1(new_estimate, &tmp));
              PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
              PetscCall(PetscFree(xdata[0]));
              xdata[0] = tmp;
              mem_estimate = new_estimate;
              ++no_malloc;
              for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end = bi[row + 1];
        for (l = start; l < end; l++) {
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable, val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
              PetscCall(PetscMalloc1(new_estimate, &tmp));
              PetscCall(PetscArraycpy(tmp, xdata[0], mem_estimate));
              PetscCall(PetscFree(xdata[0]));
              xdata[0] = tmp;
              mem_estimate = new_estimate;
              ++no_malloc;
              for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2 * j] = ct2 - oct2; /* Undo the vector isz1 and use only a var */
      xdata[i][2 * j - 1] = rbuf_i[2 * j - 1];
    }
    xdata[i][0] = rbuf_0;
    if (i + 1 < nrqr) xdata[i + 1] = xdata[i] + ct2;
    isz1[i] = ct2; /* size of each message */
  }
  PetscCall(PetscBTDestroy(&xtable));
  PetscCall(PetscInfo(C, "Allocated %" PetscInt_FMT " bytes, required %" PetscInt_FMT ", no of mallocs = %" PetscInt_FMT "\n", mem_estimate, ct3, no_malloc));
  PetscFunctionReturn(PETSC_SUCCESS);
}
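
/*
   The three identical growth blocks in MatIncreaseOverlap_MPIBAIJ_Receive()
   above implement the same "grow the shared buffer by 1.5x" step.  A minimal
   sketch of the pattern in isolation (the names `buf`, `len` and `cap` are
   illustrative, not variables of this file):

     if (len >= cap) {
       PetscInt *bigger, newcap = (PetscInt)(1.5 * cap) + 1;
       PetscCall(PetscMalloc1(newcap, &bigger));
       PetscCall(PetscArraycpy(bigger, buf, cap));
       PetscCall(PetscFree(buf));
       buf = bigger;
       cap = newcap;
     }

   Because xdata[1..nrqr-1] alias into the single allocation xdata[0], each
   growth block also recomputes xdata[1..i] from isz1[] after the realloc.
*/
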
PetscErrorCode MatCreateSubMatrices_MPIBAIJ(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
{
  IS *isrow_block, *iscol_block;
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
  PetscInt nmax, nstages_local, nstages, i, pos, max_no, N = C->cmap->N, bs = C->rmap->bs;
  Mat_SeqBAIJ *subc;
  Mat_SubSppt *smat;
  PetscBool sym = PETSC_FALSE, flg[2];

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)C, MATMPISBAIJ, flg));
  if (flg[0]) {
    if (isrow == iscol) sym = PETSC_TRUE;
    else {
      flg[0] = flg[1] = PETSC_TRUE;
      for (i = 0; i < ismax; i++) {
        if (isrow[i] != iscol[i]) flg[0] = PETSC_FALSE;
        PetscCall(ISGetLocalSize(iscol[0], &nmax));
        if (nmax == C->cmap->N && flg[1]) PetscCall(ISIdentity(iscol[0], flg + 1));
      }
      sym = (PetscBool)(flg[0] || flg[1]);
    }
  }
  /* The compression and expansion should be avoided. It does not flag
     errors and might change the indices, hence it is buggy. */
  PetscCall(PetscMalloc2(ismax, &isrow_block, ismax, &iscol_block));
  PetscCall(ISCompressIndicesGeneral(C->rmap->N, C->rmap->n, bs, ismax, isrow, isrow_block));
  if (isrow == iscol) {
    for (i = 0; i < ismax; i++) {
      iscol_block[i] = isrow_block[i];
      PetscCall(PetscObjectReference((PetscObject)iscol_block[i]));
    }
  } else PetscCall(ISCompressIndicesGeneral(N, C->cmap->n, bs, ismax, iscol, iscol_block));

  /* Determine the number of stages through which submatrices are done */
  if (!C->cmap->N) nmax = 20 * 1000000 / sizeof(PetscInt);
  else nmax = 20 * 1000000 / (c->Nbs * sizeof(PetscInt));
  if (!nmax) nmax = 1;

  if (scall == MAT_INITIAL_MATRIX) {
    nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */

    /* Make sure every processor loops through the nstages */
    PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

    /* Allocate memory to hold all the submatrices and dummy submatrices */
    PetscCall(PetscCalloc1(ismax + nstages, submat));
  } else { /* MAT_REUSE_MATRIX */
    if (ismax) {
      subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
      smat = subc->submatis1;
    } else { /* (*submat)[0] is a dummy matrix */
      smat = (Mat_SubSppt *)(*submat)[0]->data;
    }
    PetscCheck(smat, PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "MatCreateSubMatrices(...,MAT_REUSE_MATRIX,...) requires submat");
    nstages = smat->nstages;
  }

  for (i = 0, pos = 0; i < nstages; i++) {
    if (pos + nmax <= ismax) max_no = nmax;
    else if (pos >= ismax) max_no = 0;
    else max_no = ismax - pos;

    PetscCall(MatCreateSubMatrices_MPIBAIJ_local(C, max_no, isrow_block + pos, iscol_block + pos, scall, *submat + pos, sym));
    if (!max_no) {
      if (scall == MAT_INITIAL_MATRIX) { /* submat[pos] is a dummy matrix */
        smat = (Mat_SubSppt *)(*submat)[pos]->data;
        smat->nstages = nstages;
      }
      pos++; /* advance to next dummy matrix if any */
    } else pos += max_no;
  }

  if (scall == MAT_INITIAL_MATRIX && ismax) {
    /* save nstages for reuse */
    subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
    smat = subc->submatis1;
    smat->nstages = nstages;
  }

  for (i = 0; i < ismax; i++) {
    PetscCall(ISDestroy(&isrow_block[i]));
    PetscCall(ISDestroy(&iscol_block[i]));
  }
  PetscCall(PetscFree2(isrow_block, iscol_block));
  PetscFunctionReturn(PETSC_SUCCESS);
}
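
/*
   Typical call sequence that ends up in MatCreateSubMatrices_MPIBAIJ above
   (a sketch under the assumption that A is a MATMPIBAIJ matrix and the index
   sets are consistent with its block structure):

     IS  isrow, iscol;
     Mat *submat;
     ... build isrow/iscol, e.g. with ISCreateStride() or MatIncreaseOverlap() ...
     PetscCall(MatCreateSubMatrices(A, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, &submat));
     ... use submat[0], a sequential (S)BAIJ matrix on each process ...
     PetscCall(MatCreateSubMatrices(A, 1, &isrow, &iscol, MAT_REUSE_MATRIX, &submat));
     PetscCall(MatDestroySubMatrices(1, &submat));

   The staging logic above only limits how many submatrices are extracted per
   communication round; the result is the same as doing them all at once.
*/
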
/* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
PetscErrorCode MatCreateSubMatrices_MPIBAIJ_local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats, PetscBool sym)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
  Mat A = c->A;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)c->B->data, *subc;
  const PetscInt **icol, **irow;
  PetscInt *nrow, *ncol, start;
  PetscMPIInt rank, size, tag0, tag2, tag3, tag4, *w1, *w2, *w3, *w4, nrqr;
  PetscInt **sbuf1, **sbuf2, *sbuf2_i, i, j, k, l, ct1, ct2, **rbuf1, row, proc = -1;
  PetscInt nrqs = 0, msz, **ptr = NULL, *req_size = NULL, *ctr = NULL, *pa, *tmp = NULL, tcol;
  PetscInt **rbuf3 = NULL, *req_source1 = NULL, *req_source2, **sbuf_aj, **rbuf2 = NULL, max1, max2;
  PetscInt **lens, is_no, ncols, *cols, mat_i, *mat_j, tmp2, jmax;
#if defined(PETSC_USE_CTABLE)
  PetscHMapI *cmap, cmap_i = NULL, *rmap, rmap_i;
#else
  PetscInt **cmap, *cmap_i = NULL, **rmap, *rmap_i;
#endif
  const PetscInt *irow_i, *icol_i;
  PetscInt ctr_j, *sbuf1_j, *sbuf_aj_i, *rbuf1_i, kmax, *lens_i;
  MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2, *r_waits3;
  MPI_Request *r_waits4, *s_waits3, *s_waits4;
  MPI_Comm comm;
  PetscScalar **rbuf4, *rbuf4_i = NULL, **sbuf_aa, *vals, *mat_a = NULL, *imat_a = NULL, *sbuf_aa_i;
  PetscMPIInt *onodes1, *olengths1, end;
  PetscInt **row2proc, *row2proc_i, *imat_ilen, *imat_j, *imat_i;
  Mat_SubSppt *smat_i;
  PetscBool *issorted, colflag, iscsorted = PETSC_TRUE;
  PetscInt *sbuf1_i, *rbuf2_i, *rbuf3_i, ilen;
  PetscInt bs = C->rmap->bs, bs2 = c->bs2, rstart = c->rstartbs;
  PetscBool ijonly = c->ijonly; /* private flag indicates only matrix data structures are requested */
  PetscInt nzA, nzB, *a_i = a->i, *b_i = b->i, *a_j = a->j, *b_j = b->j, ctmp, imark, *cworkA, *cworkB;
  PetscScalar *vworkA = NULL, *vworkB = NULL, *a_a = a->a, *b_a = b->a;
  PetscInt cstart = c->cstartbs, *bmap = c->garray;
  PetscBool *allrows, *allcolumns;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  size = c->size;
  rank = c->rank;

  PetscCall(PetscMalloc5(ismax, &row2proc, ismax, &cmap, ismax, &rmap, ismax + 1, &allcolumns, ismax, &allrows));
  PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, ismax, &issorted));

  for (i = 0; i < ismax; i++) {
    PetscCall(ISSorted(iscol[i], &issorted[i]));
    if (!issorted[i]) iscsorted = issorted[i]; /* columns are not sorted! */
    PetscCall(ISSorted(isrow[i], &issorted[i]));

    /* Check for special case: allcolumns */
    PetscCall(ISIdentity(iscol[i], &colflag));
    PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));

    if (colflag && ncol[i] == c->Nbs) {
      allcolumns[i] = PETSC_TRUE;
      icol[i] = NULL;
    } else {
      allcolumns[i] = PETSC_FALSE;
      PetscCall(ISGetIndices(iscol[i], &icol[i]));
    }

    /* Check for special case: allrows */
    PetscCall(ISIdentity(isrow[i], &colflag));
    PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
    if (colflag && nrow[i] == c->Mbs) {
      allrows[i] = PETSC_TRUE;
      irow[i] = NULL;
    } else {
      allrows[i] = PETSC_FALSE;
      PetscCall(ISGetIndices(isrow[i], &irow[i]));
    }
  }

  if (scall == MAT_REUSE_MATRIX) {
    /* Assumes new rows are same length as the old rows */
    for (i = 0; i < ismax; i++) {
      subc = (Mat_SeqBAIJ *)(submats[i]->data);
      PetscCheck(subc->mbs == nrow[i] && subc->nbs == ncol[i], PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix. wrong size");

      /* Initial matrix as if empty */
      PetscCall(PetscArrayzero(subc->ilen, subc->mbs));

      submats[i]->factortype = C->factortype;

      smat_i = subc->submatis1;

      nrqs = smat_i->nrqs;
      nrqr = smat_i->nrqr;
      rbuf1 = smat_i->rbuf1;
      rbuf2 = smat_i->rbuf2;
      rbuf3 = smat_i->rbuf3;
      req_source2 = smat_i->req_source2;

      sbuf1 = smat_i->sbuf1;
      sbuf2 = smat_i->sbuf2;
      ptr = smat_i->ptr;
      tmp = smat_i->tmp;
      ctr = smat_i->ctr;

      pa = smat_i->pa;
      req_size = smat_i->req_size;
      req_source1 = smat_i->req_source1;

      allcolumns[i] = smat_i->allcolumns;
      allrows[i] = smat_i->allrows;
      row2proc[i] = smat_i->row2proc;
      rmap[i] = smat_i->rmap;
      cmap[i] = smat_i->cmap;
    }

    if (!ismax) { /* Get dummy submatrices and retrieve struct submatis1 */
      PetscCheck(submats[0], PETSC_COMM_SELF, PETSC_ERR_ARG_NULL, "submats are null, cannot reuse");
      smat_i = (Mat_SubSppt *)submats[0]->data;

      nrqs = smat_i->nrqs;
      nrqr = smat_i->nrqr;
      rbuf1 = smat_i->rbuf1;
      rbuf2 = smat_i->rbuf2;
      rbuf3 = smat_i->rbuf3;
      req_source2 = smat_i->req_source2;

      sbuf1 = smat_i->sbuf1;
      sbuf2 = smat_i->sbuf2;
      ptr = smat_i->ptr;
      tmp = smat_i->tmp;
      ctr = smat_i->ctr;

      pa = smat_i->pa;
      req_size = smat_i->req_size;
      req_source1 = smat_i->req_source1;

      allcolumns[0] = PETSC_FALSE;
    }
  } else { /* scall == MAT_INITIAL_MATRIX */
    /* Get some new tags to keep the communication clean */
    PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag2));
    PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag3));

    /* evaluate communication - mesg to whom, length of mesg, and buffer space
       required. Based on this, buffers are allocated, and data copied into them */
    PetscCall(PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4)); /* mesg size, initialize work vectors */

    for (i = 0; i < ismax; i++) {
      jmax = nrow[i];
      irow_i = irow[i];

      PetscCall(PetscMalloc1(jmax, &row2proc_i));
      row2proc[i] = row2proc_i;

      if (issorted[i]) proc = 0;
      for (j = 0; j < jmax; j++) {
        if (!issorted[i]) proc = 0;
        if (allrows[i]) row = j;
        else row = irow_i[j];

        while (row >= c->rangebs[proc + 1]) proc++;
        w4[proc]++;
        row2proc_i[j] = proc; /* map row index to proc */
      }
      for (j = 0; j < size; j++) {
        if (w4[j]) {
          w1[j] += w4[j];
          w3[j]++;
          w4[j] = 0;
        }
      }
    }

    nrqs = 0;     /* no of outgoing messages */
    msz = 0;      /* total mesg length (for all procs) */
    w1[rank] = 0; /* no mesg sent to self */
    w3[rank] = 0;
    for (i = 0; i < size; i++) {
      if (w1[i]) {
        w2[i] = 1;
        nrqs++;
      } /* there exists a message to proc i */
    }
    PetscCall(PetscMalloc1(nrqs, &pa)); /* (proc-array) */
    for (i = 0, j = 0; i < size; i++) {
      if (w1[i]) {
        pa[j] = i;
        j++;
      }
    }

    /* Each message would have a header = 1 + 2*(no of IS) + data */
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      w1[j] += w2[j] + 2 * w3[j];
      msz += w1[j];
    }
    PetscCall(PetscInfo(0, "Number of outgoing messages %" PetscInt_FMT " Total message length %" PetscInt_FMT "\n", nrqs, msz));

    /* Determine the number of messages to expect, their lengths, and from-ids */
    PetscCall(PetscGatherNumberOfMessages(comm, w2, w1, &nrqr));
    PetscCall(PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1));

    /* Now post the Irecvs corresponding to these messages */
    PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag0));
    PetscCall(PetscPostIrecvInt(comm, tag0, nrqr, onodes1, olengths1, &rbuf1, &r_waits1));

    /* Allocate Memory for outgoing messages */
    PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
    PetscCall(PetscArrayzero(sbuf1, size));
    PetscCall(PetscArrayzero(ptr, size));

    {
      PetscInt *iptr = tmp;
      k = 0;
      for (i = 0; i < nrqs; i++) {
        j = pa[i];
        iptr += k;
        sbuf1[j] = iptr;
        k = w1[j];
      }
    }

    /* Form the outgoing messages.
       Initialize the header space */
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      sbuf1[j][0] = 0;
      PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
      ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
    }

    /* Parse the isrow and copy data into outbuf */
    for (i = 0; i < ismax; i++) {
      row2proc_i = row2proc[i];
      PetscCall(PetscArrayzero(ctr, size));
      irow_i = irow[i];
      jmax = nrow[i];
      for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
        proc = row2proc_i[j];
        if (allrows[i]) row = j;
        else row = irow_i[j];

        if (proc != rank) { /* copy to the outgoing buf */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        }
      }
      /* Update the headers for the current IS */
      for (j = 0; j < size; j++) { /* Can Optimise this loop too */
        if ((ctr_j = ctr[j])) {
          sbuf1_j = sbuf1[j];
          k = ++sbuf1_j[0];
          sbuf1_j[2 * k] = ctr_j;
          sbuf1_j[2 * k - 1] = i;
        }
      }
    }

    /* Now post the sends */
    PetscCall(PetscMalloc1(nrqs, &s_waits1));
    for (i = 0; i < nrqs; ++i) {
      j = pa[i];
      PetscCallMPI(MPI_Isend(sbuf1[j], w1[j], MPIU_INT, j, tag0, comm, s_waits1 + i));
    }

    /* Post Receives to capture the buffer size */
    PetscCall(PetscMalloc1(nrqs, &r_waits2));
    PetscCall(PetscMalloc3(nrqs, &req_source2, nrqs, &rbuf2, nrqs, &rbuf3));
    if (nrqs) rbuf2[0] = tmp + msz;
    for (i = 1; i < nrqs; ++i) rbuf2[i] = rbuf2[i - 1] + w1[pa[i - 1]];
    for (i = 0; i < nrqs; ++i) {
      j = pa[i];
      PetscCallMPI(MPI_Irecv(rbuf2[i], w1[j], MPIU_INT, j, tag2, comm, r_waits2 + i));
    }

    /* Send to other procs the buf size they should allocate */
    /* Receive messages */
    PetscCall(PetscMalloc1(nrqr, &s_waits2));
    PetscCall(PetscMalloc3(nrqr, &sbuf2, nrqr, &req_size, nrqr, &req_source1));

    PetscCallMPI(MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE));
    for (i = 0; i < nrqr; ++i) {
      req_size[i] = 0;
      rbuf1_i = rbuf1[i];
      start = 2 * rbuf1_i[0] + 1;
      end = olengths1[i];
      PetscCall(PetscMalloc1(end, &sbuf2[i]));
      sbuf2_i = sbuf2[i];
      for (j = start; j < end; j++) {
        row = rbuf1_i[j] - rstart;
        ncols = a_i[row + 1] - a_i[row] + b_i[row + 1] - b_i[row];
        sbuf2_i[j] = ncols;
        req_size[i] += ncols;
      }
      req_source1[i] = onodes1[i];
      /* form the header */
      sbuf2_i[0] = req_size[i];
      for (j = 1; j < start; j++) sbuf2_i[j] = rbuf1_i[j];

      PetscCallMPI(MPI_Isend(sbuf2_i, end, MPIU_INT, req_source1[i], tag2, comm, s_waits2 + i));
    }

    PetscCall(PetscFree(onodes1));
    PetscCall(PetscFree(olengths1));

    PetscCall(PetscFree(r_waits1));
    PetscCall(PetscFree4(w1, w2, w3, w4));

    /* Receive messages */
    PetscCall(PetscMalloc1(nrqs, &r_waits3));

    PetscCallMPI(MPI_Waitall(nrqs, r_waits2, MPI_STATUSES_IGNORE));
    for (i = 0; i < nrqs; ++i) {
      PetscCall(PetscMalloc1(rbuf2[i][0], &rbuf3[i]));
      req_source2[i] = pa[i];
      PetscCallMPI(MPI_Irecv(rbuf3[i], rbuf2[i][0], MPIU_INT, req_source2[i], tag3, comm, r_waits3 + i));
    }
    PetscCall(PetscFree(r_waits2));

    /* Wait on sends1 and sends2 */
    PetscCallMPI(MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE));
    PetscCallMPI(MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE));
    PetscCall(PetscFree(s_waits1));
    PetscCall(PetscFree(s_waits2));

    /* Now allocate sending buffers for a->j, and send them off */
    PetscCall(PetscMalloc1(nrqr, &sbuf_aj));
    for (i = 0, j = 0; i < nrqr; i++) j += req_size[i];
    if (nrqr) PetscCall(PetscMalloc1(j, &sbuf_aj[0]));
    for (i = 1; i < nrqr; i++) sbuf_aj[i] = sbuf_aj[i - 1] + req_size[i - 1];

    PetscCall(PetscMalloc1(nrqr, &s_waits3));
    {
      for (i = 0; i < nrqr; i++) {
        rbuf1_i = rbuf1[i];
        sbuf_aj_i = sbuf_aj[i];
        ct1 = 2 * rbuf1_i[0] + 1;
        ct2 = 0;
        for (j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
          kmax = rbuf1[i][2 * j];
          for (k = 0; k < kmax; k++, ct1++) {
            row = rbuf1_i[ct1] - rstart;
            nzA = a_i[row + 1] - a_i[row];
            nzB = b_i[row + 1] - b_i[row];
            ncols = nzA + nzB;
            cworkA = a_j + a_i[row];
            cworkB = b_j ? b_j + b_i[row] : NULL;

            /* load the column indices for this row into cols */
            cols = sbuf_aj_i + ct2;
            for (l = 0; l < nzB; l++) {
              if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
              else break;
            }
            imark = l;
            for (l = 0; l < nzA; l++) cols[imark + l] = cstart + cworkA[l];
            for (l = imark; l < nzB; l++) cols[nzA + l] = bmap[cworkB[l]];
            ct2 += ncols;
          }
        }
        PetscCallMPI(MPI_Isend(sbuf_aj_i, req_size[i], MPIU_INT, req_source1[i], tag3, comm, s_waits3 + i));
      }
    }

    /* create col map: global col of C -> local col of submatrices */
#if defined(PETSC_USE_CTABLE)
    for (i = 0; i < ismax; i++) {
      if (!allcolumns[i]) {
        PetscCall(PetscHMapICreateWithSize(ncol[i], cmap + i));

        jmax = ncol[i];
        icol_i = icol[i];
        cmap_i = cmap[i];
        for (j = 0; j < jmax; j++) PetscCall(PetscHMapISet(cmap[i], icol_i[j] + 1, j + 1));
      } else cmap[i] = NULL;
    }
#else
    for (i = 0; i < ismax; i++) {
      if (!allcolumns[i]) {
        PetscCall(PetscCalloc1(c->Nbs, &cmap[i]));
        jmax = ncol[i];
        icol_i = icol[i];
        cmap_i = cmap[i];
        for (j = 0; j < jmax; j++) cmap_i[icol_i[j]] = j + 1;
      } else cmap[i] = NULL;
    }
#endif
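
    /*
       The column map built above stores 1-based local column numbers so that
       a lookup result of 0 can mean "column not selected".  A minimal sketch
       of the lookup side used further below (the name `gcol` is illustrative;
       PETSC_USE_CTABLE variant):

         PetscInt loc;
         PetscCall(PetscHMapIGetWithDefault(cmap_i, gcol + 1, 0, &loc));
         if (loc) { ... global block column gcol maps to local column loc - 1 ... }
    */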

    /* Create lens which is required for MatCreate... */
    for (i = 0, j = 0; i < ismax; i++) j += nrow[i];
    PetscCall(PetscMalloc1(ismax, &lens));

    if (ismax) PetscCall(PetscCalloc1(j, &lens[0]));
    for (i = 1; i < ismax; i++) lens[i] = lens[i - 1] + nrow[i - 1];

    /* Update lens from local data */
    for (i = 0; i < ismax; i++) {
      row2proc_i = row2proc[i];
      jmax = nrow[i];
      if (!allcolumns[i]) cmap_i = cmap[i];
      irow_i = irow[i];
      lens_i = lens[i];
      for (j = 0; j < jmax; j++) {
        if (allrows[i]) row = j;
        else row = irow_i[j]; /* global blocked row of C */

        proc = row2proc_i[j];
        if (proc == rank) {
          /* Get indices from matA and then from matB */
#if defined(PETSC_USE_CTABLE)
          PetscInt tt;
#endif
          row = row - rstart;
          nzA = a_i[row + 1] - a_i[row];
          nzB = b_i[row + 1] - b_i[row];
          cworkA = a_j + a_i[row];
          cworkB = b_j ? b_j + b_i[row] : NULL;

          if (!allcolumns[i]) {
#if defined(PETSC_USE_CTABLE)
            for (k = 0; k < nzA; k++) {
              PetscCall(PetscHMapIGetWithDefault(cmap_i, cstart + cworkA[k] + 1, 0, &tt));
              if (tt) lens_i[j]++;
            }
            for (k = 0; k < nzB; k++) {
              PetscCall(PetscHMapIGetWithDefault(cmap_i, bmap[cworkB[k]] + 1, 0, &tt));
              if (tt) lens_i[j]++;
            }
#else
            for (k = 0; k < nzA; k++) {
              if (cmap_i[cstart + cworkA[k]]) lens_i[j]++;
            }
            for (k = 0; k < nzB; k++) {
              if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++;
            }
#endif
          } else { /* allcolumns */
            lens_i[j] = nzA + nzB;
          }
        }
      }
    }

    /* Create row map: global row of C -> local row of submatrices */
    for (i = 0; i < ismax; i++) {
      if (!allrows[i]) {
#if defined(PETSC_USE_CTABLE)
        PetscCall(PetscHMapICreateWithSize(nrow[i], rmap + i));
        irow_i = irow[i];
        jmax = nrow[i];
        for (j = 0; j < jmax; j++) {
          if (allrows[i]) {
            PetscCall(PetscHMapISet(rmap[i], j + 1, j + 1));
          } else {
            PetscCall(PetscHMapISet(rmap[i], irow_i[j] + 1, j + 1));
          }
        }
#else
        PetscCall(PetscCalloc1(c->Mbs, &rmap[i]));
        rmap_i = rmap[i];
        irow_i = irow[i];
        jmax = nrow[i];
        for (j = 0; j < jmax; j++) {
          if (allrows[i]) rmap_i[j] = j;
          else rmap_i[irow_i[j]] = j;
        }
#endif
      } else rmap[i] = NULL;
    }

    /* Update lens from offproc data */
    {
      PetscInt *rbuf2_i, *rbuf3_i, *sbuf1_i;

      PetscCallMPI(MPI_Waitall(nrqs, r_waits3, MPI_STATUSES_IGNORE));
      for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
        sbuf1_i = sbuf1[pa[tmp2]];
        jmax = sbuf1_i[0];
        ct1 = 2 * jmax + 1;
        ct2 = 0;
        rbuf2_i = rbuf2[tmp2];
        rbuf3_i = rbuf3[tmp2];
        for (j = 1; j <= jmax; j++) {
          is_no = sbuf1_i[2 * j - 1];
          max1 = sbuf1_i[2 * j];
          lens_i = lens[is_no];
          if (!allcolumns[is_no]) cmap_i = cmap[is_no];
          rmap_i = rmap[is_no];
          for (k = 0; k < max1; k++, ct1++) {
            if (allrows[is_no]) {
              row = sbuf1_i[ct1];
            } else {
#if defined(PETSC_USE_CTABLE)
              PetscCall(PetscHMapIGetWithDefault(rmap_i, sbuf1_i[ct1] + 1, 0, &row));
              row--;
              PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
#else
              row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
#endif
            }
            max2 = rbuf2_i[ct1];
            for (l = 0; l < max2; l++, ct2++) {
              if (!allcolumns[is_no]) {
#if defined(PETSC_USE_CTABLE)
                PetscCall(PetscHMapIGetWithDefault(cmap_i, rbuf3_i[ct2] + 1, 0, &tcol));
#else
                tcol = cmap_i[rbuf3_i[ct2]];
#endif
                if (tcol) lens_i[row]++;
              } else {         /* allcolumns */
                lens_i[row]++; /* lens_i[row] += max2 ? */
              }
            }
          }
        }
      }
    }
    PetscCall(PetscFree(r_waits3));
    PetscCallMPI(MPI_Waitall(nrqr, s_waits3, MPI_STATUSES_IGNORE));
    PetscCall(PetscFree(s_waits3));

    /* Create the submatrices */
    for (i = 0; i < ismax; i++) {
      PetscInt bs_tmp;
      if (ijonly) bs_tmp = 1;
      else bs_tmp = bs;

      PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
      PetscCall(MatSetSizes(submats[i], nrow[i] * bs_tmp, ncol[i] * bs_tmp, PETSC_DETERMINE, PETSC_DETERMINE));
      PetscCall(MatSetType(submats[i], sym ? ((PetscObject)A)->type_name : MATSEQBAIJ));
      PetscCall(MatSeqBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i]));
      PetscCall(MatSeqSBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i])); /* this subroutine is used by SBAIJ routines */

      /* create struct Mat_SubSppt and attach it to submat */
      PetscCall(PetscNew(&smat_i));
      subc = (Mat_SeqBAIJ *)submats[i]->data;
      subc->submatis1 = smat_i;

      smat_i->destroy = submats[i]->ops->destroy;
      submats[i]->ops->destroy = MatDestroySubMatrix_SeqBAIJ;
      submats[i]->factortype = C->factortype;

      smat_i->id = i;
      smat_i->nrqs = nrqs;
      smat_i->nrqr = nrqr;
      smat_i->rbuf1 = rbuf1;
      smat_i->rbuf2 = rbuf2;
      smat_i->rbuf3 = rbuf3;
      smat_i->sbuf2 = sbuf2;
      smat_i->req_source2 = req_source2;

      smat_i->sbuf1 = sbuf1;
      smat_i->ptr = ptr;
      smat_i->tmp = tmp;
      smat_i->ctr = ctr;

      smat_i->pa = pa;
      smat_i->req_size = req_size;
      smat_i->req_source1 = req_source1;

      smat_i->allcolumns = allcolumns[i];
      smat_i->allrows = allrows[i];
      smat_i->singleis = PETSC_FALSE;
      smat_i->row2proc = row2proc[i];
      smat_i->rmap = rmap[i];
      smat_i->cmap = cmap[i];
    }

    if (!ismax) { /* Create dummy submats[0] for reuse struct subc */
      PetscCall(MatCreate(PETSC_COMM_SELF, &submats[0]));
      PetscCall(MatSetSizes(submats[0], 0, 0, PETSC_DETERMINE, PETSC_DETERMINE));
      PetscCall(MatSetType(submats[0], MATDUMMY));

      /* create struct Mat_SubSppt and attach it to submat */
      PetscCall(PetscNew(&smat_i));
      submats[0]->data = (void *)smat_i;

      smat_i->destroy = submats[0]->ops->destroy;
      submats[0]->ops->destroy = MatDestroySubMatrix_Dummy;
      submats[0]->factortype = C->factortype;

      smat_i->id = 0;
      smat_i->nrqs = nrqs;
      smat_i->nrqr = nrqr;
      smat_i->rbuf1 = rbuf1;
      smat_i->rbuf2 = rbuf2;
      smat_i->rbuf3 = rbuf3;
      smat_i->sbuf2 = sbuf2;
      smat_i->req_source2 = req_source2;

      smat_i->sbuf1 = sbuf1;
      smat_i->ptr = ptr;
      smat_i->tmp = tmp;
      smat_i->ctr = ctr;

      smat_i->pa = pa;
      smat_i->req_size = req_size;
      smat_i->req_source1 = req_source1;

      smat_i->allcolumns = PETSC_FALSE;
      smat_i->singleis = PETSC_FALSE;
      smat_i->row2proc = NULL;
      smat_i->rmap = NULL;
      smat_i->cmap = NULL;
    }

    if (ismax) PetscCall(PetscFree(lens[0]));
    PetscCall(PetscFree(lens));
    if (sbuf_aj) {
      PetscCall(PetscFree(sbuf_aj[0]));
      PetscCall(PetscFree(sbuf_aj));
    }

  } /* end of scall == MAT_INITIAL_MATRIX */

  /* Post recv matrix values */
  if (!ijonly) {
    PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag4));
    PetscCall(PetscMalloc1(nrqs, &rbuf4));
    PetscCall(PetscMalloc1(nrqs, &r_waits4));
    for (i = 0; i < nrqs; ++i) {
      PetscCall(PetscMalloc1(rbuf2[i][0] * bs2, &rbuf4[i]));
      PetscCallMPI(MPI_Irecv(rbuf4[i], rbuf2[i][0] * bs2, MPIU_SCALAR, req_source2[i], tag4, comm, r_waits4 + i));
    }

    /* Allocate sending buffers for a->a, and send them off */
    PetscCall(PetscMalloc1(nrqr, &sbuf_aa));
    for (i = 0, j = 0; i < nrqr; i++) j += req_size[i];

    if (nrqr) PetscCall(PetscMalloc1(j * bs2, &sbuf_aa[0]));
    for (i = 1; i < nrqr; i++) sbuf_aa[i] = sbuf_aa[i - 1] + req_size[i - 1] * bs2;

    PetscCall(PetscMalloc1(nrqr, &s_waits4));

    for (i = 0; i < nrqr; i++) {
      rbuf1_i = rbuf1[i];
      sbuf_aa_i = sbuf_aa[i];
      ct1 = 2 * rbuf1_i[0] + 1;
      ct2 = 0;
      for (j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
        kmax = rbuf1_i[2 * j];
        for (k = 0; k < kmax; k++, ct1++) {
          row = rbuf1_i[ct1] - rstart;
          nzA = a_i[row + 1] - a_i[row];
          nzB = b_i[row + 1] - b_i[row];
          ncols = nzA + nzB;
          cworkB = b_j ? b_j + b_i[row] : NULL;
          vworkA = a_a + a_i[row] * bs2;
          vworkB = b_a ? b_a + b_i[row] * bs2 : NULL;

          /* load the column values for this row into vals */
          vals = sbuf_aa_i + ct2 * bs2;
          for (l = 0; l < nzB; l++) {
            if ((bmap[cworkB[l]]) < cstart) {
              PetscCall(PetscArraycpy(vals + l * bs2, vworkB + l * bs2, bs2));
            } else break;
          }
          imark = l;
          for (l = 0; l < nzA; l++) PetscCall(PetscArraycpy(vals + (imark + l) * bs2, vworkA + l * bs2, bs2));
          for (l = imark; l < nzB; l++) PetscCall(PetscArraycpy(vals + (nzA + l) * bs2, vworkB + l * bs2, bs2));

          ct2 += ncols;
        }
      }
      PetscCallMPI(MPI_Isend(sbuf_aa_i, req_size[i] * bs2, MPIU_SCALAR, req_source1[i], tag4, comm, s_waits4 + i));
    }
  }
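
  /*
     Matrix values travel as bs x bs blocks: each column index placed in
     sbuf_aj earlier corresponds to bs2 = bs*bs scalars in sbuf_aa, copied with
     PetscArraycpy(vals + l * bs2, vwork + l * bs2, bs2).  As an illustrative
     example, with bs = 2 a block row holding 3 block entries contributes 3
     integers to sbuf_aj and 3 * 4 = 12 scalars to sbuf_aa.
  */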

  /* Assemble the matrices */
  /* First assemble the local rows */
  for (i = 0; i < ismax; i++) {
    row2proc_i = row2proc[i];
    subc = (Mat_SeqBAIJ *)submats[i]->data;
    imat_ilen = subc->ilen;
    imat_j = subc->j;
    imat_i = subc->i;
    imat_a = subc->a;

    if (!allcolumns[i]) cmap_i = cmap[i];
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax = nrow[i];
    for (j = 0; j < jmax; j++) {
      if (allrows[i]) row = j;
      else row = irow_i[j];
      proc = row2proc_i[j];

      if (proc == rank) {
        row = row - rstart;
        nzA = a_i[row + 1] - a_i[row];
        nzB = b_i[row + 1] - b_i[row];
        cworkA = a_j + a_i[row];
        cworkB = b_j ? b_j + b_i[row] : NULL;
        if (!ijonly) {
          vworkA = a_a + a_i[row] * bs2;
          vworkB = b_a ? b_a + b_i[row] * bs2 : NULL;
        }

        if (allrows[i]) {
          row = row + rstart;
        } else {
#if defined(PETSC_USE_CTABLE)
          PetscCall(PetscHMapIGetWithDefault(rmap_i, row + rstart + 1, 0, &row));
          row--;

          PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
#else
          row = rmap_i[row + rstart];
#endif
        }
        mat_i = imat_i[row];
        if (!ijonly) mat_a = imat_a + mat_i * bs2;
        mat_j = imat_j + mat_i;
        ilen = imat_ilen[row];

        /* load the column indices for this row into cols */
        if (!allcolumns[i]) {
          for (l = 0; l < nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) {
#if defined(PETSC_USE_CTABLE)
              PetscCall(PetscHMapIGetWithDefault(cmap_i, ctmp + 1, 0, &tcol));
              if (tcol) {
#else
              if ((tcol = cmap_i[ctmp])) {
#endif
                *mat_j++ = tcol - 1;
                PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
                mat_a += bs2;
                ilen++;
              }
            } else break;
          }
          imark = l;
          for (l = 0; l < nzA; l++) {
#if defined(PETSC_USE_CTABLE)
            PetscCall(PetscHMapIGetWithDefault(cmap_i, cstart + cworkA[l] + 1, 0, &tcol));
            if (tcol) {
#else
            if ((tcol = cmap_i[cstart + cworkA[l]])) {
#endif
              *mat_j++ = tcol - 1;
              if (!ijonly) {
                PetscCall(PetscArraycpy(mat_a, vworkA + l * bs2, bs2));
                mat_a += bs2;
              }
              ilen++;
            }
          }
          for (l = imark; l < nzB; l++) {
#if defined(PETSC_USE_CTABLE)
            PetscCall(PetscHMapIGetWithDefault(cmap_i, bmap[cworkB[l]] + 1, 0, &tcol));
            if (tcol) {
#else
            if ((tcol = cmap_i[bmap[cworkB[l]]])) {
#endif
              *mat_j++ = tcol - 1;
              if (!ijonly) {
                PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
                mat_a += bs2;
              }
              ilen++;
            }
          }
        } else { /* allcolumns */
          for (l = 0; l < nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) {
              *mat_j++ = ctmp;
              PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
              mat_a += bs2;
              ilen++;
            } else break;
          }
          imark = l;
          for (l = 0; l < nzA; l++) {
            *mat_j++ = cstart + cworkA[l];
            if (!ijonly) {
              PetscCall(PetscArraycpy(mat_a, vworkA + l * bs2, bs2));
              mat_a += bs2;
            }
            ilen++;
          }
          for (l = imark; l < nzB; l++) {
            *mat_j++ = bmap[cworkB[l]];
            if (!ijonly) {
              PetscCall(PetscArraycpy(mat_a, vworkB + l * bs2, bs2));
              mat_a += bs2;
            }
            ilen++;
          }
        }
        imat_ilen[row] = ilen;
      }
    }
  }

  /* Now assemble the off proc rows */
  if (!ijonly) PetscCallMPI(MPI_Waitall(nrqs, r_waits4, MPI_STATUSES_IGNORE));
  for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
    sbuf1_i = sbuf1[pa[tmp2]];
    jmax = sbuf1_i[0];
    ct1 = 2 * jmax + 1;
    ct2 = 0;
    rbuf2_i = rbuf2[tmp2];
    rbuf3_i = rbuf3[tmp2];
    if (!ijonly) rbuf4_i = rbuf4[tmp2];
    for (j = 1; j <= jmax; j++) {
      is_no = sbuf1_i[2 * j - 1];
      rmap_i = rmap[is_no];
      if (!allcolumns[is_no]) cmap_i = cmap[is_no];
      subc = (Mat_SeqBAIJ *)submats[is_no]->data;
      imat_ilen = subc->ilen;
      imat_j = subc->j;
      imat_i = subc->i;
      if (!ijonly) imat_a = subc->a;
      max1 = sbuf1_i[2 * j];
      for (k = 0; k < max1; k++, ct1++) { /* for each recved block row */
        row = sbuf1_i[ct1];

        if (allrows[is_no]) {
          row = sbuf1_i[ct1];
        } else {
#if defined(PETSC_USE_CTABLE)
          PetscCall(PetscHMapIGetWithDefault(rmap_i, row + 1, 0, &row));
          row--;
          PetscCheck(row >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "row not found in table");
#else
          row = rmap_i[row];
#endif
        }
        ilen = imat_ilen[row];
        mat_i = imat_i[row];
        if (!ijonly) mat_a = imat_a + mat_i * bs2;
        mat_j = imat_j + mat_i;
        max2 = rbuf2_i[ct1];
        if (!allcolumns[is_no]) {
          for (l = 0; l < max2; l++, ct2++) {
#if defined(PETSC_USE_CTABLE)
            PetscCall(PetscHMapIGetWithDefault(cmap_i, rbuf3_i[ct2] + 1, 0, &tcol));
#else
            tcol = cmap_i[rbuf3_i[ct2]];
#endif
            if (tcol) {
              *mat_j++ = tcol - 1;
              if (!ijonly) {
                PetscCall(PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2));
                mat_a += bs2;
              }
              ilen++;
            }
          }
        } else { /* allcolumns */
          for (l = 0; l < max2; l++, ct2++) {
            *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */
            if (!ijonly) {
              PetscCall(PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2));
              mat_a += bs2;
            }
            ilen++;
          }
        }
        imat_ilen[row] = ilen;
      }
    }
  }

  if (!iscsorted) { /* sort column indices of the rows */
    MatScalar *work;

    PetscCall(PetscMalloc1(bs2, &work));
    for (i = 0; i < ismax; i++) {
      subc = (Mat_SeqBAIJ *)submats[i]->data;
      imat_ilen = subc->ilen;
      imat_j = subc->j;
      imat_i = subc->i;
      if (!ijonly) imat_a = subc->a;
      if (allcolumns[i]) continue;

      jmax = nrow[i];
      for (j = 0; j < jmax; j++) {
        mat_i = imat_i[j];
        mat_j = imat_j + mat_i;
        ilen = imat_ilen[j];
        if (ijonly) {
          PetscCall(PetscSortInt(ilen, mat_j));
        } else {
          mat_a = imat_a + mat_i * bs2;
          PetscCall(PetscSortIntWithDataArray(ilen, mat_j, mat_a, bs2 * sizeof(MatScalar), work));
        }
      }
    }
    PetscCall(PetscFree(work));
  }

  if (!ijonly) {
    PetscCall(PetscFree(r_waits4));
    PetscCallMPI(MPI_Waitall(nrqr, s_waits4, MPI_STATUSES_IGNORE));
    PetscCall(PetscFree(s_waits4));
  }

  /* Restore the indices */
  for (i = 0; i < ismax; i++) {
    if (!allrows[i]) PetscCall(ISRestoreIndices(isrow[i], irow + i));
    if (!allcolumns[i]) PetscCall(ISRestoreIndices(iscol[i], icol + i));
  }

  for (i = 0; i < ismax; i++) {
    PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
  }

  PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, issorted));
  PetscCall(PetscFree5(row2proc, cmap, rmap, allcolumns, allrows));

  if (!ijonly) {
    if (sbuf_aa) {
      PetscCall(PetscFree(sbuf_aa[0]));
      PetscCall(PetscFree(sbuf_aa));
    }

    for (i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf4[i]));
    PetscCall(PetscFree(rbuf4));
  }
  c->ijonly = PETSC_FALSE; /* set back to the default */
  PetscFunctionReturn(PETSC_SUCCESS);
}