Actual source code: sbaijov.c

/*$Id: sbaijov.c,v 1.65 2001/08/06 21:15:42 bsmith Exp $*/

/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding the submatrices that are shared across processors.
*/
#include "src/mat/impls/sbaij/mpi/mpisbaij.h"
#include "petscbt.h"

static int MatIncreaseOverlap_MPISBAIJ_Once(Mat,int,IS*);
static int MatIncreaseOverlap_MPISBAIJ_Local(Mat,int*,int,int*,PetscBT*);

int MatIncreaseOverlap_MPISBAIJ(Mat C,int is_max,IS is[],int ov)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  int          i,N=C->N,bs=c->bs;
  IS           *is_new;

  PetscMalloc(is_max*sizeof(IS),&is_new);
  /* Convert the indices into block format */
  ISCompressIndicesGeneral(N,bs,is_max,is,is_new);
  if (ov < 0) {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");}
  for (i=0; i<ov; ++i) {
    MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
  }
  for (i=0; i<is_max; i++) {ISDestroy(is[i]);}
  ISExpandIndicesGeneral(N,bs,is_max,is_new,is);
  for (i=0; i<is_max; i++) {ISDestroy(is_new[i]);}
  PetscFree(is_new);
  return(0);
}
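
/* Illustrative usage sketch (not part of the original file): this routine is
   reached through the generic MatIncreaseOverlap() interface, which dispatches
   here for matrices of type MATMPISBAIJ. Assuming C is an assembled MPISBAIJ
   matrix and idx[] holds nloc local block-row indices, a caller might write:

       IS is[1];
       ISCreateGeneral(PETSC_COMM_SELF,nloc,idx,&is[0]);
       MatIncreaseOverlap(C,1,is,2);    (grow is[0] by two levels of overlap)
       ISDestroy(is[0]);
*/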

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, the number of index sets is[]
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data is packed in the format (for creating output is[]):
       data[0]          = is_max, the number of index sets is[]
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
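
/* Worked example (illustrative, not from the original source): with is_max = 2,
   is[0] = {3,7} and is[1] = {5}, the communication format packs
       data1[] = {2,  2, 1,  3, 7,  5}
   i.e. the header {is_max, size of is[0], size of is[1]} followed by the
   indices back-to-back, while in the output format is[i] always begins at the
   fixed offset is_max + 1 + Mbs*i so that each set can grow up to Mbs entries. */
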
static int MatIncreaseOverlap_MPISBAIJ_Once(Mat C,int is_max,IS is[])
{
  Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
  int         len,idx,*idx_i,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
              size,rank,Mbs,i,j,k,nrqs,nrqr,*odata1,*odata2,
              tag1,tag2,flag,proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  int         *id_r1,*len_r1,proc_end=0,*iwork,*len_s,len_unused,nodata2;
  int         ois_max; /* max number of is[] over all processors */
  char        *t_p;
  MPI_Comm    comm;
  MPI_Request *s_waits1,*s_waits2,r_req;
  MPI_Status  *s_status,r_status;
  PetscBT     *table=0;  /* mark indices of this processor's is[] */
  PetscBT     table_i;
  PetscBT     otable; /* mark indices of other processors' is[] */
  int         bs=c->bs,Bn=c->B->n,Bnbs=Bn/bs,*Bowners;
  IS          garray_local,garray_gl;


  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  PetscObjectGetNewTag((PetscObject)C,&tag1);
  PetscObjectGetNewTag((PetscObject)C,&tag2);

  MPI_Allreduce(&is_max,&ois_max,1,MPI_INT,MPI_MAX,comm);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  PetscMalloc(is_max*sizeof(int),&n);
  len = 0;
  for (i=0; i<is_max; i++) {
    ISGetLocalSize(is[i],&n[i]);
    len += n[i];
  }
  if (len == 0) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  PetscMalloc((size*len+1)*sizeof(int),&data1);
  PetscMalloc(size*sizeof(int*),&data1_start);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  PetscMalloc((size*4+1)*sizeof(int),&len_s);
  btable  = len_s + size;
  iwork   = btable + size;
  Bowners = iwork + size;

  /* gather c->garray from all processors */
  ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);
  ISAllGather(garray_local,&garray_gl);
  ISDestroy(garray_local);
  MPI_Allgather(&Bnbs,1,MPI_INT,Bowners+1,1,MPI_INT,comm);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
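
  /* Illustration (not from the original source): the Allgather collects each
     processor's number of B-matrix block columns and the prefix sum turns the
     counts into offsets, e.g. local Bnbs = {2,3,1} on 3 processors gives
     Bowners = {0,2,5,6}, so proc i's garray entries occupy
     [Bowners[i],Bowners[i+1]) in garray_gl. */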

  if (is_max){
    /* create hash table ctable which maps each block row to its owner proc_id */
    PetscMalloc(Mbs*sizeof(int),&ctable);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<c->rowners[proc_id+1]; j++) {
        ctable[j] = proc_id;
      }
    }
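
    /* Illustration (not from the original source): with 3 processors and
       rowners = {0,4,9,12}, ctable = {0,0,0,0, 1,1,1,1,1, 2,2,2}, so
       ctable[idx] is the processor owning block row idx. */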

    /* create tables for marking indices */
    len_max = is_max*sizeof(PetscBT) + (Mbs/PETSC_BITS_PER_BYTE+1)*is_max*sizeof(char) + 1;
    PetscMalloc(len_max,&table);
    t_p = (char*)(table + is_max);
    for (i=0; i<is_max; i++) {
      table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
    }

    /* hash table table_i[idx] = 1 if idx is in the is[] arrays
                               = 0 otherwise */
    table_i = table[0];
    PetscBTMemzero(Mbs,table_i);
    for (i=0; i<is_max; i++){
      ISGetIndices(is[i],&idx_i);
      for (j=0; j<n[i]; j++){
        idx = idx_i[j];
        PetscBTSet(table_i,idx);
      }
      ISRestoreIndices(is[i],&idx_i);
    }

    /* hash table btable[id_proc] = 1 if a col index of the B-matrix on [id_proc] is in the is[] arrays
                                  = 0 otherwise */
    ISGetIndices(garray_gl,&idx_i);
    for (i=0; i<size; i++){
      btable[i] = 0;
      for (j=Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols */
        idx = idx_i[j];
        if (PetscBTLookup(table_i,idx)){
          btable[i] = 1;
          break;
        }
      }
    }
    ISRestoreIndices(garray_gl,&idx_i);
  } /* if (is_max) */
  ISDestroy(garray_gl);

  /* evaluate communication - message to whom, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++){
    iwork[proc_id] = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank){
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ISGetIndices(is[i],&idx_i);
    for (j=0; j<n[i]; j++){
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++){ /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !btable[proc_id]) continue; /* no need to send idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++){
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id] = len_s[proc_id];
    }
    ISRestoreIndices(is[i],&idx_i);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++){
    data1_start[i] = data1 + i*len;
    if (len_s[i]){
      nrqs++;
      len_s[i] += 1 + is_max; /* add the length of the header msg */
    }
  }

  for (i=0; i<is_max; i++) {
    ISDestroy(is[i]);
  }
  PetscFree(n);
  if (ctable){PetscFree(ctable);}

  /* Determine the number of messages to expect, their lengths, and from-ids */
  PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);
  PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);
  /* PetscPrintf(PETSC_COMM_SELF,"[%d] nrqs: %d, nrqr: %d\n",rank,nrqs,nrqr); */

  /* Now post the sends */
  PetscMalloc(2*size*sizeof(MPI_Request),&s_waits1);
  s_waits2 = s_waits1 + size;
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++){ /* send data1 to processor [proc_id] */
    if (len_s[proc_id]){
      MPI_Isend(data1_start[proc_id],len_s[proc_id],MPI_INT,proc_id,tag1,comm,s_waits1+k);
      k++;
    }
  }

  /* 2. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  len_max = is_max*(Mbs+1); /* max space for storing all is[] of this processor */
  PetscMalloc((len_max+1)*sizeof(int),&data);
  MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
  PetscFree(data1_start);

  /* 3. Receive others' is[] and process them, then send the results back */
  /*-----------------------------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++){
    if (len_r1[i] > len) len = len_r1[i];
    /* PetscPrintf(PETSC_COMM_SELF,"[%d] expect to recv len=%d from [%d]\n",rank,len_r1[i],id_r1[i]); */
  }
  PetscMalloc((len+1)*sizeof(int),&odata1);
  PetscMalloc((size+1)*sizeof(int*),&odata2_ptr);
  PetscBTCreate(Mbs,otable);

  len_max = ois_max*(Mbs+1); /* max space for storing all is[] of one received message */
  len_est = 2*len_max;       /* estimated space for storing is[] of all received messages */
  nodata2 = 0;               /* nodata2+1 = number of chunks odata2_ptr[] allocated so far */
  PetscMalloc((len_est+1)*sizeof(int),&odata2_ptr[nodata2]);
  odata2     = odata2_ptr[nodata2];
  len_unused = len_est; /* unused space in the chunk odata2_ptr[nodata2] -- must stay >= len_max */

  k = 0;
  while (k < nrqr){
    /* Receive messages */
    MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
    if (flag){
      MPI_Get_count(&r_status,MPI_INT,&len);
      proc_id = r_status.MPI_SOURCE;
      MPI_Irecv(odata1,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
      MPI_Wait(&r_req,&r_status);
      /* PetscPrintf(PETSC_COMM_SELF," [%d] recv %d from [%d]\n",rank,len,proc_id); */

      /* Process messages */
      /* check if there is enough unused space in the odata2 array */
      if (len_unused < len_max){ /* allocate more space for odata2 */
        PetscMalloc((len_est+1)*sizeof(int),&odata2_ptr[++nodata2]);
        odata2 = odata2_ptr[nodata2];
        len_unused = len_est;
        /* PetscPrintf(PETSC_COMM_SELF," [%d] Malloc odata2, nodata2: %d\n",rank,nodata2); */
      }

      MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
      len = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++){
        len += odata2[1 + i];
      }

      /* Send messages back */
      MPI_Isend(odata2,len,MPI_INT,proc_id,tag2,comm,s_waits2+k);
      /* PetscPrintf(PETSC_COMM_SELF," [%d] send %d back to [%d]\n",rank,len,proc_id); */
      k++;
      odata2     += len;
      len_unused -= len;
    }
  }
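
  /* Note (illustration, not from the original source): odata2 slides forward
     through the current chunk odata2_ptr[nodata2]; each reply of length len
     consumes len entries, and a fresh chunk of len_est entries is allocated
     whenever fewer than len_max entries remain, so a reply (at most len_max
     entries) never straddles two chunks. */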
  PetscFree(odata1);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  data2 = odata2;
  /* check if there is enough unused space in the odata2 (=data2) array */
  if (len_unused < len_max){ /* allocate more space for odata2 */
    PetscMalloc((len_est+1)*sizeof(int),&odata2_ptr[++nodata2]);
    data2 = odata2_ptr[nodata2];
    len_unused = len_est;
    /* PetscPrintf(PETSC_COMM_SELF," [%d] Malloc data2, nodata2: %d\n",rank,nodata2); */
  }

  k = 0;
  while (k < nrqs){
    /* Receive messages */
    MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
    if (flag){
      MPI_Get_count(&r_status,MPI_INT,&len);
      proc_id = r_status.MPI_SOURCE;
      MPI_Irecv(data2,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
      MPI_Wait(&r_req,&r_status);
      /* PetscPrintf(PETSC_COMM_SELF," [%d] recv %d from [%d], data2:\n",rank,len,proc_id); */
      if (len > 1+is_max){ /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++){
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++){
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
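
  /* Illustration (not from the original source): if data[] currently holds
     is[0] = {3,7} (so data[1] = 2) and a reply carries {7,9} for is[0], only
     9 passes the PetscBTLookupSet() test; data then holds {3,7,9} and data[1]
     becomes 3. table[i] guarantees each index enters is[i] only once. */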

  /* wait for phase 1 sends to complete */
  PetscMalloc(size*sizeof(MPI_Status),&s_status);
  if (nrqs){
    MPI_Waitall(nrqs,s_waits1,s_status);
  }
  PetscFree(data1);

  /* wait for phase 3 sends to complete */
  if (nrqr){
    MPI_Waitall(nrqr,s_waits2,s_status);
  }
  for (k=0; k<=nodata2; k++){
    PetscFree(odata2_ptr[k]);
  }
  PetscFree(odata2_ptr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);
  }

  PetscFree(data);
  PetscFree(s_waits1);
  PetscFree(s_status);
  if (table) {PetscFree(table);}
  PetscBTDestroy(otable);

  PetscFree(len_s);
  PetscFree(id_r1);
  PetscFree(len_r1);

  return(0);
}

/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once()
       to do the work on the local processor.

     Inputs:
      C      - matrix of type MATMPISBAIJ
      data   - holds is[]; see MatIncreaseOverlap_MPISBAIJ_Once() for the format
      whose  - whose is[] is to be processed:
               MINE:  this processor's is[]
               OTHER: another processor's is[]
     Output:
       nidx  - whose = MINE:
                     holds the input and the newly found indices in the same format as data
               whose = OTHER:
                     holds only the newly found indices
       table - table[i] marks the indices of is[i], i=0,...,is_max-1; used only when whose=MINE
*/
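
/* Illustrative layout (not from the original source): with whose = MINE,
   is_max = 2 and Mbs = 10, suppose is[0] = {3} on input and the search adds
   {4,7}; then on output
       nidx[0] = 2, nidx[1] = 3 (new size of is[0]), nidx[2] = size of is[1],
       nidx[3..5] = {3,4,7}, and is[1] starts at nidx[1+2+10] = nidx[13],
   i.e. output is[i] begins at the fixed offset 1+is_max+Mbs*i. With
   whose = OTHER the sets contain only the newly found indices and are packed
   back-to-back in the communication format. */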
/* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
static int MatIncreaseOverlap_MPISBAIJ_Local(Mat C,int *data,int whose,int *nidx,PetscBT *table)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(c->B)->data;
  int          row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  int          a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT      table0;  /* marks the indices of the input is[] for lookup */
  PetscBT      table_i; /* points to the i-th table; when whose=OTHER, a single table is used for all is[] */

  Mbs = c->Mbs; mbs = a->mbs;
  ai = a->i; aj = a->j;
  bi = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstart;
  is_max = data[0];

  PetscBTCreate(Mbs,table0);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (marks idx and nidx) and table0 (marks idx only) */
    if (whose == MINE){ /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process another processor's is[] - only use one temp table */
      table_i = *table;
    }
    PetscBTMemzero(Mbs,table_i);
    PetscBTMemzero(Mbs,table0);
    if (n==0) {
      nidx[1+i] = 0; /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++){
      col = idx_i[j];
      if (col_max < col) col_max = col;
      if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %d >= Mbs %d",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        PetscBTSet(table0,col);
        if (whose == MINE) {nidx_i[isz0] = col;}
        isz0++;
      }
    }

    if (whose == MINE) {isz = isz0;}
    k = 0; /* no. of indices from the input is[i] that have been examined */
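
    /* Search strategy (explanatory note, not from the original source): SBAIJ
       stores only the upper triangle, so a row lists only columns at or past
       the diagonal. For a row in the input is[i], a row search collects all of
       its stored columns. For a row not in is[i], symmetry means the row must
       still be added when one of its stored columns belongs to is[i] (a column
       search): e.g. if 3 is in is[i] and row 1 stores column 3, the entry (3,1)
       exists implicitly and row 1 joins the overlap. */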
    for (row=0; row<mbs; row++){
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)){ /* row is in the input is[i]:
                                                do row search - collect all cols in this row */
        for (l=a_start; l<a_end; l++){ /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        for (l=b_start; l<b_end; l++){ /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        k++;
        if (k >= isz0) break; /* break the loop over rows */
      } else { /* row is not in the input is[i]:
                  do col search - add the row to nidx_i if one of its cols is in the input is[i] */
        for (l=a_start; l<a_end; l++){ /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* break the l loop */
          }
        }
        for (l=b_start; l<b_end; l++){ /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* break the l loop */
          }
        }
      }
    }

    if (i < is_max - 1){
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  PetscBTDestroy(table0);

  return(0);
}