Actual source code: isltog.c

  1: /*$Id: isltog.c,v 1.65 2001/05/21 14:16:29 bsmith Exp $*/

 3:  #include "petscsys.h"
 4:  #include "src/vec/is/isimpl.h"

  6: EXTERN int VecInitializePackage(char *);
  7: int IS_LTOGM_COOKIE = -1;

 11: /*@C
 12:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 14:     Not Collective

 16:     Input Parameter:
 17: .   ltog - local to global mapping

 19:     Output Parameter:
 20: .   n - the number of entries in the local mapping

 22:     Level: advanced

 24:     Concepts: mapping^local to global

 26: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 27: @*/
 28: int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
 29: {
 33:   *n = mapping->n;
 34:   return(0);
 35: }

 39: /*@C
 40:     ISLocalToGlobalMappingView - View a local to global mapping

 42:     Not Collective

 44:     Input Parameters:
 45: +   ltog - local to global mapping
 46: -   viewer - viewer used to display the mapping; if PETSC_NULL, PETSC_VIEWER_STDOUT_ on the mapping's communicator is used

 48:     Level: advanced

 50:     Concepts: mapping^local to global

 52: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 53: @*/
 54: int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 55: {
 56:   int        i,ierr,rank;
 57:   PetscTruth isascii;

 61:   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);

 64:   MPI_Comm_rank(mapping->comm,&rank);
 65:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
 66:   if (isascii) {
 67:     for (i=0; i<mapping->n; i++) {
 68:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 69:     }
 70:     PetscViewerFlush(viewer);
 71:   } else {
 72:     SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 73:   }

 75:   return(0);
 76: }
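
/*
   Example of usage (illustrative sketch, not part of the original source):
   view a mapping on the standard output viewer of its communicator.  Here
   "mapping" is assumed to have been created with one of the routines below,
   and "ierr" is an int used for the usual PETSc error checking.

      ierr = ISLocalToGlobalMappingView(mapping,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

   Passing PETSC_NULL as the viewer selects PETSC_VIEWER_STDOUT_ on the
   mapping's communicator, as the body above shows.
*/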

 80: /*@C
 81:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 82:     ordering and a global parallel ordering.

 84:     Not collective

 86:     Input Parameter:
 87: .   is - index set containing the global number for each local element

 89:     Output Parameter:
 90: .   mapping - new mapping data structure

 92:     Level: advanced

 94:     Concepts: mapping^local to global

 96: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 97: @*/
 98: int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
 99: {
100:   int      n,*indices,ierr;
101:   MPI_Comm comm;


107:   PetscObjectGetComm((PetscObject)is,&comm);
108:   ISGetLocalSize(is,&n);
109:   ISGetIndices(is,&indices);
110:   ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
111:   ISRestoreIndices(is,&indices);

113:   return(0);
114: }
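
/*
   Example of usage (illustrative sketch, not part of the original source):
   build a mapping from an index set that lists the global number of each
   local point.  The IS "is" is assumed to exist already, e.g. created with
   ISCreateGeneral(); "ierr" is an int used for error checking.

      ISLocalToGlobalMapping ltog;
      int                    nlocal;

      ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingGetSize(ltog,&nlocal);CHKERRQ(ierr);
      ...
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/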


119: /*@C
120:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
121:     ordering and a global parallel ordering.

123:     Not Collective, but communicator may have more than one process

125:     Input Parameters:
126: +   comm - MPI communicator
127: .   n - the number of local elements
128: -   indices - the global index for each local element

130:     Output Parameter:
131: .   mapping - new mapping data structure

133:     Level: advanced

135:     Concepts: mapping^local to global

137: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
138: @*/
139: int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
140: {
141:   int *in,ierr;

146:   PetscMalloc((n+1)*sizeof(int),&in);
147:   PetscMemcpy(in,indices,n*sizeof(int));
148:   ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);
149:   return(0);
150: }
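
/*
   Example of usage (illustrative sketch, not part of the original source):
   create a mapping directly from an array of global indices.  The values in
   "gidx" are hypothetical; since ISLocalToGlobalMappingCreate() copies the
   array, the caller keeps ownership of it.

      int                    ierr,gidx[4];
      ISLocalToGlobalMapping ltog;

      gidx[0] = 3; gidx[1] = 4; gidx[2] = 7; gidx[3] = 8;
      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,gidx,&ltog);CHKERRQ(ierr);
      ...
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/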

154: /*@C
155:     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
156:     ordering and a global parallel ordering.

158:     Not Collective, but communicator may have more than one process

160:     Input Parameters:
161: +   comm - MPI communicator
162: .   n - the number of local elements
163: -   indices - the global index for each local element

165:     Output Parameter:
166: .   mapping - new mapping data structure

168:     Level: developer

170:     Notes: Does not copy the indices, just keeps the pointer to them. ISLocalToGlobalMappingDestroy()
171:     will free the space, so the indices must be obtained with PetscMalloc() and must not be freed elsewhere.

173:     Concepts: mapping^local to global

175: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
176: @*/
177: int ISLocalToGlobalMappingCreateNC(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
178: {

184:   *mapping = PETSC_NULL;
185: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
186:   VecInitializePackage(PETSC_NULL);
187: #endif
188:   if (IS_LTOGM_COOKIE == -1) {
189:     PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");
190:   }

192:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
193:                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
194:   PetscLogObjectCreate(*mapping);
195:   PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));

197:   (*mapping)->n       = n;
198:   (*mapping)->indices = (int*)indices;

200:   /*
201:       Do not create the global to local mapping. It is only created when
202:      ISGlobalToLocalMappingApply() is called
203:   */
204:   (*mapping)->globals = 0;
205:   return(0);
206: }
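
/*
   Example of usage (illustrative sketch, not part of the original source):
   the NC variant keeps the pointer instead of copying, so the index array
   must come from PetscMalloc() and must not be freed by the caller; it is
   released later by ISLocalToGlobalMappingDestroy().

      int                    *gidx;
      ISLocalToGlobalMapping ltog;

      ierr = PetscMalloc(4*sizeof(int),&gidx);CHKERRQ(ierr);
      gidx[0] = 3; gidx[1] = 4; gidx[2] = 7; gidx[3] = 8;
      ierr = ISLocalToGlobalMappingCreateNC(PETSC_COMM_WORLD,4,gidx,&ltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);

   (gidx must not be PetscFree()'d by the caller after the create call.)
*/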

210: /*@C
211:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
212:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
213:        and VecSetLocalToGlobalMappingBlock().

215:     Not Collective, but communicator may have more than one process

217:     Input Parameters:
218: +    inmap - original point-wise mapping
219: -    bs - block size

221:     Output Parameter:
222: .   outmap - block based mapping

224:     Level: advanced

226:     Concepts: mapping^local to global

228: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
229: @*/
230: int ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
231: {
232:   int ierr,*ii,i,n;


236:   if (bs > 1) {
237:     n    = inmap->n/bs;
238:     PetscMalloc(n*sizeof(int),&ii);
239:     for (i=0; i<n; i++) {
240:       ii[i] = inmap->indices[bs*i]/bs;
241:     }
242:     ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);
243:     PetscFree(ii);
244:   } else {
245:     *outmap = inmap;
246:     PetscObjectReference((PetscObject)inmap);
247:   }
248:   return(0);
249: }
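
/*
   Example of usage (illustrative sketch, not part of the original source):
   derive a block mapping from a point-wise one.  If "ltog" (assumed to exist)
   has the point-wise indices {10,11,14,15}, then with bs = 2 the resulting
   block mapping has indices {5,7}, since each block index is indices[bs*i]/bs.

      ISLocalToGlobalMapping bltog;

      ierr = ISLocalToGlobalMappingBlock(ltog,2,&bltog);CHKERRQ(ierr);
      ...
      ierr = ISLocalToGlobalMappingDestroy(bltog);CHKERRQ(ierr);
*/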
250: 
253: /*@
254:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
255:    ordering and a global parallel ordering.

257:    Not Collective

259:    Input Parameter:
260: .  mapping - mapping data structure

262:    Level: advanced

264: .seealso: ISLocalToGlobalMappingCreate()
265: @*/
266: int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
267: {
271:   if (--mapping->refct > 0) return(0);
272:   if (mapping->refct < 0) {
273:     SETERRQ(1,"Mapping already destroyed");
274:   }

276:   PetscFree(mapping->indices);
277:   if (mapping->globals) {PetscFree(mapping->globals);}
278:   PetscLogObjectDestroy(mapping);
279:   PetscHeaderDestroy(mapping);
280:   return(0);
281: }
282: 
285: /*@
286:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
287:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
288:     context.

290:     Not collective

292:     Input Parameters:
293: +   mapping - mapping between local and global numbering
294: -   is - index set in local numbering

296:     Output Parameter:
297: .   newis - index set in global numbering

299:     Level: advanced

301:     Concepts: mapping^local to global

303: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
304:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
305: @*/
306: int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
307: {
308:   int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;


315:   ISGetLocalSize(is,&n);
316:   ISGetIndices(is,&idxin);
317:   idxmap = mapping->indices;
318: 
319:   PetscMalloc((n+1)*sizeof(int),&idxout);
320:   for (i=0; i<n; i++) {
321:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
322:     idxout[i] = idxmap[idxin[i]];
323:   }
324:   ISRestoreIndices(is,&idxin);
325:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);
326:   PetscFree(idxout);
327:   return(0);
328: }
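
/*
   Example of usage (illustrative sketch, not part of the original source):
   translate an index set given in the local numbering into the global
   numbering.  "islocal" is assumed to contain only local indices smaller
   than the mapping size.

      IS isglobal;

      ierr = ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);CHKERRQ(ierr);
      ...
      ierr = ISDestroy(isglobal);CHKERRQ(ierr);
*/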

330: /*MC
331:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
332:    and converts them to the global numbering.

334:    Not collective

336:    Input Parameters:
337: +  mapping - the local to global mapping context
338: .  N - number of integers
339: -  in - input indices in local numbering

341:    Output Parameter:
342: .  out - indices in global numbering

344:    Synopsis:
345:    int ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

347:    Notes: 
348:    The in and out array parameters may be identical.

350:    Level: advanced

352: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
353:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
354:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

356:     Concepts: mapping^local to global

358: M*/
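
/*
   Example of usage (illustrative sketch, not part of the original source):
   convert a short list of local indices to global indices in place (the
   input and output arrays may be identical).  If the mapping has the
   hypothetical indices {3,4,7,8}, then after the call idx contains {7,3}.

      int idx[2];

      idx[0] = 2; idx[1] = 0;
      ierr = ISLocalToGlobalMappingApply(ltog,2,idx,idx);CHKERRQ(ierr);
*/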

360: /* -----------------------------------------------------------------------------------------*/

364: /*
365:     Creates the global fields in the ISLocalToGlobalMapping structure
366: */
367: static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
368: {
369:   int ierr,i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

372:   end   = 0;
373:   start = 100000000;

375:   for (i=0; i<n; i++) {
376:     if (idx[i] < 0) continue;
377:     if (idx[i] < start) start = idx[i];
378:     if (idx[i] > end)   end   = idx[i];
379:   }
380:   if (start > end) {start = 0; end = -1;}
381:   mapping->globalstart = start;
382:   mapping->globalend   = end;

384:   PetscMalloc((end-start+2)*sizeof(int),&globals);
385:   mapping->globals = globals;
386:   for (i=0; i<end-start+1; i++) {
387:     globals[i] = -1;
388:   }
389:   for (i=0; i<n; i++) {
390:     if (idx[i] < 0) continue;
391:     globals[idx[i] - start] = i;
392:   }

394:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
395:   return(0);
396: }
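
/*
   Worked example of the table built above (illustrative, not part of the
   original source): for the local indices {8,3,7} the routine finds
   start = 3 and end = 8, allocates globals[] of length end-start+1 = 6, and
   fills it as
      globals = { 1, -1, -1, -1, 2, 0 }
   so that a global index g maps to the local index globals[g-start], or to
   -1 when g is not present on this processor.
*/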

400: /*@
401:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
402:     specified with a global numbering.

404:     Not collective

406:     Input Parameters:
407: +   mapping - mapping between local and global numbering
408: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
409:            IS_GTOLM_DROP - drops the indices with no local value from the output list
410: .   n - number of global indices to map
411: -   idx - global indices to map

413:     Output Parameters:
414: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
415: -   idxout - local index of each global index; one must pass in an array long enough
416:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
417:              idxout == PETSC_NULL to determine the required length (returned in nout)
418:              and then allocate the required space and call ISGlobalToLocalMappingApply()
419:              a second time to set the values.

421:     Notes:
422:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

424:     This is not scalable in memory usage: each processor requires an array of size
425:     O(Nglobal) to compute these.

427:     Level: advanced

429:     Concepts: mapping^global to local

431: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
432:           ISLocalToGlobalMappingDestroy()
433: @*/
434: int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
435:                                   int n,const int idx[],int *nout,int idxout[])
436: {
437:   int i,ierr,*globals,nf = 0,tmp,start,end;

440:   if (!mapping->globals) {
441:     ISGlobalToLocalMappingSetUp_Private(mapping);
442:   }
443:   globals = mapping->globals;
444:   start   = mapping->globalstart;
445:   end     = mapping->globalend;

447:   if (type == IS_GTOLM_MASK) {
448:     if (idxout) {
449:       for (i=0; i<n; i++) {
450:         if (idx[i] < 0) idxout[i] = idx[i];
451:         else if (idx[i] < start) idxout[i] = -1;
452:         else if (idx[i] > end)   idxout[i] = -1;
453:         else                     idxout[i] = globals[idx[i] - start];
454:       }
455:     }
456:     if (nout) *nout = n;
457:   } else {
458:     if (idxout) {
459:       for (i=0; i<n; i++) {
460:         if (idx[i] < 0) continue;
461:         if (idx[i] < start) continue;
462:         if (idx[i] > end) continue;
463:         tmp = globals[idx[i] - start];
464:         if (tmp < 0) continue;
465:         idxout[nf++] = tmp;
466:       }
467:     } else {
468:       for (i=0; i<n; i++) {
469:         if (idx[i] < 0) continue;
470:         if (idx[i] < start) continue;
471:         if (idx[i] > end) continue;
472:         tmp = globals[idx[i] - start];
473:         if (tmp < 0) continue;
474:         nf++;
475:       }
476:     }
477:     if (nout) *nout = nf;
478:   }

480:   return(0);
481: }
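
/*
   Example of usage (illustrative sketch, not part of the original source):
   the two-pass calling sequence with IS_GTOLM_DROP, first querying how many
   of the global indices have a local value and then filling the output
   array.  "nglobal" and "gidx" are assumed to hold the global indices to be
   translated.

      int nout,*local;

      ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,nglobal,gidx,&nout,PETSC_NULL);CHKERRQ(ierr);
      ierr = PetscMalloc((nout+1)*sizeof(int),&local);CHKERRQ(ierr);
      ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,nglobal,gidx,&nout,local);CHKERRQ(ierr);
      ...
      ierr = PetscFree(local);CHKERRQ(ierr);
*/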

485: /*@C
486:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
487:      each index shared by more than one processor 

489:     Collective on ISLocalToGlobalMapping

491:     Input Parameter:
492: .   mapping - the mapping from local to global indexing

494:     Output Parameters:
495: +   nproc - number of processors that are connected to this one
496: .   procs - neighboring processors
497: .   numprocs - number of indices for each subdomain (processor)
498: -   indices - indices of local nodes shared with a neighbor (sorted by global numbering)

500:     Level: advanced

502:     Concepts: mapping^local to global

504: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
505:           ISLocalToGlobalMappingRestoreInfo()
506: @*/
507: int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int *procs[],int *numprocs[],int **indices[])
508: {
509:   int         i,n = mapping->n,ierr,Ng,ng,max = 0,*lindices = mapping->indices;
510:   int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
511:   int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
512:   int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
513:   int         first_procs,first_numprocs,*first_indices;
514:   MPI_Request *recv_waits,*send_waits;
515:   MPI_Status  recv_status,*send_status,*recv_statuses;
516:   MPI_Comm    comm = mapping->comm;
517:   PetscTruth  debug = PETSC_FALSE;

520:   MPI_Comm_size(comm,&size);
521:   MPI_Comm_rank(comm,&rank);
522:   if (size == 1) {
523:     *nproc         = 0;
524:     *procs         = PETSC_NULL;
525:     PetscMalloc(sizeof(int),numprocs);
526:     (*numprocs)[0] = 0;
527:     PetscMalloc(sizeof(int*),indices);
528:     (*indices)[0]  = PETSC_NULL;
529:     return(0);
530:   }

532:   PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);

534:   /*
535:     Notes on ISLocalToGlobalMappingGetInfo

537:     globally owned node - the nodes that have been assigned to this processor in global
538:            numbering, just for this routine.

540:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
541:            boundary (i.e. it has more than one local owner)

543:     locally owned node - node that exists on this processor's subdomain

545:     nontrivial locally owned node - node that is not in the interior (i.e. it belongs to more
546:            than one local subdomain)
547:   */
548:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
549:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
550:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

552:   for (i=0; i<n; i++) {
553:     if (lindices[i] > max) max = lindices[i];
554:   }
555:   MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);
556:   Ng++;
557:   MPI_Comm_size(comm,&size);
558:   MPI_Comm_rank(comm,&rank);
559:   scale  = Ng/size + 1;
560:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
561:   rstart = scale*rank;

563:   /* determine ownership ranges of global indices */
564:   PetscMalloc((2*size+1)*sizeof(int),&nprocs);
565:   PetscMemzero(nprocs,2*size*sizeof(int));

567:   /* determine owners of each local node  */
568:   PetscMalloc((n+1)*sizeof(int),&owner);
569:   for (i=0; i<n; i++) {
570:     proc             = lindices[i]/scale; /* processor that globally owns this index */
571:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
572:     owner[i]         = proc;
573:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
574:   }
575:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
576:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

578:   /* inform other processors of number of messages and max length*/
579:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
580:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

582:   /* post receives for owned rows */
583:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);
584:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
585:   for (i=0; i<nrecvs; i++) {
586:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
587:   }

589:   /* pack messages containing lists of local nodes to owners */
590:   PetscMalloc((2*n+1)*sizeof(int),&sends);
591:   PetscMalloc((size+1)*sizeof(int),&starts);
592:   starts[0]  = 0;
593:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
594:   for (i=0; i<n; i++) {
595:     sends[starts[owner[i]]++] = lindices[i];
596:     sends[starts[owner[i]]++] = i;
597:   }
598:   PetscFree(owner);
599:   starts[0]  = 0;
600:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

602:   /* send the messages */
603:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
604:   PetscMalloc((nsends+1)*sizeof(int),&dest);
605:   cnt = 0;
606:   for (i=0; i<size; i++) {
607:     if (nprocs[2*i]) {
608:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPI_INT,i,tag1,comm,send_waits+cnt);
609:       dest[cnt] = i;
610:       cnt++;
611:     }
612:   }
613:   PetscFree(starts);

615:   /* wait on receives */
616:   PetscMalloc((2*nrecvs+1)*sizeof(int),&source);
617:   len  = source + nrecvs;
618:   cnt  = nrecvs;
619:   PetscMalloc((ng+1)*sizeof(int),&nownedsenders);
620:   PetscMemzero(nownedsenders,ng*sizeof(int));
621:   while (cnt) {
622:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
623:     /* unpack receives into our local space */
624:     MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);
625:     source[imdex]  = recv_status.MPI_SOURCE;
626:     len[imdex]     = len[imdex]/2;
627:     /* count how many local owners for each of my global owned indices */
628:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
629:     cnt--;
630:   }
631:   PetscFree(recv_waits);

633:   /* count the globally owned indices that have more than one local owner (nowned) and the total number of local owners of those indices (nownedm) */
634:   nowned  = 0;
635:   nownedm = 0;
636:   for (i=0; i<ng; i++) {
637:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
638:   }

640:   /* create single array to contain rank of all local owners of each globally owned index */
641:   PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);
642:   PetscMalloc((ng+1)*sizeof(int),&starts);
643:   starts[0] = 0;
644:   for (i=1; i<ng; i++) {
645:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
646:     else starts[i] = starts[i-1];
647:   }

649:   /* for each nontrivial globally owned node list all arriving processors */
650:   for (i=0; i<nrecvs; i++) {
651:     for (j=0; j<len[i]; j++) {
652:       node = recvs[2*i*nmax+2*j]-rstart;
653:       if (nownedsenders[node] > 1) {
654:         ownedsenders[starts[node]++] = source[i];
655:       }
656:     }
657:   }

659:   if (debug) { /* -----------------------------------  */
660:     starts[0]    = 0;
661:     for (i=1; i<ng; i++) {
662:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
663:       else starts[i] = starts[i-1];
664:     }
665:     for (i=0; i<ng; i++) {
666:       if (nownedsenders[i] > 1) {
667:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
668:         for (j=0; j<nownedsenders[i]; j++) {
669:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
670:         }
671:         PetscSynchronizedPrintf(comm,"\n");
672:       }
673:     }
674:     PetscSynchronizedFlush(comm);
675:   }/* -----------------------------------  */

677:   /* wait on original sends */
678:   if (nsends) {
679:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
680:     MPI_Waitall(nsends,send_waits,send_status);
681:     PetscFree(send_status);
682:   }
683:   PetscFree(send_waits);
684:   PetscFree(sends);
685:   PetscFree(nprocs);

687:   /* pack messages to send back to local owners */
688:   starts[0]    = 0;
689:   for (i=1; i<ng; i++) {
690:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
691:     else starts[i] = starts[i-1];
692:   }
693:   nsends2 = nrecvs;
694:   PetscMalloc((nsends2+1)*sizeof(int),&nprocs); /* length of each message */
695:   for (i=0; i<nrecvs; i++) {
696:     nprocs[i] = 1;
697:     for (j=0; j<len[i]; j++) {
698:       node = recvs[2*i*nmax+2*j]-rstart;
699:       if (nownedsenders[node] > 1) {
700:         nprocs[i] += 2 + nownedsenders[node];
701:       }
702:     }
703:   }
704:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
705:   PetscMalloc((nt+1)*sizeof(int),&sends2);
706:   PetscMalloc((nsends2+1)*sizeof(int),&starts2);
707:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
708:   /*
709:      Each message is nprocs[i] entries long and consists of
710:        (0) the number of nodes being sent back, followed, for each such node, by
711:        (1) the local node number,
712:        (2) the number of processors sharing it, and
713:        (3) the ranks of the processors sharing it
714:   */
715:   for (i=0; i<nsends2; i++) {
716:     cnt = 1;
717:     sends2[starts2[i]] = 0;
718:     for (j=0; j<len[i]; j++) {
719:       node = recvs[2*i*nmax+2*j]-rstart;
720:       if (nownedsenders[node] > 1) {
721:         sends2[starts2[i]]++;
722:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
723:         sends2[starts2[i]+cnt++] = nownedsenders[node];
724:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));
725:         cnt += nownedsenders[node];
726:       }
727:     }
728:   }

730:   /* send the message lengths */
731:   for (i=0; i<nsends2; i++) {
732:     MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);
733:   }

735:   /* receive the message lengths */
736:   nrecvs2 = nsends;
737:   PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);
738:   PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);
739:   nt      = 0;
740:   for (i=0; i<nrecvs2; i++) {
741:      MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);
742:     nt   += lens2[i];
743:   }
744:   starts3[0] = 0;
745:   for (i=0; i<nrecvs2-1; i++) {
746:     starts3[i+1] = starts3[i] + lens2[i];
747:   }
748:   PetscMalloc((nt+1)*sizeof(int),&recvs2);
749:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
750:   for (i=0; i<nrecvs2; i++) {
751:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);
752:   }
753: 
754:   /* send the messages */
755:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
756:   for (i=0; i<nsends2; i++) {
757:     MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);
758:   }

760:   /* wait on receives */
761:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);
762:   MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
763:   PetscFree(recv_statuses);
764:   PetscFree(recv_waits);
765:   PetscFree(nprocs);

767:   if (debug) { /* -----------------------------------  */
768:     cnt = 0;
769:     for (i=0; i<nrecvs2; i++) {
770:       nt = recvs2[cnt++];
771:       for (j=0; j<nt; j++) {
772:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
773:         for (k=0; k<recvs2[cnt+1]; k++) {
774:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
775:         }
776:         cnt += 2 + recvs2[cnt+1];
777:         PetscSynchronizedPrintf(comm,"\n");
778:       }
779:     }
780:     PetscSynchronizedFlush(comm);
781:   } /* -----------------------------------  */

783:   /* count the number of subdomains for each local node */
784:   PetscMalloc(size*sizeof(int),&nprocs);
785:   PetscMemzero(nprocs,size*sizeof(int));
786:   cnt  = 0;
787:   for (i=0; i<nrecvs2; i++) {
788:     nt = recvs2[cnt++];
789:     for (j=0; j<nt; j++) {
790:       for (k=0; k<recvs2[cnt+1]; k++) {
791:         nprocs[recvs2[cnt+2+k]]++;
792:       }
793:       cnt += 2 + recvs2[cnt+1];
794:     }
795:   }
796:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
797:   *nproc    = nt;
798:   PetscMalloc((nt+1)*sizeof(int),procs);
799:   PetscMalloc((nt+1)*sizeof(int),numprocs);
800:   PetscMalloc((nt+1)*sizeof(int*),indices);
801:   PetscMalloc(size*sizeof(int),&bprocs);
802:   cnt       = 0;
803:   for (i=0; i<size; i++) {
804:     if (nprocs[i] > 0) {
805:       bprocs[i]        = cnt;
806:       (*procs)[cnt]    = i;
807:       (*numprocs)[cnt] = nprocs[i];
808:       PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);
809:       cnt++;
810:     }
811:   }

813:   /* make the list of subdomains for each nontrivial local node */
814:   PetscMemzero(*numprocs,nt*sizeof(int));
815:   cnt  = 0;
816:   for (i=0; i<nrecvs2; i++) {
817:     nt = recvs2[cnt++];
818:     for (j=0; j<nt; j++) {
819:       for (k=0; k<recvs2[cnt+1]; k++) {
820:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
821:       }
822:       cnt += 2 + recvs2[cnt+1];
823:     }
824:   }
825:   PetscFree(bprocs);
826:   PetscFree(recvs2);

828:   /* sort the node indexing by their global numbers */
829:   nt = *nproc;
830:   for (i=0; i<nt; i++) {
831:     PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);
832:     for (j=0; j<(*numprocs)[i]; j++) {
833:       tmp[j] = lindices[(*indices)[i][j]];
834:     }
835:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
836:     PetscFree(tmp);
837:   }

839:   if (debug) { /* -----------------------------------  */
840:     nt = *nproc;
841:     for (i=0; i<nt; i++) {
842:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
843:       for (j=0; j<(*numprocs)[i]; j++) {
844:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
845:       }
846:       PetscSynchronizedPrintf(comm,"\n");
847:     }
848:     PetscSynchronizedFlush(comm);
849:   } /* -----------------------------------  */

851:   /* wait on sends */
852:   if (nsends2) {
853:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
854:     MPI_Waitall(nsends2,send_waits,send_status);
855:     PetscFree(send_status);
856:   }

858:   PetscFree(starts3);
859:   PetscFree(dest);
860:   PetscFree(send_waits);

862:   PetscFree(nownedsenders);
863:   PetscFree(ownedsenders);
864:   PetscFree(starts);
865:   PetscFree(starts2);
866:   PetscFree(lens2);

868:   PetscFree(source);
869:   PetscFree(recvs);
870:   PetscFree(nprocs);
871:   PetscFree(sends2);

873:   /* put the information about myself as the first entry in the list */
874:   first_procs    = (*procs)[0];
875:   first_numprocs = (*numprocs)[0];
876:   first_indices  = (*indices)[0];
877:   for (i=0; i<*nproc; i++) {
878:     if ((*procs)[i] == rank) {
879:       (*procs)[0]    = (*procs)[i];
880:       (*numprocs)[0] = (*numprocs)[i];
881:       (*indices)[0]  = (*indices)[i];
882:       (*procs)[i]    = first_procs;
883:       (*numprocs)[i] = first_numprocs;
884:       (*indices)[i]  = first_indices;
885:       break;
886:     }
887:   }

889:   return(0);
890: }

894: /*@C
895:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

897:     Collective on ISLocalToGlobalMapping

899:    Input Parameter:
900: .   mapping - the mapping from local to global indexing

902:    Output Parameters:
903: +   nproc - number of processors that are connected to this one
904: .   procs - neighboring processors
905: .   numprocs - number of indices for each processor
906: -   indices - indices of local nodes shared with a neighbor (sorted by global numbering)

908:     Level: advanced

910: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
911:           ISLocalToGlobalMappingGetInfo()
912: @*/
913: int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int *procs[],int *numprocs[],int **indices[])
914: {
915:   int ierr,i;

918:   if (*procs) {PetscFree(*procs);}
919:   if (*numprocs) {PetscFree(*numprocs);}
920:   if (*indices) {
921:     if ((*indices)[0]) {PetscFree((*indices)[0]);}
922:     for (i=1; i<*nproc; i++) {
923:       if ((*indices)[i]) {PetscFree((*indices)[i]);}
924:     }
925:     PetscFree(*indices);
926:   }
927:   return(0);
928: }
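
/*
   Example of usage (illustrative sketch, not part of the original source):
   query which neighboring processors share indices with this one and then
   return the storage.  Every array obtained from
   ISLocalToGlobalMappingGetInfo() must be released with the matching call to
   ISLocalToGlobalMappingRestoreInfo().

      int nproc,*procs,*numprocs,**shared;

      ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&shared);CHKERRQ(ierr);
      ...
      ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&shared);CHKERRQ(ierr);
*/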