Actual source code: pmetis.c
/*$Id: pmetis.c,v 1.46 2001/09/07 20:10:38 bsmith Exp $*/

#include "src/mat/impls/adj/mpi/mpiadj.h"

/*
   Currently using ParMetis-2.0. The following include file has
   to be changed to par_kmetis.h for ParMetis-1.0
*/
EXTERN_C_BEGIN
#include "parmetis.h"
EXTERN_C_END

/*
   The first 5 elements of this structure are the input control array to Metis
*/
typedef struct {
  int      cuts;         /* number of cuts made (output) */
  int      foldfactor;   /* folding factor (set to 150 at creation) */
  int      parallel;     /* use parallel partitioner for coarse problem */
  int      indexing;     /* 0 indicates C indexing, 1 Fortran */
  int      printout;     /* indicates if one wishes Metis to print info */
  MPI_Comm comm_pmetis;
} MatPartitioning_Parmetis;

/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static int MatPartitioningApply_Parmetis(MatPartitioning part,IS *partitioning)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis*)part->data;
  int                      ierr,*locals,size,rank;
  int                      *vtxdist,*xadj,*adjncy,itmp = 0;
  int                      wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[3], edgecut, i,j;
  Mat                      mat = part->adj,newmat;
  Mat_MPIAdj               *adj = (Mat_MPIAdj *)mat->data;
  PetscTruth               flg;
  float                    *tpwgts,*ubvec;

  ierr = MPI_Comm_size(mat->comm,&size);CHKERRQ(ierr);

  ierr = PetscTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (!flg) {
    ierr = MatConvert(mat,MATMPIADJ,&newmat);CHKERRQ(ierr);
    adj  = (Mat_MPIAdj *)newmat->data;
  }

  vtxdist = adj->rowners;
  xadj    = adj->i;
  adjncy  = adj->j;
  ierr    = MPI_Comm_rank(part->comm,&rank);CHKERRQ(ierr);
  if (!(vtxdist[rank+1] - vtxdist[rank])) {
    SETERRQ(1,"Does not support any processor with no entries");
  }
#if defined(PETSC_USE_BOPT_g)
  /* check that matrix has no diagonal entries */
  {
    int i,j,rstart;
    ierr = MatGetOwnershipRange(mat,&rstart,PETSC_NULL);CHKERRQ(ierr);
    for (i=0; i<mat->m; i++) {
      for (j=xadj[i]; j<xadj[i+1]; j++) {
        if (adjncy[j] == i+rstart) SETERRQ1(1,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
      }
    }
  }
#endif

  ierr = PetscMalloc((mat->m+1)*sizeof(int),&locals);CHKERRQ(ierr);

  if (PetscLogPrintInfo) {itmp = parmetis->printout; parmetis->printout = 127;}
  ierr = PetscMalloc(ncon*nparts*sizeof(float),&tpwgts);CHKERRQ(ierr);
  for (i=0; i<ncon; i++) {
    for (j=0; j<nparts; j++) {
      if (part->part_weights) {
        tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
      } else {
        tpwgts[i*nparts+j] = 1./nparts;
      }
    }
  }
  ierr = PetscMalloc(ncon*sizeof(float),&ubvec);CHKERRQ(ierr);
  for (i=0; i<ncon; i++) {
    ubvec[i] = 1.05;
  }
  options[0] = 0;
  /* ParMETIS has no error conditions ??? */
  ParMETIS_V3_PartKway(vtxdist,xadj,adjncy,part->vertex_weights,adj->values,&wgtflag,&numflag,&ncon,&nparts,tpwgts,ubvec,
                       options,&edgecut,locals,&parmetis->comm_pmetis);
  parmetis->cuts = edgecut;   /* record the edge cut so MatPartitioningView_Parmetis() can report it */
  ierr = PetscFree(tpwgts);CHKERRQ(ierr);
  ierr = PetscFree(ubvec);CHKERRQ(ierr);
  if (PetscLogPrintInfo) {parmetis->printout = itmp;}

  ierr = ISCreateGeneral(part->comm,mat->m,locals,partitioning);CHKERRQ(ierr);
  ierr = PetscFree(locals);CHKERRQ(ierr);

  if (!flg) {
    ierr = MatDestroy(newmat);CHKERRQ(ierr);
  }
  return(0);
}
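
/*
   Illustrative sketch (not part of the original file): how a caller would reach
   MatPartitioningApply_Parmetis() above through the public PETSc interface,
   assuming the adjacency graph is already distributed by rows and has no
   diagonal entries.  The names nlocal, N, ia and ja are placeholders for the
   caller's own CSR data, and the "-mat_partitioning_type parmetis" option value
   is an assumption; consult the manual pages of the installed release.

     Mat             adj;
     MatPartitioning part;
     IS              is;
     int             ierr;

     // ia/ja: local rows of the adjacency graph in CSR form (C indexing, no
     // diagonal entries); nlocal = number of local rows, N = global size
     ierr = MatCreateMPIAdj(PETSC_COMM_WORLD,nlocal,N,ia,ja,PETSC_NULL,&adj);CHKERRQ(ierr);
     ierr = MatPartitioningCreate(PETSC_COMM_WORLD,&part);CHKERRQ(ierr);
     ierr = MatPartitioningSetAdjacency(part,adj);CHKERRQ(ierr);
     ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);   // e.g. -mat_partitioning_type parmetis
     ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);        // ends up in MatPartitioningApply_Parmetis()
     ierr = ISView(is,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
     ierr = ISDestroy(is);CHKERRQ(ierr);
     ierr = MatPartitioningDestroy(part);CHKERRQ(ierr);
     ierr = MatDestroy(adj);CHKERRQ(ierr);
*/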

int MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
  int                      ierr,rank;
  PetscTruth               isascii;

  ierr = MPI_Comm_rank(part->comm,&rank);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
  if (isascii) {
    if (parmetis->parallel == 2) {
      ierr = PetscViewerASCIIPrintf(viewer," Using parallel coarse grid partitioner\n");CHKERRQ(ierr);
    } else {
      ierr = PetscViewerASCIIPrintf(viewer," Using sequential coarse grid partitioner\n");CHKERRQ(ierr);
    }
    ierr = PetscViewerASCIIPrintf(viewer," Using %d fold factor\n",parmetis->foldfactor);CHKERRQ(ierr);
    ierr = PetscViewerASCIISynchronizedPrintf(viewer," [%d]Number of cuts found %d\n",rank,parmetis->cuts);CHKERRQ(ierr);
    ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
  } else {
    SETERRQ1(1,"Viewer type %s not supported for this Parmetis partitioner",((PetscObject)viewer)->type_name);
  }

  return(0);
}
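
/*
   Illustrative sketch (not part of the original file): MatPartitioningView()
   dispatches to MatPartitioningView_Parmetis() above; with an ASCII viewer it
   prints the coarse-partitioner choice, the fold factor and, per process, the
   number of cuts.  Any other viewer type falls into the SETERRQ1 branch.

     ierr = MatPartitioningView(part,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
*/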

/*@
   MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
         do the partitioning of the coarse grid.

   Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
int MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;

  parmetis->parallel = 1;
  return(0);
}
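
/*
   Illustrative sketch (not part of the original file): selecting the sequential
   coarse-grid partitioner, either directly,

     ierr = MatPartitioningParmetisSetCoarseSequential(part);CHKERRQ(ierr);

   or at run time through the option registered below in
   MatPartitioningSetFromOptions_Parmetis():

     -mat_partitioning_parmetis_coarse_sequential
*/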

int MatPartitioningSetFromOptions_Parmetis(MatPartitioning part)
{
  int        ierr;
  PetscTruth flag;

  ierr = PetscOptionsHead("Set ParMeTiS partitioning options");CHKERRQ(ierr);
  ierr = PetscOptionsName("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",&flag);CHKERRQ(ierr);
  if (flag) {
    ierr = MatPartitioningParmetisSetCoarseSequential(part);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  return(0);
}

int MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
  int                      ierr;

  ierr = MPI_Comm_free(&(parmetis->comm_pmetis));CHKERRQ(ierr);
  ierr = PetscFree(parmetis);CHKERRQ(ierr);
  return(0);
}

EXTERN_C_BEGIN
int MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  int                      ierr;
  MatPartitioning_Parmetis *parmetis;

  ierr = PetscNew(MatPartitioning_Parmetis,&parmetis);CHKERRQ(ierr);

  parmetis->cuts       = 0;   /* output variable */
  parmetis->foldfactor = 150; /* folding factor */
  parmetis->parallel   = 2;   /* use parallel partitioner for coarse grid */
  parmetis->indexing   = 0;   /* index numbering starts from 0 */
  parmetis->printout   = 0;   /* print no output while running */

  ierr = MPI_Comm_dup(part->comm,&(parmetis->comm_pmetis));CHKERRQ(ierr);

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  part->data                = (void*)parmetis;
  return(0);
}
EXTERN_C_END
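
/*
   Illustrative note (not part of the original file): MatPartitioningCreate_Parmetis()
   is not called directly by users.  It is registered with PETSc's dynamic type
   mechanism and invoked when the partitioning type is set to ParMETIS, e.g.

     ierr = MatPartitioningCreate(PETSC_COMM_WORLD,&part);CHKERRQ(ierr);
     ierr = MatPartitioningSetType(part,MATPARTITIONING_PARMETIS);CHKERRQ(ierr);

   The constant MATPARTITIONING_PARMETIS is assumed from the PETSc naming
   convention of this era; check petscmat.h of the installed release for the
   exact type name, or select the type at run time with -mat_partitioning_type.
*/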