MOAB: Mesh Oriented datABase  (version 5.3.0)
ZoltanPartitioner.cpp
Go to the documentation of this file.
00001 /* $Id$
00002  *
00003  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00004  * storing and accessing finite element mesh data.
00005  *
00006  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00007  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00008  * retains certain rights in this software.
00009  *
00010  * This library is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  */
00016 
00017 /** Get a mesh from MOAB and write a Zoltan partition set back into MOAB and to
00018  *  a file
00019  *
00020  *
00021  */
00022 
00023 #include <iostream>
00024 #include <cassert>
00025 #include <sstream>
00026 #include <algorithm>
00027 
00028 #include "moab/ZoltanPartitioner.hpp"
00029 #include "moab/Interface.hpp"
00030 #include "Internals.hpp"
00031 #include "moab/Range.hpp"
00032 #include "moab/WriteUtilIface.hpp"
00033 #include "moab/MeshTopoUtil.hpp"
00034 #include "MBTagConventions.hpp"
00035 #include "moab/CN.hpp"
00036 // used for gnomonic projection
00037 #include "moab/IntxMesh/IntxUtils.hpp"
00038 
00039 #ifdef MOAB_HAVE_CGM
00040 #include "CGMConfig.h"
00041 #include <limits>
00042 #include "RefEntity.hpp"
00043 #include "RefFace.hpp"
00044 #include "RefEdge.hpp"
00045 #include "RefVertex.hpp"
00046 #include "Body.hpp"
00047 #include "TopologyEntity.hpp"
00048 #include "CastTo.hpp"
00049 #include "CABodies.hpp"
00050 #include "TDParallel.hpp"
00051 #include "TDUniqueId.hpp"
00052 #endif
00053 
00054 using namespace moab;
00055 
00056 #define RR \
00057     if( MB_SUCCESS != result ) return result
00058 
00059 static double* Points      = NULL;
00060 static int* GlobalIds      = NULL;
00061 static int NumPoints       = 0;
00062 static int* NumEdges       = NULL;
00063 static int* NborGlobalId   = NULL;
00064 static int* NborProcs      = NULL;
00065 static double* ObjWeights  = NULL;
00066 static double* EdgeWeights = NULL;
00067 static int* Parts          = NULL;
00068 
00069 const bool debug = false;
00070 
00071 ZoltanPartitioner::ZoltanPartitioner( Interface* impl, const bool use_coords, int argc, char** argv
00072 #ifdef MOAB_HAVE_CGM
00073                                       ,
00074                                       GeometryQueryTool* gqt
00075 #endif
00076                                       )
00077     : PartitionerBase< int >( impl, use_coords ), myZZ( NULL ), argcArg( argc ), argvArg( argv )
00078 #ifdef MOAB_HAVE_CGM
00079       ,
00080       gti( gqt )
00081 #endif
00082 {
00083 }
00084 
00085 ZoltanPartitioner::~ZoltanPartitioner()
00086 {
00087     if( NULL != myZZ ) delete myZZ;
00088 }
00089 
00090 ErrorCode ZoltanPartitioner::balance_mesh( const char* zmethod, const char* other_method, const bool write_as_sets,
00091                                            const bool write_as_tags )
00092 {
00093     if( !strcmp( zmethod, "RR" ) && !strcmp( zmethod, "RCB" ) && !strcmp( zmethod, "RIB" ) &&
00094         !strcmp( zmethod, "HSFC" ) && !strcmp( zmethod, "Hypergraph" ) && !strcmp( zmethod, "PHG" ) &&
00095         !strcmp( zmethod, "PARMETIS" ) && !strcmp( zmethod, "OCTPART" ) )
00096     {
00097         std::cout << "ERROR node " << mbpc->proc_config().proc_rank() << ": Method must be "
00098                   << "RR, RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART" << std::endl;
00099         return MB_FAILURE;
00100     }
00101 
00102     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00103     std::vector< int > ids;     // point ids from MOAB
00104     std::vector< int > adjs, length;
00105     Range elems;
00106 
00107     // Get a mesh from MOAB and divide it across processors.
00108 
00109     ErrorCode result;
00110 
00111     if( mbpc->proc_config().proc_rank() == 0 )
00112     {
00113         result = assemble_graph( 3, pts, ids, adjs, length, elems );RR;
00114     }
00115 
00116     myNumPts = mbInitializePoints( (int)ids.size(), &pts[0], &ids[0], &adjs[0], &length[0] );
00117 
00118     // Initialize Zoltan.  This is a C call.  The simple C++ code
00119     // that creates Zoltan objects does not keep track of whether
00120     // Zoltan_Initialize has been called.
00121 
00122     float version;
00123 
00124     Zoltan_Initialize( argcArg, argvArg, &version );
00125 
00126     // Create Zoltan object.  This calls Zoltan_Create.
00127     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00128 
00129     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00130         SetRCB_Parameters();
00131     else if( !strcmp( zmethod, "RIB" ) )
00132         SetRIB_Parameters();
00133     else if( !strcmp( zmethod, "HSFC" ) )
00134         SetHSFC_Parameters();
00135     else if( !strcmp( zmethod, "Hypergraph" ) || !strcmp( zmethod, "PHG" ) )
00136         if( NULL == other_method )
00137             SetHypergraph_Parameters( "auto" );
00138         else
00139             SetHypergraph_Parameters( other_method );
00140     else if( !strcmp( zmethod, "PARMETIS" ) )
00141     {
00142         if( NULL == other_method )
00143             SetPARMETIS_Parameters( "RepartGDiffusion" );
00144         else
00145             SetPARMETIS_Parameters( other_method );
00146     }
00147     else if( !strcmp( zmethod, "OCTPART" ) )
00148     {
00149         if( NULL == other_method )
00150             SetOCTPART_Parameters( "2" );
00151         else
00152             SetOCTPART_Parameters( other_method );
00153     }
00154 
00155     // Call backs:
00156 
00157     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00158     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00159     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00160     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00161     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00162     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00163 
00164     // Perform the load balancing partitioning
00165 
00166     int changes;
00167     int numGidEntries;
00168     int numLidEntries;
00169     int numImport;
00170     ZOLTAN_ID_PTR importGlobalIds;
00171     ZOLTAN_ID_PTR importLocalIds;
00172     int* importProcs;
00173     int* importToPart;
00174     int numExport;
00175     ZOLTAN_ID_PTR exportGlobalIds;
00176     ZOLTAN_ID_PTR exportLocalIds;
00177     int* exportProcs;
00178     int* exportToPart;
00179 
00180     int rc = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, numImport, importGlobalIds, importLocalIds,
00181                                  importProcs, importToPart, numExport, exportGlobalIds, exportLocalIds, exportProcs,
00182                                  exportToPart );
00183 
00184     rc = mbGlobalSuccess( rc );
00185 
00186     if( !rc ) { mbPrintGlobalResult( "==============Result==============", myNumPts, numImport, numExport, changes ); }
00187     else
00188     {
00189         return MB_FAILURE;
00190     }
00191 
00192     // take results & write onto MOAB partition sets
00193 
00194     int* assignment;
00195 
00196     mbFinalizePoints( (int)ids.size(), numExport, exportLocalIds, exportProcs, &assignment );
00197 
00198     if( mbpc->proc_config().proc_rank() == 0 )
00199     {
00200         result = write_partition( mbpc->proc_config().proc_size(), elems, assignment, write_as_sets, write_as_tags );
00201 
00202         if( MB_SUCCESS != result ) return result;
00203 
00204         free( (int*)assignment );
00205     }
00206 
00207     // Free the memory allocated for lists returned by LB_Parition()
00208 
00209     myZZ->LB_Free_Part( &importGlobalIds, &importLocalIds, &importProcs, &importToPart );
00210     myZZ->LB_Free_Part( &exportGlobalIds, &exportLocalIds, &exportProcs, &exportToPart );
00211 
00212     // Implementation note:  A Zoltan object contains an MPI communicator.
00213     //   When the Zoltan object is destroyed, it uses it's MPI communicator.
00214     //   So it is important that the Zoltan object is destroyed before
00215     //   the MPI communicator is destroyed.  To ensure this, dynamically
00216     //   allocate the Zoltan object, so you can explicitly destroy it.
00217     //   If you create a Zoltan object on the stack, it's destructor will
00218     //   be invoked atexit, possibly after the communicator's
00219     //   destructor.
00220 
00221     return MB_SUCCESS;
00222 }
00223 
00224 ErrorCode ZoltanPartitioner::repartition( std::vector< double >& x, std::vector< double >& y, std::vector< double >& z,
00225                                           int StartID, const char* zmethod, Range& localGIDs )
00226 {
00227     //
00228     int nprocs = mbpc->proc_config().proc_size();
00229     int rank   = mbpc->proc_config().proc_rank();
00230     clock_t t  = clock();
00231     // form pts and ids, as in assemble_graph
00232     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00233     pts.resize( x.size() * 3 );
00234     std::vector< int > ids;  // point ids from MOAB
00235     ids.resize( x.size() );
00236     for( size_t i = 0; i < x.size(); i++ )
00237     {
00238         pts[3 * i]     = x[i];
00239         pts[3 * i + 1] = y[i];
00240         pts[3 * i + 2] = z[i];
00241         ids[i]         = StartID + (int)i;
00242     }
00243     // do not get out until done!
00244 
00245     Points       = &pts[0];
00246     GlobalIds    = &ids[0];
00247     NumPoints    = (int)x.size();
00248     NumEdges     = NULL;
00249     NborGlobalId = NULL;
00250     NborProcs    = NULL;
00251     ObjWeights   = NULL;
00252     EdgeWeights  = NULL;
00253     Parts        = NULL;
00254 
00255     float version;
00256     if( rank == 0 ) std::cout << "Initializing zoltan..." << std::endl;
00257 
00258     Zoltan_Initialize( argcArg, argvArg, &version );
00259 
00260     // Create Zoltan object.  This calls Zoltan_Create.
00261     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00262 
00263     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00264         SetRCB_Parameters();
00265     else if( !strcmp( zmethod, "RIB" ) )
00266         SetRIB_Parameters();
00267     else if( !strcmp( zmethod, "HSFC" ) )
00268         SetHSFC_Parameters();
00269 
00270     // set # requested partitions
00271     char buff[10];
00272     sprintf( buff, "%d", nprocs );
00273     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
00274     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00275 
00276     // request all, import and export
00277     retval = myZZ->Set_Param( "RETURN_LISTS", "ALL" );
00278     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00279 
00280     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00281     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00282     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00283     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00284     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00285     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00286 
00287     // Perform the load balancing partitioning
00288 
00289     int changes;
00290     int numGidEntries;
00291     int numLidEntries;
00292     int num_import;
00293     ZOLTAN_ID_PTR import_global_ids, import_local_ids;
00294     int* import_procs;
00295     int* import_to_part;
00296     int num_export;
00297     ZOLTAN_ID_PTR export_global_ids, export_local_ids;
00298     int *assign_procs, *assign_parts;
00299 
00300     if( rank == 0 )
00301         std::cout << "Computing partition using " << ( zmethod ? zmethod : "RCB" ) << " method for " << nprocs
00302                   << " processors..." << std::endl;
00303 
00304     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, num_import, import_global_ids, import_local_ids,
00305                                  import_procs, import_to_part, num_export, export_global_ids, export_local_ids,
00306                                  assign_procs, assign_parts );
00307     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00308 
00309     if( rank == 0 )
00310     {
00311         std::cout << " time to LB_partition " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00312         t = clock();
00313     }
00314 
00315     std::sort( import_global_ids, import_global_ids + num_import, std::greater< int >() );
00316     std::sort( export_global_ids, export_global_ids + num_export, std::greater< int >() );
00317 
00318     Range iniGids( (EntityHandle)StartID, (EntityHandle)StartID + x.size() - 1 );
00319     Range imported, exported;
00320     std::copy( import_global_ids, import_global_ids + num_import, range_inserter( imported ) );
00321     std::copy( export_global_ids, export_global_ids + num_export, range_inserter( exported ) );
00322     localGIDs = subtract( iniGids, exported );
00323     localGIDs = unite( localGIDs, imported );
00324     // Free data structures allocated by Zoltan::LB_Partition
00325     retval = myZZ->LB_Free_Part( &import_global_ids, &import_local_ids, &import_procs, &import_to_part );
00326     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00327     retval = myZZ->LB_Free_Part( &export_global_ids, &export_local_ids, &assign_procs, &assign_parts );
00328     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00329 
00330     return MB_SUCCESS;
00331 }
00332 
00333 ErrorCode ZoltanPartitioner::partition_inferred_mesh( EntityHandle sfileset, size_t num_parts, int part_dim,
00334                                                       const bool write_as_sets, int projection_type )
00335 {
00336     ErrorCode result;
00337 
00338     moab::Range elverts;
00339     result = mbImpl->get_entities_by_dimension( sfileset, part_dim, elverts );RR;
00340 
00341     std::vector< double > elcoords( elverts.size() * 3 );
00342     result = mbImpl->get_coords( elverts, &elcoords[0] );RR;
00343 
00344     std::vector< std::vector< EntityHandle > > part_assignments( num_parts );
00345     int part, proc;
00346 
00347     // Loop over coordinates
00348     for( size_t iel = 0; iel < elverts.size(); iel++ )
00349     {
00350         // Gather coordinates into temporary array
00351         double* ecoords = &elcoords[iel * 3];
00352 
00353         // do a projection if needed
00354         if( projection_type > 0 ) IntxUtils::transform_coordinates( ecoords, projection_type );
00355 
00356         // Compute the coordinate's part assignment
00357         myZZ->LB_Point_PP_Assign( ecoords, proc, part );
00358 
00359         // Store the part assignment in the return array
00360         part_assignments[part].push_back( elverts[iel] );
00361     }
00362 
00363     // get the partition set tag
00364     Tag part_set_tag = mbpc->partition_tag();
00365 
00366     // If some entity sets exist, let us delete those first.
00367     if( write_as_sets )
00368     {
00369         moab::Range oldpsets;
00370         result = mbImpl->get_entities_by_type_and_tag( sfileset, MBENTITYSET, &part_set_tag, NULL, 1, oldpsets,
00371                                                        Interface::UNION );RR;
00372         result = mbImpl->remove_entities( sfileset, oldpsets );RR;
00373     }
00374 
00375     size_t nparts_assigned = 0;
00376     for( size_t partnum = 0; partnum < num_parts; ++partnum )
00377     {
00378         std::vector< EntityHandle >& partvec = part_assignments[partnum];
00379 
00380         nparts_assigned += ( partvec.size() ? 1 : 0 );
00381 
00382         if( write_as_sets )
00383         {
00384             EntityHandle partNset;
00385             result = mbImpl->create_meshset( moab::MESHSET_SET, partNset );RR;
00386             if( partvec.size() )
00387             {
00388                 result = mbImpl->add_entities( partNset, &partvec[0], partvec.size() );RR;
00389             }
00390             result = mbImpl->add_parent_child( sfileset, partNset );RR;
00391 
00392             // part numbering should start from 0
00393             int ipartnum = (int)partnum;
00394 
00395             // assign partitions as a sparse tag by grouping elements under sets
00396             result = mbImpl->tag_set_data( part_set_tag, &partNset, 1, &ipartnum );RR;
00397         }
00398         else
00399         {
00400             /* assign as a dense vector to all elements
00401                allocate integer-size partitions */
00402             std::vector< int > assignment( partvec.size(),
00403                                            partnum );  // initialize all values to partnum
00404             result = mbImpl->tag_set_data( part_set_tag, &partvec[0], partvec.size(), &assignment[0] );RR;
00405         }
00406     }
00407 
00408     if( nparts_assigned != num_parts )
00409     {
00410         std::cout << "WARNING: The inference yielded lesser number of parts (" << nparts_assigned
00411                   << ") than requested by user (" << num_parts << ").\n";
00412     }
00413 
00414     return MB_SUCCESS;
00415 }
00416 
00417 ErrorCode ZoltanPartitioner::partition_mesh_and_geometry(
00418     const double part_geom_mesh_size, const int nparts, const char* zmethod, const char* other_method, double imbal_tol,
00419     const int part_dim, const bool write_as_sets, const bool write_as_tags, const int obj_weight, const int edge_weight,
00420 #ifdef MOAB_HAVE_CGM
00421     const bool part_surf, const bool ghost,
00422 #else
00423     const bool, const bool,
00424 #endif
00425     const int projection_type, const bool recompute_rcb_box, const bool print_time )
00426 {
00427     // should only be called in serial
00428     if( mbpc->proc_config().proc_size() != 1 )
00429     {
00430         std::cout << "ZoltanPartitioner::partition_mesh_and_geometry must be called in serial." << std::endl;
00431         return MB_FAILURE;
00432     }
00433     clock_t t = clock();
00434     if( NULL != zmethod && strcmp( zmethod, "RR" ) && strcmp( zmethod, "RCB" ) && strcmp( zmethod, "RIB" ) &&
00435         strcmp( zmethod, "HSFC" ) && strcmp( zmethod, "Hypergraph" ) && strcmp( zmethod, "PHG" ) &&
00436         strcmp( zmethod, "PARMETIS" ) && strcmp( zmethod, "OCTPART" ) )
00437     {
00438         std::cout << "ERROR node " << mbpc->proc_config().proc_rank() << ": Method must be "
00439                   << "RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART" << std::endl;
00440         return MB_FAILURE;
00441     }
00442 
00443     bool part_geom = false;
00444     if( 0 == strcmp( zmethod, "RR" ) || 0 == strcmp( zmethod, "RCB" ) || 0 == strcmp( zmethod, "RIB" ) ||
00445         0 == strcmp( zmethod, "HSFC" ) )
00446         part_geom = true;       // so no adjacency / edges needed
00447     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00448     std::vector< int > ids;     // point ids from MOAB
00449     std::vector< int > adjs, length, parts;
00450     std::vector< double > obj_weights, edge_weights;
00451     Range elems;
00452 #ifdef MOAB_HAVE_CGM
00453     DLIList< RefEntity* > entities;
00454 #endif
00455 
00456     // Get a mesh from MOAB and diide it across processors.
00457 
00458     ErrorCode result = MB_SUCCESS;
00459 
00460     // short-circuit everything if RR partition is requested
00461     if( !strcmp( zmethod, "RR" ) )
00462     {
00463         if( part_geom_mesh_size < 0. )
00464         {
00465             // get all elements
00466             result = mbImpl->get_entities_by_dimension( 0, part_dim, elems );RR;
00467             if( elems.empty() ) return MB_FAILURE;
00468             // make a trivial assignment vector
00469             std::vector< int > assign_vec( elems.size() );
00470             int num_per = elems.size() / nparts;
00471             int extra   = elems.size() % nparts;
00472             if( extra ) num_per++;
00473             int nstart = 0;
00474             for( int i = 0; i < nparts; i++ )
00475             {
00476                 if( i == extra ) num_per--;
00477                 std::fill( &assign_vec[nstart], &assign_vec[nstart + num_per], i );
00478                 nstart += num_per;
00479             }
00480 
00481             result = write_partition( nparts, elems, &assign_vec[0], write_as_sets, write_as_tags );RR;
00482         }
00483         else
00484         {
00485 #ifdef MOAB_HAVE_CGM
00486             result = partition_round_robin( nparts );RR;
00487 #endif
00488         }
00489 
00490         return result;
00491     }
00492 
00493     std::cout << "Assembling graph..." << std::endl;
00494 
00495     if( part_geom_mesh_size < 0. )
00496     {
00497         // if (!part_geom) {
00498         result = assemble_graph( part_dim, pts, ids, adjs, length, elems, part_geom, projection_type );RR;
00499     }
00500     else
00501     {
00502 #ifdef MOAB_HAVE_CGM
00503         result = assemble_graph( part_dim, pts, ids, adjs, length, obj_weights, edge_weights, parts, entities,
00504                                  part_geom_mesh_size, nparts );RR;
00505 
00506         if( debug )
00507         {
00508             int n_ids = ids.size();
00509             entities.reset();
00510             int i_leng = 0;
00511             for( int i = 0; i < n_ids; i++ )
00512             {
00513                 std::cout << "graph_input_ids[" << i << "]=" << ids[i] << ",obj_weights=" << obj_weights[i]
00514                           << ",entity_id=" << entities.get_and_step()->id() << ",part=" << parts[i] << std::endl;
00515                 for( int j = 0; j < length[i]; j++ )
00516                 {
00517                     std::cout << "adjs[" << j << "]=" << adjs[i_leng] << ",edge_weights=" << edge_weights[i_leng]
00518                               << std::endl;
00519                     i_leng++;
00520                 }
00521             }
00522         }
00523 #endif
00524     }
00525     if( print_time )
00526     {
00527         std::cout << " time to assemble graph: " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00528         t = clock();
00529     }
00530     double* o_wgt = NULL;
00531     double* e_wgt = NULL;
00532     if( obj_weights.size() > 0 ) o_wgt = &obj_weights[0];
00533     if( edge_weights.size() > 0 ) e_wgt = &edge_weights[0];
00534 
00535     myNumPts = mbInitializePoints( (int)ids.size(), &pts[0], &ids[0], &adjs[0], &length[0], o_wgt, e_wgt, &parts[0],
00536                                    part_geom );
00537 
00538     // Initialize Zoltan.  This is a C call.  The simple C++ code
00539     // that creates Zoltan objects does not keep track of whether
00540     // Zoltan_Initialize has been called.
00541 
00542     if( print_time )
00543     {
00544         std::cout << " time to initialize points: " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00545         t = clock();
00546     }
00547     float version;
00548 
00549     std::cout << "Initializing zoltan..." << std::endl;
00550 
00551     Zoltan_Initialize( argcArg, argvArg, &version );
00552 
00553     // Create Zoltan object.  This calls Zoltan_Create.
00554     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00555 
00556     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00557         SetRCB_Parameters( recompute_rcb_box );
00558     else if( !strcmp( zmethod, "RIB" ) )
00559         SetRIB_Parameters();
00560     else if( !strcmp( zmethod, "HSFC" ) )
00561         SetHSFC_Parameters();
00562     else if( !strcmp( zmethod, "Hypergraph" ) || !strcmp( zmethod, "PHG" ) )
00563     {
00564         if( NULL == other_method || ( other_method[0] == '\0' ) )
00565             SetHypergraph_Parameters( "auto" );
00566         else
00567             SetHypergraph_Parameters( other_method );
00568 
00569         if( imbal_tol )
00570         {
00571             std::ostringstream str;
00572             str << imbal_tol;
00573             myZZ->Set_Param( "IMBALANCE_TOL", str.str().c_str() );  // no debug messages
00574         }
00575     }
00576     else if( !strcmp( zmethod, "PARMETIS" ) )
00577     {
00578         if( NULL == other_method )
00579             SetPARMETIS_Parameters( "RepartGDiffusion" );
00580         else
00581             SetPARMETIS_Parameters( other_method );
00582     }
00583     else if( !strcmp( zmethod, "OCTPART" ) )
00584     {
00585         if( NULL == other_method )
00586             SetOCTPART_Parameters( "2" );
00587         else
00588             SetOCTPART_Parameters( other_method );
00589     }
00590 
00591     // set # requested partitions
00592     char buff[10];
00593     sprintf( buff, "%d", nparts );
00594     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
00595     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00596 
00597     // request only partition assignments
00598     retval = myZZ->Set_Param( "RETURN_LISTS", "PARTITION ASSIGNMENTS" );
00599     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00600 
00601     if( obj_weight > 0 )
00602     {
00603         std::ostringstream str;
00604         str << obj_weight;
00605         retval = myZZ->Set_Param( "OBJ_WEIGHT_DIM", str.str().c_str() );
00606         if( ZOLTAN_OK != retval ) return MB_FAILURE;
00607     }
00608 
00609     if( edge_weight > 0 )
00610     {
00611         std::ostringstream str;
00612         str << edge_weight;
00613         retval = myZZ->Set_Param( "EDGE_WEIGHT_DIM", str.str().c_str() );
00614         if( ZOLTAN_OK != retval ) return MB_FAILURE;
00615     }
00616 
00617     // Call backs:
00618 
00619     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00620     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00621     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00622     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00623     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00624     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00625     if( part_geom_mesh_size > 0. ) { myZZ->Set_Part_Multi_Fn( mbGetPart, NULL ); }
00626 
00627     // Perform the load balancing partitioning
00628 
00629     int changes;
00630     int numGidEntries;
00631     int numLidEntries;
00632     int dumnum1;
00633     ZOLTAN_ID_PTR dum_local, dum_global;
00634     int *dum1, *dum2;
00635     int num_assign;
00636     ZOLTAN_ID_PTR assign_gid, assign_lid;
00637     int *assign_procs, *assign_parts;
00638 
00639     std::cout << "Computing partition using " << ( zmethod ? zmethod : "RCB" ) << " method for " << nparts
00640               << " processors..." << std::endl;
00641 #ifndef NDEBUG
00642 #if 0
00643   if (NULL == zmethod || !strcmp(zmethod, "RCB"))
00644     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)zmethod, 1, 1, 0, 0);
00645 
00646   if ( !strcmp(zmethod, "PHG"))
00647       Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)zmethod, 1, 0, 1, 1);
00648 #endif
00649 #endif
00650     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, dumnum1, dum_global, dum_local, dum1, dum2,
00651                                  num_assign, assign_gid, assign_lid, assign_procs, assign_parts );
00652     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00653 
00654     if( print_time )
00655     {
00656         std::cout << " time to LB_partition " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00657         t = clock();
00658     }
00659     // take results & write onto MOAB partition sets
00660     std::cout << "Saving partition information to MOAB..." << std::endl;
00661 
00662     if( part_geom_mesh_size < 0. )
00663     {
00664         // if (!part_geom) {
00665         result = write_partition( nparts, elems, assign_parts, write_as_sets, write_as_tags );
00666     }
00667     else
00668     {
00669 #ifdef MOAB_HAVE_CGM
00670         result = write_partition( nparts, entities, assign_parts, obj_weights, part_surf, ghost );
00671 #endif
00672     }
00673 
00674     if( print_time )
00675     {
00676         std::cout << " time to write partition in memory " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00677         t = clock();
00678     }
00679 
00680     if( MB_SUCCESS != result ) return result;
00681 
00682     // Free the memory allocated for lists returned by LB_Parition()
00683     myZZ->LB_Free_Part( &assign_gid, &assign_lid, &assign_procs, &assign_parts );
00684 
00685     return MB_SUCCESS;
00686 }
00687 
00688 ErrorCode ZoltanPartitioner::include_closure()
00689 {
00690     ErrorCode result;
00691     Range ents;
00692     Range adjs;
00693     std::cout << "Adding closure..." << std::endl;
00694 
00695     for( Range::iterator rit = partSets.begin(); rit != partSets.end(); ++rit )
00696     {
00697 
00698         // get the top-dimensional entities in the part
00699         result = mbImpl->get_entities_by_handle( *rit, ents, true );RR;
00700 
00701         if( ents.empty() ) continue;
00702 
00703         // get intermediate-dimensional adjs and add to set
00704         for( int d = mbImpl->dimension_from_handle( *ents.begin() ) - 1; d >= 1; d-- )
00705         {
00706             adjs.clear();
00707             result = mbImpl->get_adjacencies( ents, d, false, adjs, Interface::UNION );RR;
00708             result = mbImpl->add_entities( *rit, adjs );RR;
00709         }
00710 
00711         // now get vertices and add to set; only need to do for ents, not for adjs
00712         adjs.clear();
00713         result = mbImpl->get_adjacencies( ents, 0, false, adjs, Interface::UNION );RR;
00714         result = mbImpl->add_entities( *rit, adjs );RR;
00715 
00716         ents.clear();
00717     }
00718 
00719     // now go over non-part entity sets, looking for contained entities
00720     Range sets, part_ents;
00721     result = mbImpl->get_entities_by_type( 0, MBENTITYSET, sets );RR;
00722     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
00723     {
00724         // skip parts
00725         if( partSets.find( *rit ) != partSets.end() ) continue;
00726 
00727         // get entities in this set, recursively
00728         ents.clear();
00729         result = mbImpl->get_entities_by_handle( *rit, ents, true );RR;
00730 
00731         // now check over all parts
00732         for( Range::iterator rit2 = partSets.begin(); rit2 != partSets.end(); ++rit2 )
00733         {
00734             part_ents.clear();
00735             result = mbImpl->get_entities_by_handle( *rit2, part_ents, false );RR;
00736             Range int_range = intersect( ents, part_ents );
00737             if( !int_range.empty() )
00738             {
00739                 // non-empty intersection, add to part set
00740                 result = mbImpl->add_entities( *rit2, &( *rit ), 1 );RR;
00741             }
00742         }
00743     }
00744 
00745     // finally, mark all the part sets as having the closure
00746     Tag closure_tag;
00747     result =
00748         mbImpl->tag_get_handle( "INCLUDES_CLOSURE", 1, MB_TYPE_INTEGER, closure_tag, MB_TAG_SPARSE | MB_TAG_CREAT );RR;
00749 
00750     std::vector< int > closure_vals( partSets.size(), 1 );
00751     result = mbImpl->tag_set_data( closure_tag, partSets, &closure_vals[0] );RR;
00752 
00753     return MB_SUCCESS;
00754 }
00755 
00756 ErrorCode ZoltanPartitioner::assemble_graph( const int dimension, std::vector< double >& coords,
00757                                              std::vector< int >& moab_ids, std::vector< int >& adjacencies,
00758                                              std::vector< int >& length, Range& elems, bool part_geom,
00759                                              int projection_type )
00760 {
00761     // assemble a graph with vertices equal to elements of specified dimension, edges
00762     // signified by list of other elements to which an element is connected
00763 
00764     // get the elements of that dimension
00765     ErrorCode result = mbImpl->get_entities_by_dimension( 0, dimension, elems );
00766     if( MB_SUCCESS != result || elems.empty() ) return result;
00767 
00768     // assign global ids
00769     if( assign_global_ids )
00770     {
00771         EntityHandle rootset = 0;
00772         result               = mbpc->assign_global_ids( rootset, dimension, 1, true, true );RR;
00773     }
00774 
00775     // now assemble the graph, calling MeshTopoUtil to get bridge adjacencies through d-1
00776     // dimensional neighbors
00777     MeshTopoUtil mtu( mbImpl );
00778     Range adjs;
00779     // can use a fixed-size array 'cuz the number of lower-dimensional neighbors is limited
00780     // by MBCN
00781     int neighbors[5 * MAX_SUB_ENTITIES];
00782     double avg_position[3];
00783     int moab_id;
00784 
00785     // get the global id tag handle
00786     Tag gid = mbImpl->globalId_tag();
00787 
00788     for( Range::iterator rit = elems.begin(); rit != elems.end(); ++rit )
00789     {
00790 
00791         if( !part_geom )
00792         {
00793             // get bridge adjacencies
00794             adjs.clear();
00795             result = mtu.get_bridge_adjacencies( *rit, ( dimension > 0 ? dimension - 1 : 3 ), dimension, adjs );RR;
00796 
00797             // get the graph vertex ids of those
00798             if( !adjs.empty() )
00799             {
00800                 assert( adjs.size() < 5 * MAX_SUB_ENTITIES );
00801                 result = mbImpl->tag_get_data( gid, adjs, neighbors );RR;
00802             }
00803 
00804             // copy those into adjacencies vector
00805             length.push_back( (int)adjs.size() );
00806             std::copy( neighbors, neighbors + adjs.size(), std::back_inserter( adjacencies ) );
00807         }
00808 
00809         // get average position of vertices
00810         result = mtu.get_average_position( *rit, avg_position );RR;
00811 
00812         // get the graph vertex id for this element
00813         result = mbImpl->tag_get_data( gid, &( *rit ), 1, &moab_id );RR;
00814 
00815         // copy those into coords vector
00816         moab_ids.push_back( moab_id );
00817         // transform coordinates to spherical coordinates, if requested
00818         if( projection_type > 0 ) IntxUtils::transform_coordinates( avg_position, projection_type );
00819 
00820         std::copy( avg_position, avg_position + 3, std::back_inserter( coords ) );
00821     }
00822 
00823     if( debug )
00824     {
00825         std::cout << "Length vector: " << std::endl;
00826         std::copy( length.begin(), length.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00827         std::cout << std::endl;
00828         std::cout << "Adjacencies vector: " << std::endl;
00829         std::copy( adjacencies.begin(), adjacencies.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00830         std::cout << std::endl;
00831         std::cout << "Moab_ids vector: " << std::endl;
00832         std::copy( moab_ids.begin(), moab_ids.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00833         std::cout << std::endl;
00834         std::cout << "Coords vector: " << std::endl;
00835         std::copy( coords.begin(), coords.end(), std::ostream_iterator< double >( std::cout, ", " ) );
00836         std::cout << std::endl;
00837     }
00838 
00839     return MB_SUCCESS;
00840 }
00841 
00842 #ifdef MOAB_HAVE_CGM
00843 ErrorCode ZoltanPartitioner::assemble_graph( const int /* dimension */, std::vector< double >& /* coords */,
00844                                              std::vector< int >& moab_ids, std::vector< int >& adjacencies,
00845                                              std::vector< int >& length, std::vector< double >& obj_weights,
00846                                              std::vector< double >& edge_weights, std::vector< int >& parts,
00847                                              DLIList< RefEntity* >& entities, const double part_geom_mesh_size,
00848                                              const int n_part )
00849 {
00850     // get body vertex weights
00851     DLIList< RefEntity* > body_list;
00852     gti->ref_entity_list( "body", body_list, CUBIT_FALSE );
00853     DLIList< RefFace* > all_shared_surfs;
00854     int n_bodies       = body_list.size();
00855     int body_map_index = 1;
00856     int surf_map_index = n_bodies + 1;
00857     int n_bodies_proc  = n_bodies / n_part;  // # of entities per processor
00858     int i_bodies_proc  = n_bodies_proc;      // entity index limit for each processor
00859     int proc           = 0;
00860 
00861     body_list.reset();
00862     for( int i = 0; i < n_bodies; i++ )
00863     {
00864         // assign part in round-robin
00865         if( i == i_bodies_proc )
00866         {
00867             proc++;
00868             if( proc < n_part )
00869                 i_bodies_proc += n_bodies_proc;
00870             else
00871             {
00872                 proc %= n_part;
00873                 i_bodies_proc++;
00874             }
00875         }
00876         parts.push_back( proc );
00877 
00878         // volume mesh generation load estimation
00879         RefEntity* body = body_list.get_and_step();
00880         double vertex_w = body->measure();
00881         vertex_w /= part_geom_mesh_size * part_geom_mesh_size * part_geom_mesh_size;
00882         vertex_w = 3.8559e-5 * vertex_w * log( vertex_w );
00883 
00884         // add child non-interface face weights
00885         DLIList< RefFace* > shared_surfs;
00886         DLIList< RefFace* > faces;
00887         ( dynamic_cast< TopologyEntity* >( body ) )->ref_faces( faces );
00888         int n_face = faces.size();
00889         faces.reset();
00890         for( int j = 0; j < n_face; j++ )
00891         {
00892             RefFace* face      = faces.get_and_step();
00893             TopologyEntity* te = CAST_TO( face, TopologyEntity );
00894             if( te->bridge_manager()->number_of_bridges() > 1 )
00895             {  // shared
00896                 shared_surfs.append( face );
00897             }
00898             else
00899             {  // non-shared
00900                 vertex_w += estimate_face_mesh_load( face, face->measure() );
00901             }
00902         }
00903 
00904         int temp_index              = body_map_index++;
00905         body_vertex_map[body->id()] = temp_index;
00906         moab_ids.push_back( temp_index );   // add body as graph vertex
00907         obj_weights.push_back( vertex_w );  // add weight for body graph vertex
00908 
00909         if( debug )
00910         {
00911             std::cout << "body=" << body->id() << ",graph_id=" << temp_index << ",weight=" << vertex_w
00912                       << ",part=" << proc << std::endl;
00913         }
00914 
00915         // treat shared surfaces connected to this body
00916         int n_shared = shared_surfs.size();
00917         shared_surfs.reset();
00918         for( int k = 0; k < n_shared; k++ )
00919         {  // add adjacencies
00920             RefFace* face                       = shared_surfs.get_and_step();
00921             std::map< int, int >::iterator iter = surf_vertex_map.find( face->id() );
00922             if( iter != surf_vertex_map.end() ) { temp_index = ( *iter ).second; }
00923             else
00924             {
00925                 temp_index                  = surf_map_index++;
00926                 surf_vertex_map[face->id()] = temp_index;
00927                 all_shared_surfs.append( face );
00928             }
00929             adjacencies.push_back( temp_index );
00930             double tmp_sw = estimate_face_comm_load( face, part_geom_mesh_size );
00931             edge_weights.push_back( tmp_sw );
00932 
00933             if( debug ) { std::cout << "adjac=" << temp_index << ",weight=" << tmp_sw << std::endl; }
00934         }
00935         length.push_back( n_shared );
00936     }
00937     entities += body_list;
00938 
00939     // add shared surface as graph vertex
00940     int n_all_shared_surf = all_shared_surfs.size();
00941     all_shared_surfs.reset();
00942     for( int i = 0; i < n_all_shared_surf; i++ )
00943     {
00944         RefEntity* face = all_shared_surfs.get_and_step();
00945         moab_ids.push_back( surf_vertex_map[face->id()] );
00946         double face_mesh_load = estimate_face_mesh_load( face, part_geom_mesh_size );
00947         obj_weights.push_back( face_mesh_load );
00948 
00949         // set surface object graph edges
00950         int min_ind     = -1;
00951         double min_load = std::numeric_limits< double >::max();
00952         ;
00953         DLIList< Body* > parents;
00954         dynamic_cast< TopologyEntity* >( face )->bodies( parents );
00955         int n_parents = parents.size();
00956         parents.reset();
00957         for( int j = 0; j < n_parents; j++ )
00958         {
00959             int body_gid = body_vertex_map[parents.get_and_step()->id()];
00960             adjacencies.push_back( body_gid );  // add adjacency
00961             edge_weights.push_back( estimate_face_comm_load( face, part_geom_mesh_size ) );
00962 
00963             if( obj_weights[body_gid - 1] < min_load )
00964             {
00965                 min_ind  = body_gid - 1;
00966                 min_load = obj_weights[min_ind];
00967             }
00968         }
00969         length.push_back( n_parents );
00970         entities.append( dynamic_cast< RefEntity* >( face ) );
00971         obj_weights[min_ind] += face_mesh_load;
00972         parts.push_back( parts[min_ind] );
00973 
00974         if( debug )
00975         {
00976             std::cout << "shared_surf=" << face->id() << ",graph_id=" << surf_vertex_map[face->id()]
00977                       << ",weight=" << face_mesh_load << ",part=" << parts[min_ind] << std::endl;
00978         }
00979     }
00980 
00981     for( size_t i = 0; i < obj_weights.size(); i++ )
00982         if( obj_weights[i] < 1. ) obj_weights[i] = 1.;
00983     for( size_t i = 0; i < edge_weights.size(); i++ )
00984         if( edge_weights[i] < 1. ) edge_weights[i] = 1.;
00985 
00986     return MB_SUCCESS;
00987 }
00988 
00989 double ZoltanPartitioner::estimate_face_mesh_load( RefEntity* face, const double h )
00990 {
00991     GeometryType type = CAST_TO( face, RefFace )->geometry_type();
00992     double n          = face->measure() / h / h;
00993     double n_logn     = n * log( n );
00994 
00995     if( type == PLANE_SURFACE_TYPE ) { return 1.536168737505151e-4 * n_logn; }
00996     else if( type == SPLINE_SURFACE_TYPE )
00997     {
00998         return 5.910511018383144e-4 * n_logn;
00999     }
01000     else if( type == CONE_SURFACE_TYPE || type == SPHERE_SURFACE_TYPE || type == TORUS_SURFACE_TYPE )
01001     {
01002         return 2.352511671418708e-4 * n_logn;
01003     }
01004 
01005     return 0.0;
01006 }
01007 
01008 double ZoltanPartitioner::estimate_face_comm_load( RefEntity* face, const double h )
01009 {
01010     double peri = 0.;
01011 #if( ( CGM_MAJOR_VERSION == 14 && CGM_MINOR_VERSION > 2 ) || CGM_MAJOR_VERSION >= 15 )
01012     DLIList< DLIList< RefEdge* > > ref_edge_loops;
01013 #else
01014     DLIList< DLIList< RefEdge* >* > ref_edge_loops;
01015 #endif
01016     CAST_TO( face, RefFace )->ref_edge_loops( ref_edge_loops );
01017     ref_edge_loops.reset();
01018 
01019 #if( ( CGM_MAJOR_VERSION == 14 && CGM_MINOR_VERSION > 2 ) || CGM_MAJOR_VERSION >= 15 )
01020     for( int i = 0; i < ref_edge_loops.size(); i++ )
01021     {
01022         DLIList< RefEdge* > eloop = ref_edge_loops.get_and_step();
01023         eloop.reset();
01024         for( int j = 0; j < eloop.size(); j++ )
01025         {
01026             peri += eloop.get_and_step()->measure();
01027         }
01028     }
01029 #else
01030     for( int i = 0; i < ref_edge_loops.size(); i++ )
01031     {
01032         DLIList< RefEdge* >* eloop = ref_edge_loops.get_and_step();
01033         eloop->reset();
01034         for( int j = 0; j < eloop->size(); j++ )
01035         {
01036             peri += eloop->get_and_step()->measure();
01037         }
01038     }
01039 #endif
01040 
01041     // return 104*face->measure()/sqrt(3)/h/h + 56/3*peri/h;
01042     return ( 104 * face->measure() / sqrt( 3 ) / h / h + 56 / 3 * peri / h ) / 700000.;
01043 }
01044 
01045 ErrorCode ZoltanPartitioner::write_partition( const int nparts, DLIList< RefEntity* > entities, const int* assignment,
01046                                               std::vector< double >& obj_weights, const bool part_surf,
01047                                               const bool ghost )
01048 {
01049     ErrorCode result;
01050 
01051     // actuate CA_BODIES and turn on auto flag for other attributes
01052     CGMApp::instance()->attrib_manager()->register_attrib_type( CA_BODIES, "bodies", "BODIES", CABodies_creator,
01053                                                                 CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE,
01054                                                                 CUBIT_TRUE, CUBIT_FALSE );
01055     CGMApp::instance()->attrib_manager()->auto_flag( CUBIT_TRUE );
01056 
01057     // set partition info to Body at first
01058     int n_entities = entities.size();
01059     entities.reset();
01060     for( int i = 0; i < n_entities; i++ )
01061     {
01062         int proc = assignment[i];
01063         DLIList< int > shared_procs;
01064         RefEntity* entity = entities.get_and_step();
01065 
01066         if( entity->entity_type_info() == typeid( Body ) )
01067         {
01068             shared_procs.append( proc );
01069             TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01070             if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs );
01071 
01072             if( debug ) { std::cout << "body" << entity->id() << "_is_partitioned_to_p" << proc << std::endl; }
01073 
01074             // assign to volumes, it should be removed in future
01075             DLIList< RefVolume* > volumes;
01076             ( dynamic_cast< TopologyEntity* >( entity ) )->ref_volumes( volumes );
01077             int n_vol = volumes.size();
01078             volumes.reset();
01079             for( int j = 0; j < n_vol; j++ )
01080             {
01081                 RefEntity* vol = volumes.get_and_step();
01082                 td_par         = (TDParallel*)vol->get_TD( &TDParallel::is_parallel );
01083                 if( td_par == NULL ) td_par = new TDParallel( vol, NULL, &shared_procs );
01084             }
01085         }
01086     }
01087 
01088     // set partition info to shared surfaces
01089     entities.reset();
01090     for( int i = 0; i < n_entities; i++ )
01091     {
01092         int proc = assignment[i];
01093         DLIList< int > shared_procs;
01094         RefEntity* entity = entities.get_and_step();
01095 
01096         if( entity->entity_type_info() == typeid( RefFace ) )
01097         {  // surface
01098             DLIList< Body* > parents;
01099             ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parents );
01100             int n_parents = parents.size();
01101             if( n_parents != 2 )
01102             {  // check # of parents
01103                 std::cerr << "# of parent bodies of interface surface should be 2." << std::endl;
01104                 return MB_FAILURE;
01105             }
01106             shared_procs.append( proc );  // local proc
01107             parents.reset();
01108             for( int j = 0; j < 2; j++ )
01109             {
01110                 RefEntity* parent     = parents.get_and_step();
01111                 TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01112 
01113                 if( parent_td == NULL )
01114                 {
01115                     std::cerr << "parent body should have balanced process." << std::endl;
01116                     return MB_FAILURE;
01117                 }
01118                 int temp_proc = parent_td->get_charge_proc();
01119                 if( temp_proc != proc ) shared_procs.append( temp_proc );  // remote proc
01120             }
01121 
01122             if( shared_procs.size() > 1 )
01123             {  // if it is shared by 2 processors
01124                 int merge_id       = TDUniqueId::get_unique_id( entity );
01125                 TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01126                 if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs, NULL, merge_id, 1 );
01127 
01128                 if( debug )
01129                 {
01130                     std::cout << "surf" << entity->id() << "_is_partitioned_to_p";
01131                     for( int j = 0; j < shared_procs.size(); j++ )
01132                     {
01133                         std::cout << "," << shared_procs[j];
01134                     }
01135                     std::cout << std::endl;
01136                 }
01137             }
01138         }
01139     }
01140 
01141     // do non-shared surface partition too
01142     if( part_surf )
01143     {
01144         result = partition_surface( nparts, entities, assignment, obj_weights );RR;
01145     }
01146 
01147     // partition shared edges and vertex
01148     result = partition_child_entities( 1, nparts, part_surf, ghost );RR;
01149     result = partition_child_entities( 0, nparts, part_surf );RR;
01150 
01151     if( debug )
01152     {
01153         entities.reset();
01154         for( int i = 0; i < n_entities; i++ )
01155         {
01156             RefEntity* entity = entities.get_and_step();
01157             if( entity->entity_type_info() == typeid( Body ) )
01158             {
01159                 TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01160                 CubitString st     = entity->entity_name();
01161                 DLIList< int >* sp = td_par->get_shared_proc_list();
01162                 int n_sp           = sp->size();
01163                 sp->reset();
01164                 for( int j = 0; j < n_sp; j++ )
01165                 {
01166                     std::cout << "partitioned_" << st.c_str() << ",proc=" << sp->get_and_step() << std::endl;
01167                 }
01168                 DLIList< int >* gp = td_par->get_ghost_proc_list();
01169                 int n_gp           = gp->size();
01170                 sp->reset();
01171                 for( int j = 0; j < n_gp; j++ )
01172                 {
01173                     std::cout << "partitioned_" << st.c_str() << ",ghost=" << gp->get_and_step() << std::endl;
01174                 }
01175             }
01176         }
01177     }
01178 
01179     return MB_SUCCESS;
01180 }
01181 
01182 ErrorCode ZoltanPartitioner::partition_surface( const int nparts, DLIList< RefEntity* > entities, const int* assignment,
01183                                                 std::vector< double >& obj_weights )
01184 {
01185     int i;
01186     double ave_load = 0.;
01187     double* loads   = new double[nparts];
01188     for( i = 0; i < nparts; i++ )
01189         loads[i] = 0.;
01190 
01191     int n_entities = entities.size();
01192     entities.reset();
01193     for( i = 0; i < n_entities; i++ )
01194     {
01195         loads[assignment[i]] += obj_weights[i];
01196         ave_load += obj_weights[i];
01197     }
01198     ave_load /= nparts;
01199 
01200     if( debug )
01201     {
01202         for( i = 0; i < nparts; i++ )
01203         {
01204             std::cout << "loads_before_surface_partition[" << i << "]=" << loads[i] << std::endl;
01205         }
01206     }
01207 
01208     int n_iter  = 0;
01209     bool b_stop = false;
01210     do
01211     {
01212         // get max and min load processors
01213         int max_proc = nparts, min_proc = 0;
01214         double min_load = std::numeric_limits< double >::max();
01215         double max_load = std::numeric_limits< double >::min();
01216         for( i = 0; i < nparts; i++ )
01217         {
01218             if( loads[i] < min_load )
01219             {
01220                 min_load = loads[i];
01221                 min_proc = i;
01222             }
01223             if( loads[i] > max_load )
01224             {
01225                 max_load = loads[i];
01226                 max_proc = i;
01227             }
01228         }
01229 
01230         double load_diff = max_load - ave_load;
01231         if( load_diff > ave_load / 10. )
01232         {
01233             bool b_moved = false;
01234             entities.reset();
01235             for( i = 0; i < n_entities; i++ )
01236             {
01237                 if( b_moved ) break;
01238                 if( assignment[i] == max_proc &&  // find maximum load processor bodies
01239                     entities[i]->entity_type_info() == typeid( Body ) )
01240                 {
01241                     DLIList< RefFace* > faces;
01242                     ( dynamic_cast< TopologyEntity* >( entities[i] ) )->ref_faces( faces );
01243                     int n_face = faces.size();
01244                     faces.reset();
01245                     for( int j = 0; j < n_face; j++ )
01246                     {
01247                         RefFace* face      = faces.get_and_step();
01248                         TopologyEntity* te = CAST_TO( face, TopologyEntity );
01249                         if( te->bridge_manager()->number_of_bridges() < 2 )
01250                         {  // non-shared
01251                             TDParallel* td_par = (TDParallel*)face->get_TD( &TDParallel::is_parallel );
01252                             if( td_par == NULL )
01253                             {  // only consider unpartitioned surfaces
01254                                 double face_load = face->measure();
01255                                 if( load_diff > min_load + face_load - ave_load )
01256                                 {
01257                                     loads[max_proc] -= face_load;
01258                                     loads[min_proc] += face_load;
01259                                     int merge_id = TDUniqueId::get_unique_id( face );
01260                                     DLIList< int > shared_procs;
01261                                     shared_procs.append( min_proc );
01262                                     shared_procs.append( max_proc );
01263                                     td_par = new TDParallel( face, NULL, &shared_procs, NULL, merge_id, 1 );
01264 
01265                                     if( debug )
01266                                     {
01267                                         std::cout << "non-shared surface " << face->id() << " is moved from p"
01268                                                   << max_proc << " to p" << min_proc << std::endl;
01269                                     }
01270                                     b_moved = true;
01271                                     break;
01272                                 }
01273                             }
01274                         }
01275                     }
01276                 }
01277             }
01278         }
01279         else
01280             b_stop = true;
01281 
01282         n_iter++;
01283     } while( !b_stop && n_iter < 50 );
01284 
01285     if( debug )
01286     {
01287         for( i = 0; i < nparts; i++ )
01288         {
01289             std::cout << "loads_after_surface_partition[" << i << "]=" << loads[i] << std::endl;
01290         }
01291     }
01292 
01293     delete loads;
01294     return MB_SUCCESS;
01295 }
01296 
// Assign processors to CGM bodies in round-robin order (tracking estimated
// per-processor loads), propagate the assignment to each body's volumes, and
// give shared (interface) child entities the processor set of their parent
// bodies, greedily balancing interface loads as they are encountered.
ErrorCode ZoltanPartitioner::partition_round_robin( const int n_part )
{
    int i, j, k;
    double* loads    = new double[n_part];  // estimated loads for each processor
    double* ve_loads = new double[n_part];  // estimated loads for each processor
    for( i = 0; i < n_part; i++ )
    {
        loads[i]    = 0.0;
        ve_loads[i] = 0.0;
    }
    DLIList< RefEntity* > body_entity_list;
    gti->ref_entity_list( "body", body_entity_list, CUBIT_FALSE );
    int n_entity      = body_entity_list.size();
    int n_entity_proc = n_entity / n_part;  // # of entities per processor
    int i_entity_proc = n_entity_proc;      // entity index limit for each processor
    int proc          = 0;
    RefEntity* entity;

    // assign processors to bodies
    body_entity_list.reset();
    for( i = 0; i < n_entity; i++ )
    {
        // advance to the next processor once this one's quota is reached;
        // remainder bodies wrap around, one extra per processor
        if( i == i_entity_proc )
        {
            proc++;
            if( proc < n_part )
                i_entity_proc += n_entity_proc;
            else
            {
                proc %= n_part;
                i_entity_proc++;
            }
        }

        // assign to bodies
        // (TDParallel registers itself as tool data on the entity, so the bare
        // 'new' is retrievable later via get_TD and is not a leak)
        entity = body_entity_list.get_and_step();
        DLIList< int > shared_procs;
        shared_procs.append( proc );
        TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
        if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs );
        loads[proc] += entity->measure();

        // assign to volumes, it should be removed in future
        DLIList< RefVolume* > volumes;
        ( dynamic_cast< TopologyEntity* >( entity ) )->ref_volumes( volumes );
        int n_vol = volumes.size();
        volumes.reset();
        for( j = 0; j < n_vol; j++ )
        {
            RefEntity* vol = volumes.get_and_step();
            td_par         = (TDParallel*)vol->get_TD( &TDParallel::is_parallel );
            if( td_par == NULL ) td_par = new TDParallel( vol, NULL, &shared_procs );
        }

        // add local surface load
        // (only non-shared faces count; shared faces are split below)
        DLIList< RefFace* > faces;
        ( dynamic_cast< TopologyEntity* >( entity ) )->ref_faces( faces );
        int n_face = faces.size();
        faces.reset();
        for( j = 0; j < n_face; j++ )
        {
            RefFace* face      = faces.get_and_step();
            TopologyEntity* te = CAST_TO( face, TopologyEntity );
            if( te->bridge_manager()->number_of_bridges() < 2 ) loads[proc] = loads[proc] + face->measure();
        }

        // Get all child entities
        // NOTE(review): this collects the children of ALL bodies on every
        // outer iteration, and on early iterations some parent bodies have not
        // yet received a TDParallel (triggering the PRINT_ERROR path below for
        // shared children).  Confirm whether this block was intended to run
        // once, after the body loop.
        DLIList< RefEntity* > child_list;
        RefEntity::get_all_child_ref_entities( body_entity_list, child_list );
        int n_child = child_list.size();

        // assign processors to interface entities
        child_list.reset();
        for( j = 0; j < n_child; j++ )
        {
            entity             = child_list.get_and_step();
            TopologyEntity* te = CAST_TO( entity, TopologyEntity );

            if( te->bridge_manager()->number_of_bridges() > 1 )
            {
                DLIList< Body* > parent_bodies;
                DLIList< int > child_shared_procs;  // Shared processors of each child entity
                ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parent_bodies );
                int n_parent = parent_bodies.size();

                // collect the (unique) charge processors of all parent bodies
                for( k = 0; k < n_parent; k++ )
                {
                    RefEntity* parent_vol = CAST_TO( parent_bodies.get_and_step(), RefEntity );
                    TDParallel* parent_td = (TDParallel*)parent_vol->get_TD( &TDParallel::is_parallel );

                    if( parent_td == NULL )
                    {
                        PRINT_ERROR( "parent Volume has to be partitioned." );
                        delete[] loads;
                        delete[] ve_loads;
                        return MB_FAILURE;
                    }
                    child_shared_procs.append_unique( parent_td->get_charge_proc() );
                }

                if( child_shared_procs.size() > 1 )
                {  // if it is interface
                    td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
                    if( td_par == NULL )
                    {
                        int merge_id = TDUniqueId::get_unique_id( entity );
                        if( entity->entity_type_info() == typeid( RefFace ) )
                        {  // face
                            if( child_shared_procs.size() != 2 )
                            {
                                PRINT_ERROR( "Error: # of shared processors of interface surface "
                                             "should be 2." );
                                delete[] loads;
                                delete[] ve_loads;
                                return MB_FAILURE;
                            }

                            // balance interface surface loads
                            // (charge the face to the lighter of its two procs;
                            // the charge proc is the first list entry)
                            if( loads[child_shared_procs[0]] > loads[child_shared_procs[1]] )
                                child_shared_procs.reverse();

                            loads[child_shared_procs[0]] = loads[child_shared_procs[0]] + entity->measure();
                            td_par = new TDParallel( entity, NULL, &child_shared_procs, NULL, merge_id, 1 );
                        }  // face
                        else if( entity->entity_type_info() == typeid( RefEdge ) ||
                                 entity->entity_type_info() == typeid( RefVertex ) )
                        {  // edge or vertex
                            // balance interface surface loads
                            // (pick the least-loaded shared proc and move it to
                            // the front so it becomes the charge proc)
                            int min_p         = child_shared_procs[0];
                            int n_shared_proc = child_shared_procs.size();
                            for( k = 1; k < n_shared_proc; k++ )
                            {
                                if( ve_loads[child_shared_procs[k]] < ve_loads[min_p] ) min_p = child_shared_procs[k];
                            }
                            ve_loads[min_p] = ve_loads[min_p] + entity->measure();
                            child_shared_procs.remove( min_p );
                            child_shared_procs.insert_first( min_p );
                            td_par = new TDParallel( entity, NULL, &child_shared_procs, NULL, merge_id, 1 );
                        }  // edge or vertex
                    }      // if (td_par == NULL)
                }          // if it is interface
            }              // if (te->bridge_manager()->number_of_bridges() > 1)
        }                  // for (j = 0; j < n_child; j++)
    }                      // for (i = 0; i < n_entity; i++)

    delete[] loads;
    delete[] ve_loads;

    return MB_SUCCESS;
}
01447 
01448 // partition child entities to one of parent entity shared processors
01449 ErrorCode ZoltanPartitioner::partition_child_entities( const int dim, const int n_part, const bool part_surf,
01450                                                        const bool ghost )
01451 {
01452     DLIList< RefEntity* > entity_list;
01453     if( dim == 0 )
01454         gti->ref_entity_list( "vertex", entity_list, CUBIT_FALSE );
01455     else if( dim == 1 )
01456         gti->ref_entity_list( "curve", entity_list, CUBIT_FALSE );
01457     else
01458     {
01459         std::cerr << "Dimention should be from 0 to 1." << std::endl;
01460         return MB_FAILURE;
01461     }
01462 
01463     int i, j, k;
01464     int n_entity  = entity_list.size();
01465     double* loads = new double[n_part];
01466     for( i = 0; i < n_part; i++ )
01467         loads[i] = 0.;
01468     entity_list.reset();
01469 
01470     for( i = 0; i < n_entity; i++ )
01471     {  // for all entities
01472         RefEntity* entity  = entity_list.get_and_step();
01473         TopologyEntity* te = CAST_TO( entity, TopologyEntity );
01474 
01475         if( !part_surf && te->bridge_manager()->number_of_bridges() < 2 ) continue;
01476 
01477         DLIList< int > shared_procs;
01478         DLIList< Body* > parents;
01479         ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parents );
01480         int n_parents = parents.size();
01481         std::set< int > s_proc;
01482         parents.reset();
01483 
01484         // get shared processors from parent bodies
01485         for( j = 0; j < n_parents; j++ )
01486         {
01487             RefEntity* parent     = parents.get_and_step();
01488             TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01489 
01490             if( parent_td != NULL )
01491             {
01492                 DLIList< int >* parent_procs = parent_td->get_shared_proc_list();
01493                 int n_shared                 = parent_procs->size();
01494                 parent_procs->reset();
01495                 for( k = 0; k < n_shared; k++ )
01496                 {
01497                     int p = parent_procs->get_and_step();
01498                     s_proc.insert( p );
01499                 }
01500             }
01501         }
01502 
01503         if( part_surf )
01504         {  // also get shared procs from parent surfaces
01505             DLIList< RefFace* > parent_faces;
01506             ( dynamic_cast< TopologyEntity* >( entity ) )->ref_faces( parent_faces );
01507             int n_pface = parent_faces.size();
01508             parent_faces.reset();
01509 
01510             // get shared processors from parent faces
01511             for( j = 0; j < n_pface; j++ )
01512             {
01513                 RefEntity* parent     = parent_faces.get_and_step();
01514                 TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01515 
01516                 if( parent_td != NULL )
01517                 {
01518                     DLIList< int >* parent_procs = parent_td->get_shared_proc_list();
01519                     int n_shared                 = parent_procs->size();
01520                     parent_procs->reset();
01521 
01522                     for( k = 0; k < n_shared; k++ )
01523                     {
01524                         int p = parent_procs->get_and_step();
01525                         s_proc.insert( p );
01526                     }
01527                 }
01528             }
01529         }
01530 
01531         // find the minimum load processor and put it
01532         // at the front of the shared_procs list
01533         if( s_proc.size() > 1 )
01534         {
01535             int min_proc                       = 0;
01536             double min_load                    = std::numeric_limits< double >::max();
01537             std::set< int >::iterator iter     = s_proc.begin();
01538             std::set< int >::iterator end_iter = s_proc.end();
01539             for( ; iter != end_iter; ++iter )
01540             {
01541                 if( loads[*iter] < min_load )
01542                 {
01543                     min_load = loads[*iter];
01544                     min_proc = *iter;
01545                 }
01546             }
01547 
01548             if( dim == 1 )
01549                 loads[min_proc] += entity->measure();
01550             else if( dim == 0 )
01551                 loads[min_proc] += 1.;
01552             shared_procs.append( min_proc );
01553             iter     = s_proc.begin();
01554             end_iter = s_proc.end();
01555             for( ; iter != end_iter; ++iter )
01556             {
01557                 if( *iter != min_proc ) { shared_procs.append( *iter ); }
01558             }
01559 
01560             // add ghost geometries to shared processors for edge
01561             if( ghost )
01562             {
01563                 parents.reset();
01564                 for( j = 0; j < n_parents; j++ )
01565                 {  // for all parent bodies
01566                     RefEntity* parent_vol = CAST_TO( parents.get_and_step(), RefEntity );
01567                     TDParallel* parent_td = (TDParallel*)parent_vol->get_TD( &TDParallel::is_parallel );
01568                     int n_shared_proc     = shared_procs.size();
01569 
01570                     for( k = 0; k < n_shared_proc; k++ )
01571                     {
01572                         parent_td->add_ghost_proc( shared_procs[k] );
01573                     }
01574                 }
01575             }
01576 
01577             // initialize tool data
01578             int merge_id       = TDUniqueId::get_unique_id( entity );
01579             TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01580             if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs, NULL, merge_id, 1 );
01581         }
01582     }
01583 
01584     delete loads;
01585     return MB_SUCCESS;
01586 }
01587 #endif
01588 
01589 ErrorCode ZoltanPartitioner::write_partition( const int nparts, Range& elems, const int* assignment,
01590                                               const bool write_as_sets, const bool write_as_tags )
01591 {
01592     ErrorCode result;
01593 
01594     // get the partition set tag
01595     Tag part_set_tag;
01596     int dum_id = -1, i;
01597     result     = mbImpl->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_set_tag,
01598                                      MB_TAG_SPARSE | MB_TAG_CREAT, &dum_id );RR;
01599 
01600     // get any sets already with this tag, and clear them
01601     Range tagged_sets;
01602     result =
01603         mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &part_set_tag, NULL, 1, tagged_sets, Interface::UNION );RR;
01604     if( !tagged_sets.empty() )
01605     {
01606         result = mbImpl->clear_meshset( tagged_sets );RR;
01607         if( !write_as_sets )
01608         {
01609             result = mbImpl->tag_delete_data( part_set_tag, tagged_sets );RR;
01610         }
01611     }
01612 
01613     if( write_as_sets )
01614     {
01615         // first, create partition sets and store in vector
01616         partSets.clear();
01617 
01618         if( nparts > (int)tagged_sets.size() )
01619         {
01620             // too few partition sets - create missing ones
01621             int num_new = nparts - tagged_sets.size();
01622             for( i = 0; i < num_new; i++ )
01623             {
01624                 EntityHandle new_set;
01625                 result = mbImpl->create_meshset( MESHSET_SET, new_set );RR;
01626                 tagged_sets.insert( new_set );
01627             }
01628         }
01629         else if( nparts < (int)tagged_sets.size() )
01630         {
01631             // too many partition sets - delete extras
01632             int num_del = tagged_sets.size() - nparts;
01633             for( i = 0; i < num_del; i++ )
01634             {
01635                 EntityHandle old_set = tagged_sets.pop_back();
01636                 result               = mbImpl->delete_entities( &old_set, 1 );RR;
01637             }
01638         }
01639         // assign partition sets to vector
01640         partSets.swap( tagged_sets );
01641 
01642         // write a tag to those sets denoting they're partition sets, with a value of the
01643         // proc number
01644         int* dum_ids = new int[nparts];
01645         for( i = 0; i < nparts; i++ )
01646             dum_ids[i] = i;
01647 
01648         result = mbImpl->tag_set_data( part_set_tag, partSets, dum_ids );RR;
01649         // found out by valgrind when we run mbpart
01650         delete[] dum_ids;
01651         dum_ids = NULL;
01652 
01653         // assign entities to the relevant sets
01654         std::vector< EntityHandle > tmp_part_sets;
01655         // int N = (int)elems.size();
01656         std::copy( partSets.begin(), partSets.end(), std::back_inserter( tmp_part_sets ) );
01657         /*Range::reverse_iterator riter;
01658         for (i = N-1, riter = elems.rbegin(); riter != elems.rend(); ++riter, i--) {
01659           int assigned_part = assignment[i];
01660           part_ranges[assigned_part].insert(*riter);
01661           //result = mbImpl->add_entities(tmp_part_sets[assignment[i]], &(*rit), 1); RR;
01662         }*/
01663 
01664         Range::iterator rit;
01665         for( i = 0, rit = elems.begin(); rit != elems.end(); ++rit, i++ )
01666         {
01667             result = mbImpl->add_entities( tmp_part_sets[assignment[i]], &( *rit ), 1 );RR;
01668         }
01669         /*for (i=0; i<nparts; i++)
01670         {
01671           result = mbImpl->add_entities(tmp_part_sets[i], part_ranges[i]); RR;
01672         }
01673         delete [] part_ranges;*/
01674         // check for empty sets, warn if there are any
01675         Range empty_sets;
01676         for( rit = partSets.begin(); rit != partSets.end(); ++rit )
01677         {
01678             int num_ents = 0;
01679             result       = mbImpl->get_number_entities_by_handle( *rit, num_ents );
01680             if( MB_SUCCESS != result || !num_ents ) empty_sets.insert( *rit );
01681         }
01682         if( !empty_sets.empty() )
01683         {
01684             std::cout << "WARNING: " << empty_sets.size() << " empty sets in partition: ";
01685             for( rit = empty_sets.begin(); rit != empty_sets.end(); ++rit )
01686                 std::cout << *rit << " ";
01687             std::cout << std::endl;
01688         }
01689     }
01690 
01691     if( write_as_tags )
01692     {
01693         // allocate integer-size partitions
01694         result = mbImpl->tag_set_data( part_set_tag, elems, assignment );RR;
01695     }
01696 
01697     return MB_SUCCESS;
01698 }
01699 
01700 void ZoltanPartitioner::SetRCB_Parameters( const bool recompute_rcb_box )
01701 {
01702     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nRecursive Coordinate Bisection" << std::endl;
01703     // General parameters:
01704 
01705     myZZ->Set_Param( "DEBUG_LEVEL", "0" );  // no debug messages
01706     myZZ->Set_Param( "LB_METHOD", "RCB" );  // recursive coordinate bisection
01707     // myZZ->Set_Param( "RCB_RECOMPUTE_BOX", "1" );  // recompute RCB box if needed ?
01708 
01709     // RCB parameters:
01710 
01711     myZZ->Set_Param( "RCB_OUTPUT_LEVEL", "1" );
01712     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition so that we can infer partitions
01713     // myZZ->Set_Param("RCB_RECTILINEAR_BLOCKS", "1"); // don't split point on boundary
01714     if( recompute_rcb_box ) myZZ->Set_Param( "RCB_RECOMPUTE_BOX", "1" );
01715 }
01716 
01717 void ZoltanPartitioner::SetRIB_Parameters()
01718 {
01719     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nRecursive Inertial Bisection" << std::endl;
01720     // General parameters:
01721 
01722     myZZ->Set_Param( "DEBUG_LEVEL", "0" );  // no debug messages
01723     myZZ->Set_Param( "LB_METHOD", "RIB" );  // Recursive Inertial Bisection
01724 
01725     // RIB parameters:
01726 
01727     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition
01728     myZZ->Set_Param( "AVERAGE_CUTS", "1" );
01729 }
01730 
01731 void ZoltanPartitioner::SetHSFC_Parameters()
01732 {
01733     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nHilbert Space Filling Curve" << std::endl;
01734     // General parameters:
01735 
01736     myZZ->Set_Param( "DEBUG_LEVEL", "0" );   // no debug messages
01737     myZZ->Set_Param( "LB_METHOD", "HSFC" );  // perform Hilbert space filling curve
01738 
01739     // HSFC parameters:
01740 
01741     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition
01742 }
01743 
01744 void ZoltanPartitioner::SetHypergraph_Parameters( const char* phg_method )
01745 {
01746     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nHypergraph (or PHG): " << std::endl;
01747     // General parameters:
01748 
01749     myZZ->Set_Param( "DEBUG_LEVEL", "0" );         // no debug messages
01750     myZZ->Set_Param( "LB_METHOD", "Hypergraph" );  // Hypergraph (or PHG)
01751 
01752     // Hypergraph (or PHG) parameters:
01753     myZZ->Set_Param( "PHG_COARSEPARTITION_METHOD", phg_method );  // CoarsePartitionMethod
01754 }
01755 
01756 void ZoltanPartitioner::SetPARMETIS_Parameters( const char* parmetis_method )
01757 {
01758     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nPARMETIS: " << parmetis_method << std::endl;
01759     // General parameters:
01760 
01761     myZZ->Set_Param( "DEBUG_LEVEL", "0" );       // no debug messages
01762     myZZ->Set_Param( "LB_METHOD", "PARMETIS" );  // the ParMETIS library
01763 
01764     // PARMETIS parameters:
01765 
01766     myZZ->Set_Param( "PARMETIS_METHOD", parmetis_method );  // method in the library
01767 }
01768 
01769 void ZoltanPartitioner::SetOCTPART_Parameters( const char* oct_method )
01770 {
01771     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nOctree Partitioning: " << oct_method << std::endl;
01772     // General parameters:
01773 
01774     myZZ->Set_Param( "DEBUG_LEVEL", "0" );      // no debug messages
01775     myZZ->Set_Param( "LB_METHOD", "OCTPART" );  // octree partitioning
01776 
01777     // OCTPART parameters:
01778 
01779     myZZ->Set_Param( "OCT_METHOD", oct_method );  // the SFC to be used
01780     myZZ->Set_Param( "OCT_OUTPUT_LEVEL", "3" );
01781 }
01782 
/// Distribute the global point/graph arrays (assembled on rank 0) across all
/// ranks so the Zoltan query callbacks can serve them.  Rank 0 slices its
/// arrays and sends one chunk per rank; the other ranks allocate receive
/// buffers.  The per-rank arrays are then published through the file-scope
/// statics (Points, GlobalIds, NumEdges, NborGlobalId, NborProcs, ...).
/// Returns the number of points owned by the calling rank.
int ZoltanPartitioner::mbInitializePoints( int npts, double* pts, int* ids, int* adjs, int* length, double* obj_weights,
                                           double* edge_weights, int* parts, bool part_geom )
{
    unsigned int i;
    int j;
    int *numPts, *nborProcs = NULL;
    int sum, ptsPerProc, ptsAssigned, mySize;
    MPI_Status stat;
    double* sendPts;
    int* sendIds;
    int* sendEdges  = NULL;
    int* sendNborId = NULL;
    int* sendProcs;

    if( mbpc->proc_config().proc_rank() == 0 )
    {
        /* divide pts to start */

        numPts      = (int*)malloc( sizeof( int ) * mbpc->proc_config().proc_size() );
        ptsPerProc  = npts / mbpc->proc_config().proc_size();
        ptsAssigned = 0;

        // every rank but the last gets an equal share; the last rank
        // absorbs the remainder
        for( i = 0; i < mbpc->proc_config().proc_size() - 1; i++ )
        {
            numPts[i] = ptsPerProc;
            ptsAssigned += ptsPerProc;
        }

        numPts[mbpc->proc_config().proc_size() - 1] = npts - ptsAssigned;

        mySize  = numPts[mbpc->proc_config().proc_rank()];
        sendPts = pts + ( 3 * numPts[0] );  // skip past rank 0's own coordinates
        sendIds = ids + numPts[0];
        sum     = 0;  // possible no adjacency sent
        if( !part_geom )
        {
            // graph partitioning: also slice the flattened adjacency arrays
            sendEdges = length + numPts[0];

            // offset of the first remote rank's adjacency entries
            for( j = 0; j < numPts[0]; j++ )
                sum += length[j];

            sendNborId = adjs + sum;

            // total adjacency count, needed to size nborProcs
            for( j = numPts[0]; j < npts; j++ )
                sum += length[j];

            nborProcs = (int*)malloc( sizeof( int ) * sum );
        }
        // map each neighbor id to the rank that owns it under the equal-share
        // distribution above (overflow ids belong to the last rank)
        for( j = 0; j < sum; j++ )
            if( ( i = adjs[j] / ptsPerProc ) < mbpc->proc_config().proc_size() )
                nborProcs[j] = i;
            else
                nborProcs[j] = mbpc->proc_config().proc_size() - 1;

        sendProcs = nborProcs + ( sendNborId - adjs );

        // ship each remote rank its slice of points, ids and adjacency
        // NOTE(review): when part_geom is true, sendEdges/sendNborId remain
        // NULL but are still passed to MPI_Send below — verify callers only
        // reach this with part_geom == false on multi-rank runs
        for( i = 1; i < mbpc->proc_config().proc_size(); i++ )
        {
            MPI_Send( &numPts[i], 1, MPI_INT, i, 0x00, mbpc->comm() );
            MPI_Send( sendPts, 3 * numPts[i], MPI_DOUBLE, i, 0x01, mbpc->comm() );
            MPI_Send( sendIds, numPts[i], MPI_INT, i, 0x03, mbpc->comm() );
            MPI_Send( sendEdges, numPts[i], MPI_INT, i, 0x06, mbpc->comm() );
            sum = 0;

            for( j = 0; j < numPts[i]; j++ )
                sum += sendEdges[j];

            MPI_Send( sendNborId, sum, MPI_INT, i, 0x07, mbpc->comm() );
            MPI_Send( sendProcs, sum, MPI_INT, i, 0x08, mbpc->comm() );
            sendPts += ( 3 * numPts[i] );
            sendIds += numPts[i];
            sendEdges += numPts[i];
            sendNborId += sum;
            sendProcs += sum;
        }

        free( numPts );
    }
    else
    {
        // non-root ranks: receive this rank's slice; the buffers are
        // malloc'ed here and intentionally kept alive via the statics below
        MPI_Recv( &mySize, 1, MPI_INT, 0, 0x00, mbpc->comm(), &stat );
        pts    = (double*)malloc( sizeof( double ) * 3 * mySize );
        ids    = (int*)malloc( sizeof( int ) * mySize );
        length = (int*)malloc( sizeof( int ) * mySize );
        if( obj_weights != NULL ) obj_weights = (double*)malloc( sizeof( double ) * mySize );
        MPI_Recv( pts, 3 * mySize, MPI_DOUBLE, 0, 0x01, mbpc->comm(), &stat );
        MPI_Recv( ids, mySize, MPI_INT, 0, 0x03, mbpc->comm(), &stat );
        MPI_Recv( length, mySize, MPI_INT, 0, 0x06, mbpc->comm(), &stat );
        sum = 0;

        // total adjacency count for this rank's slice
        for( j = 0; j < mySize; j++ )
            sum += length[j];

        adjs = (int*)malloc( sizeof( int ) * sum );
        if( edge_weights != NULL ) edge_weights = (double*)malloc( sizeof( double ) * sum );
        nborProcs = (int*)malloc( sizeof( int ) * sum );
        MPI_Recv( adjs, sum, MPI_INT, 0, 0x07, mbpc->comm(), &stat );
        MPI_Recv( nborProcs, sum, MPI_INT, 0, 0x08, mbpc->comm(), &stat );
    }

    // publish the per-rank arrays to the file-scope statics read by the
    // Zoltan query callbacks
    Points       = pts;
    GlobalIds    = ids;
    NumPoints    = mySize;
    NumEdges     = length;
    NborGlobalId = adjs;
    NborProcs    = nborProcs;
    ObjWeights   = obj_weights;
    EdgeWeights  = edge_weights;
    Parts        = parts;

    return mySize;
}
01895 
01896 void ZoltanPartitioner::mbFinalizePoints( int npts, int numExport, ZOLTAN_ID_PTR exportLocalIDs, int* exportProcs,
01897                                           int** assignment )
01898 {
01899     int* MyAssignment;
01900     int i;
01901     int numPts;
01902     MPI_Status stat;
01903     int* recvA;
01904 
01905     /* assign pts to start */
01906 
01907     if( mbpc->proc_config().proc_rank() == 0 )
01908         MyAssignment = (int*)malloc( sizeof( int ) * npts );
01909     else
01910         MyAssignment = (int*)malloc( sizeof( int ) * NumPoints );
01911 
01912     for( i = 0; i < NumPoints; i++ )
01913         MyAssignment[i] = mbpc->proc_config().proc_rank();
01914 
01915     for( i = 0; i < numExport; i++ )
01916         MyAssignment[exportLocalIDs[i]] = exportProcs[i];
01917 
01918     if( mbpc->proc_config().proc_rank() == 0 )
01919     {
01920         /* collect pts */
01921         recvA = MyAssignment + NumPoints;
01922 
01923         for( i = 1; i < (int)mbpc->proc_config().proc_size(); i++ )
01924         {
01925             MPI_Recv( &numPts, 1, MPI_INT, i, 0x04, mbpc->comm(), &stat );
01926             MPI_Recv( recvA, numPts, MPI_INT, i, 0x05, mbpc->comm(), &stat );
01927             recvA += numPts;
01928         }
01929 
01930         *assignment = MyAssignment;
01931     }
01932     else
01933     {
01934         MPI_Send( &NumPoints, 1, MPI_INT, 0, 0x04, mbpc->comm() );
01935         MPI_Send( MyAssignment, NumPoints, MPI_INT, 0, 0x05, mbpc->comm() );
01936         free( MyAssignment );
01937     }
01938 }
01939 
01940 int ZoltanPartitioner::mbGlobalSuccess( int rc )
01941 {
01942     int fail = 0;
01943     unsigned int i;
01944     int* vals = (int*)malloc( mbpc->proc_config().proc_size() * sizeof( int ) );
01945 
01946     MPI_Allgather( &rc, 1, MPI_INT, vals, 1, MPI_INT, mbpc->comm() );
01947 
01948     for( i = 0; i < mbpc->proc_config().proc_size(); i++ )
01949     {
01950         if( vals[i] != ZOLTAN_OK )
01951         {
01952             if( 0 == mbpc->proc_config().proc_rank() ) { mbShowError( vals[i], "Result on process " ); }
01953             fail = 1;
01954         }
01955     }
01956 
01957     free( vals );
01958     return fail;
01959 }
01960 
01961 void ZoltanPartitioner::mbPrintGlobalResult( const char* s, int begin, int import, int exp, int change )
01962 {
01963     unsigned int i;
01964     int* v1 = (int*)malloc( 4 * sizeof( int ) );
01965     int* v2 = NULL;
01966     int* v;
01967 
01968     v1[0] = begin;
01969     v1[1] = import;
01970     v1[2] = exp;
01971     v1[3] = change;
01972 
01973     if( mbpc->proc_config().proc_rank() == 0 )
01974     {
01975         v2 = (int*)malloc( 4 * mbpc->proc_config().proc_size() * sizeof( int ) );
01976     }
01977 
01978     MPI_Gather( v1, 4, MPI_INT, v2, 4, MPI_INT, 0, mbpc->comm() );
01979 
01980     if( mbpc->proc_config().proc_rank() == 0 )
01981     {
01982         fprintf( stdout, "======%s======\n", s );
01983         for( i = 0, v = v2; i < mbpc->proc_config().proc_size(); i++, v += 4 )
01984         {
01985             fprintf( stdout, "%u: originally had %d, import %d, exp %d, %s\n", i, v[0], v[1], v[2],
01986                      v[3] ? "a change of partitioning" : "no change" );
01987         }
01988         fprintf( stdout, "==========================================\n" );
01989         fflush( stdout );
01990 
01991         free( v2 );
01992     }
01993 
01994     free( v1 );
01995 }
01996 
01997 void ZoltanPartitioner::mbShowError( int val, const char* s )
01998 {
01999     if( s ) printf( "%s ", s );
02000 
02001     switch( val )
02002     {
02003         case ZOLTAN_OK:
02004             printf( "%d: SUCCESSFUL\n", mbpc->proc_config().proc_rank() );
02005             break;
02006         case ZOLTAN_WARN:
02007             printf( "%d: WARNING\n", mbpc->proc_config().proc_rank() );
02008             break;
02009         case ZOLTAN_FATAL:
02010             printf( "%d: FATAL ERROR\n", mbpc->proc_config().proc_rank() );
02011             break;
02012         case ZOLTAN_MEMERR:
02013             printf( "%d: MEMORY ALLOCATION ERROR\n", mbpc->proc_config().proc_rank() );
02014             break;
02015         default:
02016             printf( "%d: INVALID RETURN CODE\n", mbpc->proc_config().proc_rank() );
02017             break;
02018     }
02019 }
02020 
02021 /**********************
02022 ** call backs
02023 **********************/
02024 
02025 int mbGetNumberOfAssignedObjects( void* /* userDefinedData */, int* err )
02026 {
02027     *err = 0;
02028     return NumPoints;
02029 }
02030 
02031 void mbGetObjectList( void* /* userDefinedData */, int /* numGlobalIds */, int /* numLids */, ZOLTAN_ID_PTR gids,
02032                       ZOLTAN_ID_PTR lids, int wgt_dim, float* obj_wgts, int* err )
02033 {
02034     for( int i = 0; i < NumPoints; i++ )
02035     {
02036         gids[i] = GlobalIds[i];
02037         lids[i] = i;
02038         if( wgt_dim > 0 ) obj_wgts[i] = ObjWeights[i];
02039     }
02040 
02041     *err = 0;
02042 }
02043 
// Zoltan callback: number of geometry values per object (always 3D coords).
int mbGetObjectSize( void* /* userDefinedData */, int* err )
{
    *err = 0;
    return 3;
}
02049 
02050 void mbGetObject( void* /* userDefinedData */, int /* numGlobalIds */, int /* numLids */, int numObjs,
02051                   ZOLTAN_ID_PTR /* gids */, ZOLTAN_ID_PTR lids, int numDim, double* pts, int* err )
02052 {
02053     int i, id, id3;
02054     int next = 0;
02055 
02056     if( numDim != 3 )
02057     {
02058         *err = 1;
02059         return;
02060     }
02061 
02062     for( i = 0; i < numObjs; i++ )
02063     {
02064         id = lids[i];
02065 
02066         if( ( id < 0 ) || ( id >= NumPoints ) )
02067         {
02068             *err = 1;
02069             return;
02070         }
02071 
02072         id3 = lids[i] * 3;
02073 
02074         pts[next++] = Points[id3];
02075         pts[next++] = Points[id3 + 1];
02076         pts[next++] = Points[id3 + 2];
02077     }
02078 }
02079 
02080 void mbGetNumberOfEdges( void* /* userDefinedData */, int /* numGlobalIds */, int /* numLids */, int numObjs,
02081                          ZOLTAN_ID_PTR /* gids */, ZOLTAN_ID_PTR lids, int* numEdges, int* err )
02082 {
02083     int i, id;
02084     int next = 0;
02085 
02086     for( i = 0; i < numObjs; i++ )
02087     {
02088         id = lids[i];
02089 
02090         if( ( id < 0 ) || ( id >= NumPoints ) )
02091         {
02092             *err = 1;
02093             return;
02094         }
02095 
02096         numEdges[next++] = NumEdges[id];
02097     }
02098 }
02099 
02100 void mbGetEdgeList( void* /* userDefinedData */, int /* numGlobalIds */, int /* numLids */, int numObjs,
02101                     ZOLTAN_ID_PTR /* gids */, ZOLTAN_ID_PTR lids, int* /* numEdges */, ZOLTAN_ID_PTR nborGlobalIds,
02102                     int* nborProcs, int wgt_dim, float* edge_wgts, int* err )
02103 {
02104     int i, id, idSum, j;
02105     int next = 0;
02106 
02107     for( i = 0; i < numObjs; i++ )
02108     {
02109         id = lids[i];
02110 
02111         if( ( id < 0 ) || ( id >= NumPoints ) )
02112         {
02113             *err = 1;
02114             return;
02115         }
02116 
02117         idSum = 0;
02118 
02119         for( j = 0; j < id; j++ )
02120             idSum += NumEdges[j];
02121 
02122         for( j = 0; j < NumEdges[id]; j++ )
02123         {
02124             nborGlobalIds[next] = NborGlobalId[idSum];
02125             nborProcs[next]     = NborProcs[idSum];
02126             if( wgt_dim > 0 ) edge_wgts[next] = EdgeWeights[idSum];
02127             next++;
02128             idSum++;
02129         }
02130     }
02131 }
02132 
02133 void mbGetPart( void* /* userDefinedData */, int /* numGlobalIds */, int /* numLids */, int numObjs,
02134                 ZOLTAN_ID_PTR /* gids */, ZOLTAN_ID_PTR lids, int* part, int* err )
02135 {
02136     int i, id;
02137     int next = 0;
02138 
02139     for( i = 0; i < numObjs; i++ )
02140     {
02141         id = lids[i];
02142 
02143         if( ( id < 0 ) || ( id >= NumPoints ) )
02144         {
02145             *err = 1;
02146             return;
02147         }
02148 
02149         part[next++] = Parts[id];
02150     }
02151 }
02152 
02153 // new methods for partition in parallel, used by migrate in iMOAB
02154 ErrorCode ZoltanPartitioner::partition_owned_cells( Range& primary, ParallelComm* pco,
02155                                                     std::multimap< int, int >& extraGraphEdges,
02156                                                     std::map< int, int > procs, int& numNewPartitions,
02157                                                     std::map< int, Range >& distribution, int met )
02158 {
02159     // start copy
02160     MeshTopoUtil mtu( mbImpl );
02161     Range adjs;
02162 
02163     std::vector< int > adjacencies;
02164     std::vector< int > ids;
02165     ids.resize( primary.size() );
02166     std::vector< int > length;
02167     std::vector< double > coords;
02168     std::vector< int > nbor_proc;
02169     // can use a fixed-size array 'cuz the number of lower-dimensional neighbors is limited
02170     // by MBCN
02171     int neighbors[5 * MAX_SUB_ENTITIES];
02172     int neib_proc[5 * MAX_SUB_ENTITIES];
02173     double avg_position[3];
02174     int moab_id;
02175     int primaryDim = mbImpl->dimension_from_handle( *primary.rbegin() );
02176     // get the global id tag handle
02177     Tag gid = mbImpl->globalId_tag();
02178 
02179     ErrorCode rval = mbImpl->tag_get_data( gid, primary, &ids[0] );MB_CHK_ERR( rval );
02180 
02181     int rank = pco->rank();  // current rank , will be put on regular neighbors
02182     int i    = 0;
02183     for( Range::iterator rit = primary.begin(); rit != primary.end(); ++rit, i++ )
02184     {
02185         EntityHandle cell = *rit;
02186         // get bridge adjacencies for each cell
02187         if( 1 == met )
02188         {
02189             adjs.clear();
02190             rval = mtu.get_bridge_adjacencies( cell, ( primaryDim > 0 ? primaryDim - 1 : 3 ), primaryDim, adjs );MB_CHK_ERR( rval );
02191 
02192             // get the graph vertex ids of those
02193             if( !adjs.empty() )
02194             {
02195                 assert( adjs.size() < 5 * MAX_SUB_ENTITIES );
02196                 rval = mbImpl->tag_get_data( gid, adjs, neighbors );MB_CHK_ERR( rval );
02197             }
02198             // if adjacent to neighbor partitions, add to the list
02199             int size_adjs = (int)adjs.size();
02200             moab_id       = ids[i];
02201             for( int k = 0; k < size_adjs; k++ )
02202                 neib_proc[k] = rank;  // current rank
02203             if( extraGraphEdges.find( moab_id ) != extraGraphEdges.end() )
02204             {
02205                 // it means that the current cell is adjacent to a cell in another partition ; maybe
02206                 // a few
02207                 std::pair< std::multimap< int, int >::iterator, std::multimap< int, int >::iterator > ret;
02208                 ret = extraGraphEdges.equal_range( moab_id );
02209                 for( std::multimap< int, int >::iterator it = ret.first; it != ret.second; ++it )
02210                 {
02211                     int otherID          = it->second;
02212                     neighbors[size_adjs] = otherID;         // the id of the other cell, across partition
02213                     neib_proc[size_adjs] = procs[otherID];  // this is how we built this map, the cell id maps to
02214                                                             // what proc it came from
02215                     size_adjs++;
02216                 }
02217             }
02218             // copy those into adjacencies vector
02219             length.push_back( size_adjs );
02220             std::copy( neighbors, neighbors + size_adjs, std::back_inserter( adjacencies ) );
02221             std::copy( neib_proc, neib_proc + size_adjs, std::back_inserter( nbor_proc ) );
02222         }
02223         else if( 2 == met )
02224         {
02225             if( TYPE_FROM_HANDLE( cell ) == MBVERTEX )
02226             {
02227                 rval = mbImpl->get_coords( &cell, 1, avg_position );MB_CHK_ERR( rval );
02228             }
02229             else
02230             {
02231                 rval = mtu.get_average_position( cell, avg_position );MB_CHK_ERR( rval );
02232             }
02233             std::copy( avg_position, avg_position + 3, std::back_inserter( coords ) );
02234         }
02235     }
02236 
02237 #ifdef VERBOSE
02238     std::stringstream ff2;
02239     ff2 << "zoltanInput_" << pco->rank() << ".txt";
02240     std::ofstream ofs;
02241     ofs.open( ff2.str().c_str(), std::ofstream::out );
02242     ofs << "Length vector: " << std::endl;
02243     std::copy( length.begin(), length.end(), std::ostream_iterator< int >( ofs, ", " ) );
02244     ofs << std::endl;
02245     ofs << "Adjacencies vector: " << std::endl;
02246     std::copy( adjacencies.begin(), adjacencies.end(), std::ostream_iterator< int >( ofs, ", " ) );
02247     ofs << std::endl;
02248     ofs << "Moab_ids vector: " << std::endl;
02249     std::copy( ids.begin(), ids.end(), std::ostream_iterator< int >( ofs, ", " ) );
02250     ofs << std::endl;
02251     ofs << "Coords vector: " << std::endl;
02252     std::copy( coords.begin(), coords.end(), std::ostream_iterator< double >( ofs, ", " ) );
02253     ofs << std::endl;
02254     ofs.close();
02255 #endif
02256 
02257     // these are static var in this file, and used in the callbacks
02258     Points = NULL;
02259     if( 1 != met ) Points = &coords[0];
02260     GlobalIds    = &ids[0];
02261     NumPoints    = (int)ids.size();
02262     NumEdges     = &length[0];
02263     NborGlobalId = &adjacencies[0];
02264     NborProcs    = &nbor_proc[0];
02265     ObjWeights   = NULL;
02266     EdgeWeights  = NULL;
02267     Parts        = NULL;
02268 
02269     float version;
02270     if( pco->rank() == 0 ) std::cout << "Initializing zoltan..." << std::endl;
02271 
02272     Zoltan_Initialize( argcArg, argvArg, &version );
02273 
02274     // Create Zoltan object.  This calls Zoltan_Create.
02275     if( NULL == myZZ ) myZZ = new Zoltan( pco->comm() );
02276 
02277     // set # requested partitions
02278     char buff[10];
02279     sprintf( buff, "%d", numNewPartitions );
02280     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
02281     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02282 
02283     // request parts assignment
02284     retval = myZZ->Set_Param( "RETURN_LISTS", "PARTS" );
02285     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02286 
02287     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
02288     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
02289     // due to a bug in zoltan, if method is graph partitioning, do not pass coordinates!!
02290     if( 2 == met )
02291     {
02292         myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
02293         myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
02294         SetRCB_Parameters();  // geometry
02295     }
02296     else if( 1 == met )
02297     {
02298         myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
02299         myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
02300         SetHypergraph_Parameters( "auto" );
02301     }
02302 
02303     // Perform the load balancing partitioning
02304 
02305     int changes;
02306     int numGidEntries;
02307     int numLidEntries;
02308     int num_import;
02309     ZOLTAN_ID_PTR import_global_ids, import_local_ids;
02310     int* import_procs;
02311     int* import_to_part;
02312     int num_export;
02313     ZOLTAN_ID_PTR export_global_ids, export_local_ids;
02314     int *assign_procs, *assign_parts;
02315 
02316     if( pco->rank() == 0 )
02317         std::cout << "Computing partition using method (1-graph, 2-geom):" << met << " for " << numNewPartitions
02318                   << " parts..." << std::endl;
02319 
02320 #ifndef NDEBUG
02321 #if 0
02322   static int counter=0; // it may be possible to call function multiple times in a simulation
02323   // give a way to not overwrite the files
02324   // it should work only with a modified version of Zoltan
02325   std::stringstream basename;
02326   if (1==met)
02327   {
02328     basename << "phg_" << counter++;
02329     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)(basename.str().c_str()), 1, 0, 1, 0);
02330   }
02331   else if (2==met)
02332   {
02333     basename << "rcb_" << counter++;
02334     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)(basename.str().c_str()), 1, 1, 0, 0);
02335   }
02336 #endif
02337 #endif
02338     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, num_import, import_global_ids, import_local_ids,
02339                                  import_procs, import_to_part, num_export, export_global_ids, export_local_ids,
02340                                  assign_procs, assign_parts );
02341     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02342 
02343 #ifdef VERBOSE
02344     std::stringstream ff3;
02345     ff3 << "zoltanOutput_" << pco->rank() << ".txt";
02346     std::ofstream ofs3;
02347     ofs3.open( ff3.str().c_str(), std::ofstream::out );
02348     ofs3 << " export elements on rank " << rank << " \n";
02349     ofs3 << "\t index \t gb_id \t local \t proc \t part \n";
02350     for( int k = 0; k < num_export; k++ )
02351     {
02352         ofs3 << "\t" << k << "\t" << export_global_ids[k] << "\t" << export_local_ids[k] << "\t" << assign_procs[k]
02353              << "\t" << assign_parts[k] << "\n";
02354     }
02355     ofs3.close();
02356 #endif
02357 
02358     // basically each local cell is assigned to a part
02359 
02360     assert( num_export == (int)primary.size() );
02361     for( i = 0; i < num_export; i++ )
02362     {
02363         EntityHandle cell = primary[export_local_ids[i]];
02364         distribution[assign_parts[i]].insert( cell );
02365     }
02366 
02367     Zoltan::LB_Free_Part( &import_global_ids, &import_local_ids, &import_procs, &import_to_part );
02368     Zoltan::LB_Free_Part( &export_global_ids, &export_local_ids, &assign_procs, &assign_parts );
02369 
02370     delete myZZ;
02371     myZZ = NULL;
02372 
02373     // clear arrays that were resized locally, to free up local memory
02374 
02375     std::vector< int >().swap( adjacencies );
02376     std::vector< int >().swap( ids );
02377     std::vector< int >().swap( length );
02378     std::vector< int >().swap( nbor_proc );
02379     std::vector< double >().swap( coords );
02380 
02381     return MB_SUCCESS;
02382 }
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines