MOAB: Mesh Oriented datABase  (version 5.4.1)
ZoltanPartitioner.cpp
00001 /* $Id$
00002  *
00003  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00004  * storing and accessing finite element mesh data.
00005  *
00006  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00007  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00008  * retains certain rights in this software.
00009  *
00010  * This library is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  */
00016 
00017 /** Get a mesh from MOAB and write a Zoltan partition set back into MOAB and to
00018  *  a file
00019  *
00020  *
00021  */
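/* Illustrative usage sketch (hedged: the variable names and argument values are
 * assumptions chosen for demonstration, and it assumes a build with MPI but without CGM):
 *
 *   moab::Core mb;
 *   mb.load_file( "input.h5m" );
 *   moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );
 *   ZoltanPartitioner tool( &mb, &pcomm, false, argc, argv );
 *   // partition 3D elements into 4 parts with recursive coordinate bisection,
 *   // storing the result back into MOAB as both partition sets and tags
 *   // (partition_mesh_and_geometry checks below that it runs on a single process)
 *   moab::ErrorCode rval = tool.partition_mesh_and_geometry( -1.0, 4, "RCB", NULL, 1.03, 3,
 *                                                            true, true, 0, 0, false, false,
 *                                                            0, false, false );
 */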
00022 
00023 #include <iostream>
00024 #include <cassert>
00025 #include <sstream>
00026 #include <algorithm>
00027 
00028 #include "moab/ZoltanPartitioner.hpp"
00029 #include "moab/Interface.hpp"
00030 #include "Internals.hpp"
00031 #include "moab/Range.hpp"
00032 #include "moab/WriteUtilIface.hpp"
00033 #include "moab/MeshTopoUtil.hpp"
00034 #include "MBTagConventions.hpp"
00035 #include "moab/CN.hpp"
00036 // used for gnomonic projection
00037 #include "moab/IntxMesh/IntxUtils.hpp"
00038 
00039 #ifdef MOAB_HAVE_CGM
00040 #include "CGMConfig.h"
00041 #include <limits>
00042 #include "RefEntity.hpp"
00043 #include "RefFace.hpp"
00044 #include "RefEdge.hpp"
00045 #include "RefVertex.hpp"
00046 #include "Body.hpp"
00047 #include "TopologyEntity.hpp"
00048 #include "CastTo.hpp"
00049 #include "CABodies.hpp"
00050 #include "TDParallel.hpp"
00051 #include "TDUniqueId.hpp"
00052 #endif
00053 
00054 using namespace moab;
00055 
00056 #define RR \
00057     if( MB_SUCCESS != result ) return result
00058 
00059 static double* Points      = NULL;
00060 static int* GlobalIds      = NULL;
00061 static int NumPoints       = 0;
00062 static int* NumEdges       = NULL;
00063 static int* NborGlobalId   = NULL;
00064 static int* NborProcs      = NULL;
00065 static double* ObjWeights  = NULL;
00066 static double* EdgeWeights = NULL;
00067 static int* Parts          = NULL;
00068 
00069 const bool debug = false;
00070 
00071 ZoltanPartitioner::ZoltanPartitioner( Interface* impl,
00072 #ifdef MOAB_HAVE_MPI
00073                                       ParallelComm* parcomm,
00074 #endif
00075                                       const bool use_coords,
00076                                       int argc,
00077                                       char** argv
00078 #ifdef MOAB_HAVE_CGM
00079                                       ,
00080                                       GeometryQueryTool* gqt
00081 #endif
00082 
00083                                       )
00084     : PartitionerBase< int >( impl, use_coords
00085 #ifdef MOAB_HAVE_MPI
00086                                       ,
00087                                       parcomm
00088 #endif
00089     ), myZZ( NULL ), myNumPts( 0 ), argcArg( argc ), argvArg( argv )
00090 #ifdef MOAB_HAVE_CGM
00091       ,
00092       gti( gqt )
00093 #endif
00094 {
00095 }
00096 
00097 ZoltanPartitioner::~ZoltanPartitioner()
00098 {
00099     if( NULL != myZZ ) delete myZZ;
00100 }
00101 
00102 ErrorCode ZoltanPartitioner::balance_mesh( const char* zmethod,
00103                                            const char* other_method,
00104                                            const bool write_as_sets,
00105                                            const bool write_as_tags )
00106 {
00107     if( NULL != zmethod && strcmp( zmethod, "RR" ) && strcmp( zmethod, "RCB" ) && strcmp( zmethod, "RIB" ) &&
00108         strcmp( zmethod, "HSFC" ) && strcmp( zmethod, "Hypergraph" ) && strcmp( zmethod, "PHG" ) &&
00109         strcmp( zmethod, "PARMETIS" ) && strcmp( zmethod, "OCTPART" ) )
00110     {
00111         std::cout << "ERROR node " << mbpc->proc_config().proc_rank() << ": Method must be "
00112                   << "RR, RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART" << std::endl;
00113         return MB_FAILURE;
00114     }
00115 
00116     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00117     std::vector< int > ids;     // point ids from MOAB
00118     std::vector< int > adjs, length;
00119     Range elems;
00120 
00121     // Get a mesh from MOAB and divide it across processors.
00122 
00123     ErrorCode result;
00124 
00125     if( mbpc->proc_config().proc_rank() == 0 )
00126     {
00127         result = assemble_graph( 3, pts, ids, adjs, length, elems );RR;
00128     }
00129 
00130     myNumPts = mbInitializePoints( (int)ids.size(), &pts[0], &ids[0], &adjs[0], &length[0] );
00131 
00132     // Initialize Zoltan.  This is a C call.  The simple C++ code
00133     // that creates Zoltan objects does not keep track of whether
00134     // Zoltan_Initialize has been called.
00135 
00136     float version;
00137 
00138     Zoltan_Initialize( argcArg, argvArg, &version );
00139 
00140     // Create Zoltan object.  This calls Zoltan_Create.
00141     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00142 
00143     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00144         SetRCB_Parameters();
00145     else if( !strcmp( zmethod, "RIB" ) )
00146         SetRIB_Parameters();
00147     else if( !strcmp( zmethod, "HSFC" ) )
00148         SetHSFC_Parameters();
00149     else if( !strcmp( zmethod, "Hypergraph" ) || !strcmp( zmethod, "PHG" ) )
00150         if( NULL == other_method )
00151             SetHypergraph_Parameters( "auto" );
00152         else
00153             SetHypergraph_Parameters( other_method );
00154     else if( !strcmp( zmethod, "PARMETIS" ) )
00155     {
00156         if( NULL == other_method )
00157             SetPARMETIS_Parameters( "RepartGDiffusion" );
00158         else
00159             SetPARMETIS_Parameters( other_method );
00160     }
00161     else if( !strcmp( zmethod, "OCTPART" ) )
00162     {
00163         if( NULL == other_method )
00164             SetOCTPART_Parameters( "2" );
00165         else
00166             SetOCTPART_Parameters( other_method );
00167     }
00168 
00169     // Call backs:
00170 
00171     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00172     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00173     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00174     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00175     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00176     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00177 
00178     // Perform the load balancing partitioning
00179 
00180     int changes;
00181     int numGidEntries;
00182     int numLidEntries;
00183     int numImport;
00184     ZOLTAN_ID_PTR importGlobalIds;
00185     ZOLTAN_ID_PTR importLocalIds;
00186     int* importProcs;
00187     int* importToPart;
00188     int numExport;
00189     ZOLTAN_ID_PTR exportGlobalIds;
00190     ZOLTAN_ID_PTR exportLocalIds;
00191     int* exportProcs;
00192     int* exportToPart;
00193 
00194     int rc = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, numImport, importGlobalIds, importLocalIds,
00195                                  importProcs, importToPart, numExport, exportGlobalIds, exportLocalIds, exportProcs,
00196                                  exportToPart );
00197 
00198     rc = mbGlobalSuccess( rc );
00199 
00200     if( !rc )
00201     {
00202         mbPrintGlobalResult( "==============Result==============", myNumPts, numImport, numExport, changes );
00203     }
00204     else
00205     {
00206         return MB_FAILURE;
00207     }
00208 
00209     // take results & write onto MOAB partition sets
00210 
00211     int* assignment;
00212 
00213     mbFinalizePoints( (int)ids.size(), numExport, exportLocalIds, exportProcs, &assignment );
00214 
00215     if( mbpc->proc_config().proc_rank() == 0 )
00216     {
00217         result = write_partition( mbpc->proc_config().proc_size(), elems, assignment, write_as_sets, write_as_tags );
00218 
00219         if( MB_SUCCESS != result ) return result;
00220 
00221         free( (int*)assignment );
00222     }
00223 
00224     // Free the memory allocated for lists returned by LB_Partition()
00225 
00226     myZZ->LB_Free_Part( &importGlobalIds, &importLocalIds, &importProcs, &importToPart );
00227     myZZ->LB_Free_Part( &exportGlobalIds, &exportLocalIds, &exportProcs, &exportToPart );
00228 
00229     // Implementation note:  A Zoltan object contains an MPI communicator.
00230     //   When the Zoltan object is destroyed, it uses its MPI communicator.
00231     //   So it is important that the Zoltan object is destroyed before
00232     //   the MPI communicator is destroyed.  To ensure this, dynamically
00233     //   allocate the Zoltan object, so you can explicitly destroy it.
00234     //   If you create a Zoltan object on the stack, its destructor will
00235     //   be invoked at exit, possibly after the communicator's
00236     //   destructor.  (See the ordering sketch after this function.)
00237 
00238     return MB_SUCCESS;
00239 }
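// Ordering sketch for the implementation note above (a minimal illustration, assuming
// Zoltan is driven directly on MPI_COMM_WORLD rather than through ParallelComm):
//
//   MPI_Init( &argc, &argv );
//   float ver;
//   Zoltan_Initialize( argc, argv, &ver );
//   Zoltan* zz = new Zoltan( MPI_COMM_WORLD );   // heap-allocated, as recommended above
//   // ... set parameters and callbacks, call zz->LB_Partition( ... ) ...
//   delete zz;                                   // destroy the Zoltan object first
//   MPI_Finalize();                              // only then tear down MPI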
00240 
00241 ErrorCode ZoltanPartitioner::repartition( std::vector< double >& x,
00242                                           std::vector< double >& y,
00243                                           std::vector< double >& z,
00244                                           int StartID,
00245                                           const char* zmethod,
00246                                           Range& localGIDs )
00247 {
00248     //
00249     int nprocs = mbpc->proc_config().proc_size();
00250     int rank   = mbpc->proc_config().proc_rank();
00251     clock_t t  = clock();
00252     // form pts and ids, as in assemble_graph
00253     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00254     pts.resize( x.size() * 3 );
00255     std::vector< int > ids;  // point ids from MOAB
00256     ids.resize( x.size() );
00257     for( size_t i = 0; i < x.size(); i++ )
00258     {
00259         pts[3 * i]     = x[i];
00260         pts[3 * i + 1] = y[i];
00261         pts[3 * i + 2] = z[i];
00262         ids[i]         = StartID + (int)i;
00263     }
00264     // pts and ids must stay in scope until partitioning completes: the static pointers below refer into them
00265 
00266     Points       = &pts[0];
00267     GlobalIds    = &ids[0];
00268     NumPoints    = (int)x.size();
00269     NumEdges     = NULL;
00270     NborGlobalId = NULL;
00271     NborProcs    = NULL;
00272     ObjWeights   = NULL;
00273     EdgeWeights  = NULL;
00274     Parts        = NULL;
00275 
00276     float version;
00277     if( rank == 0 ) std::cout << "Initializing zoltan..." << std::endl;
00278 
00279     Zoltan_Initialize( argcArg, argvArg, &version );
00280 
00281     // Create Zoltan object.  This calls Zoltan_Create.
00282     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00283 
00284     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00285         SetRCB_Parameters();
00286     else if( !strcmp( zmethod, "RIB" ) )
00287         SetRIB_Parameters();
00288     else if( !strcmp( zmethod, "HSFC" ) )
00289         SetHSFC_Parameters();
00290 
00291     // set # requested partitions
00292     char buff[10];
00293     sprintf( buff, "%d", nprocs );
00294     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
00295     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00296 
00297     // request all, import and export
00298     retval = myZZ->Set_Param( "RETURN_LISTS", "ALL" );
00299     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00300 
00301     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00302     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00303     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00304     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00305     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00306     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00307 
00308     // Perform the load balancing partitioning
00309 
00310     int changes;
00311     int numGidEntries;
00312     int numLidEntries;
00313     int num_import;
00314     ZOLTAN_ID_PTR import_global_ids, import_local_ids;
00315     int* import_procs;
00316     int* import_to_part;
00317     int num_export;
00318     ZOLTAN_ID_PTR export_global_ids, export_local_ids;
00319     int *assign_procs, *assign_parts;
00320 
00321     if( rank == 0 )
00322         std::cout << "Computing partition using " << ( zmethod ? zmethod : "RCB" ) << " method for " << nprocs
00323                   << " processors..." << std::endl;
00324 
00325     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, num_import, import_global_ids, import_local_ids,
00326                                  import_procs, import_to_part, num_export, export_global_ids, export_local_ids,
00327                                  assign_procs, assign_parts );
00328     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00329 
00330     if( rank == 0 )
00331     {
00332         std::cout << " time to LB_partition " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00333         t = clock();
00334     }
00335 
00336     std::sort( import_global_ids, import_global_ids + num_import, std::greater< int >() );
00337     std::sort( export_global_ids, export_global_ids + num_export, std::greater< int >() );
00338 
00339     Range iniGids( (EntityHandle)StartID, (EntityHandle)StartID + x.size() - 1 );
00340     Range imported, exported;
00341     std::copy( import_global_ids, import_global_ids + num_import, range_inserter( imported ) );
00342     std::copy( export_global_ids, export_global_ids + num_export, range_inserter( exported ) );
00343     localGIDs = subtract( iniGids, exported );
00344     localGIDs = unite( localGIDs, imported );
00345     // Free data structures allocated by Zoltan::LB_Partition
00346     retval = myZZ->LB_Free_Part( &import_global_ids, &import_local_ids, &import_procs, &import_to_part );
00347     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00348     retval = myZZ->LB_Free_Part( &export_global_ids, &export_local_ids, &assign_procs, &assign_parts );
00349     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00350 
00351     return MB_SUCCESS;
00352 }
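// Call sketch for repartition() (hypothetical names; assumes each rank passes the
// coordinates of the cells it currently owns, with startID chosen so that global IDs
// are globally unique and contiguous across ranks):
//
//   std::vector< double > x, y, z;      // one entry per locally owned cell
//   moab::Range owned_after;            // receives the GIDs owned after balancing
//   moab::ErrorCode rval = tool.repartition( x, y, z, startID, "RCB", owned_after );
//   // owned_after = (initial GIDs - exported GIDs) + imported GIDs, as computed above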
00353 
00354 ErrorCode ZoltanPartitioner::partition_inferred_mesh( EntityHandle sfileset,
00355                                                       size_t num_parts,
00356                                                       int part_dim,
00357                                                       const bool write_as_sets,
00358                                                       int projection_type )
00359 {
00360     ErrorCode result;
00361 
00362     moab::Range elverts;
00363     result = mbImpl->get_entities_by_dimension( sfileset, part_dim, elverts );RR;
00364 
00365     std::vector< double > elcoords( elverts.size() * 3 );
00366     result = mbImpl->get_coords( elverts, &elcoords[0] );RR;
00367 
00368     std::vector< std::vector< EntityHandle > > part_assignments( num_parts );
00369     int part, proc;
00370 
00371     // Loop over coordinates
00372     for( size_t iel = 0; iel < elverts.size(); iel++ )
00373     {
00374         // Gather coordinates into temporary array
00375         double* ecoords = &elcoords[iel * 3];
00376 
00377         // do a projection if needed
00378         if( projection_type > 0 ) IntxUtils::transform_coordinates( ecoords, projection_type );
00379 
00380         // Compute the coordinate's part assignment
00381         myZZ->LB_Point_PP_Assign( ecoords, proc, part );
00382 
00383         // Store the part assignment in the return array
00384         part_assignments[part].push_back( elverts[iel] );
00385     }
00386 
00387     // get the partition set tag
00388     Tag part_set_tag = mbpc->partition_tag();
00389 
00390     // If some entity sets exist, let us delete those first.
00391     if( write_as_sets )
00392     {
00393         moab::Range oldpsets;
00394         result = mbImpl->get_entities_by_type_and_tag( sfileset, MBENTITYSET, &part_set_tag, NULL, 1, oldpsets,
00395                                                        Interface::UNION );RR;
00396         result = mbImpl->remove_entities( sfileset, oldpsets );RR;
00397     }
00398 
00399     size_t nparts_assigned = 0;
00400     for( size_t partnum = 0; partnum < num_parts; ++partnum )
00401     {
00402         std::vector< EntityHandle >& partvec = part_assignments[partnum];
00403 
00404         nparts_assigned += ( partvec.size() ? 1 : 0 );
00405 
00406         if( write_as_sets )
00407         {
00408             EntityHandle partNset;
00409             result = mbImpl->create_meshset( moab::MESHSET_SET, partNset );RR;
00410             if( partvec.size() )
00411             {
00412                 result = mbImpl->add_entities( partNset, &partvec[0], partvec.size() );RR;
00413             }
00414             result = mbImpl->add_parent_child( sfileset, partNset );RR;
00415 
00416             // part numbering should start from 0
00417             int ipartnum = (int)partnum;
00418 
00419             // assign partitions as a sparse tag by grouping elements under sets
00420             result = mbImpl->tag_set_data( part_set_tag, &partNset, 1, &ipartnum );RR;
00421         }
00422         else
00423         {
00424             /* assign as a dense vector to all elements
00425                allocate integer-size partitions */
00426             std::vector< int > assignment( partvec.size(),
00427                                            partnum );  // initialize all values to partnum
00428             result = mbImpl->tag_set_data( part_set_tag, &partvec[0], partvec.size(), &assignment[0] );RR;
00429         }
00430     }
00431 
00432     if( nparts_assigned != num_parts )
00433     {
00434         std::cout << "WARNING: The inference yielded fewer parts (" << nparts_assigned
00435                   << ") than requested by the user (" << num_parts << ").\n";
00436     }
00437 
00438     return MB_SUCCESS;
00439 }
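// Two-step usage sketch (hypothetical names and argument values): the inference relies on
// LB_Point_PP_Assign, so a geometric decomposition (e.g. RCB) must already be stored in
// myZZ from a prior partitioning call on the source mesh.
//
//   tool.partition_mesh_and_geometry( -1.0, nparts, "RCB", NULL, 1.03, 3,
//                                     true, false, 0, 0, false, false, 0, false, false );
//   // now assign the entities (e.g. vertices, part_dim = 0) of a second file set
//   // to the same spatial parts
//   tool.partition_inferred_mesh( inferred_fileset, nparts, 0, true, 0 );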
00440 
00441 ErrorCode ZoltanPartitioner::partition_mesh_and_geometry( const double part_geom_mesh_size,
00442                                                           const int nparts,
00443                                                           const char* zmethod,
00444                                                           const char* other_method,
00445                                                           double imbal_tol,
00446                                                           const int part_dim,
00447                                                           const bool write_as_sets,
00448                                                           const bool write_as_tags,
00449                                                           const int obj_weight,
00450                                                           const int edge_weight,
00451 #ifdef MOAB_HAVE_CGM
00452                                                           const bool part_surf,
00453                                                           const bool ghost,
00454 #else
00455                                                           const bool,
00456                                                           const bool,
00457 #endif
00458                                                           const int projection_type,
00459                                                           const bool recompute_rcb_box,
00460                                                           const bool print_time )
00461 {
00462     // should only be called in serial
00463     if( mbpc->proc_config().proc_size() != 1 )
00464     {
00465         std::cout << "ZoltanPartitioner::partition_mesh_and_geometry must be called in serial." << std::endl;
00466         return MB_FAILURE;
00467     }
00468     clock_t t = clock();
00469     if( NULL != zmethod && strcmp( zmethod, "RR" ) && strcmp( zmethod, "RCB" ) && strcmp( zmethod, "RIB" ) &&
00470         strcmp( zmethod, "HSFC" ) && strcmp( zmethod, "Hypergraph" ) && strcmp( zmethod, "PHG" ) &&
00471         strcmp( zmethod, "PARMETIS" ) && strcmp( zmethod, "OCTPART" ) )
00472     {
00473         std::cout << "ERROR node " << mbpc->proc_config().proc_rank() << ": Method must be "
00474                   << "RR, RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART" << std::endl;
00475         return MB_FAILURE;
00476     }
00477 
00478     bool part_geom = false;
00479     if( 0 == strcmp( zmethod, "RR" ) || 0 == strcmp( zmethod, "RCB" ) || 0 == strcmp( zmethod, "RIB" ) ||
00480         0 == strcmp( zmethod, "HSFC" ) )
00481         part_geom = true;       // so no adjacency / edges needed
00482     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00483     std::vector< int > ids;     // point ids from MOAB
00484     std::vector< int > adjs, length, parts;
00485     std::vector< double > obj_weights, edge_weights;
00486     Range elems;
00487 #ifdef MOAB_HAVE_CGM
00488     DLIList< RefEntity* > entities;
00489 #endif
00490 
00491     // Get a mesh from MOAB and divide it across processors.
00492 
00493     ErrorCode result = MB_SUCCESS;
00494 
00495     // short-circuit everything if RR partition is requested
00496     if( !strcmp( zmethod, "RR" ) )
00497     {
00498         if( part_geom_mesh_size < 0. )
00499         {
00500             // get all elements
00501             result = mbImpl->get_entities_by_dimension( 0, part_dim, elems );RR;
00502             if( elems.empty() ) return MB_FAILURE;
00503             // make a trivial assignment vector
00504             std::vector< int > assign_vec( elems.size() );
00505             int num_per = elems.size() / nparts;
00506             int extra   = elems.size() % nparts;
00507             if( extra ) num_per++;
00508             int nstart = 0;
00509             for( int i = 0; i < nparts; i++ )
00510             {
00511                 if( i == extra ) num_per--;
00512                 std::fill( &assign_vec[nstart], &assign_vec[nstart + num_per], i );
00513                 nstart += num_per;
00514             }
00515 
00516             result = write_partition( nparts, elems, &assign_vec[0], write_as_sets, write_as_tags );RR;
00517         }
00518         else
00519         {
00520 #ifdef MOAB_HAVE_CGM
00521             result = partition_round_robin( nparts );RR;
00522 #endif
00523         }
00524 
00525         return result;
00526     }
00527 
00528     std::cout << "Assembling graph..." << std::endl;
00529 
00530     if( part_geom_mesh_size < 0. )
00531     {
00532         // if (!part_geom) {
00533         result = assemble_graph( part_dim, pts, ids, adjs, length, elems, part_geom, projection_type );RR;
00534     }
00535     else
00536     {
00537 #ifdef MOAB_HAVE_CGM
00538         result = assemble_graph( part_dim, pts, ids, adjs, length, obj_weights, edge_weights, parts, entities,
00539                                  part_geom_mesh_size, nparts );RR;
00540 
00541         if( debug )
00542         {
00543             int n_ids = ids.size();
00544             entities.reset();
00545             int i_leng = 0;
00546             for( int i = 0; i < n_ids; i++ )
00547             {
00548                 std::cout << "graph_input_ids[" << i << "]=" << ids[i] << ",obj_weights=" << obj_weights[i]
00549                           << ",entity_id=" << entities.get_and_step()->id() << ",part=" << parts[i] << std::endl;
00550                 for( int j = 0; j < length[i]; j++ )
00551                 {
00552                     std::cout << "adjs[" << j << "]=" << adjs[i_leng] << ",edge_weights=" << edge_weights[i_leng]
00553                               << std::endl;
00554                     i_leng++;
00555                 }
00556             }
00557         }
00558 #endif
00559     }
00560     if( print_time )
00561     {
00562         std::cout << " time to assemble graph: " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00563         t = clock();
00564     }
00565     double* o_wgt = NULL;
00566     double* e_wgt = NULL;
00567     if( obj_weights.size() > 0 ) o_wgt = &obj_weights[0];
00568     if( edge_weights.size() > 0 ) e_wgt = &edge_weights[0];
00569 
00570     myNumPts = mbInitializePoints( (int)ids.size(), &pts[0], &ids[0], &adjs[0], &length[0], o_wgt, e_wgt, &parts[0],
00571                                    part_geom );
00572 
00573     // Initialize Zoltan.  This is a C call.  The simple C++ code
00574     // that creates Zoltan objects does not keep track of whether
00575     // Zoltan_Initialize has been called.
00576 
00577     if( print_time )
00578     {
00579         std::cout << " time to initialize points: " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00580         t = clock();
00581     }
00582     float version;
00583 
00584     std::cout << "Initializing zoltan..." << std::endl;
00585 
00586     Zoltan_Initialize( argcArg, argvArg, &version );
00587 
00588     // Create Zoltan object.  This calls Zoltan_Create.
00589     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00590 
00591     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00592         SetRCB_Parameters( recompute_rcb_box );
00593     else if( !strcmp( zmethod, "RIB" ) )
00594         SetRIB_Parameters();
00595     else if( !strcmp( zmethod, "HSFC" ) )
00596         SetHSFC_Parameters();
00597     else if( !strcmp( zmethod, "Hypergraph" ) || !strcmp( zmethod, "PHG" ) )
00598     {
00599         if( NULL == other_method || ( other_method[0] == '\0' ) )
00600             SetHypergraph_Parameters( "auto" );
00601         else
00602             SetHypergraph_Parameters( other_method );
00603 
00604         if( imbal_tol )
00605         {
00606             std::ostringstream str;
00607             str << imbal_tol;
00608             myZZ->Set_Param( "IMBALANCE_TOL", str.str().c_str() );  // no debug messages
00609         }
00610     }
00611     else if( !strcmp( zmethod, "PARMETIS" ) )
00612     {
00613         if( NULL == other_method )
00614             SetPARMETIS_Parameters( "RepartGDiffusion" );
00615         else
00616             SetPARMETIS_Parameters( other_method );
00617     }
00618     else if( !strcmp( zmethod, "OCTPART" ) )
00619     {
00620         if( NULL == other_method )
00621             SetOCTPART_Parameters( "2" );
00622         else
00623             SetOCTPART_Parameters( other_method );
00624     }
00625 
00626     // set # requested partitions
00627     char buff[10];
00628     sprintf( buff, "%d", nparts );
00629     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
00630     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00631 
00632     // request only partition assignments
00633     retval = myZZ->Set_Param( "RETURN_LISTS", "PARTITION ASSIGNMENTS" );
00634     if( ZOLTAN_OK != retval ) return MB_FAILURE;
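    // With RETURN_LISTS set to "PARTITION ASSIGNMENTS", LB_Partition reports results for
    // every local object (not only migrating ones), so assign_parts below receives one
    // target part per element handed to Zoltan, which write_partition then maps back onto
    // the elements in the same order.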
00635 
00636     if( obj_weight > 0 )
00637     {
00638         std::ostringstream str;
00639         str << obj_weight;
00640         retval = myZZ->Set_Param( "OBJ_WEIGHT_DIM", str.str().c_str() );
00641         if( ZOLTAN_OK != retval ) return MB_FAILURE;
00642     }
00643 
00644     if( edge_weight > 0 )
00645     {
00646         std::ostringstream str;
00647         str << edge_weight;
00648         retval = myZZ->Set_Param( "EDGE_WEIGHT_DIM", str.str().c_str() );
00649         if( ZOLTAN_OK != retval ) return MB_FAILURE;
00650     }
00651 
00652     // Call backs:
00653 
00654     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00655     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00656     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00657     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00658     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00659     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00660     if( part_geom_mesh_size > 0. )
00661     {
00662         myZZ->Set_Part_Multi_Fn( mbGetPart, NULL );
00663     }
00664 
00665     // Perform the load balancing partitioning
00666 
00667     int changes;
00668     int numGidEntries;
00669     int numLidEntries;
00670     int dumnum1;
00671     ZOLTAN_ID_PTR dum_local, dum_global;
00672     int *dum1, *dum2;
00673     int num_assign;
00674     ZOLTAN_ID_PTR assign_gid, assign_lid;
00675     int *assign_procs, *assign_parts;
00676 
00677     std::cout << "Computing partition using " << ( zmethod ? zmethod : "RCB" ) << " method for " << nparts
00678               << " processors..." << std::endl;
00679 #ifndef NDEBUG
00680 #if 0
00681   if (NULL == zmethod || !strcmp(zmethod, "RCB"))
00682     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)zmethod, 1, 1, 0, 0);
00683 
00684   if ( !strcmp(zmethod, "PHG"))
00685       Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)zmethod, 1, 0, 1, 1);
00686 #endif
00687 #endif
00688     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, dumnum1, dum_global, dum_local, dum1, dum2,
00689                                  num_assign, assign_gid, assign_lid, assign_procs, assign_parts );
00690     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00691 
00692     if( print_time )
00693     {
00694         std::cout << " time to LB_partition " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00695         t = clock();
00696     }
00697     // take results & write onto MOAB partition sets
00698     std::cout << "Saving partition information to MOAB..." << std::endl;
00699 
00700     if( part_geom_mesh_size < 0. )
00701     {
00702         // if (!part_geom) {
00703         result = write_partition( nparts, elems, assign_parts, write_as_sets, write_as_tags );
00704     }
00705     else
00706     {
00707 #ifdef MOAB_HAVE_CGM
00708         result = write_partition( nparts, entities, assign_parts, obj_weights, part_surf, ghost );
00709 #endif
00710     }
00711 
00712     if( print_time )
00713     {
00714         std::cout << " time to write partition in memory " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00715         t = clock();
00716     }
00717 
00718     if( MB_SUCCESS != result ) return result;
00719 
00720     // Free the memory allocated for lists returned by LB_Partition()
00721     myZZ->LB_Free_Part( &assign_gid, &assign_lid, &assign_procs, &assign_parts );
00722 
00723     return MB_SUCCESS;
00724 }
00725 
00726 ErrorCode ZoltanPartitioner::include_closure()
00727 {
00728     ErrorCode result;
00729     Range ents;
00730     Range adjs;
00731     std::cout << "Adding closure..." << std::endl;
00732 
00733     for( Range::iterator rit = partSets.begin(); rit != partSets.end(); ++rit )
00734     {
00735 
00736         // get the top-dimensional entities in the part
00737         result = mbImpl->get_entities_by_handle( *rit, ents, true );RR;
00738 
00739         if( ents.empty() ) continue;
00740 
00741         // get intermediate-dimensional adjs and add to set
00742         for( int d = mbImpl->dimension_from_handle( *ents.begin() ) - 1; d >= 1; d-- )
00743         {
00744             adjs.clear();
00745             result = mbImpl->get_adjacencies( ents, d, false, adjs, Interface::UNION );RR;
00746             result = mbImpl->add_entities( *rit, adjs );RR;
00747         }
00748 
00749         // now get vertices and add to set; only need to do for ents, not for adjs
00750         adjs.clear();
00751         result = mbImpl->get_adjacencies( ents, 0, false, adjs, Interface::UNION );RR;
00752         result = mbImpl->add_entities( *rit, adjs );RR;
00753 
00754         ents.clear();
00755     }
00756 
00757     // now go over non-part entity sets, looking for contained entities
00758     Range sets, part_ents;
00759     result = mbImpl->get_entities_by_type( 0, MBENTITYSET, sets );RR;
00760     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
00761     {
00762         // skip parts
00763         if( partSets.find( *rit ) != partSets.end() ) continue;
00764 
00765         // get entities in this set, recursively
00766         ents.clear();
00767         result = mbImpl->get_entities_by_handle( *rit, ents, true );RR;
00768 
00769         // now check over all parts
00770         for( Range::iterator rit2 = partSets.begin(); rit2 != partSets.end(); ++rit2 )
00771         {
00772             part_ents.clear();
00773             result = mbImpl->get_entities_by_handle( *rit2, part_ents, false );RR;
00774             Range int_range = intersect( ents, part_ents );
00775             if( !int_range.empty() )
00776             {
00777                 // non-empty intersection, add to part set
00778                 result = mbImpl->add_entities( *rit2, &( *rit ), 1 );RR;
00779             }
00780         }
00781     }
00782 
00783     // finally, mark all the part sets as having the closure
00784     Tag closure_tag;
00785     result =
00786         mbImpl->tag_get_handle( "INCLUDES_CLOSURE", 1, MB_TYPE_INTEGER, closure_tag, MB_TAG_SPARSE | MB_TAG_CREAT );RR;
00787 
00788     std::vector< int > closure_vals( partSets.size(), 1 );
00789     result = mbImpl->tag_set_data( closure_tag, partSets, &closure_vals[0] );RR;
00790 
00791     return MB_SUCCESS;
00792 }
00793 
00794 ErrorCode ZoltanPartitioner::assemble_graph( const int dimension,
00795                                              std::vector< double >& coords,
00796                                              std::vector< int >& moab_ids,
00797                                              std::vector< int >& adjacencies,
00798                                              std::vector< int >& length,
00799                                              Range& elems,
00800                                              bool part_geom,
00801                                              int projection_type )
00802 {
00803     // assemble a graph with vertices equal to elements of specified dimension, edges
00804     // signified by list of other elements to which an element is connected
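    // For example (a hypothetical mesh): two quads with global IDs 1 and 2 sharing one edge
    // would produce moab_ids = {1, 2}, length = {1, 1} and adjacencies = {2, 1}, i.e. the
    // length[i] entries of 'adjacencies' belong to graph vertex i (a CSR-style layout),
    // while 'coords' receives the average vertex position of each element.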
00805 
00806     // get the elements of that dimension
00807     ErrorCode result = mbImpl->get_entities_by_dimension( 0, dimension, elems );
00808     if( MB_SUCCESS != result || elems.empty() ) return result;
00809 
00810     // assign global ids
00811     if( assign_global_ids )
00812     {
00813         EntityHandle rootset = 0;
00814         result               = mbpc->assign_global_ids( rootset, dimension, 1, true, true );RR;
00815     }
00816 
00817     // now assemble the graph, calling MeshTopoUtil to get bridge adjacencies through d-1
00818     // dimensional neighbors
00819     MeshTopoUtil mtu( mbImpl );
00820     Range adjs;
00821     // can use a fixed-size array because the number of lower-dimensional neighbors is limited
00822     // by MBCN
00823     int neighbors[5 * MAX_SUB_ENTITIES];
00824     double avg_position[3];
00825     int moab_id;
00826 
00827     // get the global id tag handle
00828     Tag gid = mbImpl->globalId_tag();
00829 
00830     for( Range::iterator rit = elems.begin(); rit != elems.end(); ++rit )
00831     {
00832 
00833         if( !part_geom )
00834         {
00835             // get bridge adjacencies
00836             adjs.clear();
00837             result = mtu.get_bridge_adjacencies( *rit, ( dimension > 0 ? dimension - 1 : 3 ), dimension, adjs );RR;
00838 
00839             // get the graph vertex ids of those
00840             if( !adjs.empty() )
00841             {
00842                 assert( adjs.size() < 5 * MAX_SUB_ENTITIES );
00843                 result = mbImpl->tag_get_data( gid, adjs, neighbors );RR;
00844             }
00845 
00846             // copy those into adjacencies vector
00847             length.push_back( (int)adjs.size() );
00848             std::copy( neighbors, neighbors + adjs.size(), std::back_inserter( adjacencies ) );
00849         }
00850 
00851         // get average position of vertices
00852         result = mtu.get_average_position( *rit, avg_position );RR;
00853 
00854         // get the graph vertex id for this element
00855         result = mbImpl->tag_get_data( gid, &( *rit ), 1, &moab_id );RR;
00856 
00857         // copy those into coords vector
00858         moab_ids.push_back( moab_id );
00859         // transform coordinates to spherical coordinates, if requested
00860         if( projection_type > 0 ) IntxUtils::transform_coordinates( avg_position, projection_type );
00861 
00862         std::copy( avg_position, avg_position + 3, std::back_inserter( coords ) );
00863     }
00864 
00865     if( debug )
00866     {
00867         std::cout << "Length vector: " << std::endl;
00868         std::copy( length.begin(), length.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00869         std::cout << std::endl;
00870         std::cout << "Adjacencies vector: " << std::endl;
00871         std::copy( adjacencies.begin(), adjacencies.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00872         std::cout << std::endl;
00873         std::cout << "Moab_ids vector: " << std::endl;
00874         std::copy( moab_ids.begin(), moab_ids.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00875         std::cout << std::endl;
00876         std::cout << "Coords vector: " << std::endl;
00877         std::copy( coords.begin(), coords.end(), std::ostream_iterator< double >( std::cout, ", " ) );
00878         std::cout << std::endl;
00879     }
00880 
00881     return MB_SUCCESS;
00882 }
00883 
00884 #ifdef MOAB_HAVE_CGM
00885 ErrorCode ZoltanPartitioner::assemble_graph( const int /* dimension */,
00886                                              std::vector< double >& /* coords */,
00887                                              std::vector< int >& moab_ids,
00888                                              std::vector< int >& adjacencies,
00889                                              std::vector< int >& length,
00890                                              std::vector< double >& obj_weights,
00891                                              std::vector< double >& edge_weights,
00892                                              std::vector< int >& parts,
00893                                              DLIList< RefEntity* >& entities,
00894                                              const double part_geom_mesh_size,
00895                                              const int n_part )
00896 {
00897     // get body vertex weights
00898     DLIList< RefEntity* > body_list;
00899     gti->ref_entity_list( "body", body_list, CUBIT_FALSE );
00900     DLIList< RefFace* > all_shared_surfs;
00901     int n_bodies       = body_list.size();
00902     int body_map_index = 1;
00903     int surf_map_index = n_bodies + 1;
00904     int n_bodies_proc  = n_bodies / n_part;  // # of entities per processor
00905     int i_bodies_proc  = n_bodies_proc;      // entity index limit for each processor
00906     int proc           = 0;
00907 
00908     body_list.reset();
00909     for( int i = 0; i < n_bodies; i++ )
00910     {
00911         // assign part in round-robin
00912         if( i == i_bodies_proc )
00913         {
00914             proc++;
00915             if( proc < n_part )
00916                 i_bodies_proc += n_bodies_proc;
00917             else
00918             {
00919                 proc %= n_part;
00920                 i_bodies_proc++;
00921             }
00922         }
00923         parts.push_back( proc );
00924 
00925         // volume mesh generation load estimation
00926         RefEntity* body = body_list.get_and_step();
00927         double vertex_w = body->measure();
00928         vertex_w /= part_geom_mesh_size * part_geom_mesh_size * part_geom_mesh_size;
00929         vertex_w = 3.8559e-5 * vertex_w * log( vertex_w );
00930 
00931         // add child non-interface face weights
00932         DLIList< RefFace* > shared_surfs;
00933         DLIList< RefFace* > faces;
00934         ( dynamic_cast< TopologyEntity* >( body ) )->ref_faces( faces );
00935         int n_face = faces.size();
00936         faces.reset();
00937         for( int j = 0; j < n_face; j++ )
00938         {
00939             RefFace* face      = faces.get_and_step();
00940             TopologyEntity* te = CAST_TO( face, TopologyEntity );
00941             if( te->bridge_manager()->number_of_bridges() > 1 )
00942             {  // shared
00943                 shared_surfs.append( face );
00944             }
00945             else
00946             {  // non-shared
00947                 vertex_w += estimate_face_mesh_load( face, face->measure() );
00948             }
00949         }
00950 
00951         int temp_index              = body_map_index++;
00952         body_vertex_map[body->id()] = temp_index;
00953         moab_ids.push_back( temp_index );   // add body as graph vertex
00954         obj_weights.push_back( vertex_w );  // add weight for body graph vertex
00955 
00956         if( debug )
00957         {
00958             std::cout << "body=" << body->id() << ",graph_id=" << temp_index << ",weight=" << vertex_w
00959                       << ",part=" << proc << std::endl;
00960         }
00961 
00962         // treat shared surfaces connected to this body
00963         int n_shared = shared_surfs.size();
00964         shared_surfs.reset();
00965         for( int k = 0; k < n_shared; k++ )
00966         {  // add adjacencies
00967             RefFace* face                       = shared_surfs.get_and_step();
00968             std::map< int, int >::iterator iter = surf_vertex_map.find( face->id() );
00969             if( iter != surf_vertex_map.end() )
00970             {
00971                 temp_index = ( *iter ).second;
00972             }
00973             else
00974             {
00975                 temp_index                  = surf_map_index++;
00976                 surf_vertex_map[face->id()] = temp_index;
00977                 all_shared_surfs.append( face );
00978             }
00979             adjacencies.push_back( temp_index );
00980             double tmp_sw = estimate_face_comm_load( face, part_geom_mesh_size );
00981             edge_weights.push_back( tmp_sw );
00982 
00983             if( debug )
00984             {
00985                 std::cout << "adjac=" << temp_index << ",weight=" << tmp_sw << std::endl;
00986             }
00987         }
00988         length.push_back( n_shared );
00989     }
00990     entities += body_list;
00991 
00992     // add shared surface as graph vertex
00993     int n_all_shared_surf = all_shared_surfs.size();
00994     all_shared_surfs.reset();
00995     for( int i = 0; i < n_all_shared_surf; i++ )
00996     {
00997         RefEntity* face = all_shared_surfs.get_and_step();
00998         moab_ids.push_back( surf_vertex_map[face->id()] );
00999         double face_mesh_load = estimate_face_mesh_load( face, part_geom_mesh_size );
01000         obj_weights.push_back( face_mesh_load );
01001 
01002         // set surface object graph edges
01003         int min_ind     = -1;
01004         double min_load = std::numeric_limits< double >::max();
01005 
01006         DLIList< Body* > parents;
01007         dynamic_cast< TopologyEntity* >( face )->bodies( parents );
01008         int n_parents = parents.size();
01009         parents.reset();
01010         for( int j = 0; j < n_parents; j++ )
01011         {
01012             int body_gid = body_vertex_map[parents.get_and_step()->id()];
01013             adjacencies.push_back( body_gid );  // add adjacency
01014             edge_weights.push_back( estimate_face_comm_load( face, part_geom_mesh_size ) );
01015 
01016             if( obj_weights[body_gid - 1] < min_load )
01017             {
01018                 min_ind  = body_gid - 1;
01019                 min_load = obj_weights[min_ind];
01020             }
01021         }
01022         length.push_back( n_parents );
01023         entities.append( dynamic_cast< RefEntity* >( face ) );
01024         obj_weights[min_ind] += face_mesh_load;
01025         parts.push_back( parts[min_ind] );
01026 
01027         if( debug )
01028         {
01029             std::cout << "shared_surf=" << face->id() << ",graph_id=" << surf_vertex_map[face->id()]
01030                       << ",weight=" << face_mesh_load << ",part=" << parts[min_ind] << std::endl;
01031         }
01032     }
01033 
01034     for( size_t i = 0; i < obj_weights.size(); i++ )
01035         if( obj_weights[i] < 1. ) obj_weights[i] = 1.;
01036     for( size_t i = 0; i < edge_weights.size(); i++ )
01037         if( edge_weights[i] < 1. ) edge_weights[i] = 1.;
01038 
01039     return MB_SUCCESS;
01040 }
01041 
01042 double ZoltanPartitioner::estimate_face_mesh_load( RefEntity* face, const double h )
01043 {
01044     GeometryType type = CAST_TO( face, RefFace )->geometry_type();
01045     double n          = face->measure() / h / h;
01046     double n_logn     = n * log( n );
01047 
01048     if( type == PLANE_SURFACE_TYPE )
01049     {
01050         return 1.536168737505151e-4 * n_logn;
01051     }
01052     else if( type == SPLINE_SURFACE_TYPE )
01053     {
01054         return 5.910511018383144e-4 * n_logn;
01055     }
01056     else if( type == CONE_SURFACE_TYPE || type == SPHERE_SURFACE_TYPE || type == TORUS_SURFACE_TYPE )
01057     {
01058         return 2.352511671418708e-4 * n_logn;
01059     }
01060 
01061     return 0.0;
01062 }
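// Worked example for the load estimate above (illustrative numbers): for a planar face
// with area 100 and target mesh size h = 1, n = 100 / (1 * 1) = 100 and, taking the
// natural logarithm as in the C library log() call, n * log(n) ~= 460.5, giving an
// estimated load of 1.536e-4 * 460.5 ~= 0.071.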
01063 
01064 double ZoltanPartitioner::estimate_face_comm_load( RefEntity* face, const double h )
01065 {
01066     double peri = 0.;
01067 #if( ( CGM_MAJOR_VERSION == 14 && CGM_MINOR_VERSION > 2 ) || CGM_MAJOR_VERSION >= 15 )
01068     DLIList< DLIList< RefEdge* > > ref_edge_loops;
01069 #else
01070     DLIList< DLIList< RefEdge* >* > ref_edge_loops;
01071 #endif
01072     CAST_TO( face, RefFace )->ref_edge_loops( ref_edge_loops );
01073     ref_edge_loops.reset();
01074 
01075 #if( ( CGM_MAJOR_VERSION == 14 && CGM_MINOR_VERSION > 2 ) || CGM_MAJOR_VERSION >= 15 )
01076     for( int i = 0; i < ref_edge_loops.size(); i++ )
01077     {
01078         DLIList< RefEdge* > eloop = ref_edge_loops.get_and_step();
01079         eloop.reset();
01080         for( int j = 0; j < eloop.size(); j++ )
01081         {
01082             peri += eloop.get_and_step()->measure();
01083         }
01084     }
01085 #else
01086     for( int i = 0; i < ref_edge_loops.size(); i++ )
01087     {
01088         DLIList< RefEdge* >* eloop = ref_edge_loops.get_and_step();
01089         eloop->reset();
01090         for( int j = 0; j < eloop->size(); j++ )
01091         {
01092             peri += eloop->get_and_step()->measure();
01093         }
01094     }
01095 #endif
01096 
01097     // return 104*face->measure()/sqrt(3)/h/h + 56/3*peri/h;
01098     return ( 104 * face->measure() / sqrt( 3 ) / h / h + 56 / 3 * peri / h ) / 700000.;
01099 }
01100 
01101 ErrorCode ZoltanPartitioner::write_partition( const int nparts,
01102                                               DLIList< RefEntity* > entities,
01103                                               const int* assignment,
01104                                               std::vector< double >& obj_weights,
01105                                               const bool part_surf,
01106                                               const bool ghost )
01107 {
01108     ErrorCode result;
01109 
01110     // actuate CA_BODIES and turn on auto flag for other attributes
01111     CGMApp::instance()->attrib_manager()->register_attrib_type( CA_BODIES, "bodies", "BODIES", CABodies_creator,
01112                                                                 CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE,
01113                                                                 CUBIT_TRUE, CUBIT_FALSE );
01114     CGMApp::instance()->attrib_manager()->auto_flag( CUBIT_TRUE );
01115 
01116     // set partition info on Body entities first
01117     int n_entities = entities.size();
01118     entities.reset();
01119     for( int i = 0; i < n_entities; i++ )
01120     {
01121         int proc = assignment[i];
01122         DLIList< int > shared_procs;
01123         RefEntity* entity = entities.get_and_step();
01124 
01125         if( entity->entity_type_info() == typeid( Body ) )
01126         {
01127             shared_procs.append( proc );
01128             TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01129             if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs );
01130 
01131             if( debug )
01132             {
01133                 std::cout << "body" << entity->id() << "_is_partitioned_to_p" << proc << std::endl;
01134             }
01135 
01136             // assign to volumes; this should be removed in the future
01137             DLIList< RefVolume* > volumes;
01138             ( dynamic_cast< TopologyEntity* >( entity ) )->ref_volumes( volumes );
01139             int n_vol = volumes.size();
01140             volumes.reset();
01141             for( int j = 0; j < n_vol; j++ )
01142             {
01143                 RefEntity* vol = volumes.get_and_step();
01144                 td_par         = (TDParallel*)vol->get_TD( &TDParallel::is_parallel );
01145                 if( td_par == NULL ) td_par = new TDParallel( vol, NULL, &shared_procs );
01146             }
01147         }
01148     }
01149 
01150     // set partition info to shared surfaces
01151     entities.reset();
01152     for( int i = 0; i < n_entities; i++ )
01153     {
01154         int proc = assignment[i];
01155         DLIList< int > shared_procs;
01156         RefEntity* entity = entities.get_and_step();
01157 
01158         if( entity->entity_type_info() == typeid( RefFace ) )
01159         {  // surface
01160             DLIList< Body* > parents;
01161             ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parents );
01162             int n_parents = parents.size();
01163             if( n_parents != 2 )
01164             {  // check # of parents
01165                 std::cerr << "# of parent bodies of interface surface should be 2." << std::endl;
01166                 return MB_FAILURE;
01167             }
01168             shared_procs.append( proc );  // local proc
01169             parents.reset();
01170             for( int j = 0; j < 2; j++ )
01171             {
01172                 RefEntity* parent     = parents.get_and_step();
01173                 TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01174 
01175                 if( parent_td == NULL )
01176                 {
01177                     std::cerr << "parent body should already have a process assigned." << std::endl;
01178                     return MB_FAILURE;
01179                 }
01180                 int temp_proc = parent_td->get_charge_proc();
01181                 if( temp_proc != proc ) shared_procs.append( temp_proc );  // remote proc
01182             }
01183 
01184             if( shared_procs.size() > 1 )
01185             {  // if it is shared by 2 processors
01186                 int merge_id       = TDUniqueId::get_unique_id( entity );
01187                 TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01188                 if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs, NULL, merge_id, 1 );
01189 
01190                 if( debug )
01191                 {
01192                     std::cout << "surf" << entity->id() << "_is_partitioned_to_p";
01193                     for( int j = 0; j < shared_procs.size(); j++ )
01194                     {
01195                         std::cout << "," << shared_procs[j];
01196                     }
01197                     std::cout << std::endl;
01198                 }
01199             }
01200         }
01201     }
01202 
01203     // do non-shared surface partition too
01204     if( part_surf )
01205     {
01206         result = partition_surface( nparts, entities, assignment, obj_weights );RR;
01207     }
01208 
01209     // partition shared edges and vertices
01210     result = partition_child_entities( 1, nparts, part_surf, ghost );RR;
01211     result = partition_child_entities( 0, nparts, part_surf );RR;
01212 
01213     if( debug )
01214     {
01215         entities.reset();
01216         for( int i = 0; i < n_entities; i++ )
01217         {
01218             RefEntity* entity = entities.get_and_step();
01219             if( entity->entity_type_info() == typeid( Body ) )
01220             {
01221                 TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01222                 CubitString st     = entity->entity_name();
01223                 DLIList< int >* sp = td_par->get_shared_proc_list();
01224                 int n_sp           = sp->size();
01225                 sp->reset();
01226                 for( int j = 0; j < n_sp; j++ )
01227                 {
01228                     std::cout << "partitioned_" << st.c_str() << ",proc=" << sp->get_and_step() << std::endl;
01229                 }
01230                 DLIList< int >* gp = td_par->get_ghost_proc_list();
01231                 int n_gp           = gp->size();
01232                 sp->reset();
01233                 for( int j = 0; j < n_gp; j++ )
01234                 {
01235                     std::cout << "partitioned_" << st.c_str() << ",ghost=" << gp->get_and_step() << std::endl;
01236                 }
01237             }
01238         }
01239     }
01240 
01241     return MB_SUCCESS;
01242 }
01243 
01244 ErrorCode ZoltanPartitioner::partition_surface( const int nparts,
01245                                                 DLIList< RefEntity* > entities,
01246                                                 const int* assignment,
01247                                                 std::vector< double >& obj_weights )
01248 {
01249     int i;
01250     double ave_load = 0.;
01251     double* loads   = new double[nparts];
01252     for( i = 0; i < nparts; i++ )
01253         loads[i] = 0.;
01254 
01255     int n_entities = entities.size();
01256     entities.reset();
01257     for( i = 0; i < n_entities; i++ )
01258     {
01259         loads[assignment[i]] += obj_weights[i];
01260         ave_load += obj_weights[i];
01261     }
01262     ave_load /= nparts;
01263 
01264     if( debug )
01265     {
01266         for( i = 0; i < nparts; i++ )
01267         {
01268             std::cout << "loads_before_surface_partition[" << i << "]=" << loads[i] << std::endl;
01269         }
01270     }
01271 
01272     int n_iter  = 0;
01273     bool b_stop = false;
01274     do
01275     {
01276         // get max and min load processors
01277         int max_proc = nparts, min_proc = 0;
01278         double min_load = std::numeric_limits< double >::max();
01279         double max_load = std::numeric_limits< double >::min();
01280         for( i = 0; i < nparts; i++ )
01281         {
01282             if( loads[i] < min_load )
01283             {
01284                 min_load = loads[i];
01285                 min_proc = i;
01286             }
01287             if( loads[i] > max_load )
01288             {
01289                 max_load = loads[i];
01290                 max_proc = i;
01291             }
01292         }
01293 
01294         double load_diff = max_load - ave_load;
01295         if( load_diff > ave_load / 10. )
01296         {
01297             bool b_moved = false;
01298             entities.reset();
01299             for( i = 0; i < n_entities; i++ )
01300             {
01301                 if( b_moved ) break;
01302                 if( assignment[i] == max_proc &&  // find maximum load processor bodies
01303                     entities[i]->entity_type_info() == typeid( Body ) )
01304                 {
01305                     DLIList< RefFace* > faces;
01306                     ( dynamic_cast< TopologyEntity* >( entities[i] ) )->ref_faces( faces );
01307                     int n_face = faces.size();
01308                     faces.reset();
01309                     for( int j = 0; j < n_face; j++ )
01310                     {
01311                         RefFace* face      = faces.get_and_step();
01312                         TopologyEntity* te = CAST_TO( face, TopologyEntity );
01313                         if( te->bridge_manager()->number_of_bridges() < 2 )
01314                         {  // non-shared
01315                             TDParallel* td_par = (TDParallel*)face->get_TD( &TDParallel::is_parallel );
01316                             if( td_par == NULL )
01317                             {  // only consider unpartitioned surfaces
01318                                 double face_load = face->measure();
01319                                 if( load_diff > min_load + face_load - ave_load )
01320                                 {
01321                                     loads[max_proc] -= face_load;
01322                                     loads[min_proc] += face_load;
01323                                     int merge_id = TDUniqueId::get_unique_id( face );
01324                                     DLIList< int > shared_procs;
01325                                     shared_procs.append( min_proc );
01326                                     shared_procs.append( max_proc );
01327                                     td_par = new TDParallel( face, NULL, &shared_procs, NULL, merge_id, 1 );
01328 
01329                                     if( debug )
01330                                     {
01331                                         std::cout << "non-shared surface " << face->id() << " is moved from p"
01332                                                   << max_proc << " to p" << min_proc << std::endl;
01333                                     }
01334                                     b_moved = true;
01335                                     break;
01336                                 }
01337                             }
01338                         }
01339                     }
01340                 }
01341             }
01342         }
01343         else
01344             b_stop = true;
01345 
01346         n_iter++;
01347     } while( !b_stop && n_iter < 50 );
01348 
01349     if( debug )
01350     {
01351         for( i = 0; i < nparts; i++ )
01352         {
01353             std::cout << "loads_after_surface_partition[" << i << "]=" << loads[i] << std::endl;
01354         }
01355     }
01356 
01357     delete[] loads;
01358     return MB_SUCCESS;
01359 }
01360 
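/*
 * A minimal sketch of the greedy rebalancing strategy used by partition_surface()
 * above: repeatedly move one movable item from the most-loaded part to the
 * least-loaded part until the maximum load is within 10% of the average or 50
 * iterations have passed.  This is a hypothetical, CGM-free illustration using
 * plain std containers (it assumes nparts > 0 and does not touch TDParallel):
 *
 *   #include <algorithm>
 *   #include <vector>
 *
 *   void greedy_rebalance_sketch( std::vector< int >& assignment,
 *                                 const std::vector< double >& weights,
 *                                 int nparts )
 *   {
 *       std::vector< double > loads( nparts, 0.0 );
 *       double ave_load = 0.0;
 *       for( size_t i = 0; i < weights.size(); i++ )
 *       {
 *           loads[assignment[i]] += weights[i];
 *           ave_load += weights[i];
 *       }
 *       ave_load /= nparts;
 *
 *       for( int iter = 0; iter < 50; iter++ )
 *       {
 *           int max_p = (int)( std::max_element( loads.begin(), loads.end() ) - loads.begin() );
 *           int min_p = (int)( std::min_element( loads.begin(), loads.end() ) - loads.begin() );
 *           if( loads[max_p] - ave_load <= ave_load / 10. ) break;  // balanced enough
 *
 *           // move one item from the heaviest to the lightest part, if it helps
 *           for( size_t i = 0; i < weights.size(); i++ )
 *           {
 *               if( assignment[i] == max_p && loads[max_p] > loads[min_p] + weights[i] )
 *               {
 *                   loads[max_p] -= weights[i];
 *                   loads[min_p] += weights[i];
 *                   assignment[i] = min_p;
 *                   break;
 *               }
 *           }
 *       }
 *   }
 */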
01361 ErrorCode ZoltanPartitioner::partition_round_robin( const int n_part )
01362 {
01363     int i, j, k;
01364     double* loads    = new double[n_part];  // estimated body/surface loads for each processor
01365     double* ve_loads = new double[n_part];  // estimated vertex/edge loads for each processor
01366     for( i = 0; i < n_part; i++ )
01367     {
01368         loads[i]    = 0.0;
01369         ve_loads[i] = 0.0;
01370     }
01371     DLIList< RefEntity* > body_entity_list;
01372     gti->ref_entity_list( "body", body_entity_list, CUBIT_FALSE );
01373     int n_entity      = body_entity_list.size();
01374     int n_entity_proc = n_entity / n_part;  // # of entities per processor
01375     int i_entity_proc = n_entity_proc;      // entity index limit for each processor
01376     int proc          = 0;
01377     RefEntity* entity;
01378 
01379     // assign processors to bodies
01380     body_entity_list.reset();
01381     for( i = 0; i < n_entity; i++ )
01382     {
01383         if( i == i_entity_proc )
01384         {
01385             proc++;
01386             if( proc < n_part )
01387                 i_entity_proc += n_entity_proc;
01388             else
01389             {
01390                 proc %= n_part;
01391                 i_entity_proc++;
01392             }
01393         }
01394 
01395         // assign to bodies
01396         entity = body_entity_list.get_and_step();
01397         DLIList< int > shared_procs;
01398         shared_procs.append( proc );
01399         TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01400         if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs );
01401         loads[proc] += entity->measure();
01402 
01403         // assign to volumes; this should be removed in the future
01404         DLIList< RefVolume* > volumes;
01405         ( dynamic_cast< TopologyEntity* >( entity ) )->ref_volumes( volumes );
01406         int n_vol = volumes.size();
01407         volumes.reset();
01408         for( j = 0; j < n_vol; j++ )
01409         {
01410             RefEntity* vol = volumes.get_and_step();
01411             td_par         = (TDParallel*)vol->get_TD( &TDParallel::is_parallel );
01412             if( td_par == NULL ) td_par = new TDParallel( vol, NULL, &shared_procs );
01413         }
01414 
01415         // add local surface load
01416         DLIList< RefFace* > faces;
01417         ( dynamic_cast< TopologyEntity* >( entity ) )->ref_faces( faces );
01418         int n_face = faces.size();
01419         faces.reset();
01420         for( j = 0; j < n_face; j++ )
01421         {
01422             RefFace* face      = faces.get_and_step();
01423             TopologyEntity* te = CAST_TO( face, TopologyEntity );
01424             if( te->bridge_manager()->number_of_bridges() < 2 ) loads[proc] = loads[proc] + face->measure();
01425         }
01426 
01427         // Get all child entities
01428         DLIList< RefEntity* > child_list;
01429         RefEntity::get_all_child_ref_entities( body_entity_list, child_list );
01430         int n_child = child_list.size();
01431 
01432         // assign processors to interface entities
01433         child_list.reset();
01434         for( j = 0; j < n_child; j++ )
01435         {
01436             entity             = child_list.get_and_step();
01437             TopologyEntity* te = CAST_TO( entity, TopologyEntity );
01438 
01439             if( te->bridge_manager()->number_of_bridges() > 1 )
01440             {
01441                 DLIList< Body* > parent_bodies;
01442                 DLIList< int > child_shared_procs;  // Shared processors of each child entity
01443                 ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parent_bodies );
01444                 int n_parent = parent_bodies.size();
01445 
01446                 for( k = 0; k < n_parent; k++ )
01447                 {
01448                     RefEntity* parent_vol = CAST_TO( parent_bodies.get_and_step(), RefEntity );
01449                     TDParallel* parent_td = (TDParallel*)parent_vol->get_TD( &TDParallel::is_parallel );
01450 
01451                     if( parent_td == NULL )
01452                     {
01453                         PRINT_ERROR( "Parent volume must be partitioned." );
01454                         delete[] loads;
01455                         delete[] ve_loads;
01456                         return MB_FAILURE;
01457                     }
01458                     child_shared_procs.append_unique( parent_td->get_charge_proc() );
01459                 }
01460 
01461                 if( child_shared_procs.size() > 1 )
01462                 {  // if it is interface
01463                     td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01464                     if( td_par == NULL )
01465                     {
01466                         int merge_id = TDUniqueId::get_unique_id( entity );
01467                         if( entity->entity_type_info() == typeid( RefFace ) )
01468                         {  // face
01469                             if( child_shared_procs.size() != 2 )
01470                             {
01471                                 PRINT_ERROR( "Error: # of shared processors of interface surface "
01472                                              "should be 2." );
01473                                 delete[] loads;
01474                                 delete[] ve_loads;
01475                                 return MB_FAILURE;
01476                             }
01477 
01478                             // balance interface surface loads
01479                             if( loads[child_shared_procs[0]] > loads[child_shared_procs[1]] )
01480                                 child_shared_procs.reverse();
01481 
01482                             loads[child_shared_procs[0]] = loads[child_shared_procs[0]] + entity->measure();
01483                             td_par = new TDParallel( entity, NULL, &child_shared_procs, NULL, merge_id, 1 );
01484                         }  // face
01485                         else if( entity->entity_type_info() == typeid( RefEdge ) ||
01486                                  entity->entity_type_info() == typeid( RefVertex ) )
01487                         {  // edge or vertex
01488                             // balance interface vertex/edge loads
01489                             int min_p         = child_shared_procs[0];
01490                             int n_shared_proc = child_shared_procs.size();
01491                             for( k = 1; k < n_shared_proc; k++ )
01492                             {
01493                                 if( ve_loads[child_shared_procs[k]] < ve_loads[min_p] ) min_p = child_shared_procs[k];
01494                             }
01495                             ve_loads[min_p] = ve_loads[min_p] + entity->measure();
01496                             child_shared_procs.remove( min_p );
01497                             child_shared_procs.insert_first( min_p );
01498                             td_par = new TDParallel( entity, NULL, &child_shared_procs, NULL, merge_id, 1 );
01499                         }  // edge or vertex
01500                     }      // if (td_par == NULL)
01501                 }          // if it is interface
01502             }              // if (te->bridge_manager()->number_of_bridges() > 1)
01503         }                  // for (j = 0; j < n_child; j++)
01504     }                      // for (i = 0; i < n_entity; i++)
01505 
01506     delete[] loads;
01507     delete[] ve_loads;
01508 
01509     return MB_SUCCESS;
01510 }
01511 
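/*
 * The loop above distributes bodies to processors in contiguous blocks of
 * n_entity / n_part, spilling the leftover bodies one at a time once every
 * processor has received its block.  A hypothetical sketch of a balanced block
 * distribution expressed as a closed-form index-to-owner map (the first
 * n_entity % n_part parts receive one extra entity; this is not byte-identical
 * to the loop above, which hands out all leftovers starting at processor 0):
 *
 *   int block_owner_sketch( int i, int n_entity, int n_part )
 *   {
 *       int base = n_entity / n_part;   // entities every part gets
 *       int rem  = n_entity % n_part;   // the first 'rem' parts get one more
 *       int big  = rem * ( base + 1 );  // entities covered by the larger blocks
 *       return ( i < big ) ? i / ( base + 1 ) : rem + ( i - big ) / base;
 *   }
 */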
01512 // partition child entities to one of parent entity shared processors
01513 ErrorCode ZoltanPartitioner::partition_child_entities( const int dim,
01514                                                        const int n_part,
01515                                                        const bool part_surf,
01516                                                        const bool ghost )
01517 {
01518     DLIList< RefEntity* > entity_list;
01519     if( dim == 0 )
01520         gti->ref_entity_list( "vertex", entity_list, CUBIT_FALSE );
01521     else if( dim == 1 )
01522         gti->ref_entity_list( "curve", entity_list, CUBIT_FALSE );
01523     else
01524     {
01525         std::cerr << "Dimension should be 0 or 1." << std::endl;
01526         return MB_FAILURE;
01527     }
01528 
01529     int i, j, k;
01530     int n_entity  = entity_list.size();
01531     double* loads = new double[n_part];
01532     for( i = 0; i < n_part; i++ )
01533         loads[i] = 0.;
01534     entity_list.reset();
01535 
01536     for( i = 0; i < n_entity; i++ )
01537     {  // for all entities
01538         RefEntity* entity  = entity_list.get_and_step();
01539         TopologyEntity* te = CAST_TO( entity, TopologyEntity );
01540 
01541         if( !part_surf && te->bridge_manager()->number_of_bridges() < 2 ) continue;
01542 
01543         DLIList< int > shared_procs;
01544         DLIList< Body* > parents;
01545         ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parents );
01546         int n_parents = parents.size();
01547         std::set< int > s_proc;
01548         parents.reset();
01549 
01550         // get shared processors from parent bodies
01551         for( j = 0; j < n_parents; j++ )
01552         {
01553             RefEntity* parent     = parents.get_and_step();
01554             TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01555 
01556             if( parent_td != NULL )
01557             {
01558                 DLIList< int >* parent_procs = parent_td->get_shared_proc_list();
01559                 int n_shared                 = parent_procs->size();
01560                 parent_procs->reset();
01561                 for( k = 0; k < n_shared; k++ )
01562                 {
01563                     int p = parent_procs->get_and_step();
01564                     s_proc.insert( p );
01565                 }
01566             }
01567         }
01568 
01569         if( part_surf )
01570         {  // also get shared procs from parent surfaces
01571             DLIList< RefFace* > parent_faces;
01572             ( dynamic_cast< TopologyEntity* >( entity ) )->ref_faces( parent_faces );
01573             int n_pface = parent_faces.size();
01574             parent_faces.reset();
01575 
01576             // get shared processors from parent faces
01577             for( j = 0; j < n_pface; j++ )
01578             {
01579                 RefEntity* parent     = parent_faces.get_and_step();
01580                 TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01581 
01582                 if( parent_td != NULL )
01583                 {
01584                     DLIList< int >* parent_procs = parent_td->get_shared_proc_list();
01585                     int n_shared                 = parent_procs->size();
01586                     parent_procs->reset();
01587 
01588                     for( k = 0; k < n_shared; k++ )
01589                     {
01590                         int p = parent_procs->get_and_step();
01591                         s_proc.insert( p );
01592                     }
01593                 }
01594             }
01595         }
01596 
01597         // find the minimum load processor and put it
01598         // at the front of the shared_procs list
01599         if( s_proc.size() > 1 )
01600         {
01601             int min_proc                       = 0;
01602             double min_load                    = std::numeric_limits< double >::max();
01603             std::set< int >::iterator iter     = s_proc.begin();
01604             std::set< int >::iterator end_iter = s_proc.end();
01605             for( ; iter != end_iter; ++iter )
01606             {
01607                 if( loads[*iter] < min_load )
01608                 {
01609                     min_load = loads[*iter];
01610                     min_proc = *iter;
01611                 }
01612             }
01613 
01614             if( dim == 1 )
01615                 loads[min_proc] += entity->measure();
01616             else if( dim == 0 )
01617                 loads[min_proc] += 1.;
01618             shared_procs.append( min_proc );
01619             iter     = s_proc.begin();
01620             end_iter = s_proc.end();
01621             for( ; iter != end_iter; ++iter )
01622             {
01623                 if( *iter != min_proc )
01624                 {
01625                     shared_procs.append( *iter );
01626                 }
01627             }
01628 
01629             // add ghost geometries to shared processors for edge
01630             if( ghost )
01631             {
01632                 parents.reset();
01633                 for( j = 0; j < n_parents; j++ )
01634                 {  // for all parent bodies
01635                     RefEntity* parent_vol = CAST_TO( parents.get_and_step(), RefEntity );
01636                     TDParallel* parent_td = (TDParallel*)parent_vol->get_TD( &TDParallel::is_parallel );
01637                     int n_shared_proc     = shared_procs.size();
01638 
01639                     for( k = 0; k < n_shared_proc; k++ )
01640                     {
01641                         if( parent_td != NULL ) parent_td->add_ghost_proc( shared_procs[k] );
01642                     }
01643                 }
01644             }
01645 
01646             // initialize tool data
01647             int merge_id       = TDUniqueId::get_unique_id( entity );
01648             TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01649             if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs, NULL, merge_id, 1 );
01650         }
01651     }
01652 
01653     delete[] loads;
01654     return MB_SUCCESS;
01655 }
01656 #endif
01657 
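/*
 * partition_child_entities() above picks, among the processors that share an
 * entity's parents, the one with the smallest running load and moves it to the
 * front of the shared-processor list so that it becomes the owner.  A
 * hypothetical, stand-alone sketch of that selection (assumes 'candidates' is
 * non-empty and 'loads' is indexed by processor rank):
 *
 *   #include <limits>
 *   #include <set>
 *   #include <vector>
 *
 *   std::vector< int > order_by_min_load_sketch( const std::set< int >& candidates,
 *                                                const std::vector< double >& loads )
 *   {
 *       int min_proc    = *candidates.begin();
 *       double min_load = std::numeric_limits< double >::max();
 *       for( std::set< int >::const_iterator it = candidates.begin(); it != candidates.end(); ++it )
 *       {
 *           if( loads[*it] < min_load )
 *           {
 *               min_load = loads[*it];
 *               min_proc = *it;
 *           }
 *       }
 *
 *       std::vector< int > ordered;
 *       ordered.push_back( min_proc );  // the owner goes first
 *       for( std::set< int >::const_iterator it = candidates.begin(); it != candidates.end(); ++it )
 *           if( *it != min_proc ) ordered.push_back( *it );
 *       return ordered;
 *   }
 */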
01658 ErrorCode ZoltanPartitioner::write_partition( const int nparts,
01659                                               Range& elems,
01660                                               const int* assignment,
01661                                               const bool write_as_sets,
01662                                               const bool write_as_tags )
01663 {
01664     ErrorCode result;
01665 
01666     // get the partition set tag
01667     Tag part_set_tag;
01668     int dum_id = -1, i;
01669     result     = mbImpl->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_set_tag,
01670                                          MB_TAG_SPARSE | MB_TAG_CREAT, &dum_id );RR;
01671 
01672     // get any sets already with this tag, and clear them
01673     Range tagged_sets;
01674     result =
01675         mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &part_set_tag, NULL, 1, tagged_sets, Interface::UNION );RR;
01676     if( !tagged_sets.empty() )
01677     {
01678         result = mbImpl->clear_meshset( tagged_sets );RR;
01679         if( !write_as_sets )
01680         {
01681             result = mbImpl->tag_delete_data( part_set_tag, tagged_sets );RR;
01682         }
01683     }
01684 
01685     if( write_as_sets )
01686     {
01687         // first, create partition sets and store in vector
01688         partSets.clear();
01689 
01690         if( nparts > (int)tagged_sets.size() )
01691         {
01692             // too few partition sets - create missing ones
01693             int num_new = nparts - tagged_sets.size();
01694             for( i = 0; i < num_new; i++ )
01695             {
01696                 EntityHandle new_set;
01697                 result = mbImpl->create_meshset( MESHSET_SET, new_set );RR;
01698                 tagged_sets.insert( new_set );
01699             }
01700         }
01701         else if( nparts < (int)tagged_sets.size() )
01702         {
01703             // too many partition sets - delete extras
01704             int num_del = tagged_sets.size() - nparts;
01705             for( i = 0; i < num_del; i++ )
01706             {
01707                 EntityHandle old_set = tagged_sets.pop_back();
01708                 result               = mbImpl->delete_entities( &old_set, 1 );RR;
01709             }
01710         }
01711         // assign partition sets to vector
01712         partSets.swap( tagged_sets );
01713 
01714         // write a tag to those sets denoting they're partition sets, with a value of the
01715         // proc number
01716         int* dum_ids = new int[nparts];
01717         for( i = 0; i < nparts; i++ )
01718             dum_ids[i] = i;
01719 
01720         result = mbImpl->tag_set_data( part_set_tag, partSets, dum_ids );RR;
01721         // freeing this buffer fixes a leak found by valgrind when running mbpart
01722         delete[] dum_ids;
01723         dum_ids = NULL;
01724 
01725         // assign entities to the relevant sets
01726         std::vector< EntityHandle > tmp_part_sets;
01727         // int N = (int)elems.size();
01728         std::copy( partSets.begin(), partSets.end(), std::back_inserter( tmp_part_sets ) );
01729         /*Range::reverse_iterator riter;
01730         for (i = N-1, riter = elems.rbegin(); riter != elems.rend(); ++riter, i--) {
01731           int assigned_part = assignment[i];
01732           part_ranges[assigned_part].insert(*riter);
01733           //result = mbImpl->add_entities(tmp_part_sets[assignment[i]], &(*rit), 1); RR;
01734         }*/
01735 
01736         Range::iterator rit;
01737         for( i = 0, rit = elems.begin(); rit != elems.end(); ++rit, i++ )
01738         {
01739             result = mbImpl->add_entities( tmp_part_sets[assignment[i]], &( *rit ), 1 );RR;
01740         }
01741         /*for (i=0; i<nparts; i++)
01742         {
01743           result = mbImpl->add_entities(tmp_part_sets[i], part_ranges[i]); RR;
01744         }
01745         delete [] part_ranges;*/
01746         // check for empty sets, warn if there are any
01747         Range empty_sets;
01748         for( rit = partSets.begin(); rit != partSets.end(); ++rit )
01749         {
01750             int num_ents = 0;
01751             result       = mbImpl->get_number_entities_by_handle( *rit, num_ents );
01752             if( MB_SUCCESS != result || !num_ents ) empty_sets.insert( *rit );
01753         }
01754         if( !empty_sets.empty() )
01755         {
01756             std::cout << "WARNING: " << empty_sets.size() << " empty sets in partition: ";
01757             for( rit = empty_sets.begin(); rit != empty_sets.end(); ++rit )
01758                 std::cout << *rit << " ";
01759             std::cout << std::endl;
01760         }
01761     }
01762 
01763     if( write_as_tags )
01764     {
01765         // write the partition assignment directly as an integer tag on each element
01766         result = mbImpl->tag_set_data( part_set_tag, elems, assignment );RR;
01767     }
01768 
01769     return MB_SUCCESS;
01770 }
01771 
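/*
 * A minimal usage sketch for write_partition() (hypothetical file and variable
 * names; assumes an MPI build and that MPI_Init has already been called).  The
 * assignment array would normally come from one of the Zoltan partitioning
 * paths in this class; here it is filled with zeros purely for illustration:
 *
 *   #include <vector>
 *   #include "moab/Core.hpp"
 *   #include "moab/ParallelComm.hpp"
 *   #include "moab/ZoltanPartitioner.hpp"
 *
 *   int write_partition_sketch( int argc, char** argv )
 *   {
 *       moab::Core mb;
 *       if( moab::MB_SUCCESS != mb.load_file( "input.h5m" ) ) return 1;
 *
 *       moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );
 *       moab::ZoltanPartitioner tool( &mb, &pcomm, false /*use_coords*/, argc, argv );
 *
 *       moab::Range elems;
 *       if( moab::MB_SUCCESS != mb.get_entities_by_dimension( 0, 3, elems ) ) return 1;
 *
 *       const int nparts = 4;
 *       std::vector< int > assignment( elems.size(), 0 );  // one target part id per element
 *
 *       moab::ErrorCode rval = tool.write_partition( nparts, elems, &assignment[0],
 *                                                    true /*write_as_sets*/, true /*write_as_tags*/ );
 *       return ( moab::MB_SUCCESS == rval ) ? 0 : 1;
 *   }
 */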
01772 void ZoltanPartitioner::SetRCB_Parameters( const bool recompute_rcb_box )
01773 {
01774     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nRecursive Coordinate Bisection" << std::endl;
01775     // General parameters:
01776 
01777     myZZ->Set_Param( "DEBUG_LEVEL", "0" );  // no debug messages
01778     myZZ->Set_Param( "LB_METHOD", "RCB" );  // recursive coordinate bisection
01779     // myZZ->Set_Param( "RCB_RECOMPUTE_BOX", "1" );  // recompute RCB box if needed ?
01780 
01781     // RCB parameters:
01782 
01783     myZZ->Set_Param( "RCB_OUTPUT_LEVEL", "1" );
01784     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition so that we can infer partitions
01785     // myZZ->Set_Param("RCB_RECTILINEAR_BLOCKS", "1"); // don't split point on boundary
01786     if( recompute_rcb_box ) myZZ->Set_Param( "RCB_RECOMPUTE_BOX", "1" );
01787 }
01788 
01789 void ZoltanPartitioner::SetRIB_Parameters()
01790 {
01791     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nRecursive Inertial Bisection" << std::endl;
01792     // General parameters:
01793 
01794     myZZ->Set_Param( "DEBUG_LEVEL", "0" );  // no debug messages
01795     myZZ->Set_Param( "LB_METHOD", "RIB" );  // Recursive Inertial Bisection
01796 
01797     // RIB parameters:
01798 
01799     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition
01800     myZZ->Set_Param( "AVERAGE_CUTS", "1" );
01801 }
01802 
01803 void ZoltanPartitioner::SetHSFC_Parameters()
01804 {
01805     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nHilbert Space Filling Curve" << std::endl;
01806     // General parameters:
01807 
01808     myZZ->Set_Param( "DEBUG_LEVEL", "0" );   // no debug messages
01809     myZZ->Set_Param( "LB_METHOD", "HSFC" );  // perform Hilbert space filling curve
01810 
01811     // HSFC parameters:
01812 
01813     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition
01814 }
01815 
01816 void ZoltanPartitioner::SetHypergraph_Parameters( const char* phg_method )
01817 {
01818     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nHypergraph (or PHG): " << std::endl;
01819     // General parameters:
01820 
01821     myZZ->Set_Param( "DEBUG_LEVEL", "0" );         // no debug messages
01822     myZZ->Set_Param( "LB_METHOD", "Hypergraph" );  // Hypergraph (or PHG)
01823 
01824     // Hypergraph (or PHG) parameters:
01825     myZZ->Set_Param( "PHG_COARSEPARTITION_METHOD", phg_method );  // CoarsePartitionMethod
01826 }
01827 
01828 void ZoltanPartitioner::SetPARMETIS_Parameters( const char* parmetis_method )
01829 {
01830     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nPARMETIS: " << parmetis_method << std::endl;
01831     // General parameters:
01832 
01833     myZZ->Set_Param( "DEBUG_LEVEL", "0" );       // no debug messages
01834     myZZ->Set_Param( "LB_METHOD", "PARMETIS" );  // the ParMETIS library
01835 
01836     // PARMETIS parameters:
01837 
01838     myZZ->Set_Param( "PARMETIS_METHOD", parmetis_method );  // method in the library
01839 }
01840 
01841 void ZoltanPartitioner::SetOCTPART_Parameters( const char* oct_method )
01842 {
01843     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nOctree Partitioning: " << oct_method << std::endl;
01844     // General parameters:
01845 
01846     myZZ->Set_Param( "DEBUG_LEVEL", "0" );      // no debug messages
01847     myZZ->Set_Param( "LB_METHOD", "OCTPART" );  // octree partitioning
01848 
01849     // OCTPART parameters:
01850 
01851     myZZ->Set_Param( "OCT_METHOD", oct_method );  // the SFC to be used
01852     myZZ->Set_Param( "OCT_OUTPUT_LEVEL", "3" );
01853 }
01854 
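/*
 * Each of the Set*_Parameters() helpers above reduces to a handful of Zoltan
 * parameter strings set on the member Zoltan object.  For reference, a
 * hypothetical stand-alone sketch of the equivalent RCB configuration on a raw
 * Zoltan C++ object (same parameter strings as SetRCB_Parameters()):
 *
 *   #include <mpi.h>
 *   #include <zoltan_cpp.h>
 *
 *   void rcb_params_sketch( MPI_Comm comm )
 *   {
 *       float version;
 *       Zoltan_Initialize( 0, NULL, &version );  // once per process
 *
 *       Zoltan zz( comm );
 *       zz.Set_Param( "DEBUG_LEVEL", "0" );       // no debug messages
 *       zz.Set_Param( "LB_METHOD", "RCB" );       // recursive coordinate bisection
 *       zz.Set_Param( "RCB_OUTPUT_LEVEL", "1" );
 *       zz.Set_Param( "KEEP_CUTS", "1" );         // save the decomposition
 *   }
 */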
01855 int ZoltanPartitioner::mbInitializePoints( int npts,
01856                                            double* pts,
01857                                            int* ids,
01858                                            int* adjs,
01859                                            int* length,
01860                                            double* obj_weights,
01861                                            double* edge_weights,
01862                                            int* parts,
01863                                            bool part_geom )
01864 {
01865     unsigned int i;
01866     int j;
01867     int *numPts, *nborProcs = NULL;
01868     int sum, ptsPerProc, ptsAssigned, mySize;
01869     MPI_Status stat;
01870     double* sendPts;
01871     int* sendIds;
01872     int* sendEdges  = NULL;
01873     int* sendNborId = NULL;
01874     int* sendProcs;
01875 
01876     if( mbpc->proc_config().proc_rank() == 0 )
01877     {
01878         /* divide pts to start */
01879 
01880         numPts      = (int*)malloc( sizeof( int ) * mbpc->proc_config().proc_size() );
01881         ptsPerProc  = npts / mbpc->proc_config().proc_size();
01882         ptsAssigned = 0;
01883 
01884         for( i = 0; i < mbpc->proc_config().proc_size() - 1; i++ )
01885         {
01886             numPts[i] = ptsPerProc;
01887             ptsAssigned += ptsPerProc;
01888         }
01889 
01890         numPts[mbpc->proc_config().proc_size() - 1] = npts - ptsAssigned;
01891 
01892         mySize  = numPts[mbpc->proc_config().proc_rank()];
01893         sendPts = pts + ( 3 * numPts[0] );
01894         sendIds = ids + numPts[0];
01895         sum     = 0;  // possibly no adjacency data is sent
01896         if( !part_geom )
01897         {
01898             sendEdges = length + numPts[0];
01899 
01900             for( j = 0; j < numPts[0]; j++ )
01901                 sum += length[j];
01902 
01903             sendNborId = adjs + sum;
01904 
01905             for( j = numPts[0]; j < npts; j++ )
01906                 sum += length[j];
01907 
01908             nborProcs = (int*)malloc( sizeof( int ) * sum );
01909         }
01910         for( j = 0; j < sum; j++ )
01911             if( ( i = adjs[j] / ptsPerProc ) < mbpc->proc_config().proc_size() )
01912                 nborProcs[j] = i;
01913             else
01914                 nborProcs[j] = mbpc->proc_config().proc_size() - 1;
01915 
01916         sendProcs = nborProcs + ( sendNborId - adjs );
01917 
01918         for( i = 1; i < mbpc->proc_config().proc_size(); i++ )
01919         {
01920             MPI_Send( &numPts[i], 1, MPI_INT, i, 0x00, mbpc->comm() );
01921             MPI_Send( sendPts, 3 * numPts[i], MPI_DOUBLE, i, 0x01, mbpc->comm() );
01922             MPI_Send( sendIds, numPts[i], MPI_INT, i, 0x03, mbpc->comm() );
01923             MPI_Send( sendEdges, numPts[i], MPI_INT, i, 0x06, mbpc->comm() );
01924             sum = 0;
01925 
01926             for( j = 0; j < numPts[i]; j++ )
01927                 sum += sendEdges[j];
01928 
01929             MPI_Send( sendNborId, sum, MPI_INT, i, 0x07, mbpc->comm() );
01930             MPI_Send( sendProcs, sum, MPI_INT, i, 0x08, mbpc->comm() );
01931             sendPts += ( 3 * numPts[i] );
01932             sendIds += numPts[i];
01933             sendEdges += numPts[i];
01934             sendNborId += sum;
01935             sendProcs += sum;
01936         }
01937 
01938         free( numPts );
01939     }
01940     else
01941     {
01942         MPI_Recv( &mySize, 1, MPI_INT, 0, 0x00, mbpc->comm(), &stat );
01943         pts    = (double*)malloc( sizeof( double ) * 3 * mySize );
01944         ids    = (int*)malloc( sizeof( int ) * mySize );
01945         length = (int*)malloc( sizeof( int ) * mySize );
01946         if( obj_weights != NULL ) obj_weights = (double*)malloc( sizeof( double ) * mySize );
01947         MPI_Recv( pts, 3 * mySize, MPI_DOUBLE, 0, 0x01, mbpc->comm(), &stat );
01948         MPI_Recv( ids, mySize, MPI_INT, 0, 0x03, mbpc->comm(), &stat );
01949         MPI_Recv( length, mySize, MPI_INT, 0, 0x06, mbpc->comm(), &stat );
01950         sum = 0;
01951 
01952         for( j = 0; j < mySize; j++ )
01953             sum += length[j];
01954 
01955         adjs = (int*)malloc( sizeof( int ) * sum );
01956         if( edge_weights != NULL ) edge_weights = (double*)malloc( sizeof( double ) * sum );
01957         nborProcs = (int*)malloc( sizeof( int ) * sum );
01958         MPI_Recv( adjs, sum, MPI_INT, 0, 0x07, mbpc->comm(), &stat );
01959         MPI_Recv( nborProcs, sum, MPI_INT, 0, 0x08, mbpc->comm(), &stat );
01960     }
01961 
01962     Points       = pts;
01963     GlobalIds    = ids;
01964     NumPoints    = mySize;
01965     NumEdges     = length;
01966     NborGlobalId = adjs;
01967     NborProcs    = nborProcs;
01968     ObjWeights   = obj_weights;
01969     EdgeWeights  = edge_weights;
01970     Parts        = parts;
01971 
01972     return mySize;
01973 }
01974 
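/*
 * On rank 0, mbInitializePoints() splits the npts points into contiguous
 * blocks: each of the first P-1 ranks receives npts / P points and the last
 * rank absorbs the remainder.  A hypothetical one-function sketch of that
 * count computation:
 *
 *   #include <vector>
 *
 *   std::vector< int > split_counts_sketch( int npts, int nprocs )
 *   {
 *       std::vector< int > counts( nprocs, npts / nprocs );
 *       counts[nprocs - 1] = npts - ( npts / nprocs ) * ( nprocs - 1 );  // remainder goes to the last rank
 *       return counts;
 *   }
 */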
01975 void ZoltanPartitioner::mbFinalizePoints( int npts,
01976                                           int numExport,
01977                                           ZOLTAN_ID_PTR exportLocalIDs,
01978                                           int* exportProcs,
01979                                           int** assignment )
01980 {
01981     int* MyAssignment;
01982     int i;
01983     int numPts;
01984     MPI_Status stat;
01985     int* recvA;
01986 
01987     /* assign pts to start */
01988 
01989     if( mbpc->proc_config().proc_rank() == 0 )
01990         MyAssignment = (int*)malloc( sizeof( int ) * npts );
01991     else
01992         MyAssignment = (int*)malloc( sizeof( int ) * NumPoints );
01993 
01994     for( i = 0; i < NumPoints; i++ )
01995         MyAssignment[i] = mbpc->proc_config().proc_rank();
01996 
01997     for( i = 0; i < numExport; i++ )
01998         MyAssignment[exportLocalIDs[i]] = exportProcs[i];
01999 
02000     if( mbpc->proc_config().proc_rank() == 0 )
02001     {
02002         /* collect pts */
02003         recvA = MyAssignment + NumPoints;
02004 
02005         for( i = 1; i < (int)mbpc->proc_config().proc_size(); i++ )
02006         {
02007             MPI_Recv( &numPts, 1, MPI_INT, i, 0x04, mbpc->comm(), &stat );
02008             MPI_Recv( recvA, numPts, MPI_INT, i, 0x05, mbpc->comm(), &stat );
02009             recvA += numPts;
02010         }
02011 
02012         *assignment = MyAssignment;
02013     }
02014     else
02015     {
02016         MPI_Send( &NumPoints, 1, MPI_INT, 0, 0x04, mbpc->comm() );
02017         MPI_Send( MyAssignment, NumPoints, MPI_INT, 0, 0x05, mbpc->comm() );
02018         free( MyAssignment );
02019     }
02020 }
02021 
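/*
 * mbFinalizePoints() gathers the per-rank assignment blocks onto rank 0 with
 * explicit point-to-point messages.  The same collection could be expressed
 * with MPI_Gatherv; the following is a hypothetical sketch (it assumes every
 * rank has at least one local point), not a drop-in replacement:
 *
 *   #include <mpi.h>
 *   #include <vector>
 *
 *   void gather_assignment_sketch( const std::vector< int >& mine, std::vector< int >& all, MPI_Comm comm )
 *   {
 *       int rank, nprocs, count = (int)mine.size();
 *       MPI_Comm_rank( comm, &rank );
 *       MPI_Comm_size( comm, &nprocs );
 *
 *       std::vector< int > counts( nprocs, 0 ), displs( nprocs, 0 );
 *       MPI_Gather( &count, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, comm );
 *
 *       int total = 0;
 *       for( int i = 0; i < nprocs; i++ )
 *       {
 *           displs[i] = total;
 *           total += counts[i];
 *       }
 *       if( rank == 0 ) all.resize( total );
 *
 *       MPI_Gatherv( (void*)&mine[0], count, MPI_INT,
 *                    rank == 0 ? &all[0] : NULL, &counts[0], &displs[0], MPI_INT, 0, comm );
 *   }
 */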
02022 int ZoltanPartitioner::mbGlobalSuccess( int rc )
02023 {
02024     int fail = 0;
02025     unsigned int i;
02026     int* vals = (int*)malloc( mbpc->proc_config().proc_size() * sizeof( int ) );
02027 
02028     MPI_Allgather( &rc, 1, MPI_INT, vals, 1, MPI_INT, mbpc->comm() );
02029 
02030     for( i = 0; i < mbpc->proc_config().proc_size(); i++ )
02031     {
02032         if( vals[i] != ZOLTAN_OK )
02033         {
02034             if( 0 == mbpc->proc_config().proc_rank() )
02035             {
02036                 mbShowError( vals[i], "Result on process " );
02037             }
02038             fail = 1;
02039         }
02040     }
02041 
02042     free( vals );
02043     return fail;
02044 }
02045 
02046 void ZoltanPartitioner::mbPrintGlobalResult( const char* s, int begin, int import, int exp, int change )
02047 {
02048     unsigned int i;
02049     int* v1 = (int*)malloc( 4 * sizeof( int ) );
02050     int* v2 = NULL;
02051     int* v;
02052 
02053     v1[0] = begin;
02054     v1[1] = import;
02055     v1[2] = exp;
02056     v1[3] = change;
02057 
02058     if( mbpc->proc_config().proc_rank() == 0 )
02059     {
02060         v2 = (int*)malloc( 4 * mbpc->proc_config().proc_size() * sizeof( int ) );
02061     }
02062 
02063     MPI_Gather( v1, 4, MPI_INT, v2, 4, MPI_INT, 0, mbpc->comm() );
02064 
02065     if( mbpc->proc_config().proc_rank() == 0 )
02066     {
02067         fprintf( stdout, "======%s======\n", s );
02068         for( i = 0, v = v2; i < mbpc->proc_config().proc_size(); i++, v += 4 )
02069         {
02070             fprintf( stdout, "%u: originally had %d, import %d, exp %d, %s\n", i, v[0], v[1], v[2],
02071                      v[3] ? "a change of partitioning" : "no change" );
02072         }
02073         fprintf( stdout, "==========================================\n" );
02074         fflush( stdout );
02075 
02076         free( v2 );
02077     }
02078 
02079     free( v1 );
02080 }
02081 
02082 void ZoltanPartitioner::mbShowError( int val, const char* s )
02083 {
02084     if( s ) printf( "%s ", s );
02085 
02086     switch( val )
02087     {
02088         case ZOLTAN_OK:
02089             printf( "%d: SUCCESSFUL\n", mbpc->proc_config().proc_rank() );
02090             break;
02091         case ZOLTAN_WARN:
02092             printf( "%d: WARNING\n", mbpc->proc_config().proc_rank() );
02093             break;
02094         case ZOLTAN_FATAL:
02095             printf( "%d: FATAL ERROR\n", mbpc->proc_config().proc_rank() );
02096             break;
02097         case ZOLTAN_MEMERR:
02098             printf( "%d: MEMORY ALLOCATION ERROR\n", mbpc->proc_config().proc_rank() );
02099             break;
02100         default:
02101             printf( "%d: INVALID RETURN CODE\n", mbpc->proc_config().proc_rank() );
02102             break;
02103     }
02104 }
02105 
02106 /**********************
02107 ** call backs
02108 **********************/
02109 
02110 int mbGetNumberOfAssignedObjects( void* /* userDefinedData */, int* err )
02111 {
02112     *err = 0;
02113     return NumPoints;
02114 }
02115 
02116 void mbGetObjectList( void* /* userDefinedData */,
02117                       int /* numGlobalIds */,
02118                       int /* numLids */,
02119                       ZOLTAN_ID_PTR gids,
02120                       ZOLTAN_ID_PTR lids,
02121                       int wgt_dim,
02122                       float* obj_wgts,
02123                       int* err )
02124 {
02125     for( int i = 0; i < NumPoints; i++ )
02126     {
02127         gids[i] = GlobalIds[i];
02128         lids[i] = i;
02129         if( wgt_dim > 0 ) obj_wgts[i] = ObjWeights[i];
02130     }
02131 
02132     *err = 0;
02133 }
02134 
02135 int mbGetObjectSize( void* /* userDefinedData */, int* err )
02136 {
02137     *err = 0;
02138     return 3;
02139 }
02140 
02141 void mbGetObject( void* /* userDefinedData */,
02142                   int /* numGlobalIds */,
02143                   int /* numLids */,
02144                   int numObjs,
02145                   ZOLTAN_ID_PTR /* gids */,
02146                   ZOLTAN_ID_PTR lids,
02147                   int numDim,
02148                   double* pts,
02149                   int* err )
02150 {
02151     int i, id, id3;
02152     int next = 0;
02153 
02154     if( numDim != 3 )
02155     {
02156         *err = 1;
02157         return;
02158     }
02159 
02160     for( i = 0; i < numObjs; i++ )
02161     {
02162         id = lids[i];
02163 
02164         if( ( id < 0 ) || ( id >= NumPoints ) )
02165         {
02166             *err = 1;
02167             return;
02168         }
02169 
02170         id3 = lids[i] * 3;
02171 
02172         pts[next++] = Points[id3];
02173         pts[next++] = Points[id3 + 1];
02174         pts[next++] = Points[id3 + 2];
02175     }
02176 }
02177 
02178 void mbGetNumberOfEdges( void* /* userDefinedData */,
02179                          int /* numGlobalIds */,
02180                          int /* numLids */,
02181                          int numObjs,
02182                          ZOLTAN_ID_PTR /* gids */,
02183                          ZOLTAN_ID_PTR lids,
02184                          int* numEdges,
02185                          int* err )
02186 {
02187     int i, id;
02188     int next = 0;
02189 
02190     for( i = 0; i < numObjs; i++ )
02191     {
02192         id = lids[i];
02193 
02194         if( ( id < 0 ) || ( id >= NumPoints ) )
02195         {
02196             *err = 1;
02197             return;
02198         }
02199 
02200         numEdges[next++] = NumEdges[id];
02201     }
02202 }
02203 
02204 void mbGetEdgeList( void* /* userDefinedData */,
02205                     int /* numGlobalIds */,
02206                     int /* numLids */,
02207                     int numObjs,
02208                     ZOLTAN_ID_PTR /* gids */,
02209                     ZOLTAN_ID_PTR lids,
02210                     int* /* numEdges */,
02211                     ZOLTAN_ID_PTR nborGlobalIds,
02212                     int* nborProcs,
02213                     int wgt_dim,
02214                     float* edge_wgts,
02215                     int* err )
02216 {
02217     int i, id, idSum, j;
02218     int next = 0;
02219 
02220     for( i = 0; i < numObjs; i++ )
02221     {
02222         id = lids[i];
02223 
02224         if( ( id < 0 ) || ( id >= NumPoints ) )
02225         {
02226             *err = 1;
02227             return;
02228         }
02229 
02230         idSum = 0;
02231 
02232         for( j = 0; j < id; j++ )
02233             idSum += NumEdges[j];
02234 
02235         for( j = 0; j < NumEdges[id]; j++ )
02236         {
02237             nborGlobalIds[next] = NborGlobalId[idSum];
02238             nborProcs[next]     = NborProcs[idSum];
02239             if( wgt_dim > 0 ) edge_wgts[next] = EdgeWeights[idSum];
02240             next++;
02241             idSum++;
02242         }
02243     }
02244 }
02245 
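/*
 * mbGetEdgeList() above recomputes the start of each object's adjacency block
 * by summing NumEdges[0..id-1] inside the loop, which is O(n) per object.  A
 * common alternative, shown here as a hypothetical sketch, is to precompute a
 * CSR-style prefix-sum offset array once so that each lookup is O(1):
 *
 *   #include <vector>
 *
 *   // offsets[i] = start of object i's adjacency block; offsets[n] = total adjacency length
 *   std::vector< int > csr_offsets_sketch( const int* num_edges, int n )
 *   {
 *       std::vector< int > offsets( n + 1, 0 );
 *       for( int i = 0; i < n; i++ )
 *           offsets[i + 1] = offsets[i] + num_edges[i];
 *       return offsets;
 *   }
 *
 *   // usage: the j-th neighbor of object 'id' would be NborGlobalId[ offsets[id] + j ]
 */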
02246 void mbGetPart( void* /* userDefinedData */,
02247                 int /* numGlobalIds */,
02248                 int /* numLids */,
02249                 int numObjs,
02250                 ZOLTAN_ID_PTR /* gids */,
02251                 ZOLTAN_ID_PTR lids,
02252                 int* part,
02253                 int* err )
02254 {
02255     int i, id;
02256     int next = 0;
02257 
02258     for( i = 0; i < numObjs; i++ )
02259     {
02260         id = lids[i];
02261 
02262         if( ( id < 0 ) || ( id >= NumPoints ) )
02263         {
02264             *err = 1;
02265             return;
02266         }
02267 
02268         part[next++] = Parts[id];
02269     }
02270 }
02271 
02272 // new methods for partition in parallel, used by migrate in iMOAB
02273 ErrorCode ZoltanPartitioner::partition_owned_cells( Range& primary,
02274                                                     std::multimap< int, int >& extraGraphEdges,
02275                                                     std::map< int, int > procs,
02276                                                     int& numNewPartitions,
02277                                                     std::map< int, Range >& distribution,
02278                                                     int met )
02279 {
02280     // start copy
02281     MeshTopoUtil mtu( mbImpl );
02282     Range adjs;
02283 
02284     std::vector< int > adjacencies;
02285     std::vector< int > ids;
02286     ids.resize( primary.size() );
02287     std::vector< int > length;
02288     std::vector< double > coords;
02289     std::vector< int > nbor_proc;
02290     // a fixed-size array suffices because the number of lower-dimensional neighbors is
02291     // limited by MBCN
02292     int neighbors[5 * MAX_SUB_ENTITIES];
02293     int neib_proc[5 * MAX_SUB_ENTITIES];
02294     double avg_position[3];
02295     int moab_id;
02296     int primaryDim = mbImpl->dimension_from_handle( *primary.rbegin() );
02297     // get the global id tag handle
02298     Tag gid = mbImpl->globalId_tag();
02299 
02300     ErrorCode rval = mbImpl->tag_get_data( gid, primary, &ids[0] );MB_CHK_ERR( rval );
02301 
02302     // mbpc is member in base class, PartitionerBase
02303     int rank = mbpc->rank();  // current rank, assigned to regular (on-processor) neighbors
02304     int i    = 0;
02305     for( Range::iterator rit = primary.begin(); rit != primary.end(); ++rit, i++ )
02306     {
02307         EntityHandle cell = *rit;
02308         // get bridge adjacencies for each cell
02309         if( 1 == met )
02310         {
02311             adjs.clear();
02312             rval = mtu.get_bridge_adjacencies( cell, ( primaryDim > 0 ? primaryDim - 1 : 3 ), primaryDim, adjs );MB_CHK_ERR( rval );
02313 
02314             // get the graph vertex ids of those
02315             if( !adjs.empty() )
02316             {
02317                 assert( adjs.size() < 5 * MAX_SUB_ENTITIES );
02318                 rval = mbImpl->tag_get_data( gid, adjs, neighbors );MB_CHK_ERR( rval );
02319             }
02320             // if adjacent to neighbor partitions, add to the list
02321             int size_adjs = (int)adjs.size();
02322             moab_id       = ids[i];
02323             for( int k = 0; k < size_adjs; k++ )
02324                 neib_proc[k] = rank;  // current rank
02325             if( extraGraphEdges.find( moab_id ) != extraGraphEdges.end() )
02326             {
02327                 // the current cell is adjacent to one or more cells in
02328                 // another partition
02329                 std::pair< std::multimap< int, int >::iterator, std::multimap< int, int >::iterator > ret;
02330                 ret = extraGraphEdges.equal_range( moab_id );
02331                 for( std::multimap< int, int >::iterator it = ret.first; it != ret.second; ++it )
02332                 {
02333                     int otherID          = it->second;
02334                     neighbors[size_adjs] = otherID;         // the id of the other cell, across partition
02335                     neib_proc[size_adjs] = procs[otherID];  // this is how we built this map, the cell id maps to
02336                                                             // what proc it came from
02337                     size_adjs++;
02338                 }
02339             }
02340             // copy those into adjacencies vector
02341             length.push_back( size_adjs );
02342             std::copy( neighbors, neighbors + size_adjs, std::back_inserter( adjacencies ) );
02343             std::copy( neib_proc, neib_proc + size_adjs, std::back_inserter( nbor_proc ) );
02344         }
02345         else if( 2 == met )
02346         {
02347             if( TYPE_FROM_HANDLE( cell ) == MBVERTEX )
02348             {
02349                 rval = mbImpl->get_coords( &cell, 1, avg_position );MB_CHK_ERR( rval );
02350             }
02351             else
02352             {
02353                 rval = mtu.get_average_position( cell, avg_position );MB_CHK_ERR( rval );
02354             }
02355             std::copy( avg_position, avg_position + 3, std::back_inserter( coords ) );
02356         }
02357     }
02358 
02359 #ifdef VERBOSE
02360     std::stringstream ff2;
02361     ff2 << "zoltanInput_" << mbpc->rank() << ".txt";
02362     std::ofstream ofs;
02363     ofs.open( ff2.str().c_str(), std::ofstream::out );
02364     ofs << "Length vector: " << std::endl;
02365     std::copy( length.begin(), length.end(), std::ostream_iterator< int >( ofs, ", " ) );
02366     ofs << std::endl;
02367     ofs << "Adjacencies vector: " << std::endl;
02368     std::copy( adjacencies.begin(), adjacencies.end(), std::ostream_iterator< int >( ofs, ", " ) );
02369     ofs << std::endl;
02370     ofs << "Moab_ids vector: " << std::endl;
02371     std::copy( ids.begin(), ids.end(), std::ostream_iterator< int >( ofs, ", " ) );
02372     ofs << std::endl;
02373     ofs << "Coords vector: " << std::endl;
02374     std::copy( coords.begin(), coords.end(), std::ostream_iterator< double >( ofs, ", " ) );
02375     ofs << std::endl;
02376     ofs.close();
02377 #endif
02378 
02379     // these are static variables in this file, used by the Zoltan callbacks
02380     Points = NULL;
02381     if( 1 != met ) Points = &coords[0];
02382     GlobalIds    = &ids[0];
02383     NumPoints    = (int)ids.size();
02384     NumEdges     = &length[0];
02385     NborGlobalId = &adjacencies[0];
02386     NborProcs    = &nbor_proc[0];
02387     ObjWeights   = NULL;
02388     EdgeWeights  = NULL;
02389     Parts        = NULL;
02390 
02391     float version;
02392     if( mbpc->rank() == 0 ) std::cout << "Initializing zoltan..." << std::endl;
02393 
02394     Zoltan_Initialize( argcArg, argvArg, &version );
02395 
02396     // Create Zoltan object.  This calls Zoltan_Create.
02397     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
02398 
02399     // set # requested partitions
02400     char buff[16];
02401     snprintf( buff, sizeof( buff ), "%d", numNewPartitions );
02402     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
02403     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02404 
02405     // request parts assignment
02406     retval = myZZ->Set_Param( "RETURN_LISTS", "PARTS" );
02407     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02408 
02409     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
02410     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
02411     // due to a bug in Zoltan, do not pass coordinates when the method is graph partitioning
02412     if( 2 == met )
02413     {
02414         myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
02415         myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
02416         SetRCB_Parameters();  // geometry
02417     }
02418     else if( 1 == met )
02419     {
02420         myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
02421         myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
02422         SetHypergraph_Parameters( "auto" );
02423     }
02424 
02425     // Perform the load balancing partitioning
02426 
02427     int changes;
02428     int numGidEntries;
02429     int numLidEntries;
02430     int num_import;
02431     ZOLTAN_ID_PTR import_global_ids, import_local_ids;
02432     int* import_procs;
02433     int* import_to_part;
02434     int num_export;
02435     ZOLTAN_ID_PTR export_global_ids, export_local_ids;
02436     int *assign_procs, *assign_parts;
02437 
02438     if( mbpc->rank() == 0 )
02439         std::cout << "Computing partition using method (1-graph, 2-geom):" << met << " for " << numNewPartitions
02440                   << " parts..." << std::endl;
02441 
02442 #ifndef NDEBUG
02443 #if 0
02444   static int counter=0; // it may be possible to call function multiple times in a simulation
02445   // give a way to not overwrite the files
02446   // it should work only with a modified version of Zoltan
02447   std::stringstream basename;
02448   if (1==met)
02449   {
02450     basename << "phg_" << counter++;
02451     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)(basename.str().c_str()), 1, 0, 1, 0);
02452   }
02453   else if (2==met)
02454   {
02455     basename << "rcb_" << counter++;
02456     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)(basename.str().c_str()), 1, 1, 0, 0);
02457   }
02458 #endif
02459 #endif
02460     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, num_import, import_global_ids, import_local_ids,
02461                                  import_procs, import_to_part, num_export, export_global_ids, export_local_ids,
02462                                  assign_procs, assign_parts );
02463     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02464 
02465 #ifdef VERBOSE
02466     std::stringstream ff3;
02467     ff3 << "zoltanOutput_" << mbpc->rank() << ".txt";
02468     std::ofstream ofs3;
02469     ofs3.open( ff3.str().c_str(), std::ofstream::out );
02470     ofs3 << " export elements on rank " << rank << " \n";
02471     ofs3 << "\t index \t gb_id \t local \t proc \t part \n";
02472     for( int k = 0; k < num_export; k++ )
02473     {
02474         ofs3 << "\t" << k << "\t" << export_global_ids[k] << "\t" << export_local_ids[k] << "\t" << assign_procs[k]
02475              << "\t" << assign_parts[k] << "\n";
02476     }
02477     ofs3.close();
02478 #endif
02479 
02480     // each local cell has now been assigned to a part
02481 
02482     assert( num_export == (int)primary.size() );
02483     for( i = 0; i < num_export; i++ )
02484     {
02485         EntityHandle cell = primary[export_local_ids[i]];
02486         distribution[assign_parts[i]].insert( cell );
02487     }
02488 
02489     Zoltan::LB_Free_Part( &import_global_ids, &import_local_ids, &import_procs, &import_to_part );
02490     Zoltan::LB_Free_Part( &export_global_ids, &export_local_ids, &assign_procs, &assign_parts );
02491 
02492     delete myZZ;
02493     myZZ = NULL;
02494 
02495     // clear arrays that were resized locally, to free up local memory
02496 
02497     std::vector< int >().swap( adjacencies );
02498     std::vector< int >().swap( ids );
02499     std::vector< int >().swap( length );
02500     std::vector< int >().swap( nbor_proc );
02501     std::vector< double >().swap( coords );
02502 
02503     return MB_SUCCESS;
02504 }
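/*
 * A minimal usage sketch for partition_owned_cells() (hypothetical names;
 * assumes an MPI build and a ZoltanPartitioner constructed as in the earlier
 * example).  The cross-partition graph edges are left empty for simplicity;
 * the caller receives a map from new part id to the local cells assigned to it:
 *
 *   #include <map>
 *   #include "moab/Interface.hpp"
 *   #include "moab/Range.hpp"
 *   #include "moab/ZoltanPartitioner.hpp"
 *
 *   moab::ErrorCode repartition_sketch( moab::ZoltanPartitioner& tool, moab::Interface& mb, int num_new_parts )
 *   {
 *       moab::Range owned;
 *       moab::ErrorCode rval = mb.get_entities_by_dimension( 0, 2, owned );  // e.g. owned 2D cells
 *       if( moab::MB_SUCCESS != rval ) return rval;
 *
 *       std::multimap< int, int > extra_edges;  // graph edges that cross the current partition (none here)
 *       std::map< int, int > owner_of_remote;   // remote cell id -> owning proc, matching extra_edges
 *       std::map< int, moab::Range > distribution;
 *
 *       // met = 1 selects the graph/hypergraph path, met = 2 the geometric (RCB) path
 *       rval = tool.partition_owned_cells( owned, extra_edges, owner_of_remote, num_new_parts, distribution, 1 );
 *       if( moab::MB_SUCCESS != rval ) return rval;
 *
 *       // distribution[p] now holds the local cells that should move to new part p
 *       return moab::MB_SUCCESS;
 *   }
 */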