MOAB: Mesh Oriented datABase  (version 5.4.1)
ZoltanPartitioner.cpp
00001 /* $Id$
00002  *
00003  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00004  * storing and accessing finite element mesh data.
00005  *
00006  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00007  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00008  * retains certain rights in this software.
00009  *
00010  * This library is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  */
00016 
00017 /** Get a mesh from MOAB and write a Zoltan partition set back into MOAB and to
00018  *  a file
00019  *
00020  *
00021  */
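/* Illustrative usage sketch (added; not part of the original source).  A minimal
 * serial driver could resemble the following; constructor defaults and the
 * ParallelComm setup performed by PartitionerBase are assumptions here, and the
 * mbpart tool shipped with MOAB remains the canonical driver.
 *
 *   MPI_Init( &argc, &argv );
 *   moab::Core mb;
 *   mb.load_file( "input.h5m" );
 *   moab::ZoltanPartitioner tool( &mb, false, argc, argv );
 *   // 4 parts of the 3-D elements with RCB, written as both sets and tags
 *   tool.partition_mesh_and_geometry( -1.0, 4, "RCB", NULL, 1.03, 3, true, true,
 *                                     0, 0, false, false, 0, false, false );
 *   mb.write_file( "partitioned.h5m" );
 *   MPI_Finalize();
 */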
00022 
00023 #include <iostream>
00024 #include <cassert>
00025 #include <sstream>
00026 #include <algorithm>
00027 
00028 #include "moab/ZoltanPartitioner.hpp"
00029 #include "moab/Interface.hpp"
00030 #include "Internals.hpp"
00031 #include "moab/Range.hpp"
00032 #include "moab/WriteUtilIface.hpp"
00033 #include "moab/MeshTopoUtil.hpp"
00034 #include "MBTagConventions.hpp"
00035 #include "moab/CN.hpp"
00036 // used for gnomonic projection
00037 #include "moab/IntxMesh/IntxUtils.hpp"
00038 
00039 #ifdef MOAB_HAVE_CGM
00040 #include "CGMConfig.h"
00041 #include <limits>
00042 #include "RefEntity.hpp"
00043 #include "RefFace.hpp"
00044 #include "RefEdge.hpp"
00045 #include "RefVertex.hpp"
00046 #include "Body.hpp"
00047 #include "TopologyEntity.hpp"
00048 #include "CastTo.hpp"
00049 #include "CABodies.hpp"
00050 #include "TDParallel.hpp"
00051 #include "TDUniqueId.hpp"
00052 #endif
00053 
00054 using namespace moab;
00055 
00056 #define RR \
00057     if( MB_SUCCESS != result ) return result
00058 
00059 static double* Points      = NULL;
00060 static int* GlobalIds      = NULL;
00061 static int NumPoints       = 0;
00062 static int* NumEdges       = NULL;
00063 static int* NborGlobalId   = NULL;
00064 static int* NborProcs      = NULL;
00065 static double* ObjWeights  = NULL;
00066 static double* EdgeWeights = NULL;
00067 static int* Parts          = NULL;
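// Note (added for clarity): these file-scope arrays hold the data served to the
// Zoltan query callbacks registered below (mbGetObjectList, mbGetObject,
// mbGetNumberOfEdges, mbGetEdgeList, ...); they are filled either by
// mbInitializePoints() or directly by repartition().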
00068 
00069 const bool debug = false;
00070 
00071 ZoltanPartitioner::ZoltanPartitioner( Interface* impl,
00072                                       const bool use_coords,
00073                                       int argc,
00074                                       char** argv
00075 #ifdef MOAB_HAVE_CGM
00076                                       ,
00077                                       GeometryQueryTool* gqt
00078 #endif
00079                                       )
00080     : PartitionerBase< int >( impl, use_coords ), myZZ( NULL ), myNumPts( 0 ), argcArg( argc ), argvArg( argv )
00081 #ifdef MOAB_HAVE_CGM
00082       ,
00083       gti( gqt )
00084 #endif
00085 {
00086 }
00087 
00088 ZoltanPartitioner::~ZoltanPartitioner()
00089 {
00090     if( NULL != myZZ ) delete myZZ;
00091 }
00092 
00093 ErrorCode ZoltanPartitioner::balance_mesh( const char* zmethod,
00094                                            const char* other_method,
00095                                            const bool write_as_sets,
00096                                            const bool write_as_tags )
00097 {
00098     if( NULL != zmethod && strcmp( zmethod, "RR" ) && strcmp( zmethod, "RCB" ) && strcmp( zmethod, "RIB" ) &&
00099         strcmp( zmethod, "HSFC" ) && strcmp( zmethod, "Hypergraph" ) && strcmp( zmethod, "PHG" ) &&
00100         strcmp( zmethod, "PARMETIS" ) && strcmp( zmethod, "OCTPART" ) )
00101     {
00102         std::cout << "ERROR node " << mbpc->proc_config().proc_rank() << ": Method must be "
00103                   << "RR, RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART" << std::endl;
00104         return MB_FAILURE;
00105     }
00106 
00107     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00108     std::vector< int > ids;     // point ids from MOAB
00109     std::vector< int > adjs, length;
00110     Range elems;
00111 
00112     // Get a mesh from MOAB and divide it across processors.
00113 
00114     ErrorCode result;
00115 
00116     if( mbpc->proc_config().proc_rank() == 0 )
00117     {
00118         result = assemble_graph( 3, pts, ids, adjs, length, elems );RR;
00119     }
00120 
00121     myNumPts = mbInitializePoints( (int)ids.size(), &pts[0], &ids[0], &adjs[0], &length[0] );
00122 
00123     // Initialize Zoltan.  This is a C call.  The simple C++ code
00124     // that creates Zoltan objects does not keep track of whether
00125     // Zoltan_Initialize has been called.
00126 
00127     float version;
00128 
00129     Zoltan_Initialize( argcArg, argvArg, &version );
00130 
00131     // Create Zoltan object.  This calls Zoltan_Create.
00132     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00133 
00134     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00135         SetRCB_Parameters();
00136     else if( !strcmp( zmethod, "RIB" ) )
00137         SetRIB_Parameters();
00138     else if( !strcmp( zmethod, "HSFC" ) )
00139         SetHSFC_Parameters();
00140     else if( !strcmp( zmethod, "Hypergraph" ) || !strcmp( zmethod, "PHG" ) )
00141         if( NULL == other_method )
00142             SetHypergraph_Parameters( "auto" );
00143         else
00144             SetHypergraph_Parameters( other_method );
00145     else if( !strcmp( zmethod, "PARMETIS" ) )
00146     {
00147         if( NULL == other_method )
00148             SetPARMETIS_Parameters( "RepartGDiffusion" );
00149         else
00150             SetPARMETIS_Parameters( other_method );
00151     }
00152     else if( !strcmp( zmethod, "OCTPART" ) )
00153     {
00154         if( NULL == other_method )
00155             SetOCTPART_Parameters( "2" );
00156         else
00157             SetOCTPART_Parameters( other_method );
00158     }
00159 
00160     // Callbacks:
00161 
00162     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00163     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00164     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00165     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00166     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00167     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00168 
00169     // Perform the load balancing partitioning
00170 
00171     int changes;
00172     int numGidEntries;
00173     int numLidEntries;
00174     int numImport;
00175     ZOLTAN_ID_PTR importGlobalIds;
00176     ZOLTAN_ID_PTR importLocalIds;
00177     int* importProcs;
00178     int* importToPart;
00179     int numExport;
00180     ZOLTAN_ID_PTR exportGlobalIds;
00181     ZOLTAN_ID_PTR exportLocalIds;
00182     int* exportProcs;
00183     int* exportToPart;
00184 
00185     int rc = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, numImport, importGlobalIds, importLocalIds,
00186                                  importProcs, importToPart, numExport, exportGlobalIds, exportLocalIds, exportProcs,
00187                                  exportToPart );
00188 
00189     rc = mbGlobalSuccess( rc );
00190 
00191     if( !rc )
00192     {
00193         mbPrintGlobalResult( "==============Result==============", myNumPts, numImport, numExport, changes );
00194     }
00195     else
00196     {
00197         return MB_FAILURE;
00198     }
00199 
00200     // take results & write onto MOAB partition sets
00201 
00202     int* assignment;
00203 
00204     mbFinalizePoints( (int)ids.size(), numExport, exportLocalIds, exportProcs, &assignment );
00205 
00206     if( mbpc->proc_config().proc_rank() == 0 )
00207     {
00208         result = write_partition( mbpc->proc_config().proc_size(), elems, assignment, write_as_sets, write_as_tags );
00209 
00210         if( MB_SUCCESS != result ) return result;
00211 
00212         free( (int*)assignment );
00213     }
00214 
00215     // Free the memory allocated for lists returned by LB_Partition()
00216 
00217     myZZ->LB_Free_Part( &importGlobalIds, &importLocalIds, &importProcs, &importToPart );
00218     myZZ->LB_Free_Part( &exportGlobalIds, &exportLocalIds, &exportProcs, &exportToPart );
00219 
00220     // Implementation note:  A Zoltan object contains an MPI communicator.
00221     //   When the Zoltan object is destroyed, it uses its MPI communicator.
00222     //   So it is important that the Zoltan object is destroyed before
00223     //   the MPI communicator is destroyed.  To ensure this, dynamically
00224     //   allocate the Zoltan object, so you can explicitly destroy it.
00225     //   If you create a Zoltan object on the stack, its destructor will
00226     //   be invoked at exit, possibly after the communicator's
00227     //   destructor.
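    //   Illustrative sketch (added; not from the original source):
    //     {
    //         Zoltan* zz = new Zoltan( comm );   // comm exists before zz
    //         ...
    //         delete zz;                         // destroy the Zoltan object first
    //     }                                      // comm may be freed afterwards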
00228 
00229     return MB_SUCCESS;
00230 }
00231 
00232 ErrorCode ZoltanPartitioner::repartition( std::vector< double >& x,
00233                                           std::vector< double >& y,
00234                                           std::vector< double >& z,
00235                                           int StartID,
00236                                           const char* zmethod,
00237                                           Range& localGIDs )
00238 {
00239     //
00240     int nprocs = mbpc->proc_config().proc_size();
00241     int rank   = mbpc->proc_config().proc_rank();
00242     clock_t t  = clock();
00243     // form pts and ids, as in assemble_graph
00244     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00245     pts.resize( x.size() * 3 );
00246     std::vector< int > ids;  // point ids from MOAB
00247     ids.resize( x.size() );
00248     for( size_t i = 0; i < x.size(); i++ )
00249     {
00250         pts[3 * i]     = x[i];
00251         pts[3 * i + 1] = y[i];
00252         pts[3 * i + 2] = z[i];
00253         ids[i]         = StartID + (int)i;
00254     }
00255     // do not return before partitioning is done: the static pointers below reference pts and ids
00256 
00257     Points       = &pts[0];
00258     GlobalIds    = &ids[0];
00259     NumPoints    = (int)x.size();
00260     NumEdges     = NULL;
00261     NborGlobalId = NULL;
00262     NborProcs    = NULL;
00263     ObjWeights   = NULL;
00264     EdgeWeights  = NULL;
00265     Parts        = NULL;
00266 
00267     float version;
00268     if( rank == 0 ) std::cout << "Initializing zoltan..." << std::endl;
00269 
00270     Zoltan_Initialize( argcArg, argvArg, &version );
00271 
00272     // Create Zoltan object.  This calls Zoltan_Create.
00273     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00274 
00275     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00276         SetRCB_Parameters();
00277     else if( !strcmp( zmethod, "RIB" ) )
00278         SetRIB_Parameters();
00279     else if( !strcmp( zmethod, "HSFC" ) )
00280         SetHSFC_Parameters();
00281 
00282     // set # requested partitions
00283     char buff[10];
00284     sprintf( buff, "%d", nprocs );
00285     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
00286     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00287 
00288     // request all, import and export
00289     retval = myZZ->Set_Param( "RETURN_LISTS", "ALL" );
00290     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00291 
00292     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00293     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00294     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00295     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00296     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00297     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00298 
00299     // Perform the load balancing partitioning
00300 
00301     int changes;
00302     int numGidEntries;
00303     int numLidEntries;
00304     int num_import;
00305     ZOLTAN_ID_PTR import_global_ids, import_local_ids;
00306     int* import_procs;
00307     int* import_to_part;
00308     int num_export;
00309     ZOLTAN_ID_PTR export_global_ids, export_local_ids;
00310     int *assign_procs, *assign_parts;
00311 
00312     if( rank == 0 )
00313         std::cout << "Computing partition using " << ( zmethod ? zmethod : "RCB" ) << " method for " << nprocs
00314                   << " processors..." << std::endl;
00315 
00316     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, num_import, import_global_ids, import_local_ids,
00317                                  import_procs, import_to_part, num_export, export_global_ids, export_local_ids,
00318                                  assign_procs, assign_parts );
00319     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00320 
00321     if( rank == 0 )
00322     {
00323         std::cout << " time to LB_partition " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00324         t = clock();
00325     }
00326 
00327     std::sort( import_global_ids, import_global_ids + num_import, std::greater< int >() );
00328     std::sort( export_global_ids, export_global_ids + num_export, std::greater< int >() );
00329 
00330     Range iniGids( (EntityHandle)StartID, (EntityHandle)StartID + x.size() - 1 );
00331     Range imported, exported;
00332     std::copy( import_global_ids, import_global_ids + num_import, range_inserter( imported ) );
00333     std::copy( export_global_ids, export_global_ids + num_export, range_inserter( exported ) );
00334     localGIDs = subtract( iniGids, exported );
00335     localGIDs = unite( localGIDs, imported );
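    // Illustrative example (added for clarity): if this rank initially owned GIDs
    // [StartID, StartID + 99], exported { StartID + 4, StartID + 5 } and imported
    // two GIDs from other ranks, localGIDs becomes the initial range minus the
    // exported ids plus the imported ones.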
00336     // Free data structures allocated by Zoltan::LB_Partition
00337     retval = myZZ->LB_Free_Part( &import_global_ids, &import_local_ids, &import_procs, &import_to_part );
00338     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00339     retval = myZZ->LB_Free_Part( &export_global_ids, &export_local_ids, &assign_procs, &assign_parts );
00340     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00341 
00342     return MB_SUCCESS;
00343 }
00344 
00345 ErrorCode ZoltanPartitioner::partition_inferred_mesh( EntityHandle sfileset,
00346                                                       size_t num_parts,
00347                                                       int part_dim,
00348                                                       const bool write_as_sets,
00349                                                       int projection_type )
00350 {
00351     ErrorCode result;
00352 
00353     moab::Range elverts;
00354     result = mbImpl->get_entities_by_dimension( sfileset, part_dim, elverts );RR;
00355 
00356     std::vector< double > elcoords( elverts.size() * 3 );
00357     result = mbImpl->get_coords( elverts, &elcoords[0] );RR;
00358 
00359     std::vector< std::vector< EntityHandle > > part_assignments( num_parts );
00360     int part, proc;
00361 
00362     // Loop over coordinates
00363     for( size_t iel = 0; iel < elverts.size(); iel++ )
00364     {
00365         // Gather coordinates into temporary array
00366         double* ecoords = &elcoords[iel * 3];
00367 
00368         // do a projection if needed
00369         if( projection_type > 0 ) IntxUtils::transform_coordinates( ecoords, projection_type );
00370 
00371         // Compute the coordinate's part assignment
00372         myZZ->LB_Point_PP_Assign( ecoords, proc, part );
00373 
00374         // Store the part assignment in the return array
00375         part_assignments[part].push_back( elverts[iel] );
00376     }
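    // Note (assumption based on Zoltan's documented behavior): LB_Point_PP_Assign
    // maps a coordinate to a (processor, part) pair using the geometric
    // decomposition computed by an earlier LB_Partition call; it requires a
    // geometric method (RCB/RIB/HSFC) with KEEP_CUTS enabled.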
00377 
00378     // get the partition set tag
00379     Tag part_set_tag = mbpc->partition_tag();
00380 
00381     // If partition sets already exist, remove them from the file set first.
00382     if( write_as_sets )
00383     {
00384         moab::Range oldpsets;
00385         result = mbImpl->get_entities_by_type_and_tag( sfileset, MBENTITYSET, &part_set_tag, NULL, 1, oldpsets,
00386                                                        Interface::UNION );RR;
00387         result = mbImpl->remove_entities( sfileset, oldpsets );RR;
00388     }
00389 
00390     size_t nparts_assigned = 0;
00391     for( size_t partnum = 0; partnum < num_parts; ++partnum )
00392     {
00393         std::vector< EntityHandle >& partvec = part_assignments[partnum];
00394 
00395         nparts_assigned += ( partvec.size() ? 1 : 0 );
00396 
00397         if( write_as_sets )
00398         {
00399             EntityHandle partNset;
00400             result = mbImpl->create_meshset( moab::MESHSET_SET, partNset );RR;
00401             if( partvec.size() )
00402             {
00403                 result = mbImpl->add_entities( partNset, &partvec[0], partvec.size() );RR;
00404             }
00405             result = mbImpl->add_parent_child( sfileset, partNset );RR;
00406 
00407             // part numbering should start from 0
00408             int ipartnum = (int)partnum;
00409 
00410             // assign partitions as a sparse tag by grouping elements under sets
00411             result = mbImpl->tag_set_data( part_set_tag, &partNset, 1, &ipartnum );RR;
00412         }
00413         else
00414         {
00415             /* assign as a dense vector to all elements
00416                allocate integer-size partitions */
00417             std::vector< int > assignment( partvec.size(),
00418                                            partnum );  // initialize all values to partnum
00419             result = mbImpl->tag_set_data( part_set_tag, &partvec[0], partvec.size(), &assignment[0] );RR;
00420         }
00421     }
00422 
00423     if( nparts_assigned != num_parts )
00424     {
00425         std::cout << "WARNING: The inference yielded lesser number of parts (" << nparts_assigned
00426                   << ") than requested by user (" << num_parts << ").\n";
00427     }
00428 
00429     return MB_SUCCESS;
00430 }
00431 
00432 ErrorCode ZoltanPartitioner::partition_mesh_and_geometry( const double part_geom_mesh_size,
00433                                                           const int nparts,
00434                                                           const char* zmethod,
00435                                                           const char* other_method,
00436                                                           double imbal_tol,
00437                                                           const int part_dim,
00438                                                           const bool write_as_sets,
00439                                                           const bool write_as_tags,
00440                                                           const int obj_weight,
00441                                                           const int edge_weight,
00442 #ifdef MOAB_HAVE_CGM
00443                                                           const bool part_surf,
00444                                                           const bool ghost,
00445 #else
00446                                                           const bool,
00447                                                           const bool,
00448 #endif
00449                                                           const int projection_type,
00450                                                           const bool recompute_rcb_box,
00451                                                           const bool print_time )
00452 {
00453     // should only be called in serial
00454     if( mbpc->proc_config().proc_size() != 1 )
00455     {
00456         std::cout << "ZoltanPartitioner::partition_mesh_and_geometry must be called in serial." << std::endl;
00457         return MB_FAILURE;
00458     }
00459     clock_t t = clock();
00460     if( NULL != zmethod && strcmp( zmethod, "RR" ) && strcmp( zmethod, "RCB" ) && strcmp( zmethod, "RIB" ) &&
00461         strcmp( zmethod, "HSFC" ) && strcmp( zmethod, "Hypergraph" ) && strcmp( zmethod, "PHG" ) &&
00462         strcmp( zmethod, "PARMETIS" ) && strcmp( zmethod, "OCTPART" ) )
00463     {
00464         std::cout << "ERROR node " << mbpc->proc_config().proc_rank() << ": Method must be "
00465                   << "RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART" << std::endl;
00466         return MB_FAILURE;
00467     }
00468 
00469     bool part_geom = false;
00470     if( 0 == strcmp( zmethod, "RR" ) || 0 == strcmp( zmethod, "RCB" ) || 0 == strcmp( zmethod, "RIB" ) ||
00471         0 == strcmp( zmethod, "HSFC" ) )
00472         part_geom = true;       // so no adjacency / edges needed
00473     std::vector< double > pts;  // x[0], y[0], z[0], ... from MOAB
00474     std::vector< int > ids;     // point ids from MOAB
00475     std::vector< int > adjs, length, parts;
00476     std::vector< double > obj_weights, edge_weights;
00477     Range elems;
00478 #ifdef MOAB_HAVE_CGM
00479     DLIList< RefEntity* > entities;
00480 #endif
00481 
00482     // Get a mesh from MOAB and divide it across processors.
00483 
00484     ErrorCode result = MB_SUCCESS;
00485 
00486     // short-circuit everything if RR partition is requested
00487     if( !strcmp( zmethod, "RR" ) )
00488     {
00489         if( part_geom_mesh_size < 0. )
00490         {
00491             // get all elements
00492             result = mbImpl->get_entities_by_dimension( 0, part_dim, elems );RR;
00493             if( elems.empty() ) return MB_FAILURE;
00494             // make a trivial assignment vector
00495             std::vector< int > assign_vec( elems.size() );
00496             int num_per = elems.size() / nparts;
00497             int extra   = elems.size() % nparts;
00498             if( extra ) num_per++;
00499             int nstart = 0;
00500             for( int i = 0; i < nparts; i++ )
00501             {
00502                 if( i == extra ) num_per--;
00503                 std::fill( &assign_vec[nstart], &assign_vec[nstart + num_per], i );
00504                 nstart += num_per;
00505             }
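            // Worked example (added for clarity): with 10 elements and nparts = 3,
            // num_per starts at 4 and extra = 1, so part 0 receives elements
            // [0,4); at i == extra, num_per drops to 3, so parts 1 and 2 receive
            // [4,7) and [7,10) -- part sizes 4, 3, 3.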
00506 
00507             result = write_partition( nparts, elems, &assign_vec[0], write_as_sets, write_as_tags );RR;
00508         }
00509         else
00510         {
00511 #ifdef MOAB_HAVE_CGM
00512             result = partition_round_robin( nparts );RR;
00513 #endif
00514         }
00515 
00516         return result;
00517     }
00518 
00519     std::cout << "Assembling graph..." << std::endl;
00520 
00521     if( part_geom_mesh_size < 0. )
00522     {
00523         // if (!part_geom) {
00524         result = assemble_graph( part_dim, pts, ids, adjs, length, elems, part_geom, projection_type );RR;
00525     }
00526     else
00527     {
00528 #ifdef MOAB_HAVE_CGM
00529         result = assemble_graph( part_dim, pts, ids, adjs, length, obj_weights, edge_weights, parts, entities,
00530                                  part_geom_mesh_size, nparts );RR;
00531 
00532         if( debug )
00533         {
00534             int n_ids = ids.size();
00535             entities.reset();
00536             int i_leng = 0;
00537             for( int i = 0; i < n_ids; i++ )
00538             {
00539                 std::cout << "graph_input_ids[" << i << "]=" << ids[i] << ",obj_weights=" << obj_weights[i]
00540                           << ",entity_id=" << entities.get_and_step()->id() << ",part=" << parts[i] << std::endl;
00541                 for( int j = 0; j < length[i]; j++ )
00542                 {
00543                     std::cout << "adjs[" << j << "]=" << adjs[i_leng] << ",edge_weights=" << edge_weights[i_leng]
00544                               << std::endl;
00545                     i_leng++;
00546                 }
00547             }
00548         }
00549 #endif
00550     }
00551     if( print_time )
00552     {
00553         std::cout << " time to assemble graph: " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00554         t = clock();
00555     }
00556     double* o_wgt = NULL;
00557     double* e_wgt = NULL;
00558     if( obj_weights.size() > 0 ) o_wgt = &obj_weights[0];
00559     if( edge_weights.size() > 0 ) e_wgt = &edge_weights[0];
00560 
00561     myNumPts = mbInitializePoints( (int)ids.size(), &pts[0], &ids[0], &adjs[0], &length[0], o_wgt, e_wgt, &parts[0],
00562                                    part_geom );
00563 
00564     // Initialize Zoltan.  This is a C call.  The simple C++ code
00565     // that creates Zoltan objects does not keep track of whether
00566     // Zoltan_Initialize has been called.
00567 
00568     if( print_time )
00569     {
00570         std::cout << " time to initialize points: " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00571         t = clock();
00572     }
00573     float version;
00574 
00575     std::cout << "Initializing zoltan..." << std::endl;
00576 
00577     Zoltan_Initialize( argcArg, argvArg, &version );
00578 
00579     // Create Zoltan object.  This calls Zoltan_Create.
00580     if( NULL == myZZ ) myZZ = new Zoltan( mbpc->comm() );
00581 
00582     if( NULL == zmethod || !strcmp( zmethod, "RCB" ) )
00583         SetRCB_Parameters( recompute_rcb_box );
00584     else if( !strcmp( zmethod, "RIB" ) )
00585         SetRIB_Parameters();
00586     else if( !strcmp( zmethod, "HSFC" ) )
00587         SetHSFC_Parameters();
00588     else if( !strcmp( zmethod, "Hypergraph" ) || !strcmp( zmethod, "PHG" ) )
00589     {
00590         if( NULL == other_method || ( other_method[0] == '\0' ) )
00591             SetHypergraph_Parameters( "auto" );
00592         else
00593             SetHypergraph_Parameters( other_method );
00594 
00595         if( imbal_tol )
00596         {
00597             std::ostringstream str;
00598             str << imbal_tol;
00599             myZZ->Set_Param( "IMBALANCE_TOL", str.str().c_str() );  // hypergraph imbalance tolerance
00600         }
00601     }
00602     else if( !strcmp( zmethod, "PARMETIS" ) )
00603     {
00604         if( NULL == other_method )
00605             SetPARMETIS_Parameters( "RepartGDiffusion" );
00606         else
00607             SetPARMETIS_Parameters( other_method );
00608     }
00609     else if( !strcmp( zmethod, "OCTPART" ) )
00610     {
00611         if( NULL == other_method )
00612             SetOCTPART_Parameters( "2" );
00613         else
00614             SetOCTPART_Parameters( other_method );
00615     }
00616 
00617     // set # requested partitions
00618     char buff[10];
00619     sprintf( buff, "%d", nparts );
00620     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
00621     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00622 
00623     // request only partition assignments
00624     retval = myZZ->Set_Param( "RETURN_LISTS", "PARTITION ASSIGNMENTS" );
00625     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00626 
00627     if( obj_weight > 0 )
00628     {
00629         std::ostringstream str;
00630         str << obj_weight;
00631         retval = myZZ->Set_Param( "OBJ_WEIGHT_DIM", str.str().c_str() );
00632         if( ZOLTAN_OK != retval ) return MB_FAILURE;
00633     }
00634 
00635     if( edge_weight > 0 )
00636     {
00637         std::ostringstream str;
00638         str << edge_weight;
00639         retval = myZZ->Set_Param( "EDGE_WEIGHT_DIM", str.str().c_str() );
00640         if( ZOLTAN_OK != retval ) return MB_FAILURE;
00641     }
00642 
00643     // Callbacks:
00644 
00645     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
00646     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
00647     myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
00648     myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
00649     myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
00650     myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
00651     if( part_geom_mesh_size > 0. )
00652     {
00653         myZZ->Set_Part_Multi_Fn( mbGetPart, NULL );
00654     }
00655 
00656     // Perform the load balancing partitioning
00657 
00658     int changes;
00659     int numGidEntries;
00660     int numLidEntries;
00661     int dumnum1;
00662     ZOLTAN_ID_PTR dum_local, dum_global;
00663     int *dum1, *dum2;
00664     int num_assign;
00665     ZOLTAN_ID_PTR assign_gid, assign_lid;
00666     int *assign_procs, *assign_parts;
00667 
00668     std::cout << "Computing partition using " << ( zmethod ? zmethod : "RCB" ) << " method for " << nparts
00669               << " processors..." << std::endl;
00670 #ifndef NDEBUG
00671 #if 0
00672   if (NULL == zmethod || !strcmp(zmethod, "RCB"))
00673     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)zmethod, 1, 1, 0, 0);
00674 
00675   if ( !strcmp(zmethod, "PHG"))
00676       Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)zmethod, 1, 0, 1, 1);
00677 #endif
00678 #endif
00679     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, dumnum1, dum_global, dum_local, dum1, dum2,
00680                                  num_assign, assign_gid, assign_lid, assign_procs, assign_parts );
00681     if( ZOLTAN_OK != retval ) return MB_FAILURE;
00682 
00683     if( print_time )
00684     {
00685         std::cout << " time to LB_partition " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00686         t = clock();
00687     }
00688     // take results & write onto MOAB partition sets
00689     std::cout << "Saving partition information to MOAB..." << std::endl;
00690 
00691     if( part_geom_mesh_size < 0. )
00692     {
00693         // if (!part_geom) {
00694         result = write_partition( nparts, elems, assign_parts, write_as_sets, write_as_tags );
00695     }
00696     else
00697     {
00698 #ifdef MOAB_HAVE_CGM
00699         result = write_partition( nparts, entities, assign_parts, obj_weights, part_surf, ghost );
00700 #endif
00701     }
00702 
00703     if( print_time )
00704     {
00705         std::cout << " time to write partition in memory " << ( clock() - t ) / (double)CLOCKS_PER_SEC << "s. \n";
00706         t = clock();
00707     }
00708 
00709     if( MB_SUCCESS != result ) return result;
00710 
00711     // Free the memory allocated for lists returned by LB_Partition()
00712     myZZ->LB_Free_Part( &assign_gid, &assign_lid, &assign_procs, &assign_parts );
00713 
00714     return MB_SUCCESS;
00715 }
00716 
00717 ErrorCode ZoltanPartitioner::include_closure()
00718 {
00719     ErrorCode result;
00720     Range ents;
00721     Range adjs;
00722     std::cout << "Adding closure..." << std::endl;
00723 
00724     for( Range::iterator rit = partSets.begin(); rit != partSets.end(); ++rit )
00725     {
00726 
00727         // get the top-dimensional entities in the part
00728         result = mbImpl->get_entities_by_handle( *rit, ents, true );RR;
00729 
00730         if( ents.empty() ) continue;
00731 
00732         // get intermediate-dimensional adjs and add to set
00733         for( int d = mbImpl->dimension_from_handle( *ents.begin() ) - 1; d >= 1; d-- )
00734         {
00735             adjs.clear();
00736             result = mbImpl->get_adjacencies( ents, d, false, adjs, Interface::UNION );RR;
00737             result = mbImpl->add_entities( *rit, adjs );RR;
00738         }
00739 
00740         // now get vertices and add to set; only need to do for ents, not for adjs
00741         adjs.clear();
00742         result = mbImpl->get_adjacencies( ents, 0, false, adjs, Interface::UNION );RR;
00743         result = mbImpl->add_entities( *rit, adjs );RR;
00744 
00745         ents.clear();
00746     }
00747 
00748     // now go over non-part entity sets, looking for contained entities
00749     Range sets, part_ents;
00750     result = mbImpl->get_entities_by_type( 0, MBENTITYSET, sets );RR;
00751     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
00752     {
00753         // skip parts
00754         if( partSets.find( *rit ) != partSets.end() ) continue;
00755 
00756         // get entities in this set, recursively
00757         ents.clear();
00758         result = mbImpl->get_entities_by_handle( *rit, ents, true );RR;
00759 
00760         // now check over all parts
00761         for( Range::iterator rit2 = partSets.begin(); rit2 != partSets.end(); ++rit2 )
00762         {
00763             part_ents.clear();
00764             result = mbImpl->get_entities_by_handle( *rit2, part_ents, false );RR;
00765             Range int_range = intersect( ents, part_ents );
00766             if( !int_range.empty() )
00767             {
00768                 // non-empty intersection, add to part set
00769                 result = mbImpl->add_entities( *rit2, &( *rit ), 1 );RR;
00770             }
00771         }
00772     }
00773 
00774     // finally, mark all the part sets as having the closure
00775     Tag closure_tag;
00776     result =
00777         mbImpl->tag_get_handle( "INCLUDES_CLOSURE", 1, MB_TYPE_INTEGER, closure_tag, MB_TAG_SPARSE | MB_TAG_CREAT );RR;
00778 
00779     std::vector< int > closure_vals( partSets.size(), 1 );
00780     result = mbImpl->tag_set_data( closure_tag, partSets, &closure_vals[0] );RR;
00781 
00782     return MB_SUCCESS;
00783 }
00784 
00785 ErrorCode ZoltanPartitioner::assemble_graph( const int dimension,
00786                                              std::vector< double >& coords,
00787                                              std::vector< int >& moab_ids,
00788                                              std::vector< int >& adjacencies,
00789                                              std::vector< int >& length,
00790                                              Range& elems,
00791                                              bool part_geom,
00792                                              int projection_type )
00793 {
00794     // assemble a graph with vertices equal to elements of specified dimension, edges
00795     // signified by list of other elements to which an element is connected
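    // Layout note (added for clarity): the graph is returned in a CSR-like form --
    // length[i] is the number of neighbors of the i-th element and adjacencies
    // stores all neighbor global ids back to back.  For three elements with
    // neighbor lists {2,3}, {1} and {1,4,5} this gives
    //   length      = [2, 1, 3]
    //   adjacencies = [2, 3, 1, 1, 4, 5]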
00796 
00797     // get the elements of that dimension
00798     ErrorCode result = mbImpl->get_entities_by_dimension( 0, dimension, elems );
00799     if( MB_SUCCESS != result || elems.empty() ) return result;
00800 
00801     // assign global ids
00802     if( assign_global_ids )
00803     {
00804         EntityHandle rootset = 0;
00805         result               = mbpc->assign_global_ids( rootset, dimension, 1, true, true );RR;
00806     }
00807 
00808     // now assemble the graph, calling MeshTopoUtil to get bridge adjacencies through d-1
00809     // dimensional neighbors
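    // For example (added for clarity), with dimension == 3 the bridge dimension is
    // 2, so two hexahedra become graph neighbors exactly when they share a face.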
00810     MeshTopoUtil mtu( mbImpl );
00811     Range adjs;
00812     // can use a fixed-size array because the number of lower-dimensional neighbors is limited
00813     // by MBCN
00814     int neighbors[5 * MAX_SUB_ENTITIES];
00815     double avg_position[3];
00816     int moab_id;
00817 
00818     // get the global id tag handle
00819     Tag gid = mbImpl->globalId_tag();
00820 
00821     for( Range::iterator rit = elems.begin(); rit != elems.end(); ++rit )
00822     {
00823 
00824         if( !part_geom )
00825         {
00826             // get bridge adjacencies
00827             adjs.clear();
00828             result = mtu.get_bridge_adjacencies( *rit, ( dimension > 0 ? dimension - 1 : 3 ), dimension, adjs );RR;
00829 
00830             // get the graph vertex ids of those
00831             if( !adjs.empty() )
00832             {
00833                 assert( adjs.size() < 5 * MAX_SUB_ENTITIES );
00834                 result = mbImpl->tag_get_data( gid, adjs, neighbors );RR;
00835             }
00836 
00837             // copy those into adjacencies vector
00838             length.push_back( (int)adjs.size() );
00839             std::copy( neighbors, neighbors + adjs.size(), std::back_inserter( adjacencies ) );
00840         }
00841 
00842         // get average position of vertices
00843         result = mtu.get_average_position( *rit, avg_position );RR;
00844 
00845         // get the graph vertex id for this element
00846         result = mbImpl->tag_get_data( gid, &( *rit ), 1, &moab_id );RR;
00847 
00848         // store the id; the averaged position is appended to the coords vector below
00849         moab_ids.push_back( moab_id );
00850         // transform coordinates to spherical coordinates, if requested
00851         if( projection_type > 0 ) IntxUtils::transform_coordinates( avg_position, projection_type );
00852 
00853         std::copy( avg_position, avg_position + 3, std::back_inserter( coords ) );
00854     }
00855 
00856     if( debug )
00857     {
00858         std::cout << "Length vector: " << std::endl;
00859         std::copy( length.begin(), length.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00860         std::cout << std::endl;
00861         std::cout << "Adjacencies vector: " << std::endl;
00862         std::copy( adjacencies.begin(), adjacencies.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00863         std::cout << std::endl;
00864         std::cout << "Moab_ids vector: " << std::endl;
00865         std::copy( moab_ids.begin(), moab_ids.end(), std::ostream_iterator< int >( std::cout, ", " ) );
00866         std::cout << std::endl;
00867         std::cout << "Coords vector: " << std::endl;
00868         std::copy( coords.begin(), coords.end(), std::ostream_iterator< double >( std::cout, ", " ) );
00869         std::cout << std::endl;
00870     }
00871 
00872     return MB_SUCCESS;
00873 }
00874 
00875 #ifdef MOAB_HAVE_CGM
00876 ErrorCode ZoltanPartitioner::assemble_graph( const int /* dimension */,
00877                                              std::vector< double >& /* coords */,
00878                                              std::vector< int >& moab_ids,
00879                                              std::vector< int >& adjacencies,
00880                                              std::vector< int >& length,
00881                                              std::vector< double >& obj_weights,
00882                                              std::vector< double >& edge_weights,
00883                                              std::vector< int >& parts,
00884                                              DLIList< RefEntity* >& entities,
00885                                              const double part_geom_mesh_size,
00886                                              const int n_part )
00887 {
00888     // get body vertex weights
00889     DLIList< RefEntity* > body_list;
00890     gti->ref_entity_list( "body", body_list, CUBIT_FALSE );
00891     DLIList< RefFace* > all_shared_surfs;
00892     int n_bodies       = body_list.size();
00893     int body_map_index = 1;
00894     int surf_map_index = n_bodies + 1;
00895     int n_bodies_proc  = n_bodies / n_part;  // # of entities per processor
00896     int i_bodies_proc  = n_bodies_proc;      // entity index limit for each processor
00897     int proc           = 0;
00898 
00899     body_list.reset();
00900     for( int i = 0; i < n_bodies; i++ )
00901     {
00902         // assign part in round-robin
00903         if( i == i_bodies_proc )
00904         {
00905             proc++;
00906             if( proc < n_part )
00907                 i_bodies_proc += n_bodies_proc;
00908             else
00909             {
00910                 proc %= n_part;
00911                 i_bodies_proc++;
00912             }
00913         }
00914         parts.push_back( proc );
00915 
00916         // volume mesh generation load estimation
00917         RefEntity* body = body_list.get_and_step();
00918         double vertex_w = body->measure();
00919         vertex_w /= part_geom_mesh_size * part_geom_mesh_size * part_geom_mesh_size;
00920         vertex_w = 3.8559e-5 * vertex_w * log( vertex_w );
00921 
00922         // add child non-interface face weights
00923         DLIList< RefFace* > shared_surfs;
00924         DLIList< RefFace* > faces;
00925         ( dynamic_cast< TopologyEntity* >( body ) )->ref_faces( faces );
00926         int n_face = faces.size();
00927         faces.reset();
00928         for( int j = 0; j < n_face; j++ )
00929         {
00930             RefFace* face      = faces.get_and_step();
00931             TopologyEntity* te = CAST_TO( face, TopologyEntity );
00932             if( te->bridge_manager()->number_of_bridges() > 1 )
00933             {  // shared
00934                 shared_surfs.append( face );
00935             }
00936             else
00937             {  // non-shared
00938                 vertex_w += estimate_face_mesh_load( face, face->measure() );
00939             }
00940         }
00941 
00942         int temp_index              = body_map_index++;
00943         body_vertex_map[body->id()] = temp_index;
00944         moab_ids.push_back( temp_index );   // add body as graph vertex
00945         obj_weights.push_back( vertex_w );  // add weight for body graph vertex
00946 
00947         if( debug )
00948         {
00949             std::cout << "body=" << body->id() << ",graph_id=" << temp_index << ",weight=" << vertex_w
00950                       << ",part=" << proc << std::endl;
00951         }
00952 
00953         // treat shared surfaces connected to this body
00954         int n_shared = shared_surfs.size();
00955         shared_surfs.reset();
00956         for( int k = 0; k < n_shared; k++ )
00957         {  // add adjacencies
00958             RefFace* face                       = shared_surfs.get_and_step();
00959             std::map< int, int >::iterator iter = surf_vertex_map.find( face->id() );
00960             if( iter != surf_vertex_map.end() )
00961             {
00962                 temp_index = ( *iter ).second;
00963             }
00964             else
00965             {
00966                 temp_index                  = surf_map_index++;
00967                 surf_vertex_map[face->id()] = temp_index;
00968                 all_shared_surfs.append( face );
00969             }
00970             adjacencies.push_back( temp_index );
00971             double tmp_sw = estimate_face_comm_load( face, part_geom_mesh_size );
00972             edge_weights.push_back( tmp_sw );
00973 
00974             if( debug )
00975             {
00976                 std::cout << "adjac=" << temp_index << ",weight=" << tmp_sw << std::endl;
00977             }
00978         }
00979         length.push_back( n_shared );
00980     }
00981     entities += body_list;
00982 
00983     // add shared surface as graph vertex
00984     int n_all_shared_surf = all_shared_surfs.size();
00985     all_shared_surfs.reset();
00986     for( int i = 0; i < n_all_shared_surf; i++ )
00987     {
00988         RefEntity* face = all_shared_surfs.get_and_step();
00989         moab_ids.push_back( surf_vertex_map[face->id()] );
00990         double face_mesh_load = estimate_face_mesh_load( face, part_geom_mesh_size );
00991         obj_weights.push_back( face_mesh_load );
00992 
00993         // set surface object graph edges
00994         int min_ind     = -1;
00995         double min_load = std::numeric_limits< double >::max();
00996
00997         DLIList< Body* > parents;
00998         dynamic_cast< TopologyEntity* >( face )->bodies( parents );
00999         int n_parents = parents.size();
01000         parents.reset();
01001         for( int j = 0; j < n_parents; j++ )
01002         {
01003             int body_gid = body_vertex_map[parents.get_and_step()->id()];
01004             adjacencies.push_back( body_gid );  // add adjacency
01005             edge_weights.push_back( estimate_face_comm_load( face, part_geom_mesh_size ) );
01006 
01007             if( obj_weights[body_gid - 1] < min_load )
01008             {
01009                 min_ind  = body_gid - 1;
01010                 min_load = obj_weights[min_ind];
01011             }
01012         }
01013         length.push_back( n_parents );
01014         entities.append( dynamic_cast< RefEntity* >( face ) );
01015         obj_weights[min_ind] += face_mesh_load;
01016         parts.push_back( parts[min_ind] );
01017 
01018         if( debug )
01019         {
01020             std::cout << "shared_surf=" << face->id() << ",graph_id=" << surf_vertex_map[face->id()]
01021                       << ",weight=" << face_mesh_load << ",part=" << parts[min_ind] << std::endl;
01022         }
01023     }
01024 
01025     for( size_t i = 0; i < obj_weights.size(); i++ )
01026         if( obj_weights[i] < 1. ) obj_weights[i] = 1.;
01027     for( size_t i = 0; i < edge_weights.size(); i++ )
01028         if( edge_weights[i] < 1. ) edge_weights[i] = 1.;
01029 
01030     return MB_SUCCESS;
01031 }
01032 
01033 double ZoltanPartitioner::estimate_face_mesh_load( RefEntity* face, const double h )
01034 {
01035     GeometryType type = CAST_TO( face, RefFace )->geometry_type();
01036     double n          = face->measure() / h / h;
01037     double n_logn     = n * log( n );
01038 
01039     if( type == PLANE_SURFACE_TYPE )
01040     {
01041         return 1.536168737505151e-4 * n_logn;
01042     }
01043     else if( type == SPLINE_SURFACE_TYPE )
01044     {
01045         return 5.910511018383144e-4 * n_logn;
01046     }
01047     else if( type == CONE_SURFACE_TYPE || type == SPHERE_SURFACE_TYPE || type == TORUS_SURFACE_TYPE )
01048     {
01049         return 2.352511671418708e-4 * n_logn;
01050     }
01051 
01052     return 0.0;
01053 }
01054 
01055 double ZoltanPartitioner::estimate_face_comm_load( RefEntity* face, const double h )
01056 {
01057     double peri = 0.;
01058 #if( ( CGM_MAJOR_VERSION == 14 && CGM_MINOR_VERSION > 2 ) || CGM_MAJOR_VERSION >= 15 )
01059     DLIList< DLIList< RefEdge* > > ref_edge_loops;
01060 #else
01061     DLIList< DLIList< RefEdge* >* > ref_edge_loops;
01062 #endif
01063     CAST_TO( face, RefFace )->ref_edge_loops( ref_edge_loops );
01064     ref_edge_loops.reset();
01065 
01066 #if( ( CGM_MAJOR_VERSION == 14 && CGM_MINOR_VERSION > 2 ) || CGM_MAJOR_VERSION >= 15 )
01067     for( int i = 0; i < ref_edge_loops.size(); i++ )
01068     {
01069         DLIList< RefEdge* > eloop = ref_edge_loops.get_and_step();
01070         eloop.reset();
01071         for( int j = 0; j < eloop.size(); j++ )
01072         {
01073             peri += eloop.get_and_step()->measure();
01074         }
01075     }
01076 #else
01077     for( int i = 0; i < ref_edge_loops.size(); i++ )
01078     {
01079         DLIList< RefEdge* >* eloop = ref_edge_loops.get_and_step();
01080         eloop->reset();
01081         for( int j = 0; j < eloop->size(); j++ )
01082         {
01083             peri += eloop->get_and_step()->measure();
01084         }
01085     }
01086 #endif
01087 
01088     // return 104*face->measure()/sqrt(3)/h/h + 56/3*peri/h;
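    // Worked example (added for clarity; note that 56 / 3 below is integer
    // division, i.e. 18): for measure() = 1, h = 0.1 and peri = 4 the estimate is
    // ( 104 / sqrt(3) / 0.01 + 18 * 4 / 0.1 ) / 700000 ~= ( 6004.4 + 720 ) / 700000
    // ~= 0.0096.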
01089     return ( 104 * face->measure() / sqrt( 3 ) / h / h + 56 / 3 * peri / h ) / 700000.;
01090 }
01091 
01092 ErrorCode ZoltanPartitioner::write_partition( const int nparts,
01093                                               DLIList< RefEntity* > entities,
01094                                               const int* assignment,
01095                                               std::vector< double >& obj_weights,
01096                                               const bool part_surf,
01097                                               const bool ghost )
01098 {
01099     ErrorCode result;
01100 
01101     // actuate CA_BODIES and turn on auto flag for other attributes
01102     CGMApp::instance()->attrib_manager()->register_attrib_type( CA_BODIES, "bodies", "BODIES", CABodies_creator,
01103                                                                 CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE,
01104                                                                 CUBIT_TRUE, CUBIT_FALSE );
01105     CGMApp::instance()->attrib_manager()->auto_flag( CUBIT_TRUE );
01106 
01107     // set partition info on bodies first
01108     int n_entities = entities.size();
01109     entities.reset();
01110     for( int i = 0; i < n_entities; i++ )
01111     {
01112         int proc = assignment[i];
01113         DLIList< int > shared_procs;
01114         RefEntity* entity = entities.get_and_step();
01115 
01116         if( entity->entity_type_info() == typeid( Body ) )
01117         {
01118             shared_procs.append( proc );
01119             TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01120             if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs );
01121 
01122             if( debug )
01123             {
01124                 std::cout << "body" << entity->id() << "_is_partitioned_to_p" << proc << std::endl;
01125             }
01126 
01127             // assign to volumes; this should be removed in the future
01128             DLIList< RefVolume* > volumes;
01129             ( dynamic_cast< TopologyEntity* >( entity ) )->ref_volumes( volumes );
01130             int n_vol = volumes.size();
01131             volumes.reset();
01132             for( int j = 0; j < n_vol; j++ )
01133             {
01134                 RefEntity* vol = volumes.get_and_step();
01135                 td_par         = (TDParallel*)vol->get_TD( &TDParallel::is_parallel );
01136                 if( td_par == NULL ) td_par = new TDParallel( vol, NULL, &shared_procs );
01137             }
01138         }
01139     }
01140 
01141     // set partition info to shared surfaces
01142     entities.reset();
01143     for( int i = 0; i < n_entities; i++ )
01144     {
01145         int proc = assignment[i];
01146         DLIList< int > shared_procs;
01147         RefEntity* entity = entities.get_and_step();
01148 
01149         if( entity->entity_type_info() == typeid( RefFace ) )
01150         {  // surface
01151             DLIList< Body* > parents;
01152             ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parents );
01153             int n_parents = parents.size();
01154             if( n_parents != 2 )
01155             {  // check # of parents
01156                 std::cerr << "# of parent bodies of interface surface should be 2." << std::endl;
01157                 return MB_FAILURE;
01158             }
01159             shared_procs.append( proc );  // local proc
01160             parents.reset();
01161             for( int j = 0; j < 2; j++ )
01162             {
01163                 RefEntity* parent     = parents.get_and_step();
01164                 TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01165 
01166                 if( parent_td == NULL )
01167                 {
01168                     std::cerr << "parent body should already have an assigned process." << std::endl;
01169                     return MB_FAILURE;
01170                 }
01171                 int temp_proc = parent_td->get_charge_proc();
01172                 if( temp_proc != proc ) shared_procs.append( temp_proc );  // remote proc
01173             }
01174 
01175             if( shared_procs.size() > 1 )
01176             {  // if it is shared by 2 processors
01177                 int merge_id       = TDUniqueId::get_unique_id( entity );
01178                 TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01179                 if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs, NULL, merge_id, 1 );
01180 
01181                 if( debug )
01182                 {
01183                     std::cout << "surf" << entity->id() << "_is_partitioned_to_p";
01184                     for( int j = 0; j < shared_procs.size(); j++ )
01185                     {
01186                         std::cout << "," << shared_procs[j];
01187                     }
01188                     std::cout << std::endl;
01189                 }
01190             }
01191         }
01192     }
01193 
01194     // do non-shared surface partition too
01195     if( part_surf )
01196     {
01197         result = partition_surface( nparts, entities, assignment, obj_weights );RR;
01198     }
01199 
01200     // partition shared edges and vertices
01201     result = partition_child_entities( 1, nparts, part_surf, ghost );RR;
01202     result = partition_child_entities( 0, nparts, part_surf );RR;
01203 
01204     if( debug )
01205     {
01206         entities.reset();
01207         for( int i = 0; i < n_entities; i++ )
01208         {
01209             RefEntity* entity = entities.get_and_step();
01210             if( entity->entity_type_info() == typeid( Body ) )
01211             {
01212                 TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01213                 CubitString st     = entity->entity_name();
01214                 DLIList< int >* sp = td_par->get_shared_proc_list();
01215                 int n_sp           = sp->size();
01216                 sp->reset();
01217                 for( int j = 0; j < n_sp; j++ )
01218                 {
01219                     std::cout << "partitioned_" << st.c_str() << ",proc=" << sp->get_and_step() << std::endl;
01220                 }
01221                 DLIList< int >* gp = td_par->get_ghost_proc_list();
01222                 int n_gp           = gp->size();
01223                 gp->reset();
01224                 for( int j = 0; j < n_gp; j++ )
01225                 {
01226                     std::cout << "partitioned_" << st.c_str() << ",ghost=" << gp->get_and_step() << std::endl;
01227                 }
01228             }
01229         }
01230     }
01231 
01232     return MB_SUCCESS;
01233 }
01234 
01235 ErrorCode ZoltanPartitioner::partition_surface( const int nparts,
01236                                                 DLIList< RefEntity* > entities,
01237                                                 const int* assignment,
01238                                                 std::vector< double >& obj_weights )
01239 {
01240     int i;
01241     double ave_load = 0.;
01242     double* loads   = new double[nparts];
01243     for( i = 0; i < nparts; i++ )
01244         loads[i] = 0.;
01245 
01246     int n_entities = entities.size();
01247     entities.reset();
01248     for( i = 0; i < n_entities; i++ )
01249     {
01250         loads[assignment[i]] += obj_weights[i];
01251         ave_load += obj_weights[i];
01252     }
01253     ave_load /= nparts;
01254 
01255     if( debug )
01256     {
01257         for( i = 0; i < nparts; i++ )
01258         {
01259             std::cout << "loads_before_surface_partition[" << i << "]=" << loads[i] << std::endl;
01260         }
01261     }
01262 
01263     int n_iter  = 0;
01264     bool b_stop = false;
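    // Summary (added for clarity): each pass moves at most one unpartitioned,
    // non-shared surface from the most loaded part to the least loaded one; the
    // loop stops once the maximum load is within 10% of the average load or after
    // 50 iterations.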
01265     do
01266     {
01267         // get max and min load processors
01268         int max_proc = nparts, min_proc = 0;
01269         double min_load = std::numeric_limits< double >::max();
01270         double max_load = -std::numeric_limits< double >::max();  // most negative double
01271         for( i = 0; i < nparts; i++ )
01272         {
01273             if( loads[i] < min_load )
01274             {
01275                 min_load = loads[i];
01276                 min_proc = i;
01277             }
01278             if( loads[i] > max_load )
01279             {
01280                 max_load = loads[i];
01281                 max_proc = i;
01282             }
01283         }
01284 
01285         double load_diff = max_load - ave_load;
01286         if( load_diff > ave_load / 10. )
01287         {
01288             bool b_moved = false;
01289             entities.reset();
01290             for( i = 0; i < n_entities; i++ )
01291             {
01292                 if( b_moved ) break;
01293                 if( assignment[i] == max_proc &&  // find maximum load processor bodies
01294                     entities[i]->entity_type_info() == typeid( Body ) )
01295                 {
01296                     DLIList< RefFace* > faces;
01297                     ( dynamic_cast< TopologyEntity* >( entities[i] ) )->ref_faces( faces );
01298                     int n_face = faces.size();
01299                     faces.reset();
01300                     for( int j = 0; j < n_face; j++ )
01301                     {
01302                         RefFace* face      = faces.get_and_step();
01303                         TopologyEntity* te = CAST_TO( face, TopologyEntity );
01304                         if( te->bridge_manager()->number_of_bridges() < 2 )
01305                         {  // non-shared
01306                             TDParallel* td_par = (TDParallel*)face->get_TD( &TDParallel::is_parallel );
01307                             if( td_par == NULL )
01308                             {  // only consider unpartitioned surfaces
01309                                 double face_load = face->measure();
01310                                 if( load_diff > min_load + face_load - ave_load )
01311                                 {
01312                                     loads[max_proc] -= face_load;
01313                                     loads[min_proc] += face_load;
01314                                     int merge_id = TDUniqueId::get_unique_id( face );
01315                                     DLIList< int > shared_procs;
01316                                     shared_procs.append( min_proc );
01317                                     shared_procs.append( max_proc );
01318                                     td_par = new TDParallel( face, NULL, &shared_procs, NULL, merge_id, 1 );
01319 
01320                                     if( debug )
01321                                     {
01322                                         std::cout << "non-shared surface " << face->id() << " is moved from p"
01323                                                   << max_proc << " to p" << min_proc << std::endl;
01324                                     }
01325                                     b_moved = true;
01326                                     break;
01327                                 }
01328                             }
01329                         }
01330                     }
01331                 }
01332             }
01333         }
01334         else
01335             b_stop = true;
01336 
01337         n_iter++;
01338     } while( !b_stop && n_iter < 50 );
01339 
01340     if( debug )
01341     {
01342         for( i = 0; i < nparts; i++ )
01343         {
01344             std::cout << "loads_after_surface_partition[" << i << "]=" << loads[i] << std::endl;
01345         }
01346     }
01347 
01348     delete[] loads;
01349     return MB_SUCCESS;
01350 }
01351 
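// Assign CGM bodies to n_part processors in round-robin order, using the body
// measure (plus any non-shared surface area) as an estimated load.  Interface
// child entities (faces, edges and vertices shared by more than one body) are
// then assigned to the lighter-loaded of their parents' processors; the result
// is stored on each entity as TDParallel tool data rather than returned.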
01352 ErrorCode ZoltanPartitioner::partition_round_robin( const int n_part )
01353 {
01354     int i, j, k;
01355     double* loads    = new double[n_part];  // estimated loads for each processor
01356     double* ve_loads = new double[n_part];  // estimated loads for each processor
01357     for( i = 0; i < n_part; i++ )
01358     {
01359         loads[i]    = 0.0;
01360         ve_loads[i] = 0.0;
01361     }
01362     DLIList< RefEntity* > body_entity_list;
01363     gti->ref_entity_list( "body", body_entity_list, CUBIT_FALSE );
01364     int n_entity      = body_entity_list.size();
01365     int n_entity_proc = n_entity / n_part;  // # of entities per processor
01366     int i_entity_proc = n_entity_proc;      // entity index limit for each processor
01367     int proc          = 0;
01368     RefEntity* entity;
01369 
01370     // assign processors to bodies
01371     body_entity_list.reset();
01372     for( i = 0; i < n_entity; i++ )
01373     {
01374         if( i == i_entity_proc )
01375         {
01376             proc++;
01377             if( proc < n_part )
01378                 i_entity_proc += n_entity_proc;
01379             else
01380             {
01381                 proc %= n_part;
01382                 i_entity_proc++;
01383             }
01384         }
01385 
01386         // assign to bodies
01387         entity = body_entity_list.get_and_step();
01388         DLIList< int > shared_procs;
01389         shared_procs.append( proc );
01390         TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01391         if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs );
01392         loads[proc] += entity->measure();
01393 
01394         // assign to volumes; this should be removed in the future
01395         DLIList< RefVolume* > volumes;
01396         ( dynamic_cast< TopologyEntity* >( entity ) )->ref_volumes( volumes );
01397         int n_vol = volumes.size();
01398         volumes.reset();
01399         for( j = 0; j < n_vol; j++ )
01400         {
01401             RefEntity* vol = volumes.get_and_step();
01402             td_par         = (TDParallel*)vol->get_TD( &TDParallel::is_parallel );
01403             if( td_par == NULL ) td_par = new TDParallel( vol, NULL, &shared_procs );
01404         }
01405 
01406         // add local surface load
01407         DLIList< RefFace* > faces;
01408         ( dynamic_cast< TopologyEntity* >( entity ) )->ref_faces( faces );
01409         int n_face = faces.size();
01410         faces.reset();
01411         for( j = 0; j < n_face; j++ )
01412         {
01413             RefFace* face      = faces.get_and_step();
01414             TopologyEntity* te = CAST_TO( face, TopologyEntity );
01415             if( te->bridge_manager()->number_of_bridges() < 2 ) loads[proc] = loads[proc] + face->measure();
01416         }
01417 
01418         // Get all child entities
01419         DLIList< RefEntity* > child_list;
01420         RefEntity::get_all_child_ref_entities( body_entity_list, child_list );
01421         int n_child = child_list.size();
01422 
01423         // assign processors to interface entities
01424         child_list.reset();
01425         for( j = 0; j < n_child; j++ )
01426         {
01427             entity             = child_list.get_and_step();
01428             TopologyEntity* te = CAST_TO( entity, TopologyEntity );
01429 
01430             if( te->bridge_manager()->number_of_bridges() > 1 )
01431             {
01432                 DLIList< Body* > parent_bodies;
01433                 DLIList< int > child_shared_procs;  // Shared processors of each child entity
01434                 ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parent_bodies );
01435                 int n_parent = parent_bodies.size();
01436 
01437                 for( k = 0; k < n_parent; k++ )
01438                 {
01439                     RefEntity* parent_vol = CAST_TO( parent_bodies.get_and_step(), RefEntity );
01440                     TDParallel* parent_td = (TDParallel*)parent_vol->get_TD( &TDParallel::is_parallel );
01441 
01442                     if( parent_td == NULL )
01443                     {
01444                         PRINT_ERROR( "parent Volume has to be partitioned." );
01445                         delete[] loads;
01446                         delete[] ve_loads;
01447                         return MB_FAILURE;
01448                     }
01449                     child_shared_procs.append_unique( parent_td->get_charge_proc() );
01450                 }
01451 
01452                 if( child_shared_procs.size() > 1 )
01453                 {  // if it is interface
01454                     td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01455                     if( td_par == NULL )
01456                     {
01457                         int merge_id = TDUniqueId::get_unique_id( entity );
01458                         if( entity->entity_type_info() == typeid( RefFace ) )
01459                         {  // face
01460                             if( child_shared_procs.size() != 2 )
01461                             {
01462                                 PRINT_ERROR( "Error: # of shared processors of interface surface "
01463                                              "should be 2." );
01464                                 delete[] loads;
01465                                 delete[] ve_loads;
01466                                 return MB_FAILURE;
01467                             }
01468 
01469                             // balance interface surface loads
01470                             if( loads[child_shared_procs[0]] > loads[child_shared_procs[1]] )
01471                                 child_shared_procs.reverse();
01472 
01473                             loads[child_shared_procs[0]] = loads[child_shared_procs[0]] + entity->measure();
01474                             td_par = new TDParallel( entity, NULL, &child_shared_procs, NULL, merge_id, 1 );
01475                         }  // face
01476                         else if( entity->entity_type_info() == typeid( RefEdge ) ||
01477                                  entity->entity_type_info() == typeid( RefVertex ) )
01478                         {  // edge or vertex
01479                             // balance interface surface loads
01480                             int min_p         = child_shared_procs[0];
01481                             int n_shared_proc = child_shared_procs.size();
01482                             for( k = 1; k < n_shared_proc; k++ )
01483                             {
01484                                 if( ve_loads[child_shared_procs[k]] < ve_loads[min_p] ) min_p = child_shared_procs[k];
01485                             }
01486                             ve_loads[min_p] = ve_loads[min_p] + entity->measure();
01487                             child_shared_procs.remove( min_p );
01488                             child_shared_procs.insert_first( min_p );
01489                             td_par = new TDParallel( entity, NULL, &child_shared_procs, NULL, merge_id, 1 );
01490                         }  // edge or vertex
01491                     }      // if (td_par == NULL)
01492                 }          // if it is interface
01493             }              // if (te->bridge_manager()->number_of_bridges() > 1)
01494         }                  // for (j = 0; j < n_child; j++)
01495     }                      // for (i = 0; i < n_entity; i++)
01496 
01497     delete[] loads;
01498     delete[] ve_loads;
01499 
01500     return MB_SUCCESS;
01501 }
01502 
01503 // partition child entities to one of parent entity shared processors
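// dim selects vertices (0) or curves (1).  For each entity shared by more than
// one parent, the minimum-load processor among the parents' shared processors is
// placed first in the entity's shared-processor list; with 'ghost' enabled, the
// shared processors are also recorded as ghost processors on the parent volumes.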
01504 ErrorCode ZoltanPartitioner::partition_child_entities( const int dim,
01505                                                        const int n_part,
01506                                                        const bool part_surf,
01507                                                        const bool ghost )
01508 {
01509     DLIList< RefEntity* > entity_list;
01510     if( dim == 0 )
01511         gti->ref_entity_list( "vertex", entity_list, CUBIT_FALSE );
01512     else if( dim == 1 )
01513         gti->ref_entity_list( "curve", entity_list, CUBIT_FALSE );
01514     else
01515     {
01516         std::cerr << "Dimension should be 0 or 1." << std::endl;
01517         return MB_FAILURE;
01518     }
01519 
01520     int i, j, k;
01521     int n_entity  = entity_list.size();
01522     double* loads = new double[n_part];
01523     for( i = 0; i < n_part; i++ )
01524         loads[i] = 0.;
01525     entity_list.reset();
01526 
01527     for( i = 0; i < n_entity; i++ )
01528     {  // for all entities
01529         RefEntity* entity  = entity_list.get_and_step();
01530         TopologyEntity* te = CAST_TO( entity, TopologyEntity );
01531 
01532         if( !part_surf && te->bridge_manager()->number_of_bridges() < 2 ) continue;
01533 
01534         DLIList< int > shared_procs;
01535         DLIList< Body* > parents;
01536         ( dynamic_cast< TopologyEntity* >( entity ) )->bodies( parents );
01537         int n_parents = parents.size();
01538         std::set< int > s_proc;
01539         parents.reset();
01540 
01541         // get shared processors from parent bodies
01542         for( j = 0; j < n_parents; j++ )
01543         {
01544             RefEntity* parent     = parents.get_and_step();
01545             TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01546 
01547             if( parent_td != NULL )
01548             {
01549                 DLIList< int >* parent_procs = parent_td->get_shared_proc_list();
01550                 int n_shared                 = parent_procs->size();
01551                 parent_procs->reset();
01552                 for( k = 0; k < n_shared; k++ )
01553                 {
01554                     int p = parent_procs->get_and_step();
01555                     s_proc.insert( p );
01556                 }
01557             }
01558         }
01559 
01560         if( part_surf )
01561         {  // also get shared procs from parent surfaces
01562             DLIList< RefFace* > parent_faces;
01563             ( dynamic_cast< TopologyEntity* >( entity ) )->ref_faces( parent_faces );
01564             int n_pface = parent_faces.size();
01565             parent_faces.reset();
01566 
01567             // get shared processors from parent faces
01568             for( j = 0; j < n_pface; j++ )
01569             {
01570                 RefEntity* parent     = parent_faces.get_and_step();
01571                 TDParallel* parent_td = (TDParallel*)parent->get_TD( &TDParallel::is_parallel );
01572 
01573                 if( parent_td != NULL )
01574                 {
01575                     DLIList< int >* parent_procs = parent_td->get_shared_proc_list();
01576                     int n_shared                 = parent_procs->size();
01577                     parent_procs->reset();
01578 
01579                     for( k = 0; k < n_shared; k++ )
01580                     {
01581                         int p = parent_procs->get_and_step();
01582                         s_proc.insert( p );
01583                     }
01584                 }
01585             }
01586         }
01587 
01588         // find the minimum load processor and put it
01589         // at the front of the shared_procs list
01590         if( s_proc.size() > 1 )
01591         {
01592             int min_proc                       = 0;
01593             double min_load                    = std::numeric_limits< double >::max();
01594             std::set< int >::iterator iter     = s_proc.begin();
01595             std::set< int >::iterator end_iter = s_proc.end();
01596             for( ; iter != end_iter; ++iter )
01597             {
01598                 if( loads[*iter] < min_load )
01599                 {
01600                     min_load = loads[*iter];
01601                     min_proc = *iter;
01602                 }
01603             }
01604 
01605             if( dim == 1 )
01606                 loads[min_proc] += entity->measure();
01607             else if( dim == 0 )
01608                 loads[min_proc] += 1.;
01609             shared_procs.append( min_proc );
01610             iter     = s_proc.begin();
01611             end_iter = s_proc.end();
01612             for( ; iter != end_iter; ++iter )
01613             {
01614                 if( *iter != min_proc )
01615                 {
01616                     shared_procs.append( *iter );
01617                 }
01618             }
01619 
01620             // add ghost geometries to shared processors for edge
01621             if( ghost )
01622             {
01623                 parents.reset();
01624                 for( j = 0; j < n_parents; j++ )
01625                 {  // for all parent bodies
01626                     RefEntity* parent_vol = CAST_TO( parents.get_and_step(), RefEntity );
01627                     TDParallel* parent_td = (TDParallel*)parent_vol->get_TD( &TDParallel::is_parallel );
01628                     int n_shared_proc     = shared_procs.size();
01629 
01630                     for( k = 0; k < n_shared_proc; k++ )
01631                     {
01632                         parent_td->add_ghost_proc( shared_procs[k] );
01633                     }
01634                 }
01635             }
01636 
01637             // initialize tool data
01638             int merge_id       = TDUniqueId::get_unique_id( entity );
01639             TDParallel* td_par = (TDParallel*)entity->get_TD( &TDParallel::is_parallel );
01640             if( td_par == NULL ) td_par = new TDParallel( entity, NULL, &shared_procs, NULL, merge_id, 1 );
01641         }
01642     }
01643 
01644     delete[] loads;
01645     return MB_SUCCESS;
01646 }
01647 #endif
01648 
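// Store a computed assignment back into MOAB, either as PARALLEL_PARTITION
// tagged meshsets (one set per part, containing that part's elements), as an
// integer tag directly on each element, or both.  A minimal calling sketch,
// assuming 'tool' is an already constructed ZoltanPartitioner and 'elems' and
// 'assignment' come from an earlier partition step:
//
//   ErrorCode rval = tool.write_partition( nparts, elems, assignment,
//                                          true /*write_as_sets*/,
//                                          false /*write_as_tags*/ );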
01649 ErrorCode ZoltanPartitioner::write_partition( const int nparts,
01650                                               Range& elems,
01651                                               const int* assignment,
01652                                               const bool write_as_sets,
01653                                               const bool write_as_tags )
01654 {
01655     ErrorCode result;
01656 
01657     // get the partition set tag
01658     Tag part_set_tag;
01659     int dum_id = -1, i;
01660     result     = mbImpl->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_set_tag,
01661                                          MB_TAG_SPARSE | MB_TAG_CREAT, &dum_id );RR;
01662 
01663     // get any sets already with this tag, and clear them
01664     Range tagged_sets;
01665     result =
01666         mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &part_set_tag, NULL, 1, tagged_sets, Interface::UNION );RR;
01667     if( !tagged_sets.empty() )
01668     {
01669         result = mbImpl->clear_meshset( tagged_sets );RR;
01670         if( !write_as_sets )
01671         {
01672             result = mbImpl->tag_delete_data( part_set_tag, tagged_sets );RR;
01673         }
01674     }
01675 
01676     if( write_as_sets )
01677     {
01678         // first, create partition sets and store in vector
01679         partSets.clear();
01680 
01681         if( nparts > (int)tagged_sets.size() )
01682         {
01683             // too few partition sets - create missing ones
01684             int num_new = nparts - tagged_sets.size();
01685             for( i = 0; i < num_new; i++ )
01686             {
01687                 EntityHandle new_set;
01688                 result = mbImpl->create_meshset( MESHSET_SET, new_set );RR;
01689                 tagged_sets.insert( new_set );
01690             }
01691         }
01692         else if( nparts < (int)tagged_sets.size() )
01693         {
01694             // too many partition sets - delete extras
01695             int num_del = tagged_sets.size() - nparts;
01696             for( i = 0; i < num_del; i++ )
01697             {
01698                 EntityHandle old_set = tagged_sets.pop_back();
01699                 result               = mbImpl->delete_entities( &old_set, 1 );RR;
01700             }
01701         }
01702         // assign partition sets to vector
01703         partSets.swap( tagged_sets );
01704 
01705         // write a tag to those sets denoting they're partition sets, with a value of the
01706         // proc number
01707         int* dum_ids = new int[nparts];
01708         for( i = 0; i < nparts; i++ )
01709             dum_ids[i] = i;
01710 
01711         result = mbImpl->tag_set_data( part_set_tag, partSets, dum_ids );RR;
01712         // found out by valgrind when we run mbpart
01713         delete[] dum_ids;
01714         dum_ids = NULL;
01715 
01716         // assign entities to the relevant sets
01717         std::vector< EntityHandle > tmp_part_sets;
01718         // int N = (int)elems.size();
01719         std::copy( partSets.begin(), partSets.end(), std::back_inserter( tmp_part_sets ) );
01720         /*Range::reverse_iterator riter;
01721         for (i = N-1, riter = elems.rbegin(); riter != elems.rend(); ++riter, i--) {
01722           int assigned_part = assignment[i];
01723           part_ranges[assigned_part].insert(*riter);
01724           //result = mbImpl->add_entities(tmp_part_sets[assignment[i]], &(*rit), 1); RR;
01725         }*/
01726 
01727         Range::iterator rit;
01728         for( i = 0, rit = elems.begin(); rit != elems.end(); ++rit, i++ )
01729         {
01730             result = mbImpl->add_entities( tmp_part_sets[assignment[i]], &( *rit ), 1 );RR;
01731         }
01732         /*for (i=0; i<nparts; i++)
01733         {
01734           result = mbImpl->add_entities(tmp_part_sets[i], part_ranges[i]); RR;
01735         }
01736         delete [] part_ranges;*/
01737         // check for empty sets, warn if there are any
01738         Range empty_sets;
01739         for( rit = partSets.begin(); rit != partSets.end(); ++rit )
01740         {
01741             int num_ents = 0;
01742             result       = mbImpl->get_number_entities_by_handle( *rit, num_ents );
01743             if( MB_SUCCESS != result || !num_ents ) empty_sets.insert( *rit );
01744         }
01745         if( !empty_sets.empty() )
01746         {
01747             std::cout << "WARNING: " << empty_sets.size() << " empty sets in partition: ";
01748             for( rit = empty_sets.begin(); rit != empty_sets.end(); ++rit )
01749                 std::cout << *rit << " ";
01750             std::cout << std::endl;
01751         }
01752     }
01753 
01754     if( write_as_tags )
01755     {
01756         // write the assignment directly as an integer tag on the elements
01757         result = mbImpl->tag_set_data( part_set_tag, elems, assignment );RR;
01758     }
01759 
01760     return MB_SUCCESS;
01761 }
01762 
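// The Set*_Parameters helpers below map a partitioning method onto the
// corresponding Zoltan LB_METHOD and the per-method options this partitioner
// relies on (DEBUG_LEVEL 0, KEEP_CUTS so the decomposition can be reused to
// infer partitions, etc.).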
01763 void ZoltanPartitioner::SetRCB_Parameters( const bool recompute_rcb_box )
01764 {
01765     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nRecursive Coordinate Bisection" << std::endl;
01766     // General parameters:
01767 
01768     myZZ->Set_Param( "DEBUG_LEVEL", "0" );  // no debug messages
01769     myZZ->Set_Param( "LB_METHOD", "RCB" );  // recursive coordinate bisection
01770     // myZZ->Set_Param( "RCB_RECOMPUTE_BOX", "1" );  // recompute RCB box if needed ?
01771 
01772     // RCB parameters:
01773 
01774     myZZ->Set_Param( "RCB_OUTPUT_LEVEL", "1" );
01775     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition so that we can infer partitions
01776     // myZZ->Set_Param("RCB_RECTILINEAR_BLOCKS", "1"); // don't split point on boundary
01777     if( recompute_rcb_box ) myZZ->Set_Param( "RCB_RECOMPUTE_BOX", "1" );
01778 }
01779 
01780 void ZoltanPartitioner::SetRIB_Parameters()
01781 {
01782     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nRecursive Inertial Bisection" << std::endl;
01783     // General parameters:
01784 
01785     myZZ->Set_Param( "DEBUG_LEVEL", "0" );  // no debug messages
01786     myZZ->Set_Param( "LB_METHOD", "RIB" );  // Recursive Inertial Bisection
01787 
01788     // RIB parameters:
01789 
01790     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition
01791     myZZ->Set_Param( "AVERAGE_CUTS", "1" );
01792 }
01793 
01794 void ZoltanPartitioner::SetHSFC_Parameters()
01795 {
01796     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nHilbert Space Filling Curve" << std::endl;
01797     // General parameters:
01798 
01799     myZZ->Set_Param( "DEBUG_LEVEL", "0" );   // no debug messages
01800     myZZ->Set_Param( "LB_METHOD", "HSFC" );  // perform Hilbert space filling curve
01801 
01802     // HSFC parameters:
01803 
01804     myZZ->Set_Param( "KEEP_CUTS", "1" );  // save decomposition
01805 }
01806 
01807 void ZoltanPartitioner::SetHypergraph_Parameters( const char* phg_method )
01808 {
01809     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nHypergraph (or PHG): " << std::endl;
01810     // General parameters:
01811 
01812     myZZ->Set_Param( "DEBUG_LEVEL", "0" );         // no debug messages
01813     myZZ->Set_Param( "LB_METHOD", "Hypergraph" );  // Hypergraph (or PHG)
01814 
01815     // Hypergraph (or PHG) parameters:
01816     myZZ->Set_Param( "PHG_COARSEPARTITION_METHOD", phg_method );  // CoarsePartitionMethod
01817 }
01818 
01819 void ZoltanPartitioner::SetPARMETIS_Parameters( const char* parmetis_method )
01820 {
01821     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nPARMETIS: " << parmetis_method << std::endl;
01822     // General parameters:
01823 
01824     myZZ->Set_Param( "DEBUG_LEVEL", "0" );       // no debug messages
01825     myZZ->Set_Param( "LB_METHOD", "PARMETIS" );  // the ParMETIS library
01826 
01827     // PARMETIS parameters:
01828 
01829     myZZ->Set_Param( "PARMETIS_METHOD", parmetis_method );  // method in the library
01830 }
01831 
01832 void ZoltanPartitioner::SetOCTPART_Parameters( const char* oct_method )
01833 {
01834     if( mbpc->proc_config().proc_rank() == 0 ) std::cout << "\nOctree Partitioning: " << oct_method << std::endl;
01835     // General parameters:
01836 
01837     myZZ->Set_Param( "DEBUG_LEVEL", "0" );      // no debug messages
01838     myZZ->Set_Param( "LB_METHOD", "OCTPART" );  // octree partitioning
01839 
01840     // OCTPART parameters:
01841 
01842     myZZ->Set_Param( "OCT_METHOD", oct_method );  // the SFC to be used
01843     myZZ->Set_Param( "OCT_OUTPUT_LEVEL", "3" );
01844 }
01845 
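// Scatter the serial problem description (coordinates, global ids, adjacency
// lists and optional weights, all assembled on rank 0) across the communicator,
// and point the file-scope arrays used by the Zoltan callbacks at each rank's
// local portion.  Returns the number of points owned by the calling rank.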
01846 int ZoltanPartitioner::mbInitializePoints( int npts,
01847                                            double* pts,
01848                                            int* ids,
01849                                            int* adjs,
01850                                            int* length,
01851                                            double* obj_weights,
01852                                            double* edge_weights,
01853                                            int* parts,
01854                                            bool part_geom )
01855 {
01856     unsigned int i;
01857     int j;
01858     int *numPts, *nborProcs = NULL;
01859     int sum, ptsPerProc, ptsAssigned, mySize;
01860     MPI_Status stat;
01861     double* sendPts;
01862     int* sendIds;
01863     int* sendEdges  = NULL;
01864     int* sendNborId = NULL;
01865     int* sendProcs;
01866 
01867     if( mbpc->proc_config().proc_rank() == 0 )
01868     {
01869         /* divide pts to start */
01870 
01871         numPts      = (int*)malloc( sizeof( int ) * mbpc->proc_config().proc_size() );
01872         ptsPerProc  = npts / mbpc->proc_config().proc_size();
01873         ptsAssigned = 0;
01874 
01875         for( i = 0; i < mbpc->proc_config().proc_size() - 1; i++ )
01876         {
01877             numPts[i] = ptsPerProc;
01878             ptsAssigned += ptsPerProc;
01879         }
01880 
01881         numPts[mbpc->proc_config().proc_size() - 1] = npts - ptsAssigned;
01882 
01883         mySize  = numPts[mbpc->proc_config().proc_rank()];
01884         sendPts = pts + ( 3 * numPts[0] );
01885         sendIds = ids + numPts[0];
01886         sum     = 0;  // possible no adjacency sent
01887         if( !part_geom )
01888         {
01889             sendEdges = length + numPts[0];
01890 
01891             for( j = 0; j < numPts[0]; j++ )
01892                 sum += length[j];
01893 
01894             sendNborId = adjs + sum;
01895 
01896             for( j = numPts[0]; j < npts; j++ )
01897                 sum += length[j];
01898 
01899             nborProcs = (int*)malloc( sizeof( int ) * sum );
01900         }
01901         for( j = 0; j < sum; j++ )
01902             if( ( i = adjs[j] / ptsPerProc ) < mbpc->proc_config().proc_size() )
01903                 nborProcs[j] = i;
01904             else
01905                 nborProcs[j] = mbpc->proc_config().proc_size() - 1;
01906 
01907         sendProcs = nborProcs + ( sendNborId - adjs );
01908 
01909         for( i = 1; i < mbpc->proc_config().proc_size(); i++ )
01910         {
01911             MPI_Send( &numPts[i], 1, MPI_INT, i, 0x00, mbpc->comm() );
01912             MPI_Send( sendPts, 3 * numPts[i], MPI_DOUBLE, i, 0x01, mbpc->comm() );
01913             MPI_Send( sendIds, numPts[i], MPI_INT, i, 0x03, mbpc->comm() );
01914             MPI_Send( sendEdges, numPts[i], MPI_INT, i, 0x06, mbpc->comm() );
01915             sum = 0;
01916 
01917             for( j = 0; j < numPts[i]; j++ )
01918                 sum += sendEdges[j];
01919 
01920             MPI_Send( sendNborId, sum, MPI_INT, i, 0x07, mbpc->comm() );
01921             MPI_Send( sendProcs, sum, MPI_INT, i, 0x08, mbpc->comm() );
01922             sendPts += ( 3 * numPts[i] );
01923             sendIds += numPts[i];
01924             sendEdges += numPts[i];
01925             sendNborId += sum;
01926             sendProcs += sum;
01927         }
01928 
01929         free( numPts );
01930     }
01931     else
01932     {
01933         MPI_Recv( &mySize, 1, MPI_INT, 0, 0x00, mbpc->comm(), &stat );
01934         pts    = (double*)malloc( sizeof( double ) * 3 * mySize );
01935         ids    = (int*)malloc( sizeof( int ) * mySize );
01936         length = (int*)malloc( sizeof( int ) * mySize );
01937         if( obj_weights != NULL ) obj_weights = (double*)malloc( sizeof( double ) * mySize );
01938         MPI_Recv( pts, 3 * mySize, MPI_DOUBLE, 0, 0x01, mbpc->comm(), &stat );
01939         MPI_Recv( ids, mySize, MPI_INT, 0, 0x03, mbpc->comm(), &stat );
01940         MPI_Recv( length, mySize, MPI_INT, 0, 0x06, mbpc->comm(), &stat );
01941         sum = 0;
01942 
01943         for( j = 0; j < mySize; j++ )
01944             sum += length[j];
01945 
01946         adjs = (int*)malloc( sizeof( int ) * sum );
01947         if( edge_weights != NULL ) edge_weights = (double*)malloc( sizeof( double ) * sum );
01948         nborProcs = (int*)malloc( sizeof( int ) * sum );
01949         MPI_Recv( adjs, sum, MPI_INT, 0, 0x07, mbpc->comm(), &stat );
01950         MPI_Recv( nborProcs, sum, MPI_INT, 0, 0x08, mbpc->comm(), &stat );
01951     }
01952 
01953     Points       = pts;
01954     GlobalIds    = ids;
01955     NumPoints    = mySize;
01956     NumEdges     = length;
01957     NborGlobalId = adjs;
01958     NborProcs    = nborProcs;
01959     ObjWeights   = obj_weights;
01960     EdgeWeights  = edge_weights;
01961     Parts        = parts;
01962 
01963     return mySize;
01964 }
01965 
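// Gather the per-point part assignments back onto rank 0: each rank initializes
// its local entries to its own rank, overwrites them from Zoltan's export lists,
// and rank 0 concatenates the pieces into the single array returned through
// 'assignment'.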
01966 void ZoltanPartitioner::mbFinalizePoints( int npts,
01967                                           int numExport,
01968                                           ZOLTAN_ID_PTR exportLocalIDs,
01969                                           int* exportProcs,
01970                                           int** assignment )
01971 {
01972     int* MyAssignment;
01973     int i;
01974     int numPts;
01975     MPI_Status stat;
01976     int* recvA;
01977 
01978     /* assign pts to start */
01979 
01980     if( mbpc->proc_config().proc_rank() == 0 )
01981         MyAssignment = (int*)malloc( sizeof( int ) * npts );
01982     else
01983         MyAssignment = (int*)malloc( sizeof( int ) * NumPoints );
01984 
01985     for( i = 0; i < NumPoints; i++ )
01986         MyAssignment[i] = mbpc->proc_config().proc_rank();
01987 
01988     for( i = 0; i < numExport; i++ )
01989         MyAssignment[exportLocalIDs[i]] = exportProcs[i];
01990 
01991     if( mbpc->proc_config().proc_rank() == 0 )
01992     {
01993         /* collect pts */
01994         recvA = MyAssignment + NumPoints;
01995 
01996         for( i = 1; i < (int)mbpc->proc_config().proc_size(); i++ )
01997         {
01998             MPI_Recv( &numPts, 1, MPI_INT, i, 0x04, mbpc->comm(), &stat );
01999             MPI_Recv( recvA, numPts, MPI_INT, i, 0x05, mbpc->comm(), &stat );
02000             recvA += numPts;
02001         }
02002 
02003         *assignment = MyAssignment;
02004     }
02005     else
02006     {
02007         MPI_Send( &NumPoints, 1, MPI_INT, 0, 0x04, mbpc->comm() );
02008         MPI_Send( MyAssignment, NumPoints, MPI_INT, 0, 0x05, mbpc->comm() );
02009         free( MyAssignment );
02010     }
02011 }
02012 
02013 int ZoltanPartitioner::mbGlobalSuccess( int rc )
02014 {
02015     int fail = 0;
02016     unsigned int i;
02017     int* vals = (int*)malloc( mbpc->proc_config().proc_size() * sizeof( int ) );
02018 
02019     MPI_Allgather( &rc, 1, MPI_INT, vals, 1, MPI_INT, mbpc->comm() );
02020 
02021     for( i = 0; i < mbpc->proc_config().proc_size(); i++ )
02022     {
02023         if( vals[i] != ZOLTAN_OK )
02024         {
02025             if( 0 == mbpc->proc_config().proc_rank() )
02026             {
02027                 mbShowError( vals[i], "Result on process " );
02028             }
02029             fail = 1;
02030         }
02031     }
02032 
02033     free( vals );
02034     return fail;
02035 }
02036 
02037 void ZoltanPartitioner::mbPrintGlobalResult( const char* s, int begin, int import, int exp, int change )
02038 {
02039     unsigned int i;
02040     int* v1 = (int*)malloc( 4 * sizeof( int ) );
02041     int* v2 = NULL;
02042     int* v;
02043 
02044     v1[0] = begin;
02045     v1[1] = import;
02046     v1[2] = exp;
02047     v1[3] = change;
02048 
02049     if( mbpc->proc_config().proc_rank() == 0 )
02050     {
02051         v2 = (int*)malloc( 4 * mbpc->proc_config().proc_size() * sizeof( int ) );
02052     }
02053 
02054     MPI_Gather( v1, 4, MPI_INT, v2, 4, MPI_INT, 0, mbpc->comm() );
02055 
02056     if( mbpc->proc_config().proc_rank() == 0 )
02057     {
02058         fprintf( stdout, "======%s======\n", s );
02059         for( i = 0, v = v2; i < mbpc->proc_config().proc_size(); i++, v += 4 )
02060         {
02061             fprintf( stdout, "%u: originally had %d, import %d, exp %d, %s\n", i, v[0], v[1], v[2],
02062                      v[3] ? "a change of partitioning" : "no change" );
02063         }
02064         fprintf( stdout, "==========================================\n" );
02065         fflush( stdout );
02066 
02067         free( v2 );
02068     }
02069 
02070     free( v1 );
02071 }
02072 
02073 void ZoltanPartitioner::mbShowError( int val, const char* s )
02074 {
02075     if( s ) printf( "%s ", s );
02076 
02077     switch( val )
02078     {
02079         case ZOLTAN_OK:
02080             printf( "%d: SUCCESSFUL\n", mbpc->proc_config().proc_rank() );
02081             break;
02082         case ZOLTAN_WARN:
02083             printf( "%d: WARNING\n", mbpc->proc_config().proc_rank() );
02084             break;
02085         case ZOLTAN_FATAL:
02086             printf( "%d: FATAL ERROR\n", mbpc->proc_config().proc_rank() );
02087             break;
02088         case ZOLTAN_MEMERR:
02089             printf( "%d: MEMORY ALLOCATION ERROR\n", mbpc->proc_config().proc_rank() );
02090             break;
02091         default:
02092             printf( "%d: INVALID RETURN CODE\n", mbpc->proc_config().proc_rank() );
02093             break;
02094     }
02095 }
02096 
02097 /**********************
02098 ** callbacks
02099 **********************/
02100 
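// These query functions expose the file-scope arrays (Points, GlobalIds,
// NumEdges, NborGlobalId, NborProcs, ObjWeights, EdgeWeights, Parts) to Zoltan.
// They are registered on the Zoltan object before LB_Partition is invoked, as
// done in partition_owned_cells below:
//
//   myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
//   myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
//   myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );            // geometric methods
//   myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
//   myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );  // graph methods
//   myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );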
02101 int mbGetNumberOfAssignedObjects( void* /* userDefinedData */, int* err )
02102 {
02103     *err = 0;
02104     return NumPoints;
02105 }
02106 
02107 void mbGetObjectList( void* /* userDefinedData */,
02108                       int /* numGlobalIds */,
02109                       int /* numLids */,
02110                       ZOLTAN_ID_PTR gids,
02111                       ZOLTAN_ID_PTR lids,
02112                       int wgt_dim,
02113                       float* obj_wgts,
02114                       int* err )
02115 {
02116     for( int i = 0; i < NumPoints; i++ )
02117     {
02118         gids[i] = GlobalIds[i];
02119         lids[i] = i;
02120         if( wgt_dim > 0 ) obj_wgts[i] = ObjWeights[i];
02121     }
02122 
02123     *err = 0;
02124 }
02125 
02126 int mbGetObjectSize( void* /* userDefinedData */, int* err )
02127 {
02128     *err = 0;
02129     return 3;
02130 }
02131 
02132 void mbGetObject( void* /* userDefinedData */,
02133                   int /* numGlobalIds */,
02134                   int /* numLids */,
02135                   int numObjs,
02136                   ZOLTAN_ID_PTR /* gids */,
02137                   ZOLTAN_ID_PTR lids,
02138                   int numDim,
02139                   double* pts,
02140                   int* err )
02141 {
02142     int i, id, id3;
02143     int next = 0;
02144 
02145     if( numDim != 3 )
02146     {
02147         *err = 1;
02148         return;
02149     }
02150 
02151     for( i = 0; i < numObjs; i++ )
02152     {
02153         id = lids[i];
02154 
02155         if( ( id < 0 ) || ( id >= NumPoints ) )
02156         {
02157             *err = 1;
02158             return;
02159         }
02160 
02161         id3 = lids[i] * 3;
02162 
02163         pts[next++] = Points[id3];
02164         pts[next++] = Points[id3 + 1];
02165         pts[next++] = Points[id3 + 2];
02166     }
02167 }
02168 
02169 void mbGetNumberOfEdges( void* /* userDefinedData */,
02170                          int /* numGlobalIds */,
02171                          int /* numLids */,
02172                          int numObjs,
02173                          ZOLTAN_ID_PTR /* gids */,
02174                          ZOLTAN_ID_PTR lids,
02175                          int* numEdges,
02176                          int* err )
02177 {
02178     int i, id;
02179     int next = 0;
02180 
02181     for( i = 0; i < numObjs; i++ )
02182     {
02183         id = lids[i];
02184 
02185         if( ( id < 0 ) || ( id >= NumPoints ) )
02186         {
02187             *err = 1;
02188             return;
02189         }
02190 
02191         numEdges[next++] = NumEdges[id];
02192     }
02193 }
02194 
02195 void mbGetEdgeList( void* /* userDefinedData */,
02196                     int /* numGlobalIds */,
02197                     int /* numLids */,
02198                     int numObjs,
02199                     ZOLTAN_ID_PTR /* gids */,
02200                     ZOLTAN_ID_PTR lids,
02201                     int* /* numEdges */,
02202                     ZOLTAN_ID_PTR nborGlobalIds,
02203                     int* nborProcs,
02204                     int wgt_dim,
02205                     float* edge_wgts,
02206                     int* err )
02207 {
02208     int i, id, idSum, j;
02209     int next = 0;
02210 
02211     for( i = 0; i < numObjs; i++ )
02212     {
02213         id = lids[i];
02214 
02215         if( ( id < 0 ) || ( id >= NumPoints ) )
02216         {
02217             *err = 1;
02218             return;
02219         }
02220 
02221         idSum = 0;
02222 
02223         for( j = 0; j < id; j++ )
02224             idSum += NumEdges[j];
02225 
02226         for( j = 0; j < NumEdges[id]; j++ )
02227         {
02228             nborGlobalIds[next] = NborGlobalId[idSum];
02229             nborProcs[next]     = NborProcs[idSum];
02230             if( wgt_dim > 0 ) edge_wgts[next] = EdgeWeights[idSum];
02231             next++;
02232             idSum++;
02233         }
02234     }
02235 }
02236 
02237 void mbGetPart( void* /* userDefinedData */,
02238                 int /* numGlobalIds */,
02239                 int /* numLids */,
02240                 int numObjs,
02241                 ZOLTAN_ID_PTR /* gids */,
02242                 ZOLTAN_ID_PTR lids,
02243                 int* part,
02244                 int* err )
02245 {
02246     int i, id;
02247     int next = 0;
02248 
02249     for( i = 0; i < numObjs; i++ )
02250     {
02251         id = lids[i];
02252 
02253         if( ( id < 0 ) || ( id >= NumPoints ) )
02254         {
02255             *err = 1;
02256             return;
02257         }
02258 
02259         part[next++] = Parts[id];
02260     }
02261 }
02262 
02263 // new methods for partition in parallel, used by migrate in iMOAB
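// Each task partitions only the cells it owns ('primary').  extraGraphEdges and
// 'procs' describe adjacencies that cross the current partition boundary, so the
// global graph stays connected; 'met' selects the strategy (1: adjacency graph
// with Zoltan's hypergraph/PHG method, 2: cell centroids with RCB).  On return,
// 'distribution' maps each new part index to the locally owned cells assigned
// to it.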
02264 ErrorCode ZoltanPartitioner::partition_owned_cells( Range& primary,
02265                                                     ParallelComm* pco,
02266                                                     std::multimap< int, int >& extraGraphEdges,
02267                                                     std::map< int, int > procs,
02268                                                     int& numNewPartitions,
02269                                                     std::map< int, Range >& distribution,
02270                                                     int met )
02271 {
02272     // start copy
02273     MeshTopoUtil mtu( mbImpl );
02274     Range adjs;
02275 
02276     std::vector< int > adjacencies;
02277     std::vector< int > ids;
02278     ids.resize( primary.size() );
02279     std::vector< int > length;
02280     std::vector< double > coords;
02281     std::vector< int > nbor_proc;
02282     // a fixed-size array suffices because the number of lower-dimensional neighbors is
02283     // limited by MBCN
02284     int neighbors[5 * MAX_SUB_ENTITIES];
02285     int neib_proc[5 * MAX_SUB_ENTITIES];
02286     double avg_position[3];
02287     int moab_id;
02288     int primaryDim = mbImpl->dimension_from_handle( *primary.rbegin() );
02289     // get the global id tag handle
02290     Tag gid = mbImpl->globalId_tag();
02291 
02292     ErrorCode rval = mbImpl->tag_get_data( gid, primary, &ids[0] );MB_CHK_ERR( rval );
02293 
02294     int rank = pco->rank();  // current rank, assigned to regular (on-processor) neighbors
02295     int i    = 0;
02296     for( Range::iterator rit = primary.begin(); rit != primary.end(); ++rit, i++ )
02297     {
02298         EntityHandle cell = *rit;
02299         // get bridge adjacencies for each cell
02300         if( 1 == met )
02301         {
02302             adjs.clear();
02303             rval = mtu.get_bridge_adjacencies( cell, ( primaryDim > 0 ? primaryDim - 1 : 3 ), primaryDim, adjs );MB_CHK_ERR( rval );
02304 
02305             // get the graph vertex ids of those
02306             if( !adjs.empty() )
02307             {
02308                 assert( adjs.size() < 5 * MAX_SUB_ENTITIES );
02309                 rval = mbImpl->tag_get_data( gid, adjs, neighbors );MB_CHK_ERR( rval );
02310             }
02311             // if adjacent to neighbor partitions, add to the list
02312             int size_adjs = (int)adjs.size();
02313             moab_id       = ids[i];
02314             for( int k = 0; k < size_adjs; k++ )
02315                 neib_proc[k] = rank;  // current rank
02316             if( extraGraphEdges.find( moab_id ) != extraGraphEdges.end() )
02317             {
02318                 // the current cell is adjacent to one or more cells in other partitions
02320                 std::pair< std::multimap< int, int >::iterator, std::multimap< int, int >::iterator > ret;
02321                 ret = extraGraphEdges.equal_range( moab_id );
02322                 for( std::multimap< int, int >::iterator it = ret.first; it != ret.second; ++it )
02323                 {
02324                     int otherID          = it->second;
02325                     neighbors[size_adjs] = otherID;         // the id of the other cell, across partition
02326                     neib_proc[size_adjs] = procs[otherID];  // this is how we built this map, the cell id maps to
02327                                                             // what proc it came from
02328                     size_adjs++;
02329                 }
02330             }
02331             // copy those into adjacencies vector
02332             length.push_back( size_adjs );
02333             std::copy( neighbors, neighbors + size_adjs, std::back_inserter( adjacencies ) );
02334             std::copy( neib_proc, neib_proc + size_adjs, std::back_inserter( nbor_proc ) );
02335         }
02336         else if( 2 == met )
02337         {
02338             if( TYPE_FROM_HANDLE( cell ) == MBVERTEX )
02339             {
02340                 rval = mbImpl->get_coords( &cell, 1, avg_position );MB_CHK_ERR( rval );
02341             }
02342             else
02343             {
02344                 rval = mtu.get_average_position( cell, avg_position );MB_CHK_ERR( rval );
02345             }
02346             std::copy( avg_position, avg_position + 3, std::back_inserter( coords ) );
02347         }
02348     }
02349 
02350 #ifdef VERBOSE
02351     std::stringstream ff2;
02352     ff2 << "zoltanInput_" << pco->rank() << ".txt";
02353     std::ofstream ofs;
02354     ofs.open( ff2.str().c_str(), std::ofstream::out );
02355     ofs << "Length vector: " << std::endl;
02356     std::copy( length.begin(), length.end(), std::ostream_iterator< int >( ofs, ", " ) );
02357     ofs << std::endl;
02358     ofs << "Adjacencies vector: " << std::endl;
02359     std::copy( adjacencies.begin(), adjacencies.end(), std::ostream_iterator< int >( ofs, ", " ) );
02360     ofs << std::endl;
02361     ofs << "Moab_ids vector: " << std::endl;
02362     std::copy( ids.begin(), ids.end(), std::ostream_iterator< int >( ofs, ", " ) );
02363     ofs << std::endl;
02364     ofs << "Coords vector: " << std::endl;
02365     std::copy( coords.begin(), coords.end(), std::ostream_iterator< double >( ofs, ", " ) );
02366     ofs << std::endl;
02367     ofs.close();
02368 #endif
02369 
02370     // these are static var in this file, and used in the callbacks
02371     Points = NULL;
02372     if( 1 != met ) Points = &coords[0];
02373     GlobalIds    = &ids[0];
02374     NumPoints    = (int)ids.size();
02375     NumEdges     = &length[0];
02376     NborGlobalId = &adjacencies[0];
02377     NborProcs    = &nbor_proc[0];
02378     ObjWeights   = NULL;
02379     EdgeWeights  = NULL;
02380     Parts        = NULL;
02381 
02382     float version;
02383     if( pco->rank() == 0 ) std::cout << "Initializing zoltan..." << std::endl;
02384 
02385     Zoltan_Initialize( argcArg, argvArg, &version );
02386 
02387     // Create Zoltan object.  This calls Zoltan_Create.
02388     if( NULL == myZZ ) myZZ = new Zoltan( pco->comm() );
02389 
02390     // set # requested partitions
02391     char buff[10];
02392     snprintf( buff, sizeof( buff ), "%d", numNewPartitions );
02393     int retval = myZZ->Set_Param( "NUM_GLOBAL_PARTITIONS", buff );
02394     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02395 
02396     // request parts assignment
02397     retval = myZZ->Set_Param( "RETURN_LISTS", "PARTS" );
02398     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02399 
02400     myZZ->Set_Num_Obj_Fn( mbGetNumberOfAssignedObjects, NULL );
02401     myZZ->Set_Obj_List_Fn( mbGetObjectList, NULL );
02402     // due to a bug in zoltan, if method is graph partitioning, do not pass coordinates!!
02403     if( 2 == met )
02404     {
02405         myZZ->Set_Num_Geom_Fn( mbGetObjectSize, NULL );
02406         myZZ->Set_Geom_Multi_Fn( mbGetObject, NULL );
02407         SetRCB_Parameters();  // geometry
02408     }
02409     else if( 1 == met )
02410     {
02411         myZZ->Set_Num_Edges_Multi_Fn( mbGetNumberOfEdges, NULL );
02412         myZZ->Set_Edge_List_Multi_Fn( mbGetEdgeList, NULL );
02413         SetHypergraph_Parameters( "auto" );
02414     }
02415 
02416     // Perform the load balancing partitioning
02417 
02418     int changes;
02419     int numGidEntries;
02420     int numLidEntries;
02421     int num_import;
02422     ZOLTAN_ID_PTR import_global_ids, import_local_ids;
02423     int* import_procs;
02424     int* import_to_part;
02425     int num_export;
02426     ZOLTAN_ID_PTR export_global_ids, export_local_ids;
02427     int *assign_procs, *assign_parts;
02428 
02429     if( pco->rank() == 0 )
02430         std::cout << "Computing partition using method (1-graph, 2-geom):" << met << " for " << numNewPartitions
02431                   << " parts..." << std::endl;
02432 
02433 #ifndef NDEBUG
02434 #if 0
02435   static int counter=0; // it may be possible to call function multiple times in a simulation
02436   // give a way to not overwrite the files
02437   // it should work only with a modified version of Zoltan
02438   std::stringstream basename;
02439   if (1==met)
02440   {
02441     basename << "phg_" << counter++;
02442     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)(basename.str().c_str()), 1, 0, 1, 0);
02443   }
02444   else if (2==met)
02445   {
02446     basename << "rcb_" << counter++;
02447     Zoltan_Generate_Files(myZZ->Get_C_Handle(), (char*)(basename.str().c_str()), 1, 1, 0, 0);
02448   }
02449 #endif
02450 #endif
02451     retval = myZZ->LB_Partition( changes, numGidEntries, numLidEntries, num_import, import_global_ids, import_local_ids,
02452                                  import_procs, import_to_part, num_export, export_global_ids, export_local_ids,
02453                                  assign_procs, assign_parts );
02454     if( ZOLTAN_OK != retval ) return MB_FAILURE;
02455 
02456 #ifdef VERBOSE
02457     std::stringstream ff3;
02458     ff3 << "zoltanOutput_" << pco->rank() << ".txt";
02459     std::ofstream ofs3;
02460     ofs3.open( ff3.str().c_str(), std::ofstream::out );
02461     ofs3 << " export elements on rank " << rank << " \n";
02462     ofs3 << "\t index \t gb_id \t local \t proc \t part \n";
02463     for( int k = 0; k < num_export; k++ )
02464     {
02465         ofs3 << "\t" << k << "\t" << export_global_ids[k] << "\t" << export_local_ids[k] << "\t" << assign_procs[k]
02466              << "\t" << assign_parts[k] << "\n";
02467     }
02468     ofs3.close();
02469 #endif
02470 
02471     // basically each local cell is assigned to a part
02472 
02473     assert( num_export == (int)primary.size() );
02474     for( i = 0; i < num_export; i++ )
02475     {
02476         EntityHandle cell = primary[export_local_ids[i]];
02477         distribution[assign_parts[i]].insert( cell );
02478     }
02479 
02480     Zoltan::LB_Free_Part( &import_global_ids, &import_local_ids, &import_procs, &import_to_part );
02481     Zoltan::LB_Free_Part( &export_global_ids, &export_local_ids, &assign_procs, &assign_parts );
02482 
02483     delete myZZ;
02484     myZZ = NULL;
02485 
02486     // clear arrays that were resized locally, to free up local memory
02487 
02488     std::vector< int >().swap( adjacencies );
02489     std::vector< int >().swap( ids );
02490     std::vector< int >().swap( length );
02491     std::vector< int >().swap( nbor_proc );
02492     std::vector< double >().swap( coords );
02493 
02494     return MB_SUCCESS;
02495 }