CGMReadParallel.cpp
#include <cstdio>
#include <cstring> // strstr, used in read_entities (assumed missing include)
#include <string>  // std::string used in load_file signatures
#include <vector>  // std::vector used throughout

#include "CubitString.hpp"
#include "CubitMessage.hpp"
#include "DLList.hpp"
#include "DLIList.hpp" // DLIList<> is used heavily below
#include "RefEntity.hpp"
#include "RefFace.hpp"
#include "RefEdge.hpp"
#include "RefVertex.hpp"
#include "CubitEntity.hpp"
#include "Body.hpp"
#include "CastTo.hpp"
#include "CubitUtil.hpp"
#include "CADefines.hpp"
#include "CABodies.hpp"
#include "TDParallel.hpp"
#include "CAMergePartner.hpp"
#include "TDUniqueId.hpp"

#include "TopologyBridge.hpp"
#include "GeometryQueryTool.hpp"
#include "CGMReadParallel.hpp"
#include "CGMParallelConventions.h"
#include "CGMParallelComm.hpp"
#include "CubitCompat.hpp"

#include <iostream>

const bool CGM_read_parallel_debug = false;

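// Instruction-queue actions: load_file() translates the PARALLEL option
// into a sequence of these actions, which are then executed in order.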
enum CGMParallelActions {
  PA_READ = 0,
  PA_BROADCAST,
  PA_DELETE_NONLOCAL,
  PA_SCATTER,
  PA_SCATTER_DELETE,
  PA_BALANCE
};

enum CGMPartitionActions {
  PT_GEOM_DIM = 0, PT_PAR_PART
};

// one name per CGMParallelActions value
const char *CGMParallelActionsNames[] = { "PARALLEL READ", "PARALLEL BROADCAST",
    "PARALLEL DELETE NONLOCAL", "PARALLEL SCATTER", "PARALLEL SCATTER DELETE",
    "PARALLEL BALANCE" };

const char* CGMReadParallel::CGMparallelOptsNames[] = { "NONE", "READ",
    "READ_DELETE", "BCAST", "BCAST_DELETE", "SCATTER", "SCATTER_DELETE",
    "READ_PARALLEL", "FORMAT", "", 0 };

const char* CGMReadParallel::CGMpartitionOptsNames[] = { "NONE",
    "GEOM_DIMENSION", "PARALLEL_PARTITION", "", 0 };

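// Construct a parallel reader; if no CGMParallelComm is supplied, create one.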
CGMReadParallel::CGMReadParallel(GeometryQueryTool* gqt, CGMParallelComm *pc) :
    m_gqt(gqt), m_pcomm(pc) {
  if (!m_pcomm) {
    m_pcomm = new CGMParallelComm();
  }

  m_bal_method = ROUND_ROBIN;
  m_scatter = false;
  m_rank = m_pcomm->proc_config().proc_rank();
  m_proc_size = m_pcomm->proc_config().proc_size();
}

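// Parse the parallel read options ("PARALLEL", "PARTITION", "PARTITION_VAL",
// "PARTITION_DISTRIBUTE", "MPI_IO_RANK"), build the corresponding action
// queue, and dispatch to the protected load_file() overload.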
CubitStatus CGMReadParallel::load_file(const char *file_name,
    const char *options, const char* set_tag_name, const int* set_tag_values,
    int num_set_tag_values) {
  CGMFileOptions opts(options);

  // Get parallel settings
  int parallel_mode;
  CGMFOErrorCode result = opts.match_option("PARALLEL", CGMparallelOptsNames,
      parallel_mode);
  if (FO_FAILURE == result) {
    PRINT_ERROR("Unexpected value for 'PARALLEL' option\n");
    return CUBIT_FAILURE;
  } else if (FO_ENTITY_NOT_FOUND == result) {
    parallel_mode = 0;
  }

  bool surf_partition = false;
  std::string partition_tag_name;
  std::vector<int> partition_tag_vals;

  // Get partition tag value(s), if any, and whether they're to be
  // distributed or assigned
  result = opts.get_ints_option("PARTITION_VAL", partition_tag_vals);

  // Get partition setting
  result = opts.get_option("PARTITION", partition_tag_name);

  if (FO_ENTITY_NOT_FOUND == result || partition_tag_name.empty()) {
    partition_tag_name = "GEOM_DIMENSION";
    m_bal_method = ROUND_ROBIN;
  } else {
    // use geom dimension for partition
    if (partition_tag_name == "GEOM_DIMENSION") {
      int geom_dim = 0;
      for (std::vector<int>::iterator pit = partition_tag_vals.begin();
          pit != partition_tag_vals.end(); pit++) {
        geom_dim = *pit;
        if (geom_dim == 2)
          surf_partition = true; // body & surface partition
        else if (geom_dim == 3)
          surf_partition = false; // body partition only
        else {
          PRINT_ERROR("Geometry dimension %d is not supported.\n", geom_dim);
          return CUBIT_FAILURE;
        }
      }
    }
    // static partition, use chaco
    else if (partition_tag_name == "PAR_PARTITION_STATIC") {
      m_bal_method = PARTITION_STATIC;
    }
    // dynamic partition, use zoltan
    else if (partition_tag_name == "PAR_PARTITION_DYNAMIC") {
      m_bal_method = PARTITION_DYNAMIC;
    }

    // round-robin
    result = opts.get_null_option("PARTITION_DISTRIBUTE");
    if (FO_SUCCESS == result)
      m_bal_method = ROUND_ROBIN;
  }

  // get MPI IO processor rank
  int reader_rank;
  result = opts.get_int_option("MPI_IO_RANK", reader_rank);
  if (FO_ENTITY_NOT_FOUND == result)
    reader_rank = 0;
  else if (FO_SUCCESS != result) {
    PRINT_ERROR("Unexpected value for 'MPI_IO_RANK' option\n");
    return CUBIT_FAILURE;
  }
  m_pcomm->proc_config().set_master(reader_rank); // set master processor
  bool reader = (reader_rank == (int) m_rank);

  // now that we've parsed all the parallel options, make an instruction
  // queue
  std::vector<int> pa_vec;
  switch (parallel_mode) {

  case POPT_READ:
    pa_vec.push_back(PA_READ);
    pa_vec.push_back(PA_BALANCE);
    break;

  case POPT_DEFAULT:
  case POPT_READ_DELETE:
    pa_vec.push_back(PA_READ);
    pa_vec.push_back(PA_BALANCE);
    pa_vec.push_back(PA_DELETE_NONLOCAL);
    break;

  case POPT_BCAST:
    if (reader) {
      pa_vec.push_back(PA_READ);
      pa_vec.push_back(PA_BALANCE);
    }
    pa_vec.push_back(PA_BROADCAST);
    break;

  case POPT_BCAST_DELETE:
    if (reader) {
      pa_vec.push_back(PA_READ);
      pa_vec.push_back(PA_BALANCE);
    }
    pa_vec.push_back(PA_BROADCAST);
    pa_vec.push_back(PA_DELETE_NONLOCAL);
    break;

  case PORT_SCATTER:
    if (reader) {
      pa_vec.push_back(PA_READ);
      pa_vec.push_back(PA_BALANCE);
    }
    pa_vec.push_back(PA_SCATTER);
    m_scatter = true;
    break;

  case POPT_FORMAT:
    PRINT_ERROR("Access to format-specific parallel read not implemented.\n");
    return CUBIT_FAILURE;

  case POPT_READ_PARALLEL:
    PRINT_ERROR("Partitioning for PARALLEL=READ_PARALLEL not supported yet.\n");
    return CUBIT_FAILURE;

  default:
    return CUBIT_FAILURE;
  }

  return load_file(file_name, parallel_mode, partition_tag_name,
      partition_tag_vals, pa_vec, opts, set_tag_name, set_tag_values,
      num_set_tag_values, reader_rank, surf_partition);
}

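// Execute the parsed instruction queue: read the file, balance bodies across
// processors, then broadcast/scatter and/or delete nonlocal entities as
// requested by the parallel mode.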
CubitStatus CGMReadParallel::load_file(const char *file_name, int parallel_mode,
    std::string &partition_tag_name, std::vector<int> &partition_tag_vals,
    std::vector<int> &pa_vec, const CGMFileOptions &opts,
    const char* set_tag_name, const int* set_tag_values,
    const int num_set_tag_values, const int reader_rank,
    const bool surf_partition) {
  // actuate CA_BODIES and turn on auto flag for other attributes
  CGMApp::instance()->attrib_manager()->register_attrib_type(CA_BODIES,
      "bodies", "BODIES", &CABodies_creator, CUBIT_TRUE, CUBIT_TRUE, CUBIT_TRUE,
      CUBIT_TRUE, CUBIT_TRUE, CUBIT_FALSE);
  CGMApp::instance()->attrib_manager()->auto_flag(CUBIT_TRUE);

  if (CGM_read_parallel_debug) {
    DEBUG_FLAG(90, CUBIT_TRUE);
    DEBUG_FLAG(138, CUBIT_TRUE);
  }

  // do the work by options
  std::vector<int>::iterator vit;
  int i;
  double tStart = 0.0, tEnd = 0.0; // timing; declared here since several cases share them

  for (i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); vit++, i++) {
    CubitStatus result = CUBIT_SUCCESS;
    switch (*vit) {
//==================
    case PA_READ:
      if (CGM_read_parallel_debug) {
        std::cout << "Reading file " << file_name << std::endl;
        tStart = MPI_Wtime();
      }

      result = read_entities(file_name);

      if (CUBIT_SUCCESS != result) {
        PRINT_ERROR("Reading file %s failed.\n", file_name);
        return CUBIT_FAILURE;
      } else if (CGM_read_parallel_debug) {
        tEnd = MPI_Wtime();
        PRINT_INFO("Read time in proc %d is %f.\n", m_rank, tEnd - tStart);
        PRINT_INFO("Read done.\n");
      }

      break;

//==================
    case PA_BALANCE:
      if (CGM_read_parallel_debug)
        std::cout << "Balancing entities." << std::endl;
      if (m_bal_method == ROUND_ROBIN)
        result = balance_round_robin();
      if (CUBIT_SUCCESS != result)
        return result;

      if (CGM_read_parallel_debug)
        PRINT_INFO("Balancing entities done.\n");

      break;

//==================
    case PA_DELETE_NONLOCAL:
      if (CGM_read_parallel_debug) {
        PRINT_INFO("Deleting nonlocal entities.\n");
        tStart = MPI_Wtime();
      }

      result = delete_nonlocal_entities(reader_rank, partition_tag_name,
          partition_tag_vals);

      if (CUBIT_SUCCESS != result) {
        PRINT_ERROR("Delete failed.\n");
        return CUBIT_FAILURE;
      } else if (CGM_read_parallel_debug) {
        tEnd = MPI_Wtime();
        PRINT_INFO("Delete done.\n");
        PRINT_INFO("Delete time in proc %d is %f.\n", m_rank, tEnd - tStart);
      }
      break;

//==================
    case PA_BROADCAST:
      // do the actual broadcast; if single-processor, ignore error
      if (m_proc_size > 1) {
        if (CGM_read_parallel_debug) {
          PRINT_INFO("Broadcasting Body entities.\n");
          tStart = MPI_Wtime();
        }

        result = m_pcomm->broadcast_entities(reader_rank,
            m_pcomm->partition_body_list());

        if (CUBIT_SUCCESS != result) {
          PRINT_ERROR("Broadcasting Body entities failed.\n");
          return CUBIT_FAILURE;
        } else if (CGM_read_parallel_debug) {
          tEnd = MPI_Wtime();
          PRINT_INFO("Bcast bodies done.\n");
          PRINT_INFO("Broadcast bodies time in proc %d is %f.\n", m_rank,
              tEnd - tStart);
        }

        if (!check_partition_info()) {
          PRINT_ERROR("Check partition info failed.\n");
          return CUBIT_FAILURE;
        }
      }

      break;

//==================
    case PA_SCATTER:
      // do the actual scatter
      if (m_proc_size > 1) {
        if (CGM_read_parallel_debug) {
          PRINT_INFO("Scattering body entities.\n");
          tStart = MPI_Wtime();
        }
        result = m_pcomm->scatter_entities(reader_rank,
            m_pcomm->partition_body_list());

        if (CUBIT_SUCCESS != result) {
          PRINT_ERROR("Scattering body entities failed.\n");
          return CUBIT_FAILURE;
        } else if (CGM_read_parallel_debug) {
          tEnd = MPI_Wtime();
          PRINT_INFO("Scatter bodies done.\n");
          PRINT_INFO("Scatter bodies time in proc %d is %f.\n", m_rank,
              tEnd - tStart);
        }

        if (!check_partition_info()) {
          PRINT_ERROR("Check partition info failed.\n");
          return CUBIT_FAILURE;
        }
      }
      if (CGM_read_parallel_debug)
        PRINT_INFO("Scatter done.\n");

      break;

//==================
    default:
      return CUBIT_FAILURE;
    }
  }

  return CUBIT_SUCCESS;
}

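// Read the file into the serial CGM session, inferring the file type from
// its extension, and cache the resulting Body list in the parallel comm.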
CubitStatus CGMReadParallel::read_entities(const char* file_name) {
  // check file type
  CubitString file_type;
  if (strstr(file_name, ".stp"))
    file_type = "STEP";
  else if (strstr(file_name, ".igs"))
    file_type = "IGES";
  else if (strstr(file_name, ".occ") || strstr(file_name, ".OCC")
      || strstr(file_name, ".brep") || strstr(file_name, ".BREP"))
    file_type = "OCC";
  else {
    PRINT_ERROR("File type not known for file %s; skipping.\n", file_name);
    return CUBIT_FAILURE;
  }

  // import solid model
  CubitStatus result = CubitCompat_import_solid_model(file_name,
      file_type.c_str());
  if (CUBIT_SUCCESS != result) {
    PRINT_ERROR("Reading file %s failed.\n", file_name);
    return CUBIT_FAILURE;
  }

  // get body entities
  DLIList<RefEntity*>& body_entity_list = m_pcomm->partition_body_list();
  body_entity_list.clean_out();
  result = m_gqt->ref_entity_list("body", body_entity_list, CUBIT_FALSE);
  if (CUBIT_SUCCESS != result) {
    PRINT_ERROR("Getting Body entities failed.\n");
    return result;
  }

  return result;
}

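// Assign each Body (and its Volumes) to a processor in round-robin order,
// tracking an estimated load per processor; shared child entities (interface
// surfaces, edges, vertices) are then assigned so that loads stay balanced.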
CubitStatus CGMReadParallel::balance_round_robin() {
  // get bodies
  int i, j, k;
  DLIList<RefEntity*>& body_entity_list = m_pcomm->partition_body_list();
  int n_proc = m_proc_size;
  // estimated loads for each processor (std::vector instead of raw new[]
  // so the storage is freed on every return path)
  std::vector<double> loads(n_proc, 0.0);
  std::vector<double> ve_loads(n_proc, 0.0);

  if (m_bal_method == ROUND_ROBIN) { // round-robin case
    int n_entity = body_entity_list.size();
    int n_entity_proc = n_entity / n_proc; // # of entities per processor
    int i_entity_proc = n_entity_proc; // entity index limit for each processor
    int proc = 0;
    RefEntity* entity;

    // assign processors to bodies: the first n_proc blocks get
    // n_entity_proc bodies each; any remainder is dealt out one body
    // per processor, wrapping around
    body_entity_list.reset();
    for (i = 0; i < n_entity; i++) {
      if (i == i_entity_proc) {
        proc++;
        if (proc < n_proc)
          i_entity_proc += n_entity_proc;
        else {
          proc %= n_proc;
          i_entity_proc++;
        }
      }

      // attach the processor assignment to the body
      entity = body_entity_list.get_and_step();
      DLIList<int> shared_procs;
      shared_procs.append(proc);
      TDParallel *td_par = (TDParallel *) entity->get_TD(
          &TDParallel::is_parallel);
      if (td_par == NULL)
        td_par = new TDParallel(entity, NULL, &shared_procs);
      loads[proc] += entity->measure();

      // also assign to volumes; this should be removed in the future
      DLIList<RefVolume*> volumes;
      (dynamic_cast<TopologyEntity*>(entity))->ref_volumes(volumes);
      int n_vol = volumes.size();
      volumes.reset();
      for (j = 0; j < n_vol; j++) {
        RefEntity *vol = volumes.get_and_step();
        td_par = (TDParallel *) vol->get_TD(&TDParallel::is_parallel);
        if (td_par == NULL)
          td_par = new TDParallel(vol, NULL, &shared_procs);
      }

      // add local surface load
      DLIList<RefFace*> faces;
      (dynamic_cast<TopologyEntity*>(entity))->ref_faces(faces);
      int n_face = faces.size();
      faces.reset();
      for (j = 0; j < n_face; j++) {
        RefFace* face = faces.get_and_step();
        TopologyEntity *te = CAST_TO(face, TopologyEntity);
        if (te->bridge_manager()->number_of_bridges() < 2) {
          loads[proc] = loads[proc] + face->measure();
        }
      }
    }

    // Get all child entities
    DLIList<RefEntity*> child_list;
    RefEntity::get_all_child_ref_entities(body_entity_list, child_list);
    int n_child = child_list.size();

    // assign processors to interface entities (entities whose bridge
    // manager holds more than one bridge, i.e. merged across bodies)
    child_list.reset();
    for (i = 0; i < n_child; i++) {
      entity = child_list.get_and_step();
      TopologyEntity *te = CAST_TO(entity, TopologyEntity);

      if (te->bridge_manager()->number_of_bridges() > 1) {
        DLIList<Body*> parent_bodies;
        DLIList<int> shared_procs;
        (dynamic_cast<TopologyEntity*>(entity))->bodies(parent_bodies);
        int n_parent = parent_bodies.size();

        for (j = 0; j < n_parent; j++) {
          RefEntity *parent_vol = CAST_TO(parent_bodies.get_and_step(),
              RefEntity);
          TDParallel *parent_td = (TDParallel *) parent_vol->get_TD(
              &TDParallel::is_parallel);

          if (parent_td == NULL) {
            PRINT_ERROR("Parent Volume has to be partitioned.\n");
            return CUBIT_FAILURE;
          }
          shared_procs.append_unique(parent_td->get_charge_proc());
        }

        if (shared_procs.size() > 1) { // if it is interface
          TDParallel *td_par = (TDParallel *) entity->get_TD(
              &TDParallel::is_parallel);
          if (td_par == NULL) {
            int merge_id = TDUniqueId::get_unique_id(entity);
            if (entity->entity_type_info() == typeid(RefFace)) { // face
              if (shared_procs.size() != 2) {
                PRINT_ERROR(
                    "Error: # of shared processors of interface surface should be 2.\n");
                return CUBIT_FAILURE;
              }

              // balance interface surface loads
              if (loads[shared_procs[0]] > loads[shared_procs[1]]) {
                shared_procs.reverse();
              }
              loads[shared_procs[0]] = loads[shared_procs[0]]
                  + entity->measure();
              td_par = new TDParallel(entity, NULL, &shared_procs, NULL,
                  merge_id, 1);
            } else if (entity->entity_type_info() == typeid(RefEdge)
                || entity->entity_type_info() == typeid(RefVertex)) {
              // balance interface edge/vertex loads: give the entity to the
              // least-loaded shared processor
              int min_p = shared_procs[0];
              int n_shared_proc = shared_procs.size();
              for (int i1 = 1; i1 < n_shared_proc; i1++) {
                if (ve_loads[shared_procs[i1]] < ve_loads[min_p]) {
                  min_p = shared_procs[i1];
                }
              }
              ve_loads[min_p] = ve_loads[min_p] + entity->measure();
              shared_procs.remove(min_p);
              shared_procs.insert_first(min_p);

              // add ghost geometries to shared processors for edge
              if (entity->entity_type_info() == typeid(RefEdge)) {
                parent_bodies.reset();
                for (j = 0; j < n_parent; j++) {
                  RefEntity *parent_vol = CAST_TO(parent_bodies.get_and_step(),
                      RefEntity);
                  TDParallel *parent_td = (TDParallel *) parent_vol->get_TD(
                      &TDParallel::is_parallel);
                  for (k = 0; k < n_shared_proc; k++) {
                    parent_td->add_ghost_proc(shared_procs[k]);
                  }
                }
              }
              td_par = new TDParallel(entity, NULL, &shared_procs, NULL,
                  merge_id, 1);
            }
          }
        }
      }
    }
  } else if (m_bal_method == PARTITION_DYNAMIC) {
    // dynamic (Zoltan-based) partitioning is not implemented here
  }

  return CUBIT_SUCCESS;
}

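// Delete bodies not charged to this processor. A body is kept if this
// processor is in charge of it or shares one of its partitioned surfaces.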
CubitStatus CGMReadParallel::delete_nonlocal_entities(int reader,
    std::string &ptag_name, std::vector<int> &ptag_vals) {
  // find bodies to be deleted
  int i;
  DLIList<RefEntity*>& body_entity_list = m_pcomm->partition_body_list();
  DLIList<RefEntity*> partition_list, delete_body_list;
  int nEntity = body_entity_list.size();
  body_entity_list.reset();

  for (i = 0; i < nEntity; i++) {
    RefEntity* entity = body_entity_list.get_and_step();
    TDParallel *td_par = (TDParallel *) entity->get_TD(
        &TDParallel::is_parallel);
    if (td_par == NULL) {
      PRINT_ERROR("Partitioned Volume should have TDParallel data.\n");
      return CUBIT_FAILURE;
    }

    if (td_par->get_charge_proc() != m_rank) { // candidate to be deleted
      // check child surfaces if surface partitioned
      DLIList<RefFace*> face_list;
      (dynamic_cast<TopologyEntity*>(entity))->ref_faces(face_list);
      bool b_partitioned_surf = false;
      int n_face = face_list.size();
      face_list.reset();
      for (int j = 0; j < n_face; j++) {
        RefEntity* face = face_list.get_and_step();
        TDParallel *td_par_face = (TDParallel *) face->get_TD(
            &TDParallel::is_parallel);
        if (td_par_face != NULL) { // if surface is partitioned
          DLIList<int>* shared_procs = td_par_face->get_shared_proc_list();
          int n_shared = shared_procs->size();
          shared_procs->reset();
          for (int k = 0; k < n_shared; k++) {
            if (shared_procs->get_and_step() == (int) m_rank) {
              b_partitioned_surf = true;
              break;
            }
          }
        }
      }
      if (b_partitioned_surf)
        partition_list.append(entity);
      else
        delete_body_list.append(entity);
    } else
      partition_list.append(entity);
  }

  // print info
  char pre_body[100];
  DLIList<CubitEntity*> tmp_body_list;
  if (CGM_read_parallel_debug) {
    if ((int) m_rank != reader) {
      CAST_LIST_TO_PARENT(delete_body_list, tmp_body_list);
      snprintf(pre_body, sizeof(pre_body), "Will delete %d Bodies: ",
          tmp_body_list.size());
      CubitUtil::list_entity_ids(pre_body, tmp_body_list);
    }
    std::cout << "Partitioned Body list size after delete: "
        << partition_list.size() << std::endl;
  }

  // delete bodies
  nEntity = delete_body_list.size();
  delete_body_list.reset();
  for (i = 0; i < nEntity; i++) {
    GeometryQueryTool::instance()->delete_RefEntity(
        delete_body_list.get_and_step());
  }

  // update Body list in ParallelComm
  body_entity_list.clean_out();
  body_entity_list += partition_list;

  return CUBIT_SUCCESS;
}

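// Verify that every Body in the partition list carries TDParallel data;
// if a Body lacks it, inherit the charge processor from its first Volume.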
CubitStatus CGMReadParallel::check_partition_info() {
  int i;
  DLIList<RefEntity*>& body_entity_list = m_pcomm->partition_body_list();
  int nEntity = body_entity_list.size();
  body_entity_list.reset();

  for (i = 0; i < nEntity; i++) {
    RefEntity* entity = body_entity_list.get_and_step();
    TDParallel *td_par = (TDParallel *) entity->get_TD(
        &TDParallel::is_parallel);
    if (td_par == NULL) { // if body is not partitioned
      DLIList<RefEntity*> volumes;
      entity->get_child_ref_entities(volumes);

      // check whether the first Volume is partitioned; this workaround
      // should be removed in the future
      volumes.reset();
      RefEntity *vol = volumes.get();
      if (vol == NULL || vol->entity_type_info() != typeid(RefVolume)) {
        PRINT_ERROR("Partitioned Body should have at least one Volume.\n");
        return CUBIT_FAILURE;
      }
      td_par = (TDParallel *) vol->get_TD(&TDParallel::is_parallel);

      if (td_par == NULL) {
        PRINT_ERROR("Partitioned Volume should have TDParallel data.\n");
        return CUBIT_FAILURE;
      }

      DLIList<int> s_procs;
      s_procs.append(td_par->get_charge_proc());
      td_par = new TDParallel(entity, NULL, &s_procs);
    }
  }

  return CUBIT_SUCCESS;
}
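
// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the file above). It assumes an
// initialized MPI environment, the CGMA InitCGMA helper, a placeholder file
// name "model.stp", and ';' as the option separator; the options string
// follows the parsing in load_file() above.
//
// #include "InitCGMA.hpp"
// #include "GeometryQueryTool.hpp"
// #include "CGMParallelComm.hpp"
// #include "CGMReadParallel.hpp"
// #include <mpi.h>
//
// int main(int argc, char* argv[]) {
//   MPI_Init(&argc, &argv);
//   InitCGMA::initialize_cgma();  // assumed CGMA initialization entry point
//
//   GeometryQueryTool* gqt = GeometryQueryTool::instance();
//   CGMParallelComm pcomm;
//   CGMReadParallel reader(gqt, &pcomm);
//
//   // Read on the I/O rank, broadcast Body entities, delete nonlocal ones;
//   // partition bodies only (GEOM_DIMENSION value 3).
//   const char* options =
//       "PARALLEL=BCAST_DELETE;PARTITION=GEOM_DIMENSION;PARTITION_VAL=3";
//   CubitStatus rv = reader.load_file("model.stp", options, NULL, NULL, 0);
//
//   MPI_Finalize();
//   return (rv == CUBIT_SUCCESS) ? 0 : 1;
// }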