CGMParallelComm Class Reference

Parallel communications in CGM.
#include <CGMParallelComm.hpp>
Public Member Functions

  CGMParallelComm (MPI_Comm comm=MPI_COMM_WORLD)
      constructor
  CGMParallelComm (std::vector< unsigned char > &tmp_buff, MPI_Comm comm=MPI_COMM_WORLD)
      constructor taking buffer, for testing
  ~CGMParallelComm ()
      destructor
  DLIList< RefEntity * > & partition_surf_list ()
      return partition ref entity list
  const DLIList< RefEntity * > & partition_surf_list () const
  DLIList< RefEntity * > & partition_body_list ()
  const DLIList< RefEntity * > & partition_body_list () const
  const CGMProcConfig & proc_config () const
      Get proc config for this communication object.
  CGMProcConfig & proc_config ()
      Get proc config for this communication object.
  CubitStatus broadcast_entities (const unsigned int from_proc, DLIList< RefEntity * > &ref_entity_list)
  CubitStatus scatter_entities (const unsigned int from_proc, DLIList< RefEntity * > &ref_entity_list)
  CubitStatus write_buffer (DLIList< RefEntity * > &ref_entity_list, char *pBuffer, int &n_buffer_size, bool b_export_buffer)
  CubitStatus read_buffer (DLIList< RefEntity * > &ref_entity_list, const char *pBuffer, const int n_buffer_size)
  CubitStatus bcast_buffer (const unsigned int from_proc)
  CubitStatus append_to_buffer (DLIList< RefEntity * > &ref_entity_list, int add_size)

Private Member Functions

  CubitStatus check_size (int &target_size, const CubitBoolean keep=CUBIT_FALSE)

Private Attributes

  GeometryQueryTool * gqt
      CGM query tool interface associated with this writer.
  CGMProcConfig procConfig
      Proc config object, keeps info on parallel stuff.
  std::vector< unsigned char > myBuffer
      data buffer used to communicate
  char * m_pBuffer
  int m_nBufferSize
  int m_currentPosition
  DLIList< RefEntity * > partitioningSurfList
  DLIList< RefEntity * > partitioningBodyList
Parallel communications in CGM.
This class implements methods to communicate geometry between processors.
Definition at line 21 of file CGMParallelComm.hpp.
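A typical use is to construct one CGMParallelComm per MPI communicator and call the collective routines symmetrically on every rank. The following is a minimal sketch, assuming the library was built with USE_MPI and HAVE_OCC; how the root rank fills its entity list is only indicated by a comment and is not part of this class:

#include "CGMParallelComm.hpp"
#include "DLIList.hpp"
#include "RefEntity.hpp"

// Illustrative sketch: send whatever geometry rank 0 holds to every rank.
void broadcast_root_geometry()
{
  // The constructor calls MPI_Init itself if MPI is not yet initialized.
  CGMParallelComm pcomm(MPI_COMM_WORLD);   // MPI_COMM_WORLD is the default

  DLIList<RefEntity*> entities;
  if (pcomm.proc_config().proc_rank() == 0) {
    // ... fill `entities` on the root rank, e.g. with bodies obtained from
    //     GeometryQueryTool after importing a model ...
  }

  // Collective call: every rank must call it with the same from_proc.
  // Non-root ranks receive the imported entities back in `entities`.
  if (CUBIT_SUCCESS != pcomm.broadcast_entities(0, entities)) {
    // handle the error
  }
}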
CGMParallelComm::CGMParallelComm ( MPI_Comm comm = MPI_COMM_WORLD )

constructor
Definition at line 19 of file CGMParallelComm.cpp.
    : gqt(NULL), procConfig(comm), m_pBuffer(NULL), m_nBufferSize(0), m_currentPosition(0)
{
  myBuffer.resize(INITIAL_BUFF_SIZE);

  int flag = 1;
  int retval = MPI_Initialized(&flag);
  if (MPI_SUCCESS != retval || !flag) {
    int argc = 0;
    char **argv = NULL;

    // mpi not initialized yet - initialize here
    retval = MPI_Init(&argc, &argv);
  }
}
CGMParallelComm::CGMParallelComm ( std::vector< unsigned char > & tmp_buff, MPI_Comm comm = MPI_COMM_WORLD )
constructor taking buffer, for testing
Definition at line 35 of file CGMParallelComm.cpp.
    : gqt(NULL), procConfig(comm), m_pBuffer(NULL), m_nBufferSize(0), m_currentPosition(0)
{
  myBuffer.swap(tmp_buff);

  int flag = 1;
  int retval = MPI_Initialized(&flag);
  if (MPI_SUCCESS != retval || !flag) {
    int argc = 0;
    char **argv = NULL;

    // mpi not initialized yet - initialize here
    retval = MPI_Init(&argc, &argv);
  }
}
CubitStatus CGMParallelComm::append_to_buffer ( DLIList< RefEntity * > & ref_entity_list, int add_size )
Definition at line 269 of file CGMParallelComm.cpp.
{
  if (m_currentPosition + add_size > m_nBufferSize) return CUBIT_FAILURE;
  CubitStatus result = write_buffer(ref_entity_list, m_pBuffer + m_currentPosition, add_size, true);
  RRA("Failed to append ref entity list to buffer.");

  return CUBIT_SUCCESS;
}
CubitStatus CGMParallelComm::bcast_buffer ( const unsigned int from_proc )
Definition at line 56 of file CGMParallelComm.cpp.
{
  //- broadcasts the buffer contained in this object
  if (procConfig.proc_rank() == from_proc) {
    printf("Broadcasting buffer size from %d.\n", from_proc);
    MPI_Bcast(&m_nBufferSize, 1, MPI_INT, from_proc, MPI_COMM_WORLD);
    printf("Broadcasting buffer from %d, %d bytes.\n", from_proc, m_nBufferSize);
    MPI_Bcast(m_pBuffer, m_nBufferSize, MPI_BYTE, from_proc, MPI_COMM_WORLD);
  }
  else {
    int this_size;
    printf("Broadcasting buffer size from proc %d.\n", procConfig.proc_rank());
    MPI_Bcast(&this_size, 1, MPI_INT, from_proc, MPI_COMM_WORLD);
    printf("Processor %d: received size of %d.\n", procConfig.proc_rank(), this_size);
    check_size(this_size);
    printf("Broadcasting buffer from proc %d, %d bytes.\n", procConfig.proc_rank(), this_size);
    MPI_Bcast(m_pBuffer, this_size, MPI_BYTE, from_proc, MPI_COMM_WORLD);
  }
  return CUBIT_SUCCESS;
}
CubitStatus CGMParallelComm::broadcast_entities ( const unsigned int from_proc, DLIList< RefEntity * > & ref_entity_list )
Definition at line 84 of file CGMParallelComm.cpp.
{
#ifndef USE_MPI
  return CUBIT_FAILURE;
#else
  CubitStatus result = CUBIT_SUCCESS;

  if (procConfig.proc_rank() == from_proc) {
    int nBufferSize = 0;
    result = write_buffer(ref_entity_list, m_pBuffer, nBufferSize, false);
    RRA("Failed to write ref entity list to buffer.");

    result = check_size(nBufferSize);
    RRA("Failed to write ref entity list to buffer.");

    result = write_buffer(ref_entity_list, m_pBuffer, nBufferSize, true);
    RRA("Failed to write ref entity list to buffer.");
  }

  result = bcast_buffer(from_proc);
  RRA("Failed to broadcast buffer to processors.");

  if (procConfig.proc_rank() != from_proc) {
    result = read_buffer(ref_entity_list, m_pBuffer, m_nBufferSize);
    RRA("Failed to read ref entity list from buffer.");
  }

  return CUBIT_SUCCESS;
#endif
}
CubitStatus CGMParallelComm::check_size ( int & target_size, const CubitBoolean keep = CUBIT_FALSE ) [private]
Definition at line 252 of file CGMParallelComm.cpp.
{
  printf("Checking buffer size on proc %d, target size %d.\n",
         procConfig.proc_rank(), target_size);

  if (m_nBufferSize < target_size) {
    printf("Increasing buffer size on proc %d.\n", procConfig.proc_rank());
    void *temp_buffer = malloc(target_size);
    if (keep && 0 != m_currentPosition) memcpy(temp_buffer, m_pBuffer, m_currentPosition);
    delete m_pBuffer;
    m_pBuffer = (char *) temp_buffer;
    m_nBufferSize = target_size;
  }
  return CUBIT_SUCCESS;
}
DLIList<RefEntity*>& CGMParallelComm::partition_body_list ( ) [inline]
Definition at line 40 of file CGMParallelComm.hpp.
{return partitioningBodyList;}
const DLIList<RefEntity*>& CGMParallelComm::partition_body_list ( ) const [inline]
Definition at line 41 of file CGMParallelComm.hpp.
{return partitioningBodyList;}
DLIList<RefEntity*>& CGMParallelComm::partition_surf_list ( ) [inline]
return partition ref entity list
Definition at line 38 of file CGMParallelComm.hpp.
{return partitioningSurfList;}
const DLIList<RefEntity*>& CGMParallelComm::partition_surf_list ( ) const [inline]
Definition at line 39 of file CGMParallelComm.hpp.
{return partitioningSurfList;}
const CGMProcConfig& CGMParallelComm::proc_config ( ) const [inline]
Get proc config for this communication object.
Definition at line 44 of file CGMParallelComm.hpp.
{return procConfig;}
CGMProcConfig& CGMParallelComm::proc_config ( ) [inline]
Get proc config for this communication object.
Definition at line 47 of file CGMParallelComm.hpp.
{return procConfig;}
CubitStatus CGMParallelComm::read_buffer ( DLIList< RefEntity * > & ref_entity_list, const char * pBuffer, const int n_buffer_size )
Definition at line 233 of file CGMParallelComm.cpp.
{
#ifndef USE_MPI
  return CUBIT_FAILURE;
#else
  if (n_buffer_size == 0) return CUBIT_SUCCESS;

#ifdef HAVE_OCC
  CubitStatus result = GeometryQueryTool::instance()->import_solid_model(&ref_entity_list, pBuffer, n_buffer_size);
  RRA("Failed to read ref entities from buffer.");
#endif

  return CUBIT_SUCCESS;
#endif
}
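read_buffer is the counterpart of write_buffer: it imports serialized geometry back into ref_entity_list, and does real work only when the library is built with USE_MPI and HAVE_OCC. A minimal sketch, where pcomm is an existing CGMParallelComm instance and `incoming` and `incoming_size` are hypothetical names for a buffer produced by write_buffer on another rank and received over MPI:

// `incoming` and `incoming_size` are assumed to have been filled by an MPI
// receive of a buffer that write_buffer produced on another rank.
DLIList<RefEntity*> imported;
if (CUBIT_SUCCESS != pcomm.read_buffer(imported, incoming, incoming_size)) {
  // handle the error
}
// `imported` now holds the entities reconstructed from the buffer.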
CubitStatus CGMParallelComm::scatter_entities ( const unsigned int from_proc, DLIList< RefEntity * > & ref_entity_list )
Definition at line 117 of file CGMParallelComm.cpp.
{
#ifndef USE_MPI
  return CUBIT_FAILURE;
#else
  CubitStatus result = CUBIT_SUCCESS;
  int i, mySendCount, nEntity;
  int nProcs = procConfig.proc_size();
  int *sendCounts = new int[nProcs];
  int *displacements = new int[nProcs];
  displacements[0] = 0;

  if (procConfig.proc_rank() == from_proc) {
    // make a balanced entity lists
    int sum = 0;
    DLIList<RefEntity*> **balancedLists = new DLIList<RefEntity*>*[nProcs];
    for (i = 0; i < nProcs; i++) {
      balancedLists[i] = new DLIList<RefEntity*>;
    }

    nEntity = ref_entity_list.size();
    ref_entity_list.reset();
    for (i = 0; i < nEntity; i++) {
      RefEntity* entity = ref_entity_list.get_and_step();
      TDParallel *td_par = (TDParallel *) entity->get_TD(&TDParallel::is_parallel);
      if (td_par == NULL) {
        PRINT_ERROR("Partitioned entities should have TDParallel data.");
        return CUBIT_FAILURE;
      }

      unsigned int charge_p = td_par->get_charge_proc();
      if (charge_p != from_proc) { // only to compute processors
        balancedLists[charge_p]->append(entity); // add charge processor
      }

      DLIList<int>* ghost_procs = td_par->get_ghost_proc_list();
      int n_ghost = ghost_procs->size();
      ghost_procs->reset();
      for (int j = 0; j < n_ghost; j++) { // add ghost processors
        unsigned int ghost_p = ghost_procs->get_and_step();
        if (ghost_p != from_proc) balancedLists[ghost_p]->append(entity);
      }
    }

    // add buffer size for each processors
    for (i = 0; i < nProcs; i++) {
      result = write_buffer(*(balancedLists[i]), m_pBuffer, sendCounts[i], false);
      RRA("Failed to write ref entity list to buffer.");
      sum += sendCounts[i];
    }

    // check the size of the buffer and resize if necessary
    check_size(sum);

    // now append the real information
    ref_entity_list.reset();
    for (i = 0; i < nProcs; i++) {
      append_to_buffer(*(balancedLists[i]), sendCounts[i]);
    }

    delete [] balancedLists;
  }

  // broadcast buffer size array
  printf("Broadcasting buffer size array from master.\n");
  MPI_Bcast(sendCounts, nProcs, MPI_INT, from_proc, MPI_COMM_WORLD);

  for ( i = 1; i < nProcs; i++) {
    displacements[i] = displacements[i-1] + sendCounts[i-1];
  }

  mySendCount = sendCounts[procConfig.proc_rank()];

  if (procConfig.proc_rank() != from_proc) check_size(mySendCount);

  printf("Scattering buffer from master.\n");

  // scatter geometry
  MPI_Scatterv(m_pBuffer, sendCounts, displacements, MPI_BYTE,
               m_pBuffer, mySendCount, MPI_BYTE, from_proc, MPI_COMM_WORLD);

  if (procConfig.proc_rank() != from_proc) {
    result = read_buffer(ref_entity_list, m_pBuffer, mySendCount);
    RRA("Failed to read ref entity list from buffer.");
  }

  return CUBIT_SUCCESS;
#endif
}
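scatter_entities is also collective. On from_proc every entity in ref_entity_list must already carry TDParallel tool data naming its charge processor and ghost processors (normally attached by a partitioner); each other rank receives its share back in ref_entity_list. A sketch of the call pattern, with the partitioning step only indicated by a comment:

CGMParallelComm pcomm;                  // defaults to MPI_COMM_WORLD
const unsigned int root = 0;

DLIList<RefEntity*> my_entities;
if (pcomm.proc_config().proc_rank() == root) {
  // ... fill `my_entities` with partitioned entities; each entity must
  //     already have TDParallel data (charge and ghost processor lists) ...
}

// Every rank calls this; non-root ranks get their assigned entities back.
if (CUBIT_SUCCESS != pcomm.scatter_entities(root, my_entities)) {
  // handle the error
}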
CubitStatus CGMParallelComm::write_buffer ( DLIList< RefEntity * > & ref_entity_list, char * pBuffer, int & n_buffer_size, bool b_export_buffer )
Definition at line 209 of file CGMParallelComm.cpp.
{
#ifndef USE_MPI
  return CUBIT_FAILURE;
#else
  if (ref_entity_list.size() == 0) {
    n_buffer_size = 0;
    return CUBIT_SUCCESS;
  }

#ifdef HAVE_OCC
  CubitStatus result = GeometryQueryTool::instance()->export_solid_model(ref_entity_list, pBuffer, n_buffer_size, b_write_buffer);
  RRA("Failed to write ref entities to buffer.");
#endif

  if (b_write_buffer) m_currentPosition += n_buffer_size;
  return CUBIT_SUCCESS;
#endif
}
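Inside this class write_buffer is used in two passes: a sizing pass with the export flag false, which only computes n_buffer_size, and an export pass with the flag true, which serializes the entities and advances m_currentPosition. A sketch of the same two-pass pattern with a caller-managed buffer; the buffer handling here is illustrative and simply mirrors how broadcast_entities drives this routine (pcomm is an existing CGMParallelComm instance):

DLIList<RefEntity*> entities;
// ... populate `entities` with the geometry to serialize ...

int needed = 0;
// Pass 1: size only; no bytes are written (broadcast_entities passes its
// possibly-NULL internal buffer for this pass).
if (CUBIT_SUCCESS != pcomm.write_buffer(entities, NULL, needed, false)) {
  // handle the error
}

std::vector<char> buf(needed);
// Pass 2: export the entities into the caller's buffer.
if (CUBIT_SUCCESS != pcomm.write_buffer(entities, &buf[0], needed, true)) {
  // handle the error
}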
GeometryQueryTool* CGMParallelComm::gqt [private]
CGM query tool interface associated with this writer.
Definition at line 75 of file CGMParallelComm.hpp.
int CGMParallelComm::m_currentPosition [private]
Definition at line 87 of file CGMParallelComm.hpp.
int CGMParallelComm::m_nBufferSize [private]
Definition at line 85 of file CGMParallelComm.hpp.
char* CGMParallelComm::m_pBuffer [private]
Definition at line 83 of file CGMParallelComm.hpp.
std::vector<unsigned char> CGMParallelComm::myBuffer [private]
data buffer used to communicate
Definition at line 81 of file CGMParallelComm.hpp.
DLIList<RefEntity*> CGMParallelComm::partitioningBodyList [private]
Definition at line 90 of file CGMParallelComm.hpp.
DLIList<RefEntity*> CGMParallelComm::partitioningSurfList [private]
Definition at line 89 of file CGMParallelComm.hpp.
CGMProcConfig CGMParallelComm::procConfig [private]
Proc config object, keeps info on parallel stuff.
Definition at line 78 of file CGMParallelComm.hpp.