/*
 * migrate_nontrivial.cpp
 *
 * migrate_nontrivial contains tests for migrating meshes in parallel environments with the iMOAB
 * API, using nontrivial partitions. For starters, we will use Zoltan to compute the partition in
 * parallel, and then use the result to migrate the mesh. The trivial partition gave terrible
 * results for MPAS-type meshes, which were numbered like a fractal; the resulting migrations
 * looked like Swiss cheese, full of holes. A mesh is read on the sender tasks; we will use
 * graph-based or geometry-based methods, and will modify the ZoltanPartitioner to add the
 * needed methods.
 *
 * The mesh is sent to the receiver tasks with nonblocking MPI_Isend calls, and then received
 * with blocking MPI_Recv calls.
 *
 * We do not modify the GLOBAL_ID tag; we assume it was set correctly before we started.
 */
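/*
 * Example invocation (a sketch only; the binary name, input file, and task counts depend on the
 * local build and test setup):
 *
 *   mpiexec -np 4 ./migrate_nontrivial -f field1.h5m -a 0 -b 1 -c 2 -d 3 -t 1
 *
 * reads the mesh on tasks 0-1 and migrates it, using a graph (Zoltan) partition, to tasks 2-3;
 * see the ProgOptions setup in main() for the full list of options.
 */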
#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/ProgOptions.hpp"
#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )
using namespace moab;
//#define GRAPH_INFO
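// CHECKRC: if an iMOAB/MPI call returns a nonzero code, print the message and fail the
// enclosing test function with MB_FAILURE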
#define CHECKRC( rc, message ) \
if( 0 != ( rc ) ) \
{ \
printf( "Error: %s\n", message ); \
return MB_FAILURE; \
}
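// reduce the per-task error flag with MPI_MAX, so every task agrees on whether any task failed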
int is_any_proc_error( int is_my_error )
{
int result = 0;
int err = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
return err || result;
}
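// run one migration test on all tasks and report pass/fail from the root task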
int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
ErrorCode result = ( *func )( file_name );
int is_err = is_any_proc_error( ( MB_SUCCESS != result ) );
int lrank; // local name, to avoid shadowing the global 'rank' declared below
MPI_Comm_rank( MPI_COMM_WORLD, &lrank );
if( lrank == 0 )
{
if( is_err )
std::cout << func_name << " : FAILED!!" << std::endl;
else
std::cout << func_name << " : success" << std::endl;
}
return is_err;
}
ErrorCode migrate_graph( const char* filename );
ErrorCode migrate_geom( const char* filename );
ErrorCode migrate_trivial( const char* filename );
// some global variables, used by all tests
int rank, size, ierr;
int compid1, compid2; // component ids are unique over all pes, and established in advance;
int nghlay; // number of ghost layers for loading the file
std::vector< int > groupTasks; // at most 4 tasks
int startG1, startG2, endG1, endG2;
MPI_Comm jcomm; // will be a copy of the global
MPI_Group jgroup;
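// migrate_smart: the common driver for all three tests; it
//   1) builds the sender group/communicator [startG1, endG1] and the receiver group/communicator
//      [startG2, endG2] out of the joint communicator,
//   2) registers an iMOAB application on each side,
//   3) loads the mesh on the senders, sends it with the requested partition method
//      (0 trivial, 1 graph, 2 geometry), receives it on the receivers, and writes it out,
//   4) frees the sender buffers and deregisters the applications.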
ErrorCode migrate_smart( const char* filename, const char* outfile, int partMethod )
{
// first create MPI groups
std::string filen( filename );
MPI_Group group1, group2;
groupTasks.resize( endG1 - startG1 + 1 );
for( int i = startG1; i <= endG1; i++ )
groupTasks[i - startG1] = i;
ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, &groupTasks[0], &group1 );
CHECKRC( ierr, "can't create group1" )
groupTasks.resize( endG2 - startG2 + 1 );
for( int i = startG2; i <= endG2; i++ )
groupTasks[i - startG2] = i;
ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, &groupTasks[0], &group2 );
CHECKRC( ierr, "can't create group2" )
// create 2 communicators, one for each group
int tagcomm1 = 1, tagcomm2 = 2;
int context_id = -1; // plain migrate, default context
MPI_Comm comm1, comm2;
ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
CHECKRC( ierr, "can't create comm1" )
ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
CHECKRC( ierr, "can't create comm2" )
ierr = iMOAB_Initialize( 0, 0 ); // nothing is needed from argc/argv here yet
CHECKRC( ierr, "can't initialize iMOAB" )
// give some dummy values to component ids, just to differentiate between them
// the par comm graph is unique between components
compid1 = 4;
compid2 = 7;
int appID1;
iMOAB_AppID pid1 = &appID1;
int appID2;
iMOAB_AppID pid2 = &appID2;
if( comm1 != MPI_COMM_NULL )
{
ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
CHECKRC( ierr, "can't register app1 " )
}
if( comm2 != MPI_COMM_NULL )
{
ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
CHECKRC( ierr, "can't register app2 " )
}
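// sender side: load the mesh in parallel on component 1, then partition it for the receiver
// layout and post the (nonblocking) sends toward component 2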
if( comm1 != MPI_COMM_NULL )
{
std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
nghlay = 0;
ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );
CHECKRC( ierr, "can't load mesh " )
ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &partMethod ); // send to component 2
CHECKRC( ierr, "cannot send elements" )
#ifdef GRAPH_INFO
int is_sender = 1;
int context = compid2;
iMOAB_DumpCommGraph( pid1, &context, &is_sender, "MigrateS" );
#endif
}
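// receiver side: component 2 receives the migrated mesh from component 1 and writes it out in
// parallel for inspection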
if( comm2 != MPI_COMM_NULL )
{
ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 ); // receive from component 1
CHECKRC( ierr, "cannot receive elements" )
std::string wopts;
wopts = "PARALLEL=WRITE_PART;";
ierr = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );
CHECKRC( ierr, "cannot write received mesh" )
#ifdef GRAPH_INFO
int is_sender = 0;
int context = compid1;
iMOAB_DumpCommGraph( pid2, &context, &is_sender, "MigrateR" );
#endif
}
MPI_Barrier( jcomm );
// we can now free the sender buffers
context_id = compid2; // even for default migrate, be more explicit
if( comm1 != MPI_COMM_NULL )
{
ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );
CHECKRC( ierr, "cannot free sender buffers" )
}
if( comm2 != MPI_COMM_NULL )
{
ierr = iMOAB_DeregisterApplication( pid2 );
CHECKRC( ierr, "cannot deregister app 2 receiver" )
}
if( comm1 != MPI_COMM_NULL )
{
ierr = iMOAB_DeregisterApplication( pid1 );
CHECKRC( ierr, "cannot deregister app 1 sender" )
}
ierr = iMOAB_Finalize();
CHECKRC( ierr, "did not finalize iMOAB" )
if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );
MPI_Group_free( &group1 );
MPI_Group_free( &group2 );
return MB_SUCCESS;
}
// wrappers that pick the partition method; the sender/receiver task layouts come from the command-line options
ErrorCode migrate_graph( const char* filename )
{
return migrate_smart( filename, "migrate_graph.h5m", 1 );
}
ErrorCode migrate_geom( const char* filename )
{
return migrate_smart( filename, "migrate_geom.h5m", 2 );
}
ErrorCode migrate_trivial( const char* filename )
{
return migrate_smart( filename, "migrate_trivial.h5m", 0 );
}
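// driver: duplicate MPI_COMM_WORLD as the joint communicator, parse the sender/receiver layouts
// and test type from the command line, then run the selected migration tests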
int main( int argc, char* argv[] )
{
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm_size( MPI_COMM_WORLD, &size );
MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
MPI_Comm_group( jcomm, &jgroup );
ProgOptions opts;
int typeTest = 3;
// std::string inputfile, outfile("out.h5m"), netcdfFile, variable_name, sefile_name;
std::string filename;
filename = TestDir + "unittest/field1.h5m";
startG1 = 0;
startG2 = 0;
endG1 = 0;
endG2 = 1;
opts.addOpt< std::string >( "file,f", "source file", &filename );
opts.addOpt< int >( "startSender,a", "start task for source layout", &startG1 );
opts.addOpt< int >( "endSender,b", "end task for source layout", &endG1 );
opts.addOpt< int >( "startRecv,c", "start task for receiver layout", &startG2 );
opts.addOpt< int >( "endRecv,d", "end task for receiver layout", &endG2 );
opts.addOpt< int >( "typeTest,t", "test types (0 - trivial, 1 graph, 2 geom, 3 both graph and geometry",
&typeTest );
opts.parseCommandLine( argc, argv );
if( rank == 0 )
{
std::cout << " input file : " << filename << "\n";
std::cout << " sender on tasks: " << startG1 << ":" << endG1 << "\n";
std::cout << " receiver on tasks: " << startG2 << ":" << endG2 << "\n";
std::cout << " type migrate: " << typeTest << " (0 - trivial, 1 graph , 2 geom, 3 both graph and geom ) \n";
}
int num_errors = 0;
if( 0 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_trivial, filename.c_str() );
if( 3 == typeTest || 1 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_graph, filename.c_str() );
if( 3 == typeTest || 2 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_geom, filename.c_str() );
if( rank == 0 )
{
if( !num_errors )
std::cout << "All tests passed" << std::endl;
else
std::cout << num_errors << " TESTS FAILED!" << std::endl;
}
MPI_Group_free( &jgroup );
MPI_Comm_free( &jcomm );
MPI_Finalize();
return num_errors;
}
#undef VERBOSE