/*
 * migrate_test contains tests for migrating meshes in parallel environments with the iMOAB API.
 * These methods are also exercised in the example MigrateMesh.F90, with variable
 * numbers of processes; migrate_test is usually launched on 2 processes and exercises
 * several sender/receiver configurations.
 * A mesh is read on the sender tasks, sent to the receiver tasks, and then written out for verification.
 * It depends on parallel HDF5 for reading and writing in parallel.
 */
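// Typical invocation (the binary name is assumed to match the test target):
//   mpiexec -np 2 ./migrate_test   # runs the 1->1, 1->2, 2->1 and 2->2 cases
//   mpiexec -np 4 ./migrate_test   # additionally runs the 4->2, 2->4, 4->3 and overlap cases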

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

#define CHECKRC( rc, message )            \
    if( 0 != ( rc ) )                     \
    {                                     \
        printf( "Error: %s\n", message ); \
        return MB_FAILURE;                \
    }

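// reduce the local error flag over all ranks; returns nonzero if any rank reported an error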
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

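// run a single test function on the given file; rank 0 reports success or failure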
int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int my_rank;  // use a distinct name, to avoid shadowing the global rank
    MPI_Comm_rank( MPI_COMM_WORLD, &my_rank );
    if( my_rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

ErrorCode migrate_1_1( const char* filename );
ErrorCode migrate_1_2( const char* filename );
ErrorCode migrate_2_1( const char* filename );
ErrorCode migrate_2_2( const char* filename );
ErrorCode migrate_4_2( const char* filename );
ErrorCode migrate_2_4( const char* filename );
ErrorCode migrate_4_3( const char* filename );
ErrorCode migrate_overlap( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;  // component ids are unique over all PEs and established in advance
int nghlay;            // number of ghost layers for loading the file
int groupTasks[4];     // at most 4 tasks per group
int startG1, startG2, endG1, endG2;

MPI_Comm jcomm;  // will be a duplicate of MPI_COMM_WORLD
MPI_Group jgroup;

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    if( argc > 1 )
    {
        filename = argv[1];
    }
    int num_errors = 0;
    num_errors += RUN_TEST_ARG2( migrate_1_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_1_2, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_2, filename.c_str() );
    if( size >= 4 )
    {
        num_errors += RUN_TEST_ARG2( migrate_4_2, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_2_4, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_4_3, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_overlap, filename.c_str() );
    }
    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

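// helper used by all tests: the sender group spans tasks [startG1, endG1] and the
// receiver group spans [startG2, endG2] (globals set by each test case before calling).
// The mesh is loaded on the senders, migrated to the receivers and written to outfile;
// then a dense element tag is sent from the receivers back to the senders and written again.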
ErrorCode migrate( const char* filename, const char* outfile )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, groupTasks, &group1 );
    CHECKRC( ierr, "can't create group1" )

    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, groupTasks, &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )

    ierr = iMOAB_Initialize( 0, 0 );  // nothing is needed from argc/argv yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to component ids, just to differentiate between them
    // the par comm graph is unique between components
    compid1        = 4;
    compid2        = 7;
    int context_id = -1;  // default context; will later be set to compid1 or compid2

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1 " )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2 " )
    }

    int method = 0;  // trivial partition for sending
    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );
        CHECKRC( ierr, "can't load mesh " )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &method );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    // exchange a tag from component to component:
    // one component sends the tag and the other receives it; each side participates
    // only if its communicator is not null
    int size_tag  = 1;  // a double dense tag, on elements
    int tagType   = DENSE_DOUBLE;
    int tagIndex2 = 0, tagIndex1 = 0;  // these will be tag indices on each app pid

    std::string fileAfterTagMigr( outfile );  // outfile name ends in .h5m
    int sizen = fileAfterTagMigr.length();
    fileAfterTagMigr.erase( sizen - 4, 4 );  // erase extension .h5m
    fileAfterTagMigr = fileAfterTagMigr + "_tag.h5m";

    // now send a tag from component 2, towards component 1
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid2, "element_field", &tagType, &size_tag, &tagIndex2 );
        CHECKRC( ierr, "failed to define tag element_field" )
        // this tag already exists in the file

        // first, send from compid2 to compid1, from comm2, using the common joint comm
        // as always, use nonblocking sends
        // context_id should now be compid1
        context_id = compid1;
        ierr       = iMOAB_SendElementTag( pid2, "element_field", &jcomm, &context_id );
        CHECKRC( ierr, "cannot send tag values" )
    }
    // receive on component 1
    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid1, "element_field", &tagType, &size_tag, &tagIndex1 );
        CHECKRC( ierr, "failed to define tag element_field" )
        context_id = compid2;
        ierr       = iMOAB_ReceiveElementTag( pid1, "element_field", &jcomm, &context_id );
        CHECKRC( ierr, "cannot receive tag values" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid1, fileAfterTagMigr.c_str(), wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // free the sender buffers and deregister the receiver application
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( pid2, &context_id );

        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
// migrate from task 0 to task 1, non-overlapping
ErrorCode migrate_1_1( const char* filename )
{
    startG1 = endG1 = 0;
    startG2 = endG2 = 1;
    return migrate( filename, "migrate11.h5m" );
}
// migrate from task 0 to 2 tasks (0 and 1)
ErrorCode migrate_1_2( const char* filename )
{
    startG1 = endG1 = startG2 = 0;
    endG2                     = 1;
    return migrate( filename, "migrate12.h5m" );
}

// migrate from 2 tasks (0, 1) to 1 task (0)
ErrorCode migrate_2_1( const char* filename )
{
    startG1 = endG2 = startG2 = 0;
    endG1                     = 1;
    return migrate( filename, "migrate21.h5m" );
}

// migrate from 2 tasks to 2 tasks (overkill)
ErrorCode migrate_2_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG1 = endG2 = 1;
    return migrate( filename, "migrate22.h5m" );
}
// migrate from 4 tasks to 2 tasks
ErrorCode migrate_4_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 1;
    endG1             = 3;
    return migrate( filename, "migrate42.h5m" );
}

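// migrate from 2 tasks to 4 tasks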
ErrorCode migrate_2_4( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 3;
    endG1             = 1;
    return migrate( filename, "migrate24.h5m" );
}

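// migrate from 4 tasks to 3 tasks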
ErrorCode migrate_4_3( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 2;
    endG1             = 3;
    return migrate( filename, "migrate43.h5m" );
}

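// migrate between overlapping groups: senders are tasks 0-1, receivers are tasks 1-2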
ErrorCode migrate_overlap( const char* filename )
{
    startG1 = 0;
    startG2 = 1;
    endG1   = 1;
    endG2   = 2;
    return migrate( filename, "migrate_over.h5m" );
}