/*
 * This imoab_map_target test simulates coupling between 2 components.
 * 2 meshes will be loaded from 2 files (src, tgt), along with one map file.
 * The target mesh is migrated to the coupler with a partitioning method.
 * After the map is read, in parallel, on coupler PEs, with row ownership from
 * the target mesh, the coupler mesh for the source will be generated in a
 * migration step, from the source to the coverage mesh on the coupler.
 * During this migration, a par comm graph will be established between the
 * source and the coupler, which will assist in field transfer from the
 * source to the coupler; the original migration will be used for the target
 * mesh, from the coupler back to the target component.
 */
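// outline of the steps exercised below:
// 1) create component and coupler groups / communicators
// 2) load the target (ocn) mesh and migrate it to the coupler
// 3) read the map file in parallel on coupler PEs (row ownership from the target)
// 4) migrate the source (atm) mesh to a coverage mesh on the coupler
// 5) send a source tag to the coverage mesh, apply the mapping weights,
// and send the projected tag back to the target component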
#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error the imoab_map_target test requires MPI configuration
#endif
// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>
#include "imoab_coupler_utils.hpp"
#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif
int main( int argc, char* argv[] )
{
int ierr;
int rankInGlobalComm, numProcesses;
MPI_Group jgroup;
std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
// Timer data
moab::CpuTimer timer;
double timer_ops; // used together with opName by the PUSH_TIMER/POP_TIMER macros
std::string opName;
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
MPI_Comm_group( MPI_COMM_WORLD, &jgroup ); // all processes in jgroup
std::string atmFilename = TestDir + "unittest/srcWithSolnTag.h5m";
// on a regular case, 5 ATM, 6 CPLATM (ATMX), 17 OCN , 18 CPLOCN (OCNX) ;
// intx atm/ocn is not in e3sm yet, give a number
// 6 * 100+ 18 = 618 : atmocnid
// 9 LND, 10 CPLLND
// 6 * 100 + 10 = 610 atmlndid:
// cmpatm is for atm on atm pes
// cmpocn is for ocean, on ocean pe
// cplatm is for atm on coupler pes
// cplocn is for ocean on coupler pes
// atmocnid is for intx atm / ocn on coupler pes
//
int rankInAtmComm = -1;
int cmpatm = 5,
cplatm = 6; // component ids are unique over all pes, and established in advance;
std::string ocnFilename = TestDir + "unittest/outTri15_8.h5m";
std::string mapFilename = TestDir + "unittest/mapNE20_FV15.nc"; // this is a netcdf file!
std::string baseline = TestDir + "unittest/baseline2.txt";
int rankInOcnComm = -1;
int cmpocn = 17, cplocn = 18,
atmocnid = 618; // component ids are unique over all pes, and established in advance;
int rankInCouComm = -1;
int nghlay = 0; // number of ghost layers for loading the file
int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;
int startG4 = startG1, endG4 = endG1; // these are for coupler layout
int context_id; // used now for freeing buffers
int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
repartitioner_scheme = 2; // use the graph partitioner in that case
#endif
// default: load atm / source on 2 procs, ocean / target on 2;
// load map on 2 also, in parallel, distributed by rows (which is actually very bad
// for the ocean mesh, because probably all source cells will be involved in the
// coverage mesh on both tasks)
ProgOptions opts;
opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );
opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
int types[2] = { 3, 3 }; // type of source and target; 1 = SE, 2 = PC, 3 = FV
int disc_orders[2] = { 1, 1 }; // 1 is for FV and PC; 4 could be for SE
opts.addOpt< int >( "typeSource,x", "source type", &types[0] );
opts.addOpt< int >( "typeTarget,y", "target type", &types[1] );
opts.addOpt< int >( "orderSource,u", "source order", &disc_orders[0] );
opts.addOpt< int >( "orderTarget,v", "target oorder", &disc_orders[1] );
bool analytic_field = false;
opts.addOpt< void >( "analytic,q", "analytic field", &analytic_field );
bool no_regression_test = false;
opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
opts.parseCommandLine( argc, argv );
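// example invocation (hypothetical binary name and task counts; mesh and map file
// names default to the TestDir paths above):
//   mpiexec -np 2 ./imoab_map_target -a 0 -b 1 -c 0 -d 1 -g 0 -j 1
// i.e., atm on tasks 0-1, ocn on tasks 0-1, coupler on tasks 0-1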
char fileWriteOptions[] = "PARALLEL=WRITE_PART";
if( !rankInGlobalComm )
{
std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
<< "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2
<< "\n map file: " << mapFilename << "\n on tasks : " << startG4 << ":" << endG4 << "\n";
if( !no_regression_test )
{
std::cout << " check projection against baseline: " << baseline << "\n";
}
}
// load files on 3 different communicators / groups
// first group has task 0, second group has tasks 0 and 1
// coupler will be on joint tasks, on a third group (0 and 1, again)
MPI_Group atmPEGroup;
MPI_Comm atmComm;
ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
MPI_Group ocnPEGroup;
MPI_Comm ocnComm;
ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
// we will always have a coupler
MPI_Group couPEGroup;
MPI_Comm couComm;
ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
// atm_coupler
MPI_Group joinAtmCouGroup;
MPI_Comm atmCouComm;
ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
// ocn_coupler
MPI_Group joinOcnCouGroup;
MPI_Comm ocnCouComm;
ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
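// the joint communicators (component tasks + coupler tasks) are what the
// nonblocking iMOAB_SendElementTag / iMOAB_ReceiveElementTag calls below run on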
ierr = iMOAB_Initialize( argc, argv ); // nothing is really needed from argc, argv yet
CHECKIERR( ierr, "Cannot initialize iMOAB" )
int cmpAtmAppID = -1;
iMOAB_AppID cmpAtmPID = &cmpAtmAppID; // atm
int cplAtmAppID = -1; // -1 means it is not initialized
iMOAB_AppID cplAtmPID = &cplAtmAppID; // atm on coupler PEs
int cmpOcnAppID = -1;
iMOAB_AppID cmpOcnPID = &cmpOcnAppID; // ocn
int cplOcnAppID = -1, cplAtmOcnAppID = -1; // -1 means it is not initialized
iMOAB_AppID cplOcnPID = &cplOcnAppID; // ocn on coupler PEs
iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID; // intx atm -ocn on coupler PEs
if( couComm != MPI_COMM_NULL )
{
MPI_Comm_rank( couComm, &rankInCouComm );
// Register all the applications on the coupler PEs
ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
cplAtmPID ); // atm on coupler pes
CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
cplOcnPID ); // ocn on coupler pes
CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
}
if( atmComm != MPI_COMM_NULL )
{
MPI_Comm_rank( atmComm, &rankInAtmComm );
ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
CHECKIERR( ierr, "Cannot register ATM App" )
ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
CHECKIERR( ierr, "Cannot load atm mesh" )
}
if( ocnComm != MPI_COMM_NULL )
{
MPI_Comm_rank( ocnComm, &rankInOcnComm );
ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
CHECKIERR( ierr, "Cannot register OCN App" )
}
MPI_Barrier( MPI_COMM_WORLD );
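// load the target (ocn) mesh on its component PEs, then migrate it to the coupler
// PEs, repartitioning with the selected scheme (graph-based when Zoltan is available)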
ierr =
setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
&couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
CHECKIERR( ierr, "Cannot set-up target meshes" )
#ifdef VERBOSE
if( couComm != MPI_COMM_NULL )
{
char outputFileTgt3[] = "recvTgt.h5m";
ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
CHECKIERR( ierr, "cannot write target mesh after receiving on coupler" )
}
#endif
MPI_Barrier( MPI_COMM_WORLD );
if( couComm != MPI_COMM_NULL )
{
// now load map between OCNx and ATMx on coupler PEs
ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
}
const std::string intx_from_file_identifier = "map-from-file";
if( couComm != MPI_COMM_NULL )
{
int col_or_row = 0; // row based partition
int type = 3; // target is FV cell with global ID as DOFs
ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, cplOcnPID, &col_or_row, &type,
intx_from_file_identifier.c_str(), mapFilename.c_str() );
CHECKIERR( ierr, "failed to load map file from disk" );
}
if( atmCouComm != MPI_COMM_NULL )
{
int type = types[0]; // FV
int direction = 1; // from source to coupler; will create a mesh on cplAtmPID
// because it is like "coverage", context will be cplocn
ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &type,
&cmpatm, &cplocn, &direction );
CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
#ifdef VERBOSE
if( *cplAtmPID >= 0 )
{
char prefix[] = "atmcov";
ierr = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
CHECKIERR( ierr, "failed to write local mesh" );
}
#endif
}
MPI_Barrier( MPI_COMM_WORLD );
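// define the source field and the projected field as dense double tags; for FV
// each cell carries order^2 DoFs (1 per cell with the first-order defaults)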
int tagIndex[2];
int tagTypes[2] = { DENSE_DOUBLE, DENSE_DOUBLE };
int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = disc_orders[1] * disc_orders[1] /*FV*/;
const char* bottomTempField = "AnalyticalSolnSrcExact";
const char* bottomTempProjectedField = "Target_proj";
if( couComm != MPI_COMM_NULL )
{
ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );
ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
CHECKIERR( ierr, "failed to define the field tag Target_proj" );
}
if( analytic_field && ( atmComm != MPI_COMM_NULL ) ) // we are on source /atm pes
{
// cmpOcnPID, "T_proj;u_proj;v_proj;"
ierr = iMOAB_DefineTagStorage( cmpAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );
int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
/*
 * Each process in the communicator will have access to a local mesh instance, which will contain the
 * original cells in the local partition and ghost entities. Numbers of vertices, primary cells, visible
 * blocks, and sideset and nodeset boundary conditions are returned in size-3 arrays,
 * for local, ghost and total numbers.
 */
ierr = iMOAB_GetMeshInfo( cmpAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
CHECKIERR( ierr, "failed to get num primary elems" );
int numAllElem = nelem[2];
int eetype = 1;
if( types[0] == 2 ) // point cloud
{
numAllElem = nverts[2];
eetype = 0;
}
std::vector< double > vals;
int storLeng = atmCompNDoFs * numAllElem;
vals.resize( storLeng );
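// fill the source tag with a simple linear ramp (value = entry index), so the
// projection produces a nonzero, deterministic field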
for( int k = 0; k < storLeng; k++ )
vals[k] = k;
ierr = iMOAB_SetDoubleTagStorage( cmpAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
CHECKIERR( ierr, "cannot make analytical tag" )
}
// need to make sure that the coverage mesh (created during the intx method) receives the tag
// that needs to be projected to the target; so far, the coverage mesh has only the ids and global dofs.
// need to change the migrate method to accommodate any GLL tag
// now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
// (cplAtmPID), using the new coverage graph communicator
// make the tag 0, to check we are actually sending needed data
{
if( cplAtmAppID >= 0 )
{
int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
/*
 * Each process in the communicator will have access to a local mesh instance,
 * which will contain the original cells in the local partition and ghost
 * entities. Numbers of vertices, primary cells, visible blocks, and sideset
 * and nodeset boundary conditions are returned in size-3 arrays, for local,
 * ghost and total numbers.
 */
ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
CHECKIERR( ierr, "failed to get num primary elems" );
int numAllElem = nelem[2];
int eetype = 1;
if( types[0] == 2 ) // Point cloud
{
eetype = 0; // vertices
numAllElem = nverts[2];
}
std::vector< double > vals;
int storLeng = atmCompNDoFs * numAllElem;
vals.resize( storLeng );
for( int k = 0; k < storLeng; k++ )
vals[k] = 0.;
ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
CHECKIERR( ierr, "cannot zero out the tag" )
}
}
const char* concat_fieldname = "AnalyticalSolnSrcExact";
const char* concat_fieldnameT = "Target_proj";
{
PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
if( atmComm != MPI_COMM_NULL )
{
// as always, use nonblocking sends
// this is for projection to ocean:
ierr = iMOAB_SendElementTag( cmpAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cplocn );
CHECKIERR( ierr, "cannot send tag values" )
}
if( couComm != MPI_COMM_NULL )
{
// receive on atm on coupler pes, that was redistributed according to coverage
ierr = iMOAB_ReceiveElementTag( cplAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cmpatm );
CHECKIERR( ierr, "cannot receive tag values" )
}
// we can now free the sender buffers
if( atmComm != MPI_COMM_NULL )
{
ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn ); // context is for ocean
CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
}
POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )
#ifdef VERBOSE
if( *cplAtmPID >= 0 )
{
char prefix[] = "atmcov_withdata";
ierr = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
}
#endif
if( couComm != MPI_COMM_NULL )
{
/* We have the remapping weights now. Let us apply the weights onto the tag we defined
on the source mesh and get the projection on the target mesh */
PUSH_TIMER( "Apply Scalar projection weights" )
ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
concat_fieldname, concat_fieldnameT );
CHECKIERR( ierr, "failed to compute projection weight application" );
POP_TIMER( couComm, rankInCouComm )
{
char outputFileTgt[] = "fOcnOnCpl4.h5m";
ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
CHECKIERR( ierr, "could not write fOcnOnCpl4.h5m to disk" )
}
}
// send the projected tag back to ocean pes, with send/receive tag
if( ocnComm != MPI_COMM_NULL )
{
int tagIndexIn2;
ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
&tagIndexIn2 );
CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
"Target_proj on ocn pes" );
}
// send the tag to ocean pes, from ocean mesh on coupler pes
// from couComm, using common joint comm ocn_coupler
// as always, use nonblocking sends
// original graph (context is -1)
if( couComm != MPI_COMM_NULL )
{
// need to use ocean comp id for context
context_id = cmpocn; // id for ocean on comp
ierr = iMOAB_SendElementTag( cplOcnPID, "Target_proj", &ocnCouComm, &context_id );
CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
}
// receive on component 2, ocean
if( ocnComm != MPI_COMM_NULL )
{
context_id = cplocn; // id for ocean on coupler
ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "Target_proj", &ocnCouComm, &context_id );
CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
}
if( couComm != MPI_COMM_NULL )
{
context_id = cmpocn;
ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
CHECKIERR( ierr, "cannot free buffers for Target_proj tag migration " )
}
MPI_Barrier( MPI_COMM_WORLD );
if( ocnComm != MPI_COMM_NULL )
{
#ifdef VERBOSE
char outputFileOcn[] = "OcnWithProj.h5m";
ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
#endif
// test projection results for bottomTempProjectedField
if( !no_regression_test )
{
// the same as remap test
// get the temp field on the ocean, and the global ids, and check against the baseline file
// first get GlobalIds from ocn, and fields:
int nverts[3], nelem[3];
ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, nullptr, nullptr, nullptr );
CHECKIERR( ierr, "failed to get ocn mesh info" );
std::vector< int > gidElems;
gidElems.resize( nelem[2] );
std::vector< double > tempElems;
tempElems.resize( nelem[2] );
// get global id storage
const std::string GidStr = "GLOBAL_ID"; // hard coded too
int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
CHECKIERR( ierr, "failed to define global id tag" );
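// entity type 1 selects cells; 0 would select vertices (as for point clouds above)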
int ent_type = 1;
ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
CHECKIERR( ierr, "failed to get global ids" );
ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, bottomTempProjectedField, &nelem[2], &ent_type,
&tempElems[0] );
CHECKIERR( ierr, "failed to get temperature field" );
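// compare the (global id, projected value) pairs against the stored baseline,
// within a 1.e-9 tolerance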
int err_code = 1;
check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
if( 0 == err_code )
std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
}
}
} // end of the send/receive and projection block
if( couComm != MPI_COMM_NULL )
{
ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
CHECKIERR( ierr, "cannot deregister app intx AO" )
}
if( ocnComm != MPI_COMM_NULL )
{
ierr = iMOAB_DeregisterApplication( cmpOcnPID );
CHECKIERR( ierr, "cannot deregister app OCN1" )
}
if( atmComm != MPI_COMM_NULL )
{
ierr = iMOAB_DeregisterApplication( cmpAtmPID );
CHECKIERR( ierr, "cannot deregister app ATM1" )
}
if( couComm != MPI_COMM_NULL )
{
ierr = iMOAB_DeregisterApplication( cplOcnPID );
CHECKIERR( ierr, "cannot deregister app OCNX" )
ierr = iMOAB_DeregisterApplication( cplAtmPID );
CHECKIERR( ierr, "cannot deregister app ATMX" )
}
ierr = iMOAB_Finalize();
CHECKIERR( ierr, "did not finalize iMOAB" )
// free atm coupler group and comm
if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
MPI_Group_free( &joinAtmCouGroup );
if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
// free ocn - coupler group and comm
if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
MPI_Group_free( &joinOcnCouGroup );
if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
MPI_Group_free( &atmPEGroup );
MPI_Group_free( &ocnPEGroup );
MPI_Group_free( &couPEGroup );
MPI_Group_free( &jgroup );
MPI_Finalize();
return 0;
}